source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
image_random-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file image_random-inl.h
* \brief
* \author
*/
#ifndef MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_
#define MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_
#include <algorithm>
#include <cmath>
#include <cstring>
#include <limits>
#include <random>
#include <tuple>
#include <utility>
#include <vector>
#include "mxnet/base.h"
#include "../mxnet_op.h"
#include "../operator_common.h"
#if MXNET_USE_OPENCV
#include <opencv2/opencv.hpp>
#endif // MXNET_USE_OPENCV
namespace mxnet {
namespace op {
namespace image {
using namespace mshadow;
#if MXNET_USE_CUDA
// NOTE: Kernel launch/map was extremely costly.
// Hence, we use separate CUDA kernels for these operators.
// Forward declarations only; the definitions live in the .cu translation unit.

// GPU kernel: HWC->CHW conversion with division by `normalize_factor`
// (see ToTensorOpForward for the dispatch).
template<typename DType, typename T1, typename T2>
void ToTensorImplCUDA(mshadow::Stream<gpu> *s,
                      const T1 input,
                      const T2 output,
                      const int req,
                      const float normalize_factor);

// GPU kernel: per-channel (x - mean) / std over an N x C x H x W buffer.
template<typename DType>
void NormalizeImplCUDA(mshadow::Stream<gpu> *s,
                       const DType *input,
                       DType *output,
                       const int req,
                       const int N,
                       const int C,
                       const int H,
                       const int W,
                       const float mean_d0,
                       const float mean_d1,
                       const float mean_d2,
                       const float std_d0,
                       const float std_d1,
                       const float std_d2);

// GPU kernel: backward of Normalize, in_grad = out_grad / std per channel.
template<typename DType>
void NormalizeBackwardImplCUDA(mshadow::Stream<gpu> *s,
                               const DType *out_grad,
                               DType *in_grad,
                               const int req,
                               const int N,
                               const int C,
                               const int H,
                               const int W,
                               const float std_d0,
                               const float std_d1,
                               const float std_d2);
#endif  // MXNET_USE_CUDA
// Shape and Type inference for image to tensor operator
// Shape inference for `to_tensor`: maps HWC -> CHW (3-D input) or
// NHWC -> NCHW (4-D input). Returns false until the input shape is known.
inline bool ToTensorShape(const nnvm::NodeAttrs& attrs,
                          mxnet::ShapeVector *in_attrs,
                          mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape &ishape = (*in_attrs)[0];
  if (!shape_is_known(ishape)) return false;
  const int nd = ishape.ndim();
  CHECK((nd == 3) || (nd == 4))
    << "Input image must have shape (height, width, channels), or "
    << "(N, height, width, channels) but got " << ishape;
  if (nd == 3) {
    // (h, w, c) -> (c, h, w)
    SHAPE_ASSIGN_CHECK(*out_attrs, 0,
                       mxnet::TShape({ishape[2], ishape[0], ishape[1]}));
  } else {
    // (n, h, w, c) -> (n, c, h, w)
    SHAPE_ASSIGN_CHECK(*out_attrs, 0,
                       mxnet::TShape({ishape[0], ishape[3], ishape[1], ishape[2]}));
  }
  return true;
}
// Type inference for `to_tensor`: the output dtype is always float32,
// regardless of the input dtype. Succeeds once the input dtype is known.
inline bool ToTensorType(const nnvm::NodeAttrs& attrs,
                         std::vector<int> *in_attrs,
                         std::vector<int> *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  TYPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::kFloat32);
  // -1 means "dtype not yet inferred".
  return (*in_attrs)[0] != -1;
}
// Operator Implementation
// CPU kernel for `to_tensor`: converts one HWC image, located at offset
// `step` inside the (possibly batched) buffers, into CHW float output while
// dividing every value by `normalize_factor`.
//   length   - H * W (pixels per channel)
//   channels - number of channels (1 or 3)
// Input is indexed as [pixel, channel] (HWC); output as [channel, pixel] (CHW).
template<typename DType, int req>
inline void ToTensor(float* out_data, const DType* in_data,
                     const int length,
                     const int channels,
                     const float normalize_factor,
                     const int step) {
  // Microsoft Visual C++ compiler does not support omp collapse
#ifdef _MSC_VER
  #pragma omp parallel for
#else
  #pragma omp parallel for collapse(2)
#endif  // _MSC_VER
  for (int c = 0; c < channels; ++c) {
    for (int i = 0; i < length; ++i) {
      // HWC -> CHW transpose fused with the scaling.
      KERNEL_ASSIGN(out_data[step + c*length + i], req,
                    (in_data[step + i*channels + c]) / normalize_factor);
    }
  }
}
// Dispatch wrapper around the ToTensor kernel: resolves the input dtype and
// the write request type, then invokes the typed kernel on blob 0 -> blob 0.
// `step` selects which sample of a batched buffer to convert (0 for 3-D input).
inline void ToTensorImpl(const std::vector<TBlob> &inputs,
                         const std::vector<TBlob> &outputs,
                         const std::vector<OpReqType> &req,
                         const int length,
                         const int channel,
                         const float normalize_factor,
                         const int step) {
  MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
      float* output = outputs[0].dptr<float>();
      DType* input = inputs[0].dptr<DType>();
      ToTensor<DType, req_type>(output, input, length, channel,
                                normalize_factor, step);
    });
  });
}
// Forward pass of `to_tensor`: converts a (N,)H,W,C image into a (N,)C,H,W
// float32 tensor, dividing by 255 (so uint8 input lands in [0, 1]).
// Dispatches to a dedicated CUDA kernel on GPU, otherwise to the OpenMP
// CPU kernel (parallelized over samples for 4-D input).
template<typename xpu>
void ToTensorOpForward(const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const std::vector<TBlob> &inputs,
                       const std::vector<OpReqType> &req,
                       const std::vector<TBlob> &outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  // We do not use temp buffer when performance the operation.
  // Hence, this check is necessary.
  CHECK_EQ(req[0], kWriteTo)
    << "`to_tensor` does not support inplace updates";
  const float normalize_factor = 255.0f;
  if (std::is_same<xpu, gpu>::value) {
#if MXNET_USE_CUDA
    mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
        if (inputs[0].ndim() == 3) {
          // Single image (h, w, c).
          Tensor<gpu, 3, DType> input = inputs[0].get<gpu, 3, DType>(s);
          Tensor<gpu, 3, float> output = outputs[0].get<gpu, 3, float>(s);
          ToTensorImplCUDA<DType, Tensor<gpu, 3, DType>, Tensor<gpu, 3, float>>
            (s, input, output, req_type, normalize_factor);
        } else {
          // Batch of images (n, h, w, c).
          Tensor<gpu, 4, DType> input = inputs[0].get<gpu, 4, DType>(s);
          Tensor<gpu, 4, float> output = outputs[0].get<gpu, 4, float>(s);
          ToTensorImplCUDA<DType, Tensor<gpu, 4, DType>, Tensor<gpu, 4, float>>
            (s, input, output, req_type, normalize_factor);
        }
      });
    });
#else
    LOG(FATAL) << "Compile with USE_CUDA=1 to use ToTensor operator on GPU.";
#endif  // MXNET_USE_CUDA
  } else if (inputs[0].ndim() == 3) {
    // 3D Input - (h, w, c)
    const int length = inputs[0].shape_[0] * inputs[0].shape_[1];
    const int channel = static_cast<int>(inputs[0].shape_[2]);
    const int step = 0;
    ToTensorImpl(inputs, outputs, req, length,
                 channel, normalize_factor, step);
  } else if (inputs[0].ndim() == 4) {
    // 4D input (n, h, w, c)
    const int batch_size = inputs[0].shape_[0];
    const int length = inputs[0].shape_[1] * inputs[0].shape_[2];
    const int channel = static_cast<int>(inputs[0].shape_[3]);
    // `step` is the element count of one sample; sample n starts at n*step.
    const int step = channel * length;
    #pragma omp parallel for
    for (auto n = 0; n < batch_size; ++n) {
      ToTensorImpl(inputs, outputs, req, length, channel,
                   normalize_factor, n*step);
    }
  }
}
// Parameters of the `normalize` operator: per-channel mean/std used as
// out = (in - mean) / std. Each tuple holds either one value (broadcast to
// all channels) or one value per channel.
struct NormalizeParam : public dmlc::Parameter<NormalizeParam> {
  mxnet::Tuple<float> mean;  // per-channel means to subtract
  mxnet::Tuple<float> std;   // per-channel standard deviations to divide by
  DMLC_DECLARE_PARAMETER(NormalizeParam) {
    DMLC_DECLARE_FIELD(mean)
    .set_default(mxnet::Tuple<float> {0.0f, 0.0f, 0.0f, 0.0f})
    .describe("Sequence of means for each channel. "
              "Default value is 0.");
    DMLC_DECLARE_FIELD(std)
    .set_default(mxnet::Tuple<float> {1.0f, 1.0f, 1.0f, 1.0f})
    .describe("Sequence of standard deviations for each channel. "
              "Default value is 1.");
  }
};
// Shape and Type inference for image Normalize operator
// Shape inference
// Output shape equals the input shape. Validates that the channel dimension
// (first for CHW, second for NCHW) is 1 or 3 and that mean/std have either a
// single element or one element per channel.
inline bool NormalizeOpShape(const nnvm::NodeAttrs& attrs,
                             mxnet::ShapeVector *in_attrs,
                             mxnet::ShapeVector *out_attrs) {
  const NormalizeParam &param = nnvm::get<NormalizeParam>(attrs.parsed);
  const auto& dshape = (*in_attrs)[0];
  // NOTE(review): treats ndim() == 0 as "shape unknown"; if unknown ndim is
  // encoded as -1 in this codebase this early-out never fires -- confirm.
  if (!dshape.ndim()) return false;
  CHECK((dshape.ndim() == 3) || (dshape.ndim() == 4))
    << "Input tensor must have shape (channels, height, width), or "
    << "(N, channels, height, width), but got " << dshape;
  int nchannels = 0;
  if (dshape.ndim() == 3) {
    // CHW layout: channels first.
    nchannels = dshape[0];
    CHECK(nchannels == 3 || nchannels == 1)
      << "The first dimension of input tensor must be the channel dimension with "
      << "either 1 or 3 elements, but got input with shape " << dshape;
  } else if (dshape.ndim() == 4) {
    // NCHW layout: channels second.
    nchannels = dshape[1];
    CHECK(nchannels == 3 || nchannels == 1)
      << "The second dimension of input tensor must be the channel dimension with "
      << "either 1 or 3 elements, but got input with shape " << dshape;
  }
  CHECK((param.mean.ndim() == 1) || (param.mean.ndim() == nchannels))
    << "Invalid mean for input with shape " << dshape
    << ". mean must have either 1 or " << nchannels
    << " elements, but got " << param.mean;
  CHECK(param.std.ndim() == 1 || param.std.ndim() == nchannels)
    << "Invalid std for input with shape " << dshape
    << ". std must have either 1 or " << nchannels
    << " elements, but got " << param.std;
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
  return true;
}
// Type Inference
// Normalize is dtype-preserving: the dtype is propagated in both directions
// so that whichever side is known first constrains the other.
inline bool NormalizeOpType(const nnvm::NodeAttrs& attrs,
                            std::vector<int>* in_attrs,
                            std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
  TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
  // -1 means "dtype not yet inferred".
  return out_attrs->at(0) != -1;
}
// CPU kernel: normalizes one CHW image located at offset `step` inside the
// (possibly batched) buffers: out[c, i] = (in[c, i] - mean[c]) / std[c].
//   length - H * W (pixels per channel)
// mean/std are taken by const reference: the 4-D path calls this once per
// sample inside an OpenMP loop, and passing by value copied both vectors on
// every call.
template<typename DType, int req>
inline void Normalize(DType* out_data,
                      const DType* in_data,
                      const int length,
                      const int channels,
                      const int step,
                      const std::vector<float> &mean,
                      const std::vector<float> &std) {
  // Microsoft Visual C++ compiler does not support omp collapse
#ifdef _MSC_VER
  #pragma omp parallel for
#else
  #pragma omp parallel for collapse(2)
#endif  // _MSC_VER
  for (int c = 0; c < channels; ++c) {
    for (int i = 0; i < length; ++i) {
      KERNEL_ASSIGN(out_data[step + c*length + i], req,
                    (in_data[step + c*length + i] - mean[c]) / std[c]);
    }
  }
}
// Dispatch wrapper around the Normalize kernel: resolves dtype and request
// type, then runs the typed kernel on blob 0 -> blob 0.
// mean/std by const reference to avoid copying two vectors per call (this is
// invoked once per sample for batched input).
inline void NormalizeImpl(const std::vector<TBlob> &inputs,
                          const std::vector<TBlob> &outputs,
                          const std::vector<OpReqType> &req,
                          const int length,
                          const int channels,
                          const int step,
                          const std::vector<float> &mean,
                          const std::vector<float> &std) {
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
      DType* input = inputs[0].dptr<DType>();
      DType* output = outputs[0].dptr<DType>();
      Normalize<DType, req_type>(output, input, length, channels, step,
                                 mean, std);
    });
  });
}
// Forward pass of `normalize`: per-channel (x - mean) / std over a CHW or
// NCHW tensor. A single-element mean/std is broadcast to all (up to 3)
// channels. Dispatches to a CUDA kernel on GPU, otherwise to the OpenMP CPU
// kernel (parallelized over samples for 4-D input).
template<typename xpu>
void NormalizeOpForward(const nnvm::NodeAttrs &attrs,
                        const OpContext &ctx,
                        const std::vector<TBlob> &inputs,
                        const std::vector<OpReqType> &req,
                        const std::vector<TBlob> &outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  const NormalizeParam &param = nnvm::get<NormalizeParam>(attrs.parsed);
  // Mean and Std can be 1 or 3D only.
  // Expand a scalar mean/std to one value per channel.
  std::vector<float> mean(3);
  std::vector<float> std(3);
  if (param.mean.ndim() == 1) {
    mean[0] = mean[1] = mean[2] = param.mean[0];
  } else {
    mean[0] = param.mean[0];
    mean[1] = param.mean[1];
    mean[2] = param.mean[2];
  }
  if (param.std.ndim() == 1) {
    std[0] = std[1] = std[2] = param.std[0];
  } else {
    std[0] = param.std[0];
    std[1] = param.std[1];
    std[2] = param.std[2];
  }
  if (std::is_same<xpu, gpu>::value) {
#if MXNET_USE_CUDA
    mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
        int N, C, H, W;
        DType *input = nullptr;
        DType *output = nullptr;
        if (inputs[0].ndim() == 3) {
          // Single image: treated as a batch of one.
          N = 1;
          C = static_cast<int>(inputs[0].shape_[0]);
          H = static_cast<int>(inputs[0].shape_[1]);
          W = static_cast<int>(inputs[0].shape_[2]);
          input = (inputs[0].get<gpu, 3, DType>(s)).dptr_;
          output = (outputs[0].get<gpu, 3, DType>(s)).dptr_;
        } else {
          N = static_cast<int>(inputs[0].shape_[0]);
          C = static_cast<int>(inputs[0].shape_[1]);
          H = static_cast<int>(inputs[0].shape_[2]);
          W = static_cast<int>(inputs[0].shape_[3]);
          input = (inputs[0].get<gpu, 4, DType>(s)).dptr_;
          output = (outputs[0].get<gpu, 4, DType>(s)).dptr_;
        }
        NormalizeImplCUDA<DType>(s, input, output, req_type,
                                 N, C, H, W,
                                 mean[0], mean[1], mean[2],
                                 std[0], std[1], std[2]);
      });
    });
#else
    LOG(FATAL) << "Compile with USE_CUDA=1 to use Normalize operator on GPU.";
#endif  // MXNET_USE_CUDA
  } else if (inputs[0].ndim() == 3) {
    // 3D input (c, h, w)
    const int length = inputs[0].shape_[1] * inputs[0].shape_[2];
    const int channel = static_cast<int>(inputs[0].shape_[0]);
    const int step = 0;
    NormalizeImpl(inputs, outputs, req, length, channel, step, mean, std);
  } else if (inputs[0].ndim() == 4) {
    // 4D input (n, c, h, w)
    const int batch_size = inputs[0].shape_[0];
    const int length = inputs[0].shape_[2] * inputs[0].shape_[3];
    const int channel = static_cast<int>(inputs[0].shape_[1]);
    // `step` is the element count of one sample; sample n starts at n*step.
    const int step = channel * length;
    #pragma omp parallel for
    for (auto n = 0; n < batch_size; ++n) {
      NormalizeImpl(inputs, outputs, req, length, channel, n*step, mean, std);
    }
  }
}
// Backward function
// CPU kernel for the Normalize gradient: in_grad[c, i] = out_grad[c, i] / std[c]
// (the subtraction of mean has gradient 1, so only the scale remains).
// `std` is taken by const reference to avoid a per-call vector copy (the 4-D
// path calls this once per sample inside an OpenMP loop).
template<typename DType, int req>
inline void NormalizeBackward(const DType* out_grad,
                              DType* in_grad,
                              const int length,
                              const int channels,
                              const int step,
                              const std::vector<float> &std) {
  // Microsoft Visual C++ compiler does not support omp collapse
#ifdef _MSC_VER
  #pragma omp parallel for
#else
  #pragma omp parallel for collapse(2)
#endif  // _MSC_VER
  for (int c = 0; c < channels; ++c) {
    for (int i = 0; i < length; ++i) {
      KERNEL_ASSIGN(in_grad[step + c*length + i], req,
                    out_grad[step + c*length + i] * (1.0 / std[c]));
    }
  }
}
// Dispatch wrapper for the Normalize backward kernel. inputs[0] is out_grad,
// outputs[0] is in_grad. `std` by const reference to avoid a per-call copy.
inline void NormalizeBackwardImpl(const std::vector<TBlob> &inputs,
                                  const std::vector<TBlob> &outputs,
                                  const std::vector<OpReqType> &req,
                                  const int length,
                                  const int channels,
                                  const int step,
                                  const std::vector<float> &std) {
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
      DType* out_grad = inputs[0].dptr<DType>();
      DType* in_grad = outputs[0].dptr<DType>();
      NormalizeBackward<DType, req_type>(out_grad, in_grad, length,
                                         channels, step, std);
    });
  });
}
// Backward pass of `normalize`: scales the incoming gradient by 1/std per
// channel. inputs[0] is out_grad; inputs[1] is the forward input, used only
// for its shape. Dispatches to CUDA on GPU, OpenMP CPU kernel otherwise.
template<typename xpu>
void NormalizeOpBackward(const nnvm::NodeAttrs &attrs,
                         const OpContext &ctx,
                         const std::vector<TBlob> &inputs,
                         const std::vector<OpReqType> &req,
                         const std::vector<TBlob> &outputs) {
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  const NormalizeParam &param = nnvm::get<NormalizeParam>(attrs.parsed);
  // Std can be 1 or 3D only.
  // Expand a scalar std to one value per channel.
  std::vector<float> std(3);
  if (param.std.ndim() == 1) {
    std[0] = std[1] = std[2] = param.std[0];
  } else {
    std[0] = param.std[0];
    std[1] = param.std[1];
    std[2] = param.std[2];
  }
  // Note: inputs[0] is out_grad
  const TBlob& in_data = inputs[1];
  if (std::is_same<xpu, gpu>::value) {
#if MXNET_USE_CUDA
    mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
        int N, C, H, W;
        DType *in_grad = nullptr;
        DType *out_grad = nullptr;
        if (in_data.ndim() == 3) {
          // Single image: treated as a batch of one.
          N = 1;
          C = static_cast<int>(in_data.shape_[0]);
          H = static_cast<int>(in_data.shape_[1]);
          W = static_cast<int>(in_data.shape_[2]);
          out_grad = (inputs[0].get<gpu, 3, DType>(s)).dptr_;
          in_grad = (outputs[0].get<gpu, 3, DType>(s)).dptr_;
        } else {
          N = static_cast<int>(in_data.shape_[0]);
          C = static_cast<int>(in_data.shape_[1]);
          H = static_cast<int>(in_data.shape_[2]);
          W = static_cast<int>(in_data.shape_[3]);
          out_grad = (inputs[0].get<gpu, 4, DType>(s)).dptr_;
          in_grad = (outputs[0].get<gpu, 4, DType>(s)).dptr_;
        }
        NormalizeBackwardImplCUDA<DType>(s, out_grad, in_grad, req_type,
                                         N, C, H, W,
                                         std[0], std[1], std[2]);
      });
    });
#else
    LOG(FATAL) << "Compile with USE_CUDA=1 to use Normalize backward operator on GPU.";
#endif  // MXNET_USE_CUDA
  } else if (in_data.ndim() == 3) {
    // 3D input (c, h, w)
    const int length = in_data.shape_[1] * in_data.shape_[2];
    const int channel = static_cast<int>(in_data.shape_[0]);
    const int step = 0;
    NormalizeBackwardImpl(inputs, outputs, req, length, channel, step, std);
  } else if (in_data.ndim() == 4) {
    // 4D input (n, c, h, w)
    const int batch_size = in_data.shape_[0];
    const int length = in_data.shape_[2] * in_data.shape_[3];
    const int channel = static_cast<int>(in_data.shape_[1]);
    // `step` is the element count of one sample; sample n starts at n*step.
    const int step = channel * length;
    #pragma omp parallel for
    for (auto n = 0; n < batch_size; ++n) {
      NormalizeBackwardImpl(inputs, outputs, req, length, channel, n*step, std);
    }
  }
}
// Converts a float to the destination type. The generic version is a plain
// static_cast; the uint8_t specialization clamps to [0, 255] first so that
// out-of-range pixel values saturate instead of wrapping.
template<typename DType>
inline DType saturate_cast(const float& src) {
  return static_cast<DType>(src);
}
template<>
inline uint8_t saturate_cast(const float& src) {
  const float lower_bounded = std::max(src, 0.f);
  return static_cast<uint8_t>(std::min(lower_bounded, 255.f));
}
// Shape inference shared by the augmentation ops: input must be a single
// HWC image with 1 or 3 channels; the output shape equals the input shape.
inline bool ImageShape(const nnvm::NodeAttrs& attrs,
                       mxnet::ShapeVector *in_attrs,
                       mxnet::ShapeVector *out_attrs) {
  const mxnet::TShape& ishape = (*in_attrs)[0];
  CHECK_EQ(ishape.ndim(), 3)
    << "Input image must have shape (height, width, channels), but got " << ishape;
  const auto channels = ishape[ishape.ndim() - 1];
  CHECK(channels == 1 || channels == 3)
    << "The last dimension of input image must be the channel dimension with "
    << "either 1 or 3 elements, but got input with shape " << ishape;
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, ishape);
  return true;
}
// Reverses `src` along `axis` into `dst` (works in place when src == dst).
// head = product of dims before axis, tail = product of dims after it; pairs
// of slices (j, mid-1-j) are swapped.
template<typename DType, int axis>
void FlipImpl(const mxnet::TShape &shape, DType *src, DType *dst) {
  int head = 1, mid = shape[axis], tail = 1;
  for (int i = 0; i < axis; ++i) head *= shape[i];
  for (int i = axis+1; i < shape.ndim(); ++i) tail *= shape[i];
  for (int i = 0; i < head; ++i) {
    for (int j = 0; j < (mid >> 1); ++j) {
      int idx1 = (i*mid + j) * tail;
      int idx2 = idx1 + (mid-(j << 1)-1) * tail;
      for (int k = 0; k < tail; ++k, ++idx1, ++idx2) {
        DType tmp = src[idx1];
        dst[idx1] = src[idx2];
        dst[idx2] = tmp;
      }
    }
    // Bug fix: for an odd-sized axis written out-of-place, the middle slice
    // was never written, leaving that part of dst uninitialized.
    if ((mid & 1) && src != dst) {
      int idx = (i*mid + (mid >> 1)) * tail;
      std::copy(src + idx, src + idx + tail, dst + idx);
    }
  }
}
// Deterministic horizontal flip: reverses the width axis (axis 1 of HWC).
inline void FlipLeftRight(const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const std::vector<TBlob> &inputs,
                          const std::vector<OpReqType> &req,
                          const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    FlipImpl<DType, 1>(inputs[0].shape_, inputs[0].dptr<DType>(),
                       outputs[0].dptr<DType>());
  });
}
// Deterministic vertical flip: reverses the height axis (axis 0 of HWC).
inline void FlipTopBottom(const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const std::vector<TBlob> &inputs,
                          const std::vector<OpReqType> &req,
                          const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    FlipImpl<DType, 0>(inputs[0].shape_, inputs[0].dptr<DType>(),
                       outputs[0].dptr<DType>());
  });
}
// Parameter of the random-flip operators: the probability of flipping.
struct RandomFlipParam : public dmlc::Parameter<RandomFlipParam> {
  float p;  // flip probability, expected in [0, 1]
  DMLC_DECLARE_PARAMETER(RandomFlipParam) {
    DMLC_DECLARE_FIELD(p)
    .set_default(0.5f)
    // Fixed typo in user-facing description ("probablity").
    .describe("The probability of flipping the image.");
  }
};
// Flips the image horizontally with probability `p`; otherwise copies the
// input through unchanged (the copy is skipped when the op runs in place).
inline void RandomFlipLeftRight(
    const nnvm::NodeAttrs &attrs,
    const OpContext &ctx,
    const std::vector<TBlob> &inputs,
    const std::vector<OpReqType> &req,
    const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomFlipParam &param = nnvm::get<RandomFlipParam>(attrs.parsed);
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s);
  // Bug fix: a standard-normal sample compared against `p` does not flip with
  // probability p (e.g. p=0.5 flipped ~69% of the time, since
  // P(N(0,1) <= 0.5) ~= 0.69). A uniform sample on [0, 1) gives P(flip) == p
  // as documented by RandomFlipParam.
  std::uniform_real_distribution<float> dist(0.f, 1.f);
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    if (dist(prnd->GetRndEngine()) > param.p) {
      // No flip: pass the input through.
      if (outputs[0].dptr_ != inputs[0].dptr_) {
        std::memcpy(outputs[0].dptr_, inputs[0].dptr_, inputs[0].Size() * sizeof(DType));
      }
    } else {
      FlipImpl<DType, 1>(inputs[0].shape_, inputs[0].dptr<DType>(),
                         outputs[0].dptr<DType>());
    }
  });
}
// Flips the image vertically with probability `p`; otherwise copies the
// input through unchanged (the copy is skipped when the op runs in place).
inline void RandomFlipTopBottom(
    const nnvm::NodeAttrs &attrs,
    const OpContext &ctx,
    const std::vector<TBlob> &inputs,
    const std::vector<OpReqType> &req,
    const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomFlipParam &param = nnvm::get<RandomFlipParam>(attrs.parsed);
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s);
  // Bug fix: a standard-normal sample compared against `p` does not flip with
  // probability p. A uniform sample on [0, 1) gives P(flip) == p as
  // documented by RandomFlipParam (mirrors RandomFlipLeftRight).
  std::uniform_real_distribution<float> dist(0.f, 1.f);
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    if (dist(prnd->GetRndEngine()) > param.p) {
      // No flip: pass the input through.
      if (outputs[0].dptr_ != inputs[0].dptr_) {
        std::memcpy(outputs[0].dptr_, inputs[0].dptr_, inputs[0].Size() * sizeof(DType));
      }
    } else {
      FlipImpl<DType, 0>(inputs[0].shape_, inputs[0].dptr<DType>(),
                         outputs[0].dptr<DType>());
    }
  });
}
// Shared parameters of the random enhancement ops (brightness, contrast,
// saturation, hue): the enhancement factor is drawn uniformly from
// [min_factor, max_factor].
struct RandomEnhanceParam : public dmlc::Parameter<RandomEnhanceParam> {
  float min_factor;  // lower bound of the sampled factor (>= 0)
  float max_factor;  // upper bound of the sampled factor (>= 0)
  DMLC_DECLARE_PARAMETER(RandomEnhanceParam) {
    DMLC_DECLARE_FIELD(min_factor)
    .set_lower_bound(0.0)
    .describe("Minimum factor.");
    DMLC_DECLARE_FIELD(max_factor)
    .set_lower_bound(0.0)
    .describe("Maximum factor.");
  }
};
inline void AdjustBrightnessImpl(const float& alpha_b,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
int length = inputs[0].Size();
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
DType* output = outputs[0].dptr<DType>();
DType* input = inputs[0].dptr<DType>();
for (int l = 0; l < length; ++l) {
float val = static_cast<float>(input[l]) * alpha_b;
output[l] = saturate_cast<DType>(val);
}
});
}
// Draws a brightness factor uniformly from [min_factor, max_factor] and
// applies it via AdjustBrightnessImpl.
inline void RandomBrightness(const nnvm::NodeAttrs &attrs,
                             const OpContext &ctx,
                             const std::vector<TBlob> &inputs,
                             const std::vector<OpReqType> &req,
                             const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomEnhanceParam &cfg = nnvm::get<RandomEnhanceParam>(attrs.parsed);
  Stream<cpu> *stream = ctx.get_stream<cpu>();
  Random<cpu> *rng = ctx.requested[0].get_random<cpu, float>(stream);
  std::uniform_real_distribution<float> sample(cfg.min_factor, cfg.max_factor);
  const float factor = sample(rng->GetRndEngine());
  AdjustBrightnessImpl(factor, ctx, inputs, req, outputs);
}
// Blends the image with its mean grayscale value:
//   out = in * alpha_c + (1 - alpha_c) * mean_gray
// alpha_c > 1 increases contrast, alpha_c < 1 decreases it.
inline void AdjustContrastImpl(const float& alpha_c,
                               const OpContext &ctx,
                               const std::vector<TBlob> &inputs,
                               const std::vector<OpReqType> &req,
                               const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  // ITU-R BT.601 luma weights for R, G, B.
  static const float coef[] = { 0.299f, 0.587f, 0.114f };
  int length = inputs[0].shape_[0] * inputs[0].shape_[1];
  int nchannels = inputs[0].shape_[2];
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    DType* output = outputs[0].dptr<DType>();
    DType* input = inputs[0].dptr<DType>();
    // Mean grayscale intensity over all pixels.
    float sum = 0.f;
    if (nchannels > 1) {
      // Multi-channel implies 3 channels here (enforced by ImageShape).
      for (int l = 0; l < length; ++l) {
        for (int c = 0; c < 3; ++c) sum += input[l*3 + c] * coef[c];
      }
    } else {
      for (int l = 0; l < length; ++l) sum += input[l];
    }
    float gray_mean = sum / static_cast<float>(length);
    float beta = (1 - alpha_c) * gray_mean;
    for (int l = 0; l < length * nchannels; ++l) {
      float val = input[l] * alpha_c + beta;
      output[l] = saturate_cast<DType>(val);
    }
  });
}
// Draws a contrast factor uniformly from [min_factor, max_factor] and
// applies it via AdjustContrastImpl.
inline void RandomContrast(const nnvm::NodeAttrs &attrs,
                           const OpContext &ctx,
                           const std::vector<TBlob> &inputs,
                           const std::vector<OpReqType> &req,
                           const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomEnhanceParam &cfg = nnvm::get<RandomEnhanceParam>(attrs.parsed);
  Stream<cpu> *stream = ctx.get_stream<cpu>();
  Random<cpu> *rng = ctx.requested[0].get_random<cpu, real_t>(stream);
  std::uniform_real_distribution<float> sample(cfg.min_factor, cfg.max_factor);
  const float factor = sample(rng->GetRndEngine());
  AdjustContrastImpl(factor, ctx, inputs, req, outputs);
}
// Blends each pixel with its own grayscale value:
//   out = in * alpha_s + (1 - alpha_s) * gray(pixel)
// alpha_s = 0 produces a grayscale image, 1 leaves it unchanged.
// Grayscale (1-channel) input is copied through unchanged.
inline void AdjustSaturationImpl(const float& alpha_s,
                                 const OpContext &ctx,
                                 const std::vector<TBlob> &inputs,
                                 const std::vector<OpReqType> &req,
                                 const std::vector<TBlob> &outputs) {
  // ITU-R BT.601 luma weights for R, G, B.
  static const float coef[] = { 0.299f, 0.587f, 0.114f };
  int length = inputs[0].shape_[0] * inputs[0].shape_[1];
  int nchannels = inputs[0].shape_[2];
  float alpha_o = 1.f - alpha_s;
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    DType* output = outputs[0].dptr<DType>();
    DType* input = inputs[0].dptr<DType>();
    if (nchannels == 1) {
      // No chroma to adjust; pass through.
      for (int l = 0; l < length; ++l) output[l] = input[l];
      return;
    }
    for (int l = 0; l < length; ++l) {
      float gray = 0.f;
      // Bug fix: the weighted channels must be accumulated (`+=`); plain
      // assignment kept only the blue term, computing a wrong gray value.
      for (int c = 0; c < 3; ++c) {
        gray += input[l*3 + c] * coef[c];
      }
      gray *= alpha_o;
      for (int c = 0; c < 3; ++c) {
        float val = gray + input[l*3 + c] * alpha_s;
        output[l*3 + c] = saturate_cast<DType>(val);
      }
    }
  });
}
// Draws a saturation factor uniformly from [min_factor, max_factor] and
// applies it via AdjustSaturationImpl.
inline void RandomSaturation(const nnvm::NodeAttrs &attrs,
                             const OpContext &ctx,
                             const std::vector<TBlob> &inputs,
                             const std::vector<OpReqType> &req,
                             const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomEnhanceParam &cfg = nnvm::get<RandomEnhanceParam>(attrs.parsed);
  Stream<cpu> *stream = ctx.get_stream<cpu>();
  Random<cpu> *rng = ctx.requested[0].get_random<cpu, real_t>(stream);
  std::uniform_real_distribution<float> sample(cfg.min_factor, cfg.max_factor);
  const float factor = sample(rng->GetRndEngine());
  AdjustSaturationImpl(factor, ctx, inputs, req, outputs);
}
// Converts one RGB pixel (channel values in [0, 255]) to HLS.
// H is in degrees [0, 360); L and S are in [0, 1]. An (almost) achromatic
// pixel gets H = S = 0.
inline void RGB2HLSConvert(const float& src_r,
                           const float& src_g,
                           const float& src_b,
                           float *dst_h,
                           float *dst_l,
                           float *dst_s) {
  const float r = src_r / 255.f;
  const float g = src_g / 255.f;
  const float b = src_b / 255.f;
  const float vmax = std::fmax(r, std::fmax(g, b));
  const float vmin = std::fmin(r, std::fmin(g, b));
  float diff = vmax - vmin;
  const float l = (vmax + vmin) * 0.5f;
  float h = 0.f;
  float s = 0.f;
  if (diff > std::numeric_limits<float>::epsilon()) {
    // Saturation normalization depends on the lightness half-range.
    if (l < 0.5f) {
      s = diff / (vmax + vmin);
    } else {
      s = diff / (2.0f - vmax - vmin);
    }
    // Hue sector is selected by the dominant channel.
    diff = 60.f / diff;
    if (vmax == r) {
      h = (g - b) * diff;
    } else if (vmax == g) {
      h = (b - r) * diff + 120.f;
    } else {
      h = (r - g) * diff + 240.f;
    }
    if (h < 0.f) h += 360.f;
  }
  *dst_h = h;
  *dst_l = l;
  *dst_s = s;
}
// Converts one HLS pixel back to RGB with channel values in [0, 255].
// H is in degrees (any value; wrapped into [0, 360)); L and S are in [0, 1].
// S == 0 yields an achromatic pixel with all channels equal to L * 255.
inline void HLS2RGBConvert(const float& src_h,
                           const float& src_l,
                           const float& src_s,
                           float *dst_r,
                           float *dst_g,
                           float *dst_b) {
  // Per hue sector, which tab[] entry feeds the B, G, R outputs.
  static const int c_HlsSectorData[6][3] = {
    { 1, 3, 0 },
    { 1, 0, 2 },
    { 3, 0, 1 },
    { 0, 2, 1 },
    { 0, 1, 3 },
    { 2, 1, 0 }
  };
  float r = src_l;
  float g = src_l;
  float b = src_l;
  if (src_s != 0) {
    float p2;
    if (src_l <= 0.5f) {
      p2 = src_l * (1 + src_s);
    } else {
      p2 = src_l + src_s - src_l * src_s;
    }
    const float p1 = 2 * src_l - p2;
    // Wrap hue into [0, 6) sectors of 60 degrees each.
    float h = src_h * (1.f / 60.f);
    while (h < 0) h += 6;
    while (h >= 6) h -= 6;
    const int sector = static_cast<int>(h);
    h -= sector;
    float tab[4];
    tab[0] = p2;
    tab[1] = p1;
    tab[2] = p1 + (p2 - p1) * (1 - h);
    tab[3] = p1 + (p2 - p1) * h;
    b = tab[c_HlsSectorData[sector][0]];
    g = tab[c_HlsSectorData[sector][1]];
    r = tab[c_HlsSectorData[sector][2]];
  }
  *dst_b = b * 255.f;
  *dst_g = g * 255.f;
  *dst_r = r * 255.f;
}
// Shifts every pixel's hue by `alpha` (a fraction of a full 360-degree turn)
// by round-tripping each RGB pixel through HLS space.
// No-op for single-channel (grayscale) images.
inline void AdjustHueImpl(float alpha,
                          const OpContext &ctx,
                          const std::vector<TBlob> &inputs,
                          const std::vector<OpReqType> &req,
                          const std::vector<TBlob> &outputs) {
  int length = inputs[0].shape_[0] * inputs[0].shape_[1];
  if (inputs[0].shape_[2] == 1) return;
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    DType* input = inputs[0].dptr<DType>();
    DType* output = outputs[0].dptr<DType>();
    // Pointers advance 3 values (R, G, B) per pixel.
    for (int i = 0; i < length; ++i) {
      float h, l, s;
      float r = static_cast<float>(*(input++));
      float g = static_cast<float>(*(input++));
      float b = static_cast<float>(*(input++));
      RGB2HLSConvert(r, g, b, &h, &l, &s);
      h += alpha * 360.f;
      HLS2RGBConvert(h, l, s, &r, &g, &b);
      *(output++) = saturate_cast<DType>(r);
      *(output++) = saturate_cast<DType>(g);
      *(output++) = saturate_cast<DType>(b);
    }
  });
}
// Draws a hue shift uniformly from [min_factor, max_factor] and applies it
// via AdjustHueImpl.
inline void RandomHue(const nnvm::NodeAttrs &attrs,
                      const OpContext &ctx,
                      const std::vector<TBlob> &inputs,
                      const std::vector<OpReqType> &req,
                      const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomEnhanceParam &cfg = nnvm::get<RandomEnhanceParam>(attrs.parsed);
  Stream<cpu> *stream = ctx.get_stream<cpu>();
  Random<cpu> *rng = ctx.requested[0].get_random<cpu, real_t>(stream);
  std::uniform_real_distribution<float> sample(cfg.min_factor, cfg.max_factor);
  const float shift = sample(rng->GetRndEngine());
  AdjustHueImpl(shift, ctx, inputs, req, outputs);
}
// Parameters of the combined color-jitter op. Each field is the maximum
// jitter magnitude for its attribute; 0 disables that jitter entirely
// (see RandomColorJitter).
struct RandomColorJitterParam : public dmlc::Parameter<RandomColorJitterParam> {
  float brightness;  // max brightness jitter; factor drawn from 1 +/- brightness
  float contrast;    // max contrast jitter; factor drawn from 1 +/- contrast
  float saturation;  // max saturation jitter; factor drawn from 1 +/- saturation
  float hue;         // max hue jitter; shift drawn from +/- hue
  DMLC_DECLARE_PARAMETER(RandomColorJitterParam) {
    DMLC_DECLARE_FIELD(brightness)
    .describe("How much to jitter brightness.");
    DMLC_DECLARE_FIELD(contrast)
    .describe("How much to jitter contrast.");
    DMLC_DECLARE_FIELD(saturation)
    .describe("How much to jitter saturation.");
    DMLC_DECLARE_FIELD(hue)
    .describe("How much to jitter hue.");
  }
};
// Applies the enabled jitters (brightness, contrast, saturation, hue) in a
// randomly shuffled order. The first applied jitter reads from `inputs`; all
// subsequent ones read from `outputs` (tracked by `flag`), chaining the
// transforms in place. A jitter whose magnitude is 0 is skipped.
inline void RandomColorJitter(const nnvm::NodeAttrs &attrs,
                              const OpContext &ctx,
                              const std::vector<TBlob> &inputs,
                              const std::vector<OpReqType> &req,
                              const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomColorJitterParam &param = nnvm::get<RandomColorJitterParam>(attrs.parsed);
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s);
  int order[4] = {0, 1, 2, 3};
  std::shuffle(order, order + 4, prnd->GetRndEngine());
  // flag == true once at least one jitter has written to outputs.
  bool flag = false;
  for (int i = 0; i < 4; ++i) {
    switch (order[i]) {
      case 0:
        if (param.brightness > 0) {
          // Multiplicative factor in [1 - brightness, 1 + brightness].
          float alpha_b = 1.0 + std::uniform_real_distribution<float>(
              -param.brightness, param.brightness)(prnd->GetRndEngine());
          AdjustBrightnessImpl(alpha_b, ctx, flag ? outputs : inputs, req, outputs);
          flag = true;
        }
        break;
      case 1:
        if (param.contrast > 0) {
          float alpha_c = 1.0 + std::uniform_real_distribution<float>(
              -param.contrast, param.contrast)(prnd->GetRndEngine());
          AdjustContrastImpl(alpha_c, ctx, flag ? outputs : inputs, req, outputs);
          flag = true;
        }
        break;
      case 2:
        if (param.saturation > 0) {
          float alpha_s = 1.f + std::uniform_real_distribution<float>(
              -param.saturation, param.saturation)(prnd->GetRndEngine());
          AdjustSaturationImpl(alpha_s, ctx, flag ? outputs : inputs, req, outputs);
          flag = true;
        }
        break;
      case 3:
        if (param.hue > 0) {
          // Additive hue shift in [-hue, hue] (fraction of a full turn).
          float alpha_h = std::uniform_real_distribution<float>(
              -param.hue, param.hue)(prnd->GetRndEngine());
          AdjustHueImpl(alpha_h, ctx, flag ? outputs : inputs, req, outputs);
          flag = true;
        }
        break;
    }
  }
}
// Parameter of the deterministic `adjust_lighting` op: explicit PCA-noise
// coefficients for the three color channels (see AdjustLightingImpl).
struct AdjustLightingParam : public dmlc::Parameter<AdjustLightingParam> {
  mxnet::Tuple<float> alpha;  // per-channel lighting coefficients (R, G, B)
  DMLC_DECLARE_PARAMETER(AdjustLightingParam) {
    DMLC_DECLARE_FIELD(alpha)
    .describe("The lighting alphas for the R, G, B channels.");
  }
};
// Parameter of the random `random_lighting` op: the per-channel PCA-noise
// coefficients are drawn from N(0, alpha_std) (see RandomLighting).
struct RandomLightingParam : public dmlc::Parameter<RandomLightingParam> {
  float alpha_std;  // standard deviation of the sampled lighting noise
  DMLC_DECLARE_PARAMETER(RandomLightingParam) {
    DMLC_DECLARE_FIELD(alpha_std)
    .set_default(0.05)
    .describe("Level of the lighting noise.");
  }
};
// Adds PCA-based lighting noise: each pixel gets a constant per-channel
// offset pca = eig * alpha, where `eig` holds eigenvalue-scaled eigenvectors
// of an RGB covariance matrix. NOTE(review): the constants appear to be the
// standard ImageNet PCA statistics (AlexNet-style lighting augmentation) --
// confirm against the original source.
// No-op for single-channel (grayscale) images.
inline void AdjustLightingImpl(const mxnet::Tuple<float>& alpha,
                               const OpContext &ctx,
                               const std::vector<TBlob> &inputs,
                               const std::vector<OpReqType> &req,
                               const std::vector<TBlob> &outputs) {
  // Rows: R, G, B output channels; columns scaled by eigenvalues
  // {55.46, 4.794, 1.148}.
  static const float eig[3][3] = {
    { 55.46 * -0.5675, 4.794 * 0.7192, 1.148 * 0.4009 },
    { 55.46 * -0.5808, 4.794 * -0.0045, 1.148 * -0.8140 },
    { 55.46 * -0.5836, 4.794 * -0.6948, 1.148 * 0.4203 }
  };
  int length = inputs[0].shape_[0] * inputs[0].shape_[1];
  int channels = inputs[0].shape_[2];
  if (channels == 1) return;
  // Per-channel offsets: pca = eig * alpha (matrix-vector product).
  float pca_r = eig[0][0] * alpha[0] + eig[0][1] * alpha[1] + eig[0][2] * alpha[2];
  float pca_g = eig[1][0] * alpha[0] + eig[1][1] * alpha[1] + eig[1][2] * alpha[2];
  float pca_b = eig[2][0] * alpha[0] + eig[2][1] * alpha[1] + eig[2][2] * alpha[2];
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    DType* output = outputs[0].dptr<DType>();
    DType* input = inputs[0].dptr<DType>();
    for (int i = 0; i < length; i++) {
      int base_ind = 3 * i;
      float in_r = static_cast<float>(input[base_ind]);
      float in_g = static_cast<float>(input[base_ind + 1]);
      float in_b = static_cast<float>(input[base_ind + 2]);
      output[base_ind] = saturate_cast<DType>(in_r + pca_r);
      output[base_ind + 1] = saturate_cast<DType>(in_g + pca_g);
      output[base_ind + 2] = saturate_cast<DType>(in_b + pca_b);
    }
  });
}
// Deterministic lighting adjustment: the caller supplies the per-channel
// alphas directly through AdjustLightingParam.
inline void AdjustLighting(const nnvm::NodeAttrs &attrs,
                           const OpContext &ctx,
                           const std::vector<TBlob> &inputs,
                           const std::vector<OpReqType> &req,
                           const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const AdjustLightingParam &cfg = nnvm::get<AdjustLightingParam>(attrs.parsed);
  AdjustLightingImpl(cfg.alpha, ctx, inputs, req, outputs);
}
// Randomized lighting adjustment: draws one N(0, alpha_std) coefficient per
// color channel and delegates to AdjustLightingImpl.
inline void RandomLighting(const nnvm::NodeAttrs &attrs,
                           const OpContext &ctx,
                           const std::vector<TBlob> &inputs,
                           const std::vector<OpReqType> &req,
                           const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomLightingParam &cfg = nnvm::get<RandomLightingParam>(attrs.parsed);
  Stream<cpu> *stream = ctx.get_stream<cpu>();
  Random<cpu> *rng = ctx.requested[0].get_random<cpu, float>(stream);
  std::normal_distribution<float> noise(0, cfg.alpha_std);
  // Draw order (R, then G, then B) is kept for RNG reproducibility.
  const float alpha_r = noise(rng->GetRndEngine());
  const float alpha_g = noise(rng->GetRndEngine());
  const float alpha_b = noise(rng->GetRndEngine());
  AdjustLightingImpl({alpha_r, alpha_g, alpha_b}, ctx, inputs, req, outputs);
}
// Registers a deterministic image-augmentation operator: one input, one
// output, in-place capable, shape-checked by ImageShape, dtype-preserving,
// and with an identity gradient (the augmentation is treated as a data
// transform, so the gradient is a plain copy).
#define MXNET_REGISTER_IMAGE_AUG_OP(name) \
  NNVM_REGISTER_OP(name) \
  .set_num_inputs(1) \
  .set_num_outputs(1) \
  .set_attr<nnvm::FInplaceOption>("FInplaceOption", \
    [](const NodeAttrs& attrs){ \
      return std::vector<std::pair<int, int> >{{0, 0}}; \
    }) \
  .set_attr<mxnet::FInferShape>("FInferShape", ImageShape) \
  .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>) \
  .set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseNone{ "_copy" }) \
  .add_argument("data", "NDArray-or-Symbol", "The input.")

// Same as MXNET_REGISTER_IMAGE_AUG_OP, but additionally requests the random
// resource needed by the randomized augmentations.
#define MXNET_REGISTER_IMAGE_RND_AUG_OP(name) \
  MXNET_REGISTER_IMAGE_AUG_OP(name) \
  .set_attr<FResourceRequest>("FResourceRequest", \
    [](const NodeAttrs& attrs) { \
      return std::vector<ResourceRequest>{ResourceRequest::kRandom}; \
    })
} // namespace image
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_
|
boundary_matrix.h | /* Copyright 2013 IST Austria
Contributed by: Ulrich Bauer, Michael Kerber, Jan Reininghaus
This file is part of PHAT.
PHAT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PHAT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with PHAT. If not, see <http://www.gnu.org/licenses/>. */
#pragma once
#include <phat/helpers/misc.h>
#include <phat/representations/bit_tree_pivot_column.h>
// interface class for the main data structure -- implementations of the interface can be found in ./representations
namespace phat {
// Interface class for the main data structure -- implementations of the
// interface can be found in ./representations.  All column storage and the
// complexity of the basic operations are delegated to @Representation.
template< class Representation = bit_tree_pivot_column >
class boundary_matrix
{
protected:
    Representation rep;

// interface functions -- actual implementation and complexity depends on chosen @Representation template
public:
    // get overall number of columns in boundary_matrix
    index get_num_cols() const { return rep._get_num_cols(); }

    // set overall number of columns in boundary_matrix
    void set_num_cols( index nr_of_columns ) { rep._set_num_cols( nr_of_columns ); }

    // get dimension of given index
    dimension get_dim( index idx ) const { return rep._get_dim( idx ); }

    // set dimension of given index
    void set_dim( index idx, dimension dim ) { rep._set_dim( idx, dim ); }

    // replaces content of @col with boundary of given index
    void get_col( index idx, column& col ) const { col.clear(); rep._get_col( idx, col ); }

    // set column @idx to the values contained in @col
    void set_col( index idx, const column& col ) { rep._set_col( idx, col ); }

    // true iff boundary of given column is empty
    bool is_empty( index idx ) const { return rep._is_empty( idx ); }

    // largest index of given column (new name for lowestOne()) -- NOT thread-safe
    index get_max_index( index idx ) const { return rep._get_max_index( idx ); }

    // removes maximal index from given column
    void remove_max( index idx ) { rep._remove_max( idx ); }

    // adds column @source to column @target
    void add_to( index source, index target ) { rep._add_to( source, target ); }

    // clears given column
    void clear( index idx ) { rep._clear( idx ); }

    // finalizes given column
    void finalize( index idx ) { rep._finalize( idx ); }

    // syncronizes all internal data structures -- has to be called before and after any multithreaded access!
    void sync() { rep._sync(); }

// info functions -- independent of chosen 'Representation'
public:
    // maximal dimension over all columns (0 for an empty matrix)
    dimension get_max_dim() const {
        dimension cur_max_dim = 0;
        for( index idx = 0; idx < get_num_cols(); idx++ )
            cur_max_dim = get_dim( idx ) > cur_max_dim ? get_dim( idx ) : cur_max_dim;
        return cur_max_dim;
    }

    // number of nonzero rows for given column @idx
    index get_num_rows( index idx ) const {
        column cur_col;
        get_col( idx, cur_col );
        return cur_col.size();
    }

    // maximal number of nonzero rows of all columns (-1 for an empty matrix)
    index get_max_col_entries() const {
        index max_col_entries = -1;
        const index nr_of_columns = get_num_cols();
        for( index idx = 0; idx < nr_of_columns; idx++ )
            max_col_entries = get_num_rows( idx ) > max_col_entries ? get_num_rows( idx ) : max_col_entries;
        return max_col_entries;
    }

    // maximal number of nonzero cols of all rows -- builds a transposed copy,
    // so this costs O(number of entries) in both time and memory
    index get_max_row_entries() const {
        size_t max_row_entries = 0;
        const index nr_of_columns = get_num_cols();
        std::vector< std::vector< index > > transposed_matrix( nr_of_columns );
        column temp_col;
        for( index cur_col = 0; cur_col < nr_of_columns; cur_col++ ) {
            get_col( cur_col, temp_col );
            for( index idx = 0; idx < (index)temp_col.size(); idx++)
                transposed_matrix[ temp_col[ idx ] ].push_back( cur_col );
        }
        for( index idx = 0; idx < nr_of_columns; idx++ )
            max_row_entries = transposed_matrix[ idx ].size() > max_row_entries ? transposed_matrix[ idx ].size() : max_row_entries;
        return max_row_entries;
    }

    // overall number of entries in the matrix
    index get_num_entries() const {
        index number_of_nonzero_entries = 0;
        const index nr_of_columns = get_num_cols();
        for( index idx = 0; idx < nr_of_columns; idx++ )
            number_of_nonzero_entries += get_num_rows( idx );
        return number_of_nonzero_entries;
    }

// operators / constructors
public:
    boundary_matrix() {}

    // copy-construct from a matrix with a (possibly different) representation
    template< class OtherRepresentation >
    boundary_matrix( const boundary_matrix< OtherRepresentation >& other ) {
        *this = other;
    }

    // equality: same number of columns and, per column, identical content and dimension
    template< typename OtherRepresentation >
    bool operator==( const boundary_matrix< OtherRepresentation >& other_boundary_matrix ) const {
        const index number_of_columns = this->get_num_cols();
        if( number_of_columns != other_boundary_matrix.get_num_cols() )
            return false;
        column temp_col;
        column other_temp_col;
        for( index idx = 0; idx < number_of_columns; idx++ ) {
            this->get_col( idx, temp_col );
            other_boundary_matrix.get_col( idx, other_temp_col );
            if( temp_col != other_temp_col || this->get_dim( idx ) != other_boundary_matrix.get_dim( idx ) )
                return false;
        }
        return true;
    }

    template< typename OtherRepresentation >
    bool operator!=( const boundary_matrix< OtherRepresentation >& other_boundary_matrix ) const {
        return !( *this == other_boundary_matrix );
    }

    // column-by-column copy; allows conversion between representations
    template< typename OtherRepresentation >
    boundary_matrix< Representation >& operator=( const boundary_matrix< OtherRepresentation >& other )
    {
        const index nr_of_columns = other.get_num_cols();
        this->set_num_cols( nr_of_columns );
        column temp_col;
        for( index cur_col = 0; cur_col < nr_of_columns; cur_col++ ) {
            this->set_dim( cur_col, other.get_dim( cur_col ) );
            other.get_col( cur_col, temp_col );
            this->set_col( cur_col, temp_col );
        }
        // by convention, always return *this
        return *this;
    }

// I/O -- independent of chosen 'Representation'
public:
    // initializes boundary_matrix from (vector<vector>, vector) pair -- untested
    template< typename index_type, typename dimemsion_type >
    void load_vector_vector( const std::vector< std::vector< index_type > >& input_matrix, const std::vector< dimemsion_type >& input_dims ) {
        const index nr_of_columns = (index)input_matrix.size();
        this->set_num_cols( nr_of_columns );
        column temp_col;
        #pragma omp parallel for private( temp_col )
        for( index cur_col = 0; cur_col < nr_of_columns; cur_col++ ) {
            this->set_dim( cur_col, (dimension)input_dims[ cur_col ] );
            index num_rows = input_matrix[ cur_col ].size();
            temp_col.resize( num_rows );
            for( index cur_row = 0; cur_row < num_rows; cur_row++ )
                temp_col[ cur_row ] = (index)input_matrix[ cur_col ][ cur_row ];
            this->set_col( cur_col, temp_col );
        }
    }

    // exports the boundary_matrix as a (vector<vector>, vector) pair
    template< typename index_type, typename dimemsion_type >
    void save_vector_vector( std::vector< std::vector< index_type > >& output_matrix, std::vector< dimemsion_type >& output_dims ) {
        const index nr_of_columns = get_num_cols();
        output_matrix.resize( nr_of_columns );
        output_dims.resize( nr_of_columns );
        column temp_col;
        for( index cur_col = 0; cur_col < nr_of_columns; cur_col++ ) {
            output_dims[ cur_col ] = (dimemsion_type)get_dim( cur_col );
            get_col( cur_col, temp_col );
            index num_rows = temp_col.size();
            output_matrix[ cur_col ].clear();
            output_matrix[ cur_col ].resize( num_rows );
            for( index cur_row = 0; cur_row < num_rows; cur_row++ )
                output_matrix[ cur_col ][ cur_row ] = (index_type)temp_col[ cur_row ];
        }
    }

    // Loads the boundary_matrix from given file in ascii format
    // Format: each line represents a column, first number is dimension, other numbers are the content of the column.
    // Ignores empty lines and lines starting with a '#'.
    bool load_ascii( std::string filename ) {
        // first count number of columns:
        std::string cur_line;
        std::ifstream dummy( filename.c_str() );
        if( dummy.fail() )
            return false;
        index number_of_columns = 0;
        while( getline( dummy, cur_line ) ) {
            cur_line.erase(cur_line.find_last_not_of(" \t\n\r\f\v") + 1);
            if( cur_line != "" && cur_line[ 0 ] != '#' )
                number_of_columns++;
        }
        this->set_num_cols( number_of_columns );
        dummy.close();

        std::ifstream input_stream( filename.c_str() );
        if( input_stream.fail() )
            return false;

        column temp_col;
        index cur_col = -1;
        while( getline( input_stream, cur_line ) ) {
            cur_line.erase(cur_line.find_last_not_of(" \t\n\r\f\v") + 1);
            if( cur_line != "" && cur_line[ 0 ] != '#' ) {
                cur_col++;
                std::stringstream ss( cur_line );
                int64_t temp_dim;
                ss >> temp_dim;
                this->set_dim( cur_col, (dimension) temp_dim );
                int64_t temp_index;
                temp_col.clear();
                // BUG FIX: drive the loop by the extraction itself.  The old
                // `while( ss.good() )` pattern did not check whether the
                // subsequent `ss >> temp_index` succeeded, so a malformed
                // token (or residual whitespace) appended one spurious entry.
                while( ss >> temp_index )
                    temp_col.push_back( (index)temp_index );
                std::sort( temp_col.begin(), temp_col.end() );
                this->set_col( cur_col, temp_col );
            }
        }
        input_stream.close();
        return true;
    }

    // Saves the boundary_matrix to given file in ascii format
    // Format: each line represents a column, first number is dimension, other numbers are the content of the column
    bool save_ascii( std::string filename ) {
        std::ofstream output_stream( filename.c_str() );
        if( output_stream.fail() )
            return false;
        const index nr_columns = this->get_num_cols();
        column tempCol;
        for( index cur_col = 0; cur_col < nr_columns; cur_col++ ) {
            output_stream << (int64_t)this->get_dim( cur_col );
            this->get_col( cur_col, tempCol );
            for( index cur_row_idx = 0; cur_row_idx < (index)tempCol.size(); cur_row_idx++ )
                output_stream << " " << tempCol[ cur_row_idx ];
            output_stream << std::endl;
        }
        output_stream.close();
        return true;
    }

    // Loads boundary_matrix from given file
    // Format: nr_columns % dim1 % N1 % row1 row2 % ...% rowN1 % dim2 % N2 % ...
    // NOTE(review): read errors after the first read are not detected; a
    // truncated file yields an undersized/garbage matrix -- confirm whether
    // callers rely on this best-effort behaviour before hardening.
    bool load_binary( std::string filename )
    {
        std::ifstream input_stream( filename.c_str( ), std::ios_base::binary | std::ios_base::in );
        if( input_stream.fail( ) )
            return false;
        int64_t nr_columns;
        input_stream.read( (char*)&nr_columns, sizeof( int64_t ) );
        this->set_num_cols( (index)nr_columns );
        column temp_col;
        for( index cur_col = 0; cur_col < nr_columns; cur_col++ ) {
            int64_t cur_dim;
            input_stream.read( (char*)&cur_dim, sizeof( int64_t ) );
            this->set_dim( cur_col, (dimension)cur_dim );
            int64_t nr_rows;
            input_stream.read( (char*)&nr_rows, sizeof( int64_t ) );
            temp_col.resize( ( std::size_t )nr_rows );
            for( index idx = 0; idx < nr_rows; idx++ ) {
                int64_t cur_row;
                input_stream.read( (char*)&cur_row, sizeof( int64_t ) );
                temp_col[ idx ] = (index)cur_row;
            }
            this->set_col( cur_col, temp_col );
        }
        input_stream.close( );
        return true;
    }

    // Saves the boundary_matrix to given file in binary format
    // Format: nr_columns % dim1 % N1 % row1 row2 % ...% rowN1 % dim2 % N2 % ...
    bool save_binary( std::string filename )
    {
        std::ofstream output_stream( filename.c_str( ), std::ios_base::binary | std::ios_base::out );
        if( output_stream.fail( ) )
            return false;
        const int64_t nr_columns = this->get_num_cols( );
        output_stream.write( (char*)&nr_columns, sizeof( int64_t ) );
        column tempCol;
        for( index cur_col = 0; cur_col < nr_columns; cur_col++ ) {
            int64_t cur_dim = this->get_dim( cur_col );
            output_stream.write( (char*)&cur_dim, sizeof( int64_t ) );
            this->get_col( cur_col, tempCol );
            int64_t cur_nr_rows = tempCol.size( );
            output_stream.write( (char*)&cur_nr_rows, sizeof( int64_t ) );
            for( index cur_row_idx = 0; cur_row_idx < (index)tempCol.size( ); cur_row_idx++ ) {
                int64_t cur_row = tempCol[ cur_row_idx ];
                output_stream.write( (char*)&cur_row, sizeof( int64_t ) );
            }
        }
        output_stream.close( );
        return true;
    }
};
}
|
GB_unop__identity_bool_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_bool_uint32
// op(A') function: GB_unop_tran__identity_bool_uint32
// C type: bool
// A type: uint32_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
// Type-specific macros consumed by the kernels below and by the generic
// transpose template (GB_unop_transpose.c, included at the end of this file).

// type of the A matrix entries
#define GB_ATYPE \
    uint32_t

// type of the C matrix entries
#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    bool z = (bool) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    uint32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    bool z = (bool) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (bool) Ax [p] for all anz entries.  Cx and Ax may be aliased
// because each entry is read and written independently, in place.
GrB_Info GB_unop_apply__identity_bool_uint32
(
    bool *Cx,               // Cx and Ax may be aliased
    const uint32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // entries are independent, so a static schedule splits the work evenly
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint32_t aij = Ax [p] ;
        bool z = (bool) aij ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the identity operator.
// The actual work is done by the generic template GB_unop_transpose.c,
// specialised through the GB_* macros defined earlier in this file.
GrB_Info GB_unop_tran__identity_bool_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
essai.c | void functionA() {
}
/* Empty placeholder; invoked from one of the OpenMP sections in main(). */
void functionB() {
}
int main() {
    /* Run the two independent calls concurrently.  The combined
       "parallel sections" construct is equivalent to a parallel region
       immediately containing a single sections construct, which is what the
       original two stacked pragmas expressed. */
    #pragma omp parallel sections
    {
        #pragma omp section
        functionA();

        #pragma omp section
        functionB();
    }
}
|
dz2z3.c | #include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "timer.h"
char res_seq[100];
char res_par[100];
#define DIM 2 /* Two-dimensional system */
#define X 0 /* x-coordinate subscript */
#define Y 1 /* y-coordinate subscript */
const double G = 6.673e-11;
typedef double vect_t[DIM]; /* Vector type for position, etc. */
// vect_t forces_reduction[4999][5000];
struct particle_s
{
double m; /* Mass */
vect_t s; /* Position */
vect_t v; /* Velocity */
};
int rank, size;
MPI_Datatype particle_s_type;
MPI_Datatype vect_t_type;
enum Tags
{
TAG_CURR = 1000,
TAG_FORCES_SEND,
TAG_FORCES,
};
void Usage(char *prog_name);
void Get_args(int argc, char *argv[], int *n_p, int *n_steps_p,
double *delta_t_p, int *output_freq_p, char *g_i_p);
void Get_init_cond(struct particle_s curr[], int n);
void Gen_init_cond(struct particle_s curr[], int n);
void Output_state(double time, struct particle_s curr[], int n);
void Compute_force(int part, vect_t forces[], struct particle_s curr[],
int n);
void Compute_force_parallel(int part, vect_t forces[], struct particle_s curr[],
int n);
void Update_part(int part, vect_t forces[], struct particle_s curr[],
int n, double delta_t);
void Compute_energy(struct particle_s curr[], int n, double *kin_en_p,
double *pot_en_p);
/* Run the full n-body simulation on a single process, print per-run energy
 * and timing information, and record the final energy summary in the global
 * res_seq for later comparison with the parallel run.  Arguments are parsed
 * with Get_args; initial conditions come from stdin ('i') or are generated
 * deterministically ('g'). */
void sequential_solution(int argc, char *argv[])
{
    int n;                   /* Number of particles   */
    int n_steps;             /* Number of timesteps   */
    int step;                /* Current step          */
    int part;                /* Current particle      */
    int output_freq;         /* Frequency of output   */
    double delta_t;          /* Size of timestep      */
    double t = 0.0;          /* Current time.  BUG FIX: initialized so the
                                final Output_state(t, ...) is well-defined
                                when n_steps == 0 (Get_args allows it). */
    struct particle_s *curr; /* Current state of system */
    vect_t *forces;          /* Forces on each particle */
    char g_i;                /* _G_en or _i_nput init conds */
    double kinetic_energy, potential_energy;
    double start, finish;    /* For timings */

    Get_args(argc, argv, &n, &n_steps, &delta_t, &output_freq, &g_i);
    curr = malloc(n * sizeof(struct particle_s));
    forces = malloc(n * sizeof(vect_t));
    if (g_i == 'i')
        Get_init_cond(curr, n);
    else
        Gen_init_cond(curr, n);

    GET_TIME(start);
    Compute_energy(curr, n, &kinetic_energy, &potential_energy);
    printf(" PE = %e, KE = %e, Total Energy = %e\n",
           potential_energy, kinetic_energy, kinetic_energy + potential_energy);
    Output_state(0, curr, n);
    for (step = 1; step <= n_steps; step++)
    {
        t = step * delta_t;
        /* Forces are rebuilt from scratch every step; Compute_force(part,..)
           accumulates each pair (part, k > part) symmetrically, so the last
           particle needs no call of its own. */
        memset(forces, 0, n * sizeof(vect_t));
        for (part = 0; part < n - 1; part++)
            Compute_force(part, forces, curr, n);
        for (part = 0; part < n; part++)
            Update_part(part, forces, curr, n, delta_t);
        Compute_energy(curr, n, &kinetic_energy, &potential_energy);
    }
    Output_state(t, curr, n);
    printf(" PE = %e, KE = %e, Total Energy = %e\n",
           potential_energy, kinetic_energy, kinetic_energy + potential_energy);
    sprintf(res_seq, " PE = %e, KE = %e, Total Energy = %e\n",
            potential_energy, kinetic_energy, kinetic_energy + potential_energy);
    GET_TIME(finish);
    printf("Elapsed time = %e seconds\n", finish - start);

    free(curr);
    free(forces);
} /* sequential_solution */
void parallel_solution(int argc, char *argv[])
{
int n; /* Number of particles */
int n_steps; /* Number of timesteps */
int step; /* Current step */
int part; /* Current particle */
int output_freq; /* Frequency of output */
double delta_t; /* Size of timestep */
double t; /* Current Time */
struct particle_s *curr; /* Current state of system */
vect_t *forces; /* Forces on each particle */
char g_i; /*_G_en or _i_nput init conds */
double kinetic_energy, potential_energy;
double start, finish; /* For timings */
if (rank == 0)
{
Get_args(argc, argv, &n, &n_steps, &delta_t, &output_freq, &g_i);
curr = malloc(n * sizeof(struct particle_s));
forces = malloc(n * sizeof(vect_t));
if (g_i == 'i')
Get_init_cond(curr, n);
else
Gen_init_cond(curr, n);
GET_TIME(start);
Compute_energy(curr, n, &kinetic_energy, &potential_energy);
printf(" PE = %e, KE = %e, Total Energy = %e\n",
potential_energy, kinetic_energy, kinetic_energy + potential_energy);
Output_state(0, curr, n);
}
MPI_Bcast(&n_steps, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&delta_t, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
if (rank != 0)
{
curr = malloc(n * sizeof(struct particle_s));
forces = malloc(n * sizeof(vect_t));
}
vect_t *forces_reduced = malloc(n * sizeof(vect_t));
for (step = 1; step <= n_steps; step++)
{
int part_start, part_end;
int chunk;
memset(forces, 0, n * sizeof(vect_t));
MPI_Bcast(curr, n, particle_s_type, 0, MPI_COMM_WORLD);
chunk = (n - 1) / size;
part_start = rank * chunk;
part_end = part_start + chunk;
if (rank == size - 1)
part_end = n - 1;
for (part = part_start; part < part_end; part++)
Compute_force(part, forces, curr, n);
MPI_Reduce(forces, forces_reduced, n * 2, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
if (rank == 0)
for (part = 0; part < n; part++)
Update_part(part, forces_reduced, curr, n, delta_t);
}
if (rank == 0)
{
t = step * delta_t;
Compute_energy(curr, n, &kinetic_energy, &potential_energy);
Output_state(t, curr, n);
printf(" PE = %e, KE = %e, Total Energy = %e\n",
potential_energy, kinetic_energy, kinetic_energy + potential_energy);
sprintf(res_par, " PE = %e, KE = %e, Total Energy = %e\n",
potential_energy, kinetic_energy, kinetic_energy + potential_energy);
GET_TIME(finish);
printf("Elapsed time = %e seconds\n", finish - start);
}
free(curr);
free(forces);
free(forces_reduced);
} /* parallel_solution */
/* Returns nonzero when the sequential and parallel energy summaries match. */
int compare_results(void)
{
    return strcmp(res_seq, res_par) == 0;
}
/* Driver: runs the sequential solution on rank 0, then the parallel solution
 * on all ranks, times both with MPI_Wtime, and reports whether the two final
 * energy summaries (res_seq / res_par) agree. */
int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    /* Define types for communication.  Both types treat the structs as runs
     * of doubles; this assumes struct particle_s contains no padding, which
     * should hold here since every member is a double (m plus two vect_t)
     * -- confirm on exotic ABIs. */
    MPI_Type_contiguous(sizeof(vect_t) / sizeof(double),
                        MPI_DOUBLE, &vect_t_type);
    MPI_Type_commit(&vect_t_type);
    MPI_Type_contiguous(sizeof(struct particle_s) / sizeof(double),
                        MPI_DOUBLE, &particle_s_type);
    MPI_Type_commit(&particle_s_type);
    double start_time_seq, end_time_seq, start_time_parallel, end_time_parallel;
    if (rank == 0)
    {
        printf("---------------------Sequential execution---------------------\n");
        start_time_seq = MPI_Wtime();
        sequential_solution(argc, argv);
        end_time_seq = MPI_Wtime();
        printf("----------------------Parallel execution----------------------\n");
        start_time_parallel = MPI_Wtime();
    }
    /* All ranks participate; non-root ranks get sizes via MPI_Bcast inside. */
    parallel_solution(argc, argv);
    if (rank == 0)
    {
        end_time_parallel = MPI_Wtime();
        printf("\nSequential elapsed time: %lfs\n", end_time_seq - start_time_seq);
        printf("Parallel elapsed time: %lfs\n", end_time_parallel - start_time_parallel);
        if (compare_results())
            printf("Test PASSED\n");
        else
            printf("Test FAILED\n");
    }
    MPI_Type_free(&vect_t_type);
    MPI_Type_free(&particle_s_type);
    MPI_Finalize();
    return 0;
} /* main */
/* Print usage information to stderr and terminate the program.
 * BUG FIX: exits with EXIT_FAILURE instead of 0 -- this function is only
 * reached on invalid arguments (see Get_args), so reporting success to the
 * shell was wrong. */
void Usage(char *prog_name)
{
    fprintf(stderr, "usage: %s <number of particles> <number of timesteps>\n",
            prog_name);
    fprintf(stderr, " <size of timestep> <output frequency>\n");
    fprintf(stderr, " <g|i>\n");
    fprintf(stderr, " 'g': program should generate init conds\n");
    fprintf(stderr, " 'i': program should get init conds from stdin\n");
    exit(EXIT_FAILURE);
} /* Usage */
/* Parse the five required command-line arguments:
 *   argv[1] n (> 0), argv[2] n_steps (>= 0), argv[3] delta_t (> 0),
 *   argv[4] output frequency, argv[5] 'g' (generate) or 'i' (read stdin).
 * Calls Usage() -- which exits -- on any invalid value.
 * NOTE(review): strtol/strtod parse failures yield 0, which the range checks
 * catch for n and delta_t but not for output_freq -- confirm intent. */
void Get_args(int argc, char *argv[], int *n_p, int *n_steps_p,
              double *delta_t_p, int *output_freq_p, char *g_i_p)
{
    if (argc != 6)
        Usage(argv[0]);
    *n_p = strtol(argv[1], NULL, 10);
    *n_steps_p = strtol(argv[2], NULL, 10);
    *delta_t_p = strtod(argv[3], NULL);
    *output_freq_p = strtol(argv[4], NULL, 10);
    *g_i_p = argv[5][0];
    if (*n_p <= 0 || *n_steps_p < 0 || *delta_t_p <= 0)
        Usage(argv[0]);
    if (*g_i_p != 'g' && *g_i_p != 'i')
        Usage(argv[0]);
} /* Get_args */
/* Read initial conditions for n particles from stdin: for each particle, its
 * mass, x/y position, and x/y velocity, in that order.
 * NOTE(review): scanf return values are not checked, so short or malformed
 * input leaves particles uninitialized -- confirm whether input is trusted. */
void Get_init_cond(struct particle_s curr[], int n)
{
    int part;
    printf("For each particle, enter (in order):\n");
    printf(" its mass, its x-coord, its y-coord, ");
    printf("its x-velocity, its y-velocity\n");
    for (part = 0; part < n; part++)
    {
        scanf("%lf", &curr[part].m);
        scanf("%lf", &curr[part].s[X]);
        scanf("%lf", &curr[part].s[Y]);
        scanf("%lf", &curr[part].v[X]);
        scanf("%lf", &curr[part].v[Y]);
    }
} /* Get_init_cond */
/* Generate deterministic initial conditions: equal masses, particles spaced
 * `gap` apart along the x-axis, with alternating +/-`speed` y-velocities.
 * NOTE(review): srandom(1) seeds the PRNG but random() is never called in
 * this function -- presumably a leftover; confirm before removing. */
void Gen_init_cond(struct particle_s curr[], int n)
{
    int part;
    double mass = 5.0e24;
    double gap = 1.0e5;
    double speed = 3.0e4;
    srandom(1);
    for (part = 0; part < n; part++)
    {
        curr[part].m = mass;
        curr[part].s[X] = part * gap;
        curr[part].s[Y] = 0.0;
        curr[part].v[X] = 0.0;
        /* alternate up/down so total y-momentum is near zero for even n */
        if (part % 2 == 0)
            curr[part].v[Y] = speed;
        else
            curr[part].v[Y] = -speed;
    }
} /* Gen_init_cond */
/* Print the simulation time followed by one line per particle:
 * index, x/y position, x/y velocity. */
void Output_state(double time, struct particle_s curr[], int n)
{
    int part;
    printf("%.2f\n", time);
    for (part = 0; part < n; part++)
    {
        printf("%3d %10.3e ", part, curr[part].s[X]);
        printf(" %10.3e ", curr[part].s[Y]);
        printf(" %10.3e ", curr[part].v[X]);
        printf(" %10.3e\n", curr[part].v[Y]);
    }
    printf("\n");
} /* Output_state */
/* Accumulate the gravitational forces between particle `part` and every
 * particle k > part into forces[].  Each pair is evaluated once and applied
 * symmetrically (added to forces[part], subtracted from forces[k]), which is
 * why callers only loop part = 0 .. n-2.
 * NOTE(review): no softening term -- coincident particles (len == 0) divide
 * by zero; confirm inputs guarantee distinct positions. */
void Compute_force(int part, vect_t forces[], struct particle_s curr[],
                   int n)
{
    int k;
    double mg;
    vect_t f_part_k;   /* force on `part` due to particle k */
    double len, len_3, fact;
    // #pragma omp parallel for private(f_part_k, len, len_3, mg, fact)
    for (k = part + 1; k < n; k++)
    {
        /* displacement from k to part, then |r| and |r|^3 */
        f_part_k[X] = curr[part].s[X] - curr[k].s[X];
        f_part_k[Y] = curr[part].s[Y] - curr[k].s[Y];
        len = sqrt(f_part_k[X] * f_part_k[X] + f_part_k[Y] * f_part_k[Y]);
        len_3 = len * len * len;
        mg = -G * curr[part].m * curr[k].m;
        fact = mg / len_3;
        f_part_k[X] *= fact;
        f_part_k[Y] *= fact;
        // #pragma omp atomic
        forces[part][X] += f_part_k[X];
        // #pragma omp atomic
        forces[part][Y] += f_part_k[Y];
        /* Newton's third law: equal and opposite on particle k */
        forces[k][X] -= f_part_k[X];
        forces[k][Y] -= f_part_k[Y];
    }
} /* Compute_force */
/* Advance particle `part` by one Euler step of size delta_t: the position is
 * updated using the *old* velocity first, then the velocity is updated from
 * the accumulated force via a = F/m (fact = delta_t / m). */
void Update_part(int part, vect_t forces[], struct particle_s curr[],
                 int n, double delta_t)
{
    double fact = delta_t / curr[part].m;
    curr[part].s[X] += delta_t * curr[part].v[X];
    curr[part].s[Y] += delta_t * curr[part].v[Y];
    curr[part].v[X] += fact * forces[part][X];
    curr[part].v[Y] += fact * forces[part][Y];
} /* Update_part */
/* Compute the total kinetic energy (sum of m*v^2/2) and gravitational
 * potential energy (sum over unordered pairs of -G*m_i*m_j/dist) of the
 * system, returned through *kin_en_p and *pot_en_p. */
void Compute_energy(struct particle_s curr[], int n, double *kin_en_p,
                    double *pot_en_p)
{
    int i, j;
    vect_t diff;
    double pe = 0.0, ke = 0.0;
    double dist, speed_sqr;
    for (i = 0; i < n; i++)
    {
        speed_sqr = curr[i].v[X] * curr[i].v[X] + curr[i].v[Y] * curr[i].v[Y];
        ke += curr[i].m * speed_sqr;
    }
    ke *= 0.5;
    /* each unordered pair (i, j) contributes once */
    for (i = 0; i < n - 1; i++)
    {
        for (j = i + 1; j < n; j++)
        {
            diff[X] = curr[i].s[X] - curr[j].s[X];
            diff[Y] = curr[i].s[Y] - curr[j].s[Y];
            dist = sqrt(diff[X] * diff[X] + diff[Y] * diff[Y]);
            pe += -G * curr[i].m * curr[j].m / dist;
        }
    }
    *kin_en_p = ke;
    *pot_en_p = pe;
} /* Compute_energy */
|
common.c | /****************************************************************************
* *
* OpenMP MicroBenchmark Suite - Version 3.1 *
* *
* produced by *
* *
* Mark Bull, Fiona Reid and Nix Mc Donnell *
* *
* at *
* *
* Edinburgh Parallel Computing Centre *
* *
* email: markb@epcc.ed.ac.uk or fiona@epcc.ed.ac.uk *
* *
* *
* This version copyright (c) The University of Edinburgh, 2015. *
* *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
****************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <omp.h>
#include "common.h"
#define CONF95 1.96
int nthreads = -1; // Number of OpenMP threads
int delaylength = -1; // The number of iterations to delay for
int outerreps = -1; // Outer repetitions
double delaytime = -1.0; // Length of time to delay for in microseconds
double targettesttime = 0.0; // The length of time in microseconds that the test
// should run for.
unsigned long innerreps; // Inner repetitions
double *times; // Array of doubles storing the benchmark times in microseconds
double referencetime; // The average reference time in microseconds to perform
// outerreps runs
double referencesd; // The standard deviation in the reference time in
// microseconds for outerreps runs.
double testtime; // The average test time in microseconds for
// outerreps runs
double testsd; // The standard deviation in the test time in
// microseconds for outerreps runs.
/* Print the accepted command-line options and their default values. */
void usage(char *argv[]) {
    printf("Usage: %s.x \n"
           "\t--outer-repetitions <outer-repetitions> (default %d)\n"
           "\t--test-time <target-test-time> (default %0.2f microseconds)\n"
           "\t--delay-time <delay-time> (default %0.4f microseconds)\n"
           "\t--delay-length <delay-length> "
           "(default auto-generated based on processor speed)\n",
           argv[0],
           DEFAULT_OUTER_REPS, DEFAULT_TEST_TARGET_TIME, DEFAULT_DELAY_TIME);
}
/* Parse command-line options into the global configuration variables.
 * Recognised: --delay-time <float>, --outer-repetitions <int>,
 * --test-time <float>, -h.  Exits after printing usage on any error.
 * NOTE(review): atof/atoi cannot distinguish "0" from a parse failure, so a
 * literal zero value is rejected as invalid -- behaviour preserved. */
void parse_args(int argc, char *argv[]) {
    int arg;
    for (arg = 1; arg < argc; arg++) {
        /* BUG FIX: strcmp result was compared with the double literal 0.0 */
        if (strcmp(argv[arg], "--delay-time") == 0) {
            /* BUG FIX: guard against a missing option value; the old code
               indexed argv[++arg] unconditionally (out-of-bounds read when
               the option is last). */
            if (++arg >= argc) {
                printf("Missing value for --delay-time\n");
                usage(argv);
                exit(EXIT_FAILURE);
            }
            delaytime = atof(argv[arg]);
            if (delaytime == 0.0) {
                printf("Invalid float:--delay-time: %s\n", argv[arg]);
                usage(argv);
                exit(EXIT_FAILURE);
            }
        } else if (strcmp(argv[arg], "--outer-repetitions") == 0) {
            if (++arg >= argc) {
                printf("Missing value for --outer-repetitions\n");
                usage(argv);
                exit(EXIT_FAILURE);
            }
            outerreps = atoi(argv[arg]);
            if (outerreps == 0) {
                printf("Invalid integer:--outer-repetitions: %s\n", argv[arg]);
                usage(argv);
                exit(EXIT_FAILURE);
            }
        } else if (strcmp(argv[arg], "--test-time") == 0) {
            if (++arg >= argc) {
                printf("Missing value for --test-time\n");
                usage(argv);
                exit(EXIT_FAILURE);
            }
            targettesttime = atof(argv[arg]);
            if (targettesttime == 0) {
                printf("Invalid integer:--test-time: %s\n", argv[arg]);
                usage(argv);
                exit(EXIT_FAILURE);
            }
        } else if (strcmp(argv[arg], "-h") == 0) {
            usage(argv);
            exit(EXIT_SUCCESS);
        } else {
            printf("Invalid parameters: %s\n", argv[arg]);
            usage(argv);
            exit(EXIT_FAILURE);
        }
    }
}
/* Calibrate delaylength: find the number of delay() iterations whose average
 * execution time reaches `delaytime` microseconds, growing the candidate
 * geometrically (x1.1 + 1) and timing `reps` calls per candidate. */
int getdelaylengthfromtime(double delaytime) {
    int i, reps;
    double lapsedtime, starttime; // seconds
    reps = 1000;
    lapsedtime = 0.0;
    delaytime = delaytime/1.0E6; // convert from microseconds to seconds
    // Note: delaytime is local to this function and thus the conversion
    // does not propagate to the main code.
    // Here we want to use the delaytime in microseconds to find the
    // delaylength in iterations. We start with delaylength=0 and
    // increase until we get a large enough delaytime, return delaylength
    // in iterations.
    delaylength = 0;
    delay(delaylength);   // warm-up call before the timed loop
    while (lapsedtime < delaytime) {
        delaylength = delaylength * 1.1 + 1;
        starttime = getclock();
        for (i = 0; i < reps; i++) {
            delay(delaylength);
        }
        lapsedtime = (getclock() - starttime) / (double) reps;
    }
    return delaylength;
}
/* Double the global innerreps (number of repetitions performed inside
 * test()) until one invocation of test() takes at least targettesttime
 * microseconds; returns the resulting innerreps.  Aborts if innerreps grows
 * absurdly large, which indicates the compiler optimised the timed loop
 * away. */
unsigned long getinnerreps(void (*test)(void)) {
    innerreps = 10L; // some initial value
    double time = 0.0;

    while (time < targettesttime) {
        double start = getclock();
        test();
        time = (getclock() - start) * 1.0e6;   // seconds -> microseconds
        innerreps *=2;

        // Test to stop code if compiler is optimising reference time expressions away
        if (innerreps > (targettesttime*1.0e15)) {
            printf("Compiler has optimised reference loop away, STOP! \n");
            printf("Try recompiling with lower optimisation level \n");
            exit(1);
        }
    }
    return innerreps;
}
/* Print the banner for a measurement, including the calibrated number of
 * inner repetitions (global innerreps). */
void printheader(char *name) {
    printf("\n");
    printf("--------------------------------------------------------\n");
    printf("Computing %s time using %lu reps\n", name, innerreps);
}
/* Compute mean and standard deviation of times[1..outerreps] (entry 0 --
 * presumably a warm-up measurement, since the benchmark loops fill indices
 * 0..outerreps -- is excluded; confirm intent), count samples more than 3
 * standard deviations from the mean, and print a summary line.  Results are
 * returned through *mtp (mean) and *sdp (standard deviation). */
void stats(double *mtp, double *sdp) {

    double meantime, totaltime, sumsq, mintime, maxtime, sd, cutoff;

    int i, nr;

    mintime = 1.0e10;
    maxtime = 0.;
    totaltime = 0.;

    for (i = 1; i <= outerreps; i++) {
        mintime = (mintime < times[i]) ? mintime : times[i];
        maxtime = (maxtime > times[i]) ? maxtime : times[i];
        totaltime += times[i];
    }

    meantime = totaltime / outerreps;
    sumsq = 0;

    for (i = 1; i <= outerreps; i++) {
        sumsq += (times[i] - meantime) * (times[i] - meantime);
    }
    sd = sqrt(sumsq / (outerreps - 1));   // sample standard deviation

    cutoff = 3.0 * sd;   // outliers are counted but NOT removed from the mean

    nr = 0;

    for (i = 1; i <= outerreps; i++) {
        if (fabs(times[i] - meantime) > cutoff)
            nr++;
    }

    printf("\n");
    printf("Sample_size Average Min Max S.D. Outliers\n");
    printf(" %d %f %f %f %f %d\n",
           outerreps, meantime, mintime, maxtime, sd, nr);
    printf("\n");

    *mtp = meantime;
    *sdp = sd;
}
/* Print the measured time and the overhead (test minus reference) with 95%
 * confidence intervals.
 * BUG FIX: the overhead interval now uses the `refsd` parameter; the old
 * code ignored it and read the global `referencesd` instead (identical at
 * the current call site, but wrong for any other caller). */
void printfooter(char *name, double testtime, double testsd,
                 double referencetime, double refsd) {
    printf("%s time = %f microseconds +/- %f\n",
           name, testtime, CONF95*testsd);
    printf("%s overhead = %f microseconds +/- %f\n",
           name, testtime-referencetime, CONF95*(testsd+refsd));
}
/* Print the reference time with its 95% confidence interval. */
void printreferencefooter(char *name, double referencetime, double referencesd) {
    printf("%s time = %f microseconds +/- %f\n",
           name, referencetime, CONF95 * referencesd);
}
/* Initialise the benchmark: record the OpenMP thread count, parse arguments,
 * fill in defaults for any option not supplied, calibrate the busy-wait
 * delay length, allocate the per-repetition times array (outerreps + 1
 * entries), and print the run configuration. */
void init(int argc, char **argv)
{
    // query the thread count from inside a parallel region
    #pragma omp parallel
    {
    #pragma omp master
        {
            nthreads = omp_get_num_threads();
        }
    }

    parse_args(argc, argv);

    if (outerreps == -1) {
        outerreps = DEFAULT_OUTER_REPS;
    }
    if (targettesttime == 0.0) {
        targettesttime = DEFAULT_TEST_TARGET_TIME;
    }
    if (delaytime == -1.0) {
        delaytime = DEFAULT_DELAY_TIME;
    }
    delaylength = getdelaylengthfromtime(delaytime); // Always need to compute delaylength in iterations

    times = malloc((outerreps+1) * sizeof(double));

    printf("Running OpenMP benchmark version 3.0\n"
           "\t%d thread(s)\n"
           "\t%d outer repetitions\n"
           "\t%0.2f test time (microseconds)\n"
           "\t%d delay length (iterations) \n"
           "\t%f delay time (microseconds)\n",
           nthreads,
           outerreps, targettesttime,
           delaylength, delaytime);
}
/* Release the per-repetition times array allocated in init(). */
void finalise(void) {
    free(times);
}
/* Print the header before a reference measurement. */
void initreference(char *name) {
    printheader(name);
}
/* Calculate the reference time. */
/* Measure the reference time: calibrate innerreps for refer(), run it
 * outerreps + 1 times storing each per-iteration time (microseconds) in
 * times[], then compute and print the statistics via finalisereference(). */
void reference(char *name, void (*refer)(void)) {
    int k;
    double start;

    // Calculate the required number of innerreps
    innerreps = getinnerreps(refer);

    initreference(name);

    for (k = 0; k <= outerreps; k++) {
        start = getclock();
        refer();
        // normalise to time per inner repetition, in microseconds
        times[k] = (getclock() - start) * 1.0e6 / (double) innerreps;
    }

    finalisereference(name);
}
/* Compute statistics of the reference runs into the referencetime/
 * referencesd globals and print them. */
void finalisereference(char *name) {
    stats(&referencetime, &referencesd);
    printreferencefooter(name, referencetime, referencesd);
}
/* Print the header before a test measurement.
 * NOTE(review): the name is a typo of "inittest" but is kept because
 * benchmark() and possibly other translation units call it by this name. */
void intitest(char *name) {
    printheader(name);
}
/* Finish a test measurement: reduce the samples into the testtime /
   testsd globals and print the time and the overhead relative to the
   reference run. */
void finalisetest(char *name) {
stats(&testtime, &testsd);
printfooter(name, testtime, testsd, referencetime, referencesd);
}
/* Function to run a microbenchmark test: calibrate the inner repeat
   count for the target test time, time outerreps+1 outer passes of
   the kernel, and pass the per-pass times (microseconds per inner
   repetition) to the statistics/reporting stage. */
void benchmark(char *name, void (*test)(void))
{
    int rep;
    double tick;

    innerreps = getinnerreps(test);
    intitest(name);

    for (rep = 0; rep <= outerreps; rep++) {
	tick = getclock();
	test();
	times[rep] = (getclock() - tick) * 1.0e6 / (double) innerreps;
    }

    finalisetest(name);
}
// For the Cray compiler on HECToR we need to turn off optimisation
// for the delay and array_delay functions. Other compilers should
// not be afffected.
#pragma _CRI noopt
/* Busy-wait: accumulate the loop index into a float for delaylength
   iterations.  The final compare-and-print keeps `a` observable so
   the loop is not deleted as dead code (see the _CRI noopt pragma
   above for the Cray compiler case).  Left byte-identical on purpose:
   the loop body is timing-sensitive. */
void delay(int delaylength) {
int i;
float a = 0.;
for (i = 0; i < delaylength; i++)
a += i;
/* a only ever grows from 0, so this branch never fires. */
if (a < 0)
printf("%f \n", a);
}
/* Busy-wait like delay(), but the accumulator lives in the caller's
   storage (a[0]), so the result is externally observable and the loop
   cannot be optimised away (see the _CRI noopt pragma above). */
void array_delay(int delaylength, double a[1]) {
int i;
a[0] = 1.0;
for (i = 0; i < delaylength; i++)
a[0] += i;
/* a[0] starts at 1.0 and only grows, so this branch never fires. */
if (a[0] < 0)
printf("%f \n", a[0]);
}
// Re-enable optimisation for remainder of source.
#pragma _CRI opt
/* Elapsed wall-clock time in seconds, measured from some arbitrary
   but consistent origin; thin wrapper around omp_get_wtime(). */
double getclock() {
    /* Local prototype — presumably kept so this function does not
       require omp.h here; confirm against the build setup. */
    double omp_get_wtime(void);
    return omp_get_wtime();
}
/* Constant predicate: always reports false (zero). */
int returnfalse() {
    const int no = 0;
    return no;
}
|
filterCutoutOMP.c | /*
* Copyright 2014 NeuroData (http://neurodata.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include<stdio.h>
#include<stdint.h>
#include<omp.h>
#include<stdbool.h>
/* Zero out, in parallel, every 32-bit voxel label in `cutout` that does
 * not appear in `filterlist`; labels that are found are left untouched.
 * cutout: array of cutoutsize labels, modified in place.
 * filterlist: array of listsize labels to keep. */
void filterCutoutOMP32 ( uint32_t * cutout, int cutoutsize, uint32_t * filterlist, int listsize)
{
  int i, j;
  bool found;

#pragma omp parallel num_threads(omp_get_max_threads())
  {
#pragma omp for private(i,j,found) schedule(dynamic)
    for ( i = 0; i < cutoutsize; i++ )
    {
      /* Linear scan of the keep-list for this voxel's label. */
      found = false;
      for ( j = 0; j < listsize; j++ )
      {
        if ( cutout[i] == filterlist[j] )
        {
          found = true;
          break;
        }
      }
      /* BUG FIX: the original condition was
           `if ( !equal || cutout[i] > filterlist[j] )`.
         When a match was found the extra comparison was always false
         (the two values are equal), and when no match was found the ||
         short-circuited past it (j == listsize would have been out of
         bounds) — so `!found` alone is exactly equivalent, without the
         misleading dead (and apparently OOB) clause.  The unused
         `int ID = omp_get_thread_num();` debug leftover is removed. */
      if ( !found )
        cutout[i] = 0;
    }
  }
}
/* Zero out, in parallel, every 64-bit voxel label in `cutout` that does
 * not appear in `filterlist`; labels that are found are left untouched.
 * Mirrors filterCutoutOMP32 for 64-bit annotation volumes.
 * cutout: array of cutoutsize labels, modified in place.
 * filterlist: array of listsize labels to keep. */
void filterCutoutOMP64 ( uint64_t * cutout, int cutoutsize, uint64_t * filterlist, int listsize)
{
  int i, j;
  bool found;

#pragma omp parallel num_threads(omp_get_max_threads())
  {
#pragma omp for private(i,j,found) schedule(dynamic)
    for ( i = 0; i < cutoutsize; i++ )
    {
      /* Linear scan of the keep-list for this voxel's label. */
      found = false;
      for ( j = 0; j < listsize; j++ )
      {
        if ( cutout[i] == filterlist[j] )
        {
          found = true;
          break;
        }
      }
      /* BUG FIX: as in filterCutoutOMP32, the original
           `if ( !equal || cutout[i] > filterlist[j] )`
         reduces to `!found`: the second clause was always false when a
         match existed and was short-circuited (avoiding an OOB read at
         j == listsize) when it did not.  The unused thread-ID debug
         leftover is removed. */
      if ( !found )
        cutout[i] = 0;
    }
  }
}
|
ast-dump-openmp-target-simd.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) { // minimal case: one canonical loop under target simd (trailing comments only — the CHECK lines below pin exact source line/column positions)
#pragma omp target simd
for (int i = 0; i < x; i++)
;
}
void test_two(int x, int y) { // two nested loops, no collapse clause (trailing comments only; CHECK lines pin source positions)
#pragma omp target simd
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_three(int x, int y) { // explicit collapse(1) (trailing comments only; CHECK lines pin source positions)
#pragma omp target simd collapse(1)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_four(int x, int y) { // collapse(2): both loops associated with the directive (trailing comments only; CHECK lines pin source positions)
#pragma omp target simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_five(int x, int y, int z) { // collapse(2) plus a third, non-associated innermost loop (trailing comments only; CHECK lines pin source positions)
#pragma omp target simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
for (int i = 0; i < z; i++)
;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPTargetSimdDirective {{.*}} <line:4:1, col:24>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPTargetSimdDirective {{.*}} <line:10:1, col:24>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPTargetSimdDirective {{.*}} <line:17:1, col:36>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:25, col:35>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:34> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:34> 'int' 1
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPTargetSimdDirective {{.*}} <line:24:1, col:36>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:25, col:35>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:34> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:34> 'int' 2
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPTargetSimdDirective {{.*}} <line:31:1, col:36>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:25, col:35>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:34> 'int'
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:34> 'int' 2
// CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:31:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:31:1) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
gravity.c | /**
* @file gravity.c
* @brief Direct gravity calculation, O(N^2).
* @author Hanno Rein <hanno@hanno-rein.de>
*
* @details This is the crudest implementation of an N-body code
 * which sums up every pair of particles. It is only useful for very small
 * particle numbers (N<~100) as it scales as O(N^2). Note that the MPI
* implementation is not well tested and only works for very specific
* problems. This should be resolved in the future.
*
*
* @section LICENSE
* Copyright (c) 2011 Hanno Rein, Shangfei Liu
*
* This file is part of rebound.
*
* rebound is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* rebound is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with rebound. If not, see <http://www.gnu.org/licenses/>.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <time.h>
#include "particle.h"
#include "rebound.h"
#include "tree.h"
#include "boundary.h"
#ifdef MPI
#include "communication_mpi.h"
#endif
/**
 * @brief Loops over all root cells of the tree, calling reb_calculate_acceleration_for_particle_from_cell() on each one to accumulate the force on a single particle.
* @param r REBOUND simulation to consider
* @param pt Index of the particle the force is calculated for.
* @param gb Ghostbox plus position of the particle (precalculated).
*/
static void reb_calculate_acceleration_for_particle(const struct reb_simulation* const r, const int pt, const struct reb_ghostbox gb);
/**
 * @brief Main gravity routine. Computes the accelerations
 *        particles[i].ax/ay/az of all particles using the solver
 *        selected in r->gravity.
 * @param r REBOUND simulation to consider.
 *
 * Solvers implemented below:
 *  - REB_GRAVITY_NONE:        leaves accelerations untouched.
 *  - REB_GRAVITY_BASIC:       direct O(N^2) pairwise summation over all
 *                             ghost boxes.
 *  - REB_GRAVITY_COMPENSATED: direct summation with compensated (Kahan)
 *                             summation; round-off is tracked per particle
 *                             in r->gravity_cs.
 *  - REB_GRAVITY_TREE:        tree walk via
 *                             reb_calculate_acceleration_for_particle().
 */
void reb_calculate_acceleration(struct reb_simulation* r){
    struct reb_particle* const particles = r->particles;
    const int N = r->N;
    const int N_active = r->N_active;
    const double G = r->G;
    // Softening enters the force law as sqrt(r^2 + softening^2).
    const double softening2 = r->softening*r->softening;
    // Values 1 and 2 skip particular pair terms, judging from the skip
    // conditions below: 1 skips the (0,1)/(1,0) pair, 2 skips every term
    // involving particle 0. NOTE(review): presumably used by integrators
    // that treat these terms separately — confirm against integrator code.
    const unsigned int _gravity_ignore_terms = r->gravity_ignore_terms;
    // N_active==-1 means all particles are massive. The trailing r->N_var
    // variational particles are excluded from the loops in this function.
    const int _N_active = ((N_active==-1)?N:N_active) - r->N_var;
    const int _N_real = N - r->N_var;
    // Nonzero: test particles also exert forces back on active particles
    // (see the extra loops guarded by this flag below).
    const int _testparticle_type = r->testparticle_type;
    switch (r->gravity){
        case REB_GRAVITY_NONE: // Do nothing.
        break;
        case REB_GRAVITY_BASIC:
        {
            const int nghostx = r->nghostx;
            const int nghosty = r->nghosty;
            const int nghostz = r->nghostz;
            // Reset accelerations of all particles (including variational ones).
#pragma omp parallel for schedule(guided)
            for (int i=0; i<N; i++){
                particles[i].ax = 0;
                particles[i].ay = 0;
                particles[i].az = 0;
            }
            // Summing over all Ghost Boxes
            for (int gbx=-nghostx; gbx<=nghostx; gbx++){
            for (int gby=-nghosty; gby<=nghosty; gby++){
            for (int gbz=-nghostz; gbz<=nghostz; gbz++){
                struct reb_ghostbox gb = reb_boundary_get_ghostbox(r, gbx,gby,gbz);
                // Summing over all particle pairs: force ON i FROM massive j.
#pragma omp parallel for schedule(guided)
                for (int i=0; i<_N_real; i++){
                for (int j=0; j<_N_active; j++){
                    if (_gravity_ignore_terms==1 && ((j==1 && i==0) || (i==1 && j==0) )) continue;
                    if (_gravity_ignore_terms==2 && ((j==0 || i==0) )) continue;
                    if (i==j) continue;
                    // Separation vector from j to the ghost-shifted image of i.
                    const double dx = (gb.shiftx+particles[i].x) - particles[j].x;
                    const double dy = (gb.shifty+particles[i].y) - particles[j].y;
                    const double dz = (gb.shiftz+particles[i].z) - particles[j].z;
                    const double _r = sqrt(dx*dx + dy*dy + dz*dz + softening2);
                    // a_i = -G m_j / r^3 * (r_i - r_j)
                    const double prefact = -G/(_r*_r*_r)*particles[j].m;
                    particles[i].ax += prefact*dx;
                    particles[i].ay += prefact*dy;
                    particles[i].az += prefact*dz;
                }
                }
                // If test particles are massive, they pull on active particles too.
                if (_testparticle_type){
                    for (int i=0; i<_N_active; i++){
                    for (int j=_N_active; j<_N_real; j++){
                        if (_gravity_ignore_terms==1 && ((j==1 && i==0) )) continue;
                        if (_gravity_ignore_terms==2 && ((j==0 || i==0) )) continue;
                        const double dx = (gb.shiftx+particles[i].x) - particles[j].x;
                        const double dy = (gb.shifty+particles[i].y) - particles[j].y;
                        const double dz = (gb.shiftz+particles[i].z) - particles[j].z;
                        const double _r = sqrt(dx*dx + dy*dy + dz*dz + softening2);
                        const double prefact = -G/(_r*_r*_r)*particles[j].m;
                        particles[i].ax += prefact*dx;
                        particles[i].ay += prefact*dy;
                        particles[i].az += prefact*dz;
                    }
                    }
                }
            }
            }
            }
        }
        break;
        case REB_GRAVITY_COMPENSATED:
        {
            // Grow the per-particle compensation buffer if needed.
            if (r->gravity_cs_allocatedN<N){
                r->gravity_cs = realloc(r->gravity_cs,N*sizeof(struct reb_vec3d));
                r->gravity_cs_allocatedN = N;
            }
            struct reb_vec3d* restrict const cs = r->gravity_cs;
            // Reset accelerations and compensation terms.
#pragma omp parallel for schedule(guided)
            for (int i=0; i<_N_real; i++){
                particles[i].ax = 0.;
                particles[i].ay = 0.;
                particles[i].az = 0.;
                cs[i].x = 0.;
                cs[i].y = 0.;
                cs[i].z = 0.;
            }
            // Summing over all massive particle pairs
#ifdef OPENMP
            // OpenMP variant: full i/j double loop (no pair symmetry) so
            // each particle's accumulators are touched by only one thread.
#pragma omp parallel for schedule(guided)
            for (int i=0; i<_N_active; i++){
                for (int j=0; j<_N_active; j++){
                    if (_gravity_ignore_terms==1 && ((j==1 && i==0) || (i==1 && j==0))) continue;
                    if (_gravity_ignore_terms==2 && ((j==0 || i==0))) continue;
                    if (i==j) continue;
                    const double dx = particles[i].x - particles[j].x;
                    const double dy = particles[i].y - particles[j].y;
                    const double dz = particles[i].z - particles[j].z;
                    const double r2 = dx*dx + dy*dy + dz*dz + softening2;
                    const double r = sqrt(r2);
                    const double prefact = G/(r2*r);
                    const double prefactj = -prefact*particles[j].m;
                    // Kahan compensated summation: cs[i] carries the running
                    // round-off so small force terms are not lost.
                    {
                        double ix = prefactj*dx;
                        double yx = ix - cs[i].x;
                        double tx = particles[i].ax + yx;
                        cs[i].x = (tx - particles[i].ax) - yx;
                        particles[i].ax = tx;
                        double iy = prefactj*dy;
                        double yy = iy - cs[i].y;
                        double ty = particles[i].ay + yy;
                        cs[i].y = (ty - particles[i].ay) - yy;
                        particles[i].ay = ty;
                        double iz = prefactj*dz;
                        double yz = iz - cs[i].z;
                        double tz = particles[i].az + yz;
                        cs[i].z = (tz - particles[i].az) - yz;
                        particles[i].az = tz;
                    }
                }
            }
            // Testparticles
#pragma omp parallel for schedule(guided)
            for (int i=_N_active; i<_N_real; i++){
                for (int j=0; j<_N_active; j++){
                    if (_gravity_ignore_terms==1 && ((j==1 && i==0) || (i==1 && j==0))) continue;
                    if (_gravity_ignore_terms==2 && ((j==0 || i==0))) continue;
                    const double dx = particles[i].x - particles[j].x;
                    const double dy = particles[i].y - particles[j].y;
                    const double dz = particles[i].z - particles[j].z;
                    const double r2 = dx*dx + dy*dy + dz*dz + softening2;
                    const double r = sqrt(r2);
                    const double prefact = G/(r2*r);
                    const double prefactj = -prefact*particles[j].m;
                    // Kahan compensated summation (see above).
                    {
                        double ix = prefactj*dx;
                        double yx = ix - cs[i].x;
                        double tx = particles[i].ax + yx;
                        cs[i].x = (tx - particles[i].ax) - yx;
                        particles[i].ax = tx;
                        double iy = prefactj*dy;
                        double yy = iy - cs[i].y;
                        double ty = particles[i].ay + yy;
                        cs[i].y = (ty - particles[i].ay) - yy;
                        particles[i].ay = ty;
                        double iz = prefactj*dz;
                        double yz = iz - cs[i].z;
                        double tz = particles[i].az + yz;
                        cs[i].z = (tz - particles[i].az) - yz;
                        particles[i].az = tz;
                    }
                }
            }
            // Back-reaction of massive test particles onto active particles.
            if (_testparticle_type){
#pragma omp parallel for schedule(guided)
                for (int j=0; j<_N_active; j++){
                    for (int i=_N_active; i<_N_real; i++){
                        if (_gravity_ignore_terms==1 && ((j==1 && i==0) || (i==1 && j==0))) continue;
                        if (_gravity_ignore_terms==2 && ((j==0 || i==0))) continue;
                        const double dx = particles[i].x - particles[j].x;
                        const double dy = particles[i].y - particles[j].y;
                        const double dz = particles[i].z - particles[j].z;
                        const double r2 = dx*dx + dy*dy + dz*dz + softening2;
                        const double r = sqrt(r2);
                        const double prefact = G/(r2*r);
                        const double prefacti = prefact*particles[i].m;
                        // Kahan compensated summation for particle j.
                        {
                            double ix = prefacti*dx;
                            double yx = ix - cs[j].x;
                            double tx = particles[j].ax + yx;
                            cs[j].x = (tx - particles[j].ax) - yx;
                            particles[j].ax = tx;
                            double iy = prefacti*dy;
                            double yy = iy - cs[j].y;
                            double ty = particles[j].ay + yy;
                            cs[j].y = (ty - particles[j].ay) - yy;
                            particles[j].ay = ty;
                            double iz = prefacti*dz;
                            double yz = iz - cs[j].z;
                            double tz = particles[j].az + yz;
                            cs[j].z = (tz - particles[j].az) - yz;
                            particles[j].az = tz;
                        }
                    }
                }
            }
#else // OPENMP
            // Serial variant: exploit Newton's third law and visit each
            // pair (i,j), i<j, once, updating both particles.
            for (int i=0; i<_N_active; i++){
                for (int j=i+1; j<_N_active; j++){
                    if (_gravity_ignore_terms==1 && ((j==1 && i==0) || (i==1 && j==0))) continue;
                    if (_gravity_ignore_terms==2 && ((j==0 || i==0))) continue;
                    const double dx = particles[i].x - particles[j].x;
                    const double dy = particles[i].y - particles[j].y;
                    const double dz = particles[i].z - particles[j].z;
                    const double r2 = dx*dx + dy*dy + dz*dz + softening2;
                    const double r = sqrt(r2);
                    const double prefact = G/(r2*r);
                    const double prefacti = prefact*particles[i].m;
                    const double prefactj = -prefact*particles[j].m;
                    // Kahan compensated summation for particle i.
                    {
                        double ix = prefactj*dx;
                        double yx = ix - cs[i].x;
                        double tx = particles[i].ax + yx;
                        cs[i].x = (tx - particles[i].ax) - yx;
                        particles[i].ax = tx;
                        double iy = prefactj*dy;
                        double yy = iy - cs[i].y;
                        double ty = particles[i].ay + yy;
                        cs[i].y = (ty - particles[i].ay) - yy;
                        particles[i].ay = ty;
                        double iz = prefactj*dz;
                        double yz = iz - cs[i].z;
                        double tz = particles[i].az + yz;
                        cs[i].z = (tz - particles[i].az) - yz;
                        particles[i].az = tz;
                    }
                    // Kahan compensated summation for particle j (opposite force).
                    {
                        double ix = prefacti*dx;
                        double yx = ix - cs[j].x;
                        double tx = particles[j].ax + yx;
                        cs[j].x = (tx - particles[j].ax) - yx;
                        particles[j].ax = tx;
                        double iy = prefacti*dy;
                        double yy = iy - cs[j].y;
                        double ty = particles[j].ay + yy;
                        cs[j].y = (ty - particles[j].ay) - yy;
                        particles[j].ay = ty;
                        double iz = prefacti*dz;
                        double yz = iz - cs[j].z;
                        double tz = particles[j].az + yz;
                        cs[j].z = (tz - particles[j].az) - yz;
                        particles[j].az = tz;
                    }
                }
            }
            // Testparticles
            for (int i=_N_active; i<_N_real; i++){
                for (int j=0; j<_N_active; j++){
                    if (_gravity_ignore_terms==1 && ((j==1 && i==0) || (i==1 && j==0))) continue;
                    if (_gravity_ignore_terms==2 && ((j==0 || i==0))) continue;
                    const double dx = particles[i].x - particles[j].x;
                    const double dy = particles[i].y - particles[j].y;
                    const double dz = particles[i].z - particles[j].z;
                    const double r2 = dx*dx + dy*dy + dz*dz + softening2;
                    const double r = sqrt(r2);
                    const double prefact = G/(r2*r);
                    const double prefactj = -prefact*particles[j].m;
                    // Kahan compensated summation for test particle i.
                    {
                        double ix = prefactj*dx;
                        double yx = ix - cs[i].x;
                        double tx = particles[i].ax + yx;
                        cs[i].x = (tx - particles[i].ax) - yx;
                        particles[i].ax = tx;
                        double iy = prefactj*dy;
                        double yy = iy - cs[i].y;
                        double ty = particles[i].ay + yy;
                        cs[i].y = (ty - particles[i].ay) - yy;
                        particles[i].ay = ty;
                        double iz = prefactj*dz;
                        double yz = iz - cs[i].z;
                        double tz = particles[i].az + yz;
                        cs[i].z = (tz - particles[i].az) - yz;
                        particles[i].az = tz;
                    }
                    // Massive test particles also pull back on active particle j.
                    if (_testparticle_type){
                        const double prefacti = prefact*particles[i].m;
                        {
                            double ix = prefacti*dx;
                            double yx = ix - cs[j].x;
                            double tx = particles[j].ax + yx;
                            cs[j].x = (tx - particles[j].ax) - yx;
                            particles[j].ax = tx;
                            double iy = prefacti*dy;
                            double yy = iy - cs[j].y;
                            double ty = particles[j].ay + yy;
                            cs[j].y = (ty - particles[j].ay) - yy;
                            particles[j].ay = ty;
                            double iz = prefacti*dz;
                            double yz = iz - cs[j].z;
                            double tz = particles[j].az + yz;
                            cs[j].z = (tz - particles[j].az) - yz;
                            particles[j].az = tz;
                        }
                    }
                }
            }
#endif // OPENMP
        }
        break;
        case REB_GRAVITY_TREE:
        {
            // Reset accelerations, then let the tree walk accumulate forces.
#pragma omp parallel for schedule(guided)
            for (int i=0; i<N; i++){
                particles[i].ax = 0;
                particles[i].ay = 0;
                particles[i].az = 0;
            }
            // Summing over all Ghost Boxes
            for (int gbx=-r->nghostx; gbx<=r->nghostx; gbx++){
            for (int gby=-r->nghosty; gby<=r->nghosty; gby++){
            for (int gbz=-r->nghostz; gbz<=r->nghostz; gbz++){
                // Summing over all particle pairs
#pragma omp parallel for schedule(guided)
                for (int i=0; i<N; i++){
                    struct reb_ghostbox gb = reb_boundary_get_ghostbox(r, gbx,gby,gbz);
                    // Precalculated shifted position
                    gb.shiftx += particles[i].x;
                    gb.shifty += particles[i].y;
                    gb.shiftz += particles[i].z;
                    reb_calculate_acceleration_for_particle(r, i, gb);
                }
            }
            }
            }
        }
        break;
        default:
            reb_exit("Gravity calculation not yet implemented.");
    }
}
/**
 * @brief Computes the accelerations of all variational particles
 *        (first and second order) for the current gravity solver.
 * @param r REBOUND simulation to consider.
 *
 * For each variational configuration in r->var_config, this evaluates the
 * variational equations of the direct N-body force: first-order terms are
 * the Jacobian of the acceleration contracted with the displacement deltas;
 * second-order terms additionally include the delta^(1) x delta^(1) cross
 * terms built from the two associated first-order configurations.
 *
 * BUGFIX: the REB_GRAVITY_COMPENSATED case previously ended with `break;`.
 * That left the variational accelerations uncomputed entirely (the case only
 * zeroes the compensation terms). The computation for the compensated solver
 * is identical to REB_GRAVITY_BASIC, so the case must fall through.
 */
void reb_calculate_acceleration_var(struct reb_simulation* r){
    struct reb_particle* const particles = r->particles;
    const double G = r->G;
    // See reb_calculate_acceleration() for the meaning of the ignore flags.
    const unsigned int _gravity_ignore_terms = r->gravity_ignore_terms;
    const int N = r->N;
    const int _N_real = N - r->N_var;
    switch (r->gravity){
        case REB_GRAVITY_NONE: // Do nothing.
        break;
        case REB_GRAVITY_COMPENSATED:
        {
            // Zero the compensation terms of the variational particles,
            // then compute their accelerations exactly like the BASIC case.
            struct reb_vec3d* restrict const cs = r->gravity_cs;
#pragma omp parallel for schedule(guided)
            for (int i=_N_real; i<N; i++){
                cs[i].x = 0.;
                cs[i].y = 0.;
                cs[i].z = 0.;
            }
        }
        // Fallthrough is on purpose.
        case REB_GRAVITY_BASIC:
            for (int v=0;v<r->var_config_N;v++){
                struct reb_variational_configuration const vc = r->var_config[v];
                if (vc.order==1){
                    //////////////////
                    /// 1st order  ///
                    //////////////////
                    struct reb_particle* const particles_var1 = particles + vc.index;
                    if (vc.testparticle<0){
                        // Variational particles shadow every real particle.
                        for (int i=0; i<_N_real; i++){
                            particles_var1[i].ax = 0.;
                            particles_var1[i].ay = 0.;
                            particles_var1[i].az = 0.;
                        }
                        for (int i=0; i<_N_real; i++){
                        for (int j=i+1; j<_N_real; j++){
                            if (_gravity_ignore_terms==1 && ((j==1 && i==0) || (i==1 && j==0))) continue;
                            if (_gravity_ignore_terms==2 && ((j==0 || i==0))) continue;
                            const double dx = particles[i].x - particles[j].x;
                            const double dy = particles[i].y - particles[j].y;
                            const double dz = particles[i].z - particles[j].z;
                            const double r2 = dx*dx + dy*dy + dz*dz;
                            const double _r = sqrt(r2);
                            const double r3inv = 1./(r2*_r);
                            const double r5inv = 3.*r3inv/r2;    // includes the factor 3 of the Jacobian
                            const double ddx = particles_var1[i].x - particles_var1[j].x;
                            const double ddy = particles_var1[i].y - particles_var1[j].y;
                            const double ddz = particles_var1[i].z - particles_var1[j].z;
                            const double Gmi = G * particles[i].m;
                            const double Gmj = G * particles[j].m;
                            // Variational equations: Jacobian d(a)/d(x) of the
                            // two-body acceleration, contracted with delta.
                            const double dxdx = dx*dx*r5inv - r3inv;
                            const double dydy = dy*dy*r5inv - r3inv;
                            const double dzdz = dz*dz*r5inv - r3inv;
                            const double dxdy = dx*dy*r5inv;
                            const double dxdz = dx*dz*r5inv;
                            const double dydz = dy*dz*r5inv;
                            const double dax = ddx * dxdx + ddy * dxdy + ddz * dxdz;
                            const double day = ddx * dxdy + ddy * dydy + ddz * dydz;
                            const double daz = ddx * dxdz + ddy * dydz + ddz * dzdz;
                            // Variational mass contributions (delta m terms).
                            const double dGmi = G*particles_var1[i].m;
                            const double dGmj = G*particles_var1[j].m;
                            particles_var1[i].ax += Gmj * dax - dGmj*r3inv*dx;
                            particles_var1[i].ay += Gmj * day - dGmj*r3inv*dy;
                            particles_var1[i].az += Gmj * daz - dGmj*r3inv*dz;
                            particles_var1[j].ax -= Gmi * dax - dGmi*r3inv*dx;
                            particles_var1[j].ay -= Gmi * day - dGmi*r3inv*dy;
                            particles_var1[j].az -= Gmi * daz - dGmi*r3inv*dz;
                        }
                        }
                    }else{ // Single variational particle shadowing one test particle.
                        int i = vc.testparticle;
                        particles_var1[0].ax = 0.;
                        particles_var1[0].ay = 0.;
                        particles_var1[0].az = 0.;
                        for (int j=0; j<_N_real; j++){
                            if (i==j) continue;
                            if (_gravity_ignore_terms==1 && ((j==1 && i==0) || (i==1 && j==0))) continue;
                            if (_gravity_ignore_terms==2 && ((j==0 || i==0))) continue;
                            const double dx = particles[i].x - particles[j].x;
                            const double dy = particles[i].y - particles[j].y;
                            const double dz = particles[i].z - particles[j].z;
                            const double r2 = dx*dx + dy*dy + dz*dz;
                            const double _r = sqrt(r2);
                            const double r3inv = 1./(r2*_r);
                            const double r5inv = 3.*r3inv/r2;
                            const double ddx = particles_var1[0].x;
                            const double ddy = particles_var1[0].y;
                            const double ddz = particles_var1[0].z;
                            const double Gmj = G * particles[j].m;
                            // Variational equations
                            const double dxdx = dx*dx*r5inv - r3inv;
                            const double dydy = dy*dy*r5inv - r3inv;
                            const double dzdz = dz*dz*r5inv - r3inv;
                            const double dxdy = dx*dy*r5inv;
                            const double dxdz = dx*dz*r5inv;
                            const double dydz = dy*dz*r5inv;
                            const double dax = ddx * dxdx + ddy * dxdy + ddz * dxdz;
                            const double day = ddx * dxdy + ddy * dydy + ddz * dydz;
                            const double daz = ddx * dxdz + ddy * dydz + ddz * dzdz;
                            // No variational mass contributions for test particles!
                            particles_var1[0].ax += Gmj * dax;
                            particles_var1[0].ay += Gmj * day;
                            particles_var1[0].az += Gmj * daz;
                        }
                    }
                }else if (vc.order==2){
                    //////////////////
                    /// 2nd order  ///
                    //////////////////
                    struct reb_particle* const particles_var2 = particles + vc.index;
                    // Second-order deltas couple to the two first-order sets a and b.
                    struct reb_particle* const particles_var1a = particles + vc.index_1st_order_a;
                    struct reb_particle* const particles_var1b = particles + vc.index_1st_order_b;
                    if (vc.testparticle<0){
                        for (int i=0; i<_N_real; i++){
                            particles_var2[i].ax = 0.;
                            particles_var2[i].ay = 0.;
                            particles_var2[i].az = 0.;
                        }
                        for (int i=0; i<_N_real; i++){
                        for (int j=i+1; j<_N_real; j++){
                            // TODO: Need to implement WH skipping
                            //if (_gravity_ignore_terms==1 && ((j==1 && i==0) || (i==1 && j==0))) continue;
                            //if (_gravity_ignore_terms==2 && ((j==0 || i==0))) continue;
                            const double dx = particles[i].x - particles[j].x;
                            const double dy = particles[i].y - particles[j].y;
                            const double dz = particles[i].z - particles[j].z;
                            const double r2 = dx*dx + dy*dy + dz*dz;
                            const double r = sqrt(r2);
                            const double r3inv = 1./(r2*r);
                            const double r5inv = r3inv/r2;
                            const double r7inv = r5inv/r2;
                            const double ddx = particles_var2[i].x - particles_var2[j].x;
                            const double ddy = particles_var2[i].y - particles_var2[j].y;
                            const double ddz = particles_var2[i].z - particles_var2[j].z;
                            const double Gmi = G * particles[i].m;
                            const double Gmj = G * particles[j].m;
                            const double ddGmi = G*particles_var2[i].m;
                            const double ddGmj = G*particles_var2[j].m;
                            // Variational equations
                            // delta^(2) terms (same Jacobian as first order)
                            double dax = ddx * ( 3.*dx*dx*r5inv - r3inv )
                                       + ddy * ( 3.*dx*dy*r5inv )
                                       + ddz * ( 3.*dx*dz*r5inv );
                            double day = ddx * ( 3.*dy*dx*r5inv )
                                       + ddy * ( 3.*dy*dy*r5inv - r3inv )
                                       + ddz * ( 3.*dy*dz*r5inv );
                            double daz = ddx * ( 3.*dz*dx*r5inv )
                                       + ddy * ( 3.*dz*dy*r5inv )
                                       + ddz * ( 3.*dz*dz*r5inv - r3inv );
                            // delta^(1) delta^(1) terms (second derivative of the force)
                            const double dk1dx = particles_var1a[i].x - particles_var1a[j].x;
                            const double dk1dy = particles_var1a[i].y - particles_var1a[j].y;
                            const double dk1dz = particles_var1a[i].z - particles_var1a[j].z;
                            const double dk2dx = particles_var1b[i].x - particles_var1b[j].x;
                            const double dk2dy = particles_var1b[i].y - particles_var1b[j].y;
                            const double dk2dz = particles_var1b[i].z - particles_var1b[j].z;
                            const double rdk1 = dx*dk1dx + dy*dk1dy + dz*dk1dz;
                            const double rdk2 = dx*dk2dx + dy*dk2dy + dz*dk2dz;
                            const double dk1dk2 = dk1dx*dk2dx + dk1dy*dk2dy + dk1dz*dk2dz;
                            dax += 3.* r5inv * dk2dx * rdk1
                                 + 3.* r5inv * dk1dx * rdk2
                                 + 3.* r5inv * dx * dk1dk2
                                 - 15. * dx * r7inv * rdk1 * rdk2;
                            day += 3.* r5inv * dk2dy * rdk1
                                 + 3.* r5inv * dk1dy * rdk2
                                 + 3.* r5inv * dy * dk1dk2
                                 - 15. * dy * r7inv * rdk1 * rdk2;
                            daz += 3.* r5inv * dk2dz * rdk1
                                 + 3.* r5inv * dk1dz * rdk2
                                 + 3.* r5inv * dz * dk1dk2
                                 - 15. * dz * r7inv * rdk1 * rdk2;
                            // First-order variational masses couple to first-order deltas.
                            const double dk1Gmi = G * particles_var1a[i].m;
                            const double dk1Gmj = G * particles_var1a[j].m;
                            const double dk2Gmi = G * particles_var1b[i].m;
                            const double dk2Gmj = G * particles_var1b[j].m;
                            particles_var2[i].ax += Gmj * dax
                                - ddGmj*r3inv*dx
                                - dk2Gmj*r3inv*dk1dx + 3.*dk2Gmj*r5inv*dx*rdk1
                                - dk1Gmj*r3inv*dk2dx + 3.*dk1Gmj*r5inv*dx*rdk2;
                            particles_var2[i].ay += Gmj * day
                                - ddGmj*r3inv*dy
                                - dk2Gmj*r3inv*dk1dy + 3.*dk2Gmj*r5inv*dy*rdk1
                                - dk1Gmj*r3inv*dk2dy + 3.*dk1Gmj*r5inv*dy*rdk2;
                            particles_var2[i].az += Gmj * daz
                                - ddGmj*r3inv*dz
                                - dk2Gmj*r3inv*dk1dz + 3.*dk2Gmj*r5inv*dz*rdk1
                                - dk1Gmj*r3inv*dk2dz + 3.*dk1Gmj*r5inv*dz*rdk2;
                            particles_var2[j].ax -= Gmi * dax
                                - ddGmi*r3inv*dx
                                - dk2Gmi*r3inv*dk1dx + 3.*dk2Gmi*r5inv*dx*rdk1
                                - dk1Gmi*r3inv*dk2dx + 3.*dk1Gmi*r5inv*dx*rdk2;
                            particles_var2[j].ay -= Gmi * day
                                - ddGmi*r3inv*dy
                                - dk2Gmi*r3inv*dk1dy + 3.*dk2Gmi*r5inv*dy*rdk1
                                - dk1Gmi*r3inv*dk2dy + 3.*dk1Gmi*r5inv*dy*rdk2;
                            particles_var2[j].az -= Gmi * daz
                                - ddGmi*r3inv*dz
                                - dk2Gmi*r3inv*dk1dz + 3.*dk2Gmi*r5inv*dz*rdk1
                                - dk1Gmi*r3inv*dk2dz + 3.*dk1Gmi*r5inv*dz*rdk2;
                        }
                        }
                    }else{ // Second-order variational particle shadowing one test particle.
                        int i = vc.testparticle;
                        particles_var2[0].ax = 0.;
                        particles_var2[0].ay = 0.;
                        particles_var2[0].az = 0.;
                        for (int j=0; j<_N_real; j++){
                            if (i==j) continue;
                            // TODO: Need to implement WH skipping
                            //if (_gravity_ignore_terms==1 && ((j==1 && i==0) || (i==1 && j==0))) continue;
                            //if (_gravity_ignore_terms==2 && ((j==0 || i==0))) continue;
                            const double dx = particles[i].x - particles[j].x;
                            const double dy = particles[i].y - particles[j].y;
                            const double dz = particles[i].z - particles[j].z;
                            const double r2 = dx*dx + dy*dy + dz*dz;
                            const double r = sqrt(r2);
                            const double r3inv = 1./(r2*r);
                            const double r5inv = r3inv/r2;
                            const double r7inv = r5inv/r2;
                            const double ddx = particles_var2[0].x;
                            const double ddy = particles_var2[0].y;
                            const double ddz = particles_var2[0].z;
                            const double Gmj = G * particles[j].m;
                            // Variational equations
                            // delta^(2) terms
                            double dax = ddx * ( 3.*dx*dx*r5inv - r3inv )
                                       + ddy * ( 3.*dx*dy*r5inv )
                                       + ddz * ( 3.*dx*dz*r5inv );
                            double day = ddx * ( 3.*dy*dx*r5inv )
                                       + ddy * ( 3.*dy*dy*r5inv - r3inv )
                                       + ddz * ( 3.*dy*dz*r5inv );
                            double daz = ddx * ( 3.*dz*dx*r5inv )
                                       + ddy * ( 3.*dz*dy*r5inv )
                                       + ddz * ( 3.*dz*dz*r5inv - r3inv );
                            // delta^(1) delta^(1) terms
                            const double dk1dx = particles_var1a[0].x;
                            const double dk1dy = particles_var1a[0].y;
                            const double dk1dz = particles_var1a[0].z;
                            const double dk2dx = particles_var1b[0].x;
                            const double dk2dy = particles_var1b[0].y;
                            const double dk2dz = particles_var1b[0].z;
                            const double rdk1 = dx*dk1dx + dy*dk1dy + dz*dk1dz;
                            const double rdk2 = dx*dk2dx + dy*dk2dy + dz*dk2dz;
                            const double dk1dk2 = dk1dx*dk2dx + dk1dy*dk2dy + dk1dz*dk2dz;
                            dax += 3.* r5inv * dk2dx * rdk1
                                 + 3.* r5inv * dk1dx * rdk2
                                 + 3.* r5inv * dx * dk1dk2
                                 - 15. * dx * r7inv * rdk1 * rdk2;
                            day += 3.* r5inv * dk2dy * rdk1
                                 + 3.* r5inv * dk1dy * rdk2
                                 + 3.* r5inv * dy * dk1dk2
                                 - 15. * dy * r7inv * rdk1 * rdk2;
                            daz += 3.* r5inv * dk2dz * rdk1
                                 + 3.* r5inv * dk1dz * rdk2
                                 + 3.* r5inv * dz * dk1dk2
                                 - 15. * dz * r7inv * rdk1 * rdk2;
                            // No variational mass contributions for test particles!
                            particles_var2[0].ax += Gmj * dax;
                            particles_var2[0].ay += Gmj * day;
                            particles_var2[0].az += Gmj * daz;
                        }
                    }
                }
            }
        break;
        default:
            reb_exit("Variational gravity calculation not yet implemented.");
    }
}
// Helper routines for REB_GRAVITY_TREE
/**
* @brief The function calls itself recursively using cell breaking criterion to check whether it can use center of mass (and mass quadrupole tensor) to calculate forces.
* Calculate the acceleration for a particle from a given cell and all its daughter cells.
*
* @param r REBOUND simulation to consider
* @param pt Index of the particle the force is calculated for.
* @param node Pointer to the cell the force is calculated from.
* @param gb Ghostbox plus position of the particle (precalculated).
*/
static void reb_calculate_acceleration_for_particle_from_cell(const struct reb_simulation* const r, const int pt, const struct reb_treecell *node, const struct reb_ghostbox gb);
// Accumulates the gravitational acceleration on particle pt by walking
// every root cell of the tree; empty root slots are skipped.
static void reb_calculate_acceleration_for_particle(const struct reb_simulation* const r, const int pt, const struct reb_ghostbox gb) {
    const int n_roots = r->root_n;
    for (int root=0; root<n_roots; root++){
        struct reb_treecell* const cell = r->tree_root[root];
        if (cell==NULL) continue;   // No particles in this root box.
        reb_calculate_acceleration_for_particle_from_cell(r, pt, cell, gb);
    }
}
// Recursive tree walk: adds the force from `node` (and, if the cell has to
// be opened, its daughters) onto particle `pt`. `gb` holds the particle's
// ghost-shifted position precomputed by the caller.
static void reb_calculate_acceleration_for_particle_from_cell(const struct reb_simulation* r, const int pt, const struct reb_treecell *node, const struct reb_ghostbox gb) {
    const double G = r->G;
    const double softening2 = r->softening*r->softening;
    struct reb_particle* const particles = r->particles;
    // Vector from the cell's mass coordinates to the particle's shifted position.
    const double dx = gb.shiftx - node->mx;
    const double dy = gb.shifty - node->my;
    const double dz = gb.shiftz - node->mz;
    const double r2 = dx*dx + dy*dy + dz*dz;
    if ( node->pt < 0 ) { // Not a leaf
        // Opening criterion: if the cell width w is too large relative to
        // the distance (w^2 > opening_angle2 * r^2), recurse into daughters.
        if ( node->w*node->w > r->opening_angle2*r2 ){
            for (int o=0; o<8; o++) {
                if (node->oct[o] != NULL) {
                    reb_calculate_acceleration_for_particle_from_cell(r, pt, node->oct[o], gb);
                }
            }
        } else {
            // Cell accepted: use its total mass (monopole term) ...
            double _r = sqrt(r2 + softening2);
            double prefact = -G/(_r*_r*_r)*node->m;
#ifdef QUADRUPOLE
            // ... plus the quadrupole tensor contribution when compiled in.
            double qprefact = G/(_r*_r*_r*_r*_r);
            particles[pt].ax += qprefact*(dx*node->mxx + dy*node->mxy + dz*node->mxz);
            particles[pt].ay += qprefact*(dx*node->mxy + dy*node->myy + dz*node->myz);
            particles[pt].az += qprefact*(dx*node->mxz + dy*node->myz + dz*node->mzz);
            double mrr = dx*dx*node->mxx + dy*dy*node->myy + dz*dz*node->mzz
                       + 2.*dx*dy*node->mxy + 2.*dx*dz*node->mxz + 2.*dy*dz*node->myz;
            qprefact *= -5.0/(2.0*_r*_r)*mrr;
            particles[pt].ax += (qprefact + prefact) * dx;
            particles[pt].ay += (qprefact + prefact) * dy;
            particles[pt].az += (qprefact + prefact) * dz;
#else
            particles[pt].ax += prefact*dx;
            particles[pt].ay += prefact*dy;
            particles[pt].az += prefact*dz;
#endif
        }
    } else { // It's a leaf node
        if (node->pt == pt) return; // A particle exerts no force on itself.
        double _r = sqrt(r2 + softening2);
        double prefact = -G/(_r*_r*_r)*node->m;
        particles[pt].ax += prefact*dx;
        particles[pt].ay += prefact*dy;
        particles[pt].az += prefact*dz;
    }
}
|
GenerateICs.c | #include <math.h>
#include <unistd.h>
#include <stdio.h>
#include <stdbool.h>
#include <ctype.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
//#include <pthread.h>
#include <omp.h>
#include <complex.h>
#include <fftw3.h>
#include <gsl/gsl_interp.h>
#include <gsl/gsl_integration.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#include <gsl/gsl_roots.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_spline.h>
#include "21cmFAST.h"
#include "exceptions.h"
#include "logger.h"
#include "Constants.h"
#include "Globals.h"
#include "indexing.c"
#include "UsefulFunctions.c"
#include "ps.c"
#include "dft.c"
#include "PerturbField.c"
#include "bubble_helper_progs.c"
#include "elec_interp.c"
#include "heating_helper_progs.c"
#include "recombinations.c"
#include "IonisationBox.c"
#include "SpinTemperatureBox.c"
#include "BrightnessTemperatureBox.c"
#include "FindHaloes.c"
#include "PerturbHaloField.c"
/* Enforce the Hermitian (conjugate) symmetry F(k) = conj(F(-k)) on the k-space
 * box so that its inverse FFT is purely real.  Only the self-conjugate planes
 * k_z = 0 and k_z = MIDDLE need fixing (the other k_z values are independent
 * modes in the r2c half-complex layout).
 * NOTE(review): cosmo_params is accepted but never read here.
 * Assumes C_INDEX/MIDDLE come from indexing.c and address the DIM^3 half-complex
 * grid — TODO confirm against indexing.c. */
void adj_complex_conj(fftwf_complex *HIRES_box, struct UserParams *user_params, struct CosmoParams *cosmo_params){
    /***** Adjust the complex conjugate relations for a real array *****/
    int i, j, k;
    // corners: these 8 modes are their own conjugates, so they must be real
    // (and the DC mode is explicitly zeroed — zero-mean field).
    HIRES_box[C_INDEX(0,0,0)] = 0;
    HIRES_box[C_INDEX(0,0,MIDDLE)] = crealf(HIRES_box[C_INDEX(0,0,MIDDLE)]);
    HIRES_box[C_INDEX(0,MIDDLE,0)] = crealf(HIRES_box[C_INDEX(0,MIDDLE,0)]);
    HIRES_box[C_INDEX(0,MIDDLE,MIDDLE)] = crealf(HIRES_box[C_INDEX(0,MIDDLE,MIDDLE)]);
    HIRES_box[C_INDEX(MIDDLE,0,0)] = crealf(HIRES_box[C_INDEX(MIDDLE,0,0)]);
    HIRES_box[C_INDEX(MIDDLE,0,MIDDLE)] = crealf(HIRES_box[C_INDEX(MIDDLE,0,MIDDLE)]);
    HIRES_box[C_INDEX(MIDDLE,MIDDLE,0)] = crealf(HIRES_box[C_INDEX(MIDDLE,MIDDLE,0)]);
    HIRES_box[C_INDEX(MIDDLE,MIDDLE,MIDDLE)] = crealf(HIRES_box[C_INDEX(MIDDLE,MIDDLE,MIDDLE)]);
    // do entire i except corners
    // Threads touch disjoint i rows, so there is no write overlap.
#pragma omp parallel shared(HIRES_box) private(i,j,k) num_threads(user_params->N_THREADS)
    {
#pragma omp for
        for (i=1; i<MIDDLE; i++){
            // just j corners: F(i, j0, k0) = conj(F(DIM-i, j0, k0)) on the
            // self-conjugate j,k planes (j0,k0 in {0, MIDDLE})
            for (j=0; j<=MIDDLE; j+=MIDDLE){
                for (k=0; k<=MIDDLE; k+=MIDDLE){
                    HIRES_box[C_INDEX(i,j,k)] = conjf(HIRES_box[C_INDEX((user_params->DIM)-i,j,k)]);
                }
            }
            // all of j: pair (i, j) with (DIM-i, DIM-j) and (i, DIM-j) with (DIM-i, j)
            for (j=1; j<MIDDLE; j++){
                for (k=0; k<=MIDDLE; k+=MIDDLE){
                    HIRES_box[C_INDEX(i,j,k)] = conjf(HIRES_box[C_INDEX((user_params->DIM)-i,(user_params->DIM)-j,k)]);
                    HIRES_box[C_INDEX(i,(user_params->DIM)-j,k)] = conjf(HIRES_box[C_INDEX((user_params->DIM)-i,j,k)]);
                }
            }
        } // end loop over i
    }
    // now the i corners: i in {0, MIDDLE} are self-conjugate in i, so pair in j only
#pragma omp parallel shared(HIRES_box) private(i,j,k) num_threads(user_params->N_THREADS)
    {
#pragma omp for
        for (i=0; i<=MIDDLE; i+=MIDDLE){
            for (j=1; j<MIDDLE; j++){
                for (k=0; k<=MIDDLE; k+=MIDDLE){
                    HIRES_box[C_INDEX(i,j,k)] = conjf(HIRES_box[C_INDEX(i,(user_params->DIM)-j,k)]);
                }
            }
        } // end loop over remaining j
    }
}
// Re-write of init.c for original 21cmFAST
/* Build the cosmological initial conditions for 21cmFAST:
 *   1. draw a Gaussian random field in k-space with the linear power spectrum,
 *   2. enforce Hermitian symmetry and FFT to the real-space hires density,
 *   3. optionally smooth/sample to the low-res grid,
 *   4. optionally build the relative-velocity (vcb) magnitude box,
 *   5. build Zel'dovich velocity fields and, if enabled, 2LPT corrections.
 * Returns 0 on success, or the caught exception status on failure.
 * Heavily macro-dependent: C_INDEX/R_INDEX/R_FFT_INDEX/HII_R_INDEX, MIDDLE,
 * DELTA_K, VOLUME, KSPACE_NUM_PIXELS etc. come from indexing.c / Constants.h. */
int ComputeInitialConditions(
    unsigned long long random_seed, struct UserParams *user_params,
    struct CosmoParams *cosmo_params, struct InitialConditions *boxes
){
    // Generates the initial conditions: gaussian random density field (user_params->DIM^3) as well as the equal or lower resolution velocity fields, and smoothed density field (user_params->HII_DIM^3).
    //
    // Author: Andrei Mesinger
    // Date: 9/29/06
    int status;
    Try{ // This Try wraps the entire function so we don't indent.
    // Makes the parameter structs visible to a variety of functions/macros
    // Do each time to avoid Python garbage collection issues
    Broadcast_struct_global_PS(user_params,cosmo_params);
    Broadcast_struct_global_UF(user_params,cosmo_params);
    unsigned long long ct;
    int n_x, n_y, n_z, i, j, k, ii, thread_num, dimension;
    float k_x, k_y, k_z, k_mag, p, a, b, k_sq;
    double pixel_deltax;
    float p_vcb, vcb_i;
    float f_pixel_factor;
    gsl_rng * r[user_params->N_THREADS];
    gsl_rng * rseed = gsl_rng_alloc(gsl_rng_mt19937); // An RNG for generating seeds for multithreading
    gsl_rng_set(rseed, random_seed);
    omp_set_num_threads(user_params->N_THREADS);
    // dimension is the side length of the grid the velocity fields are sampled on.
    // NOTE(review): no default case — if PERTURB_ON_HIGH_RES were ever neither 0
    // nor 1, `dimension` would stay uninitialized.  Presumably it is a strict
    // boolean upstream; verify against the Python wrapper.
    switch(user_params->PERTURB_ON_HIGH_RES) {
        case 0:
            dimension = user_params->HII_DIM;
            break;
        case 1:
            dimension = user_params->DIM;
            break;
    }
    // ************  INITIALIZATION  ********************** //
    unsigned int seeds[user_params->N_THREADS];
    // For multithreading, seeds for the RNGs are generated from an initial RNG (based on the input random_seed) and then shuffled (Author: Fred Davies)
    // NOTE(review): this temporary is INT_MAX/16 unsigned ints (~0.5 GB) and the
    // malloc return value is not checked before use.
    int num_int = INT_MAX/16;
    unsigned int *many_ints = (unsigned int *)malloc((size_t)(num_int*sizeof(unsigned int))); // Some large number of possible integers
    for (i=0; i<num_int; i++) {
        many_ints[i] = i;
    }
    gsl_ran_choose(rseed, seeds, user_params->N_THREADS, many_ints, num_int, sizeof(unsigned int)); // Populate the seeds array from the large list of integers
    gsl_ran_shuffle(rseed, seeds, user_params->N_THREADS, sizeof(unsigned int)); // Shuffle the randomly selected integers
    int checker;
    checker = 0;
    // seed the random number generators
    // Cycle through five different GSL generator types, one per thread.
    for (thread_num = 0; thread_num < user_params->N_THREADS; thread_num++){
        switch (checker){
            case 0:
                r[thread_num] = gsl_rng_alloc(gsl_rng_mt19937);
                gsl_rng_set(r[thread_num], seeds[thread_num]);
                break;
            case 1:
                r[thread_num] = gsl_rng_alloc(gsl_rng_gfsr4);
                gsl_rng_set(r[thread_num], seeds[thread_num]);
                break;
            case 2:
                r[thread_num] = gsl_rng_alloc(gsl_rng_cmrg);
                gsl_rng_set(r[thread_num], seeds[thread_num]);
                break;
            case 3:
                r[thread_num] = gsl_rng_alloc(gsl_rng_mrg);
                gsl_rng_set(r[thread_num], seeds[thread_num]);
                break;
            case 4:
                r[thread_num] = gsl_rng_alloc(gsl_rng_taus2);
                gsl_rng_set(r[thread_num], seeds[thread_num]);
                break;
        } // end switch
        checker += 1;
        if(checker==5) {
            checker = 0;
        }
    }
    free(many_ints);
    // allocate array for the k-space and real-space boxes
    fftwf_complex *HIRES_box = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
    fftwf_complex *HIRES_box_saved = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
    // allocate array for the k-space and real-space boxes for vcb
    fftwf_complex *HIRES_box_vcb_saved;
    // HIRES_box_vcb_saved may be needed if FFTW_Wisdom doesn't exist -- currently unused
    // but I am not going to allocate it until I am certain I needed it.
    // find factor of HII pixel size / deltax pixel size
    f_pixel_factor = user_params->DIM/(float)user_params->HII_DIM;
    // ************  END INITIALIZATION ****************** //
    LOG_DEBUG("Finished initialization.");
    // ************ CREATE K-SPACE GAUSSIAN RANDOM FIELD *********** //
    init_ps();
    // Fill each independent k-mode (k_z <= MIDDLE half-space) with a complex
    // Gaussian whose variance follows the linear power spectrum P(k).
#pragma omp parallel shared(HIRES_box,r) \
                    private(n_x,n_y,n_z,k_x,k_y,k_z,k_mag,p,a,b,p_vcb) num_threads(user_params->N_THREADS)
    {
#pragma omp for
        for (n_x=0; n_x<user_params->DIM; n_x++){
            // convert index to numerical value for this component of the k-mode: k = (2*pi/L) * n
            if (n_x>MIDDLE)
                k_x =(n_x-user_params->DIM) * DELTA_K;  // wrap around for FFT convention
            else
                k_x = n_x * DELTA_K;
            for (n_y=0; n_y<user_params->DIM; n_y++){
                // convert index to numerical value for this component of the k-mode: k = (2*pi/L) * n
                if (n_y>MIDDLE)
                    k_y =(n_y-user_params->DIM) * DELTA_K;
                else
                    k_y = n_y * DELTA_K;
                // since physical space field is real, only half contains independent modes
                for (n_z=0; n_z<=MIDDLE; n_z++){
                    // convert index to numerical value for this component of the k-mode: k = (2*pi/L) * n
                    k_z = n_z * DELTA_K;
                    // now get the power spectrum; remember, only the magnitude of k counts (due to issotropy)
                    // this could be used to speed-up later maybe
                    k_mag = sqrt(k_x*k_x + k_y*k_y + k_z*k_z);
                    p = power_in_k(k_mag);
                    // ok, now we can draw the values of the real and imaginary part
                    // of our k entry from a Gaussian distribution
                    // NO_RNG is the deterministic debug mode: fixed (1, -1) draws.
                    if(user_params->NO_RNG) {
                        a = 1.0;
                        b = -1.0;
                    }
                    else {
                        a = gsl_ran_ugaussian(r[omp_get_thread_num()]);
                        b = gsl_ran_ugaussian(r[omp_get_thread_num()]);
                    }
                    HIRES_box[C_INDEX(n_x, n_y, n_z)] = sqrt(VOLUME*p/2.0) * (a + b*I);
                }
            }
        }
    }
    LOG_DEBUG("Drawn random fields.");
    // *****  Adjust the complex conjugate relations for a real array  ***** //
    adj_complex_conj(HIRES_box,user_params,cosmo_params);
    // Keep a pristine k-space copy: HIRES_box is clobbered by every c2r FFT below.
    memcpy(HIRES_box_saved, HIRES_box, sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
    // FFT back to real space
    // NOTE(review): only this first FFT's return status is checked; the later
    // dft_c2r_cube/dft_r2c_cube calls ignore their return values.
    int stat = dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, HIRES_box);
    if(stat>0) Throw(stat);
    LOG_DEBUG("FFT'd hires boxes.");
    // Store the (FFT-normalised) high-resolution density field.
#pragma omp parallel shared(boxes,HIRES_box) private(i,j,k) num_threads(user_params->N_THREADS)
    {
#pragma omp for
        for (i=0; i<user_params->DIM; i++){
            for (j=0; j<user_params->DIM; j++){
                for (k=0; k<user_params->DIM; k++){
                    *((float *)boxes->hires_density + R_INDEX(i,j,k)) = *((float *)HIRES_box + R_FFT_INDEX(i,j,k))/VOLUME;
                }
            }
        }
    }
    // *** If required, let's also create a lower-resolution version of the density field *** //
    memcpy(HIRES_box, HIRES_box_saved, sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
    // Only filter if we are perturbing on the low-resolution grid
    if(!user_params->PERTURB_ON_HIGH_RES) {
        if (user_params->DIM != user_params->HII_DIM) {
            filter_box(HIRES_box, 0, 0, L_FACTOR*user_params->BOX_LEN/(user_params->HII_DIM+0.0));
        }
        // FFT back to real space
        dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, HIRES_box);
        // Renormalise the FFT'd box (sample the high-res box if we are perturbing on the low-res grid)
        // Nearest-grid-point sampling: hires index = round(lowres index * f_pixel_factor).
#pragma omp parallel shared(boxes,HIRES_box,f_pixel_factor) private(i,j,k) num_threads(user_params->N_THREADS)
        {
#pragma omp for
            for (i=0; i<user_params->HII_DIM; i++){
                for (j=0; j<user_params->HII_DIM; j++){
                    for (k=0; k<user_params->HII_DIM; k++){
                        boxes->lowres_density[HII_R_INDEX(i,j,k)] =
                            *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5),
                                                               (unsigned long long)(j*f_pixel_factor+0.5),
                                                               (unsigned long long)(k*f_pixel_factor+0.5)))/VOLUME;
                    }
                }
            }
        }
    }
    // ******* Relative Velocity part ******* //
    if(user_params->USE_RELATIVE_VELOCITIES){
        //JBM: We use the memory allocated to HIRES_box as it's free.
        // Build |v_cb| by accumulating the squares of the three components.
        // NOTE(review): the accumulation below assumes boxes->lowres_vcb starts
        // zeroed — presumably done by the caller/allocator; verify.
        for(ii=0;ii<3;ii++) {
            memcpy(HIRES_box, HIRES_box_saved, sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
            // Convert the density modes to one velocity component:
            // v_i(k) = i (k_i/|k|) sqrt(P_vcb/P) c delta(k)
#pragma omp parallel shared(HIRES_box,ii) private(n_x,n_y,n_z,k_x,k_y,k_z,k_mag,p,p_vcb) num_threads(user_params->N_THREADS)
            {
#pragma omp for
                for (n_x=0; n_x<user_params->DIM; n_x++){
                    if (n_x>MIDDLE)
                        k_x =(n_x-user_params->DIM) * DELTA_K;  // wrap around for FFT convention
                    else
                        k_x = n_x * DELTA_K;
                    for (n_y=0; n_y<user_params->DIM; n_y++){
                        if (n_y>MIDDLE)
                            k_y =(n_y-user_params->DIM) * DELTA_K;
                        else
                            k_y = n_y * DELTA_K;
                        for (n_z=0; n_z<=MIDDLE; n_z++){
                            k_z = n_z * DELTA_K;
                            k_mag = sqrt(k_x*k_x + k_y*k_y + k_z*k_z);
                            p = power_in_k(k_mag);
                            p_vcb = power_in_vcb(k_mag);
                            // now set the velocities
                            if ((n_x==0) && (n_y==0) && (n_z==0)){ // DC mode
                                HIRES_box[0] = 0;
                            }
                            else{
                                if(ii==0) {
                                    HIRES_box[C_INDEX(n_x,n_y,n_z)] *= I * k_x/k_mag * sqrt(p_vcb/p) * C_KMS;
                                }
                                if(ii==1) {
                                    HIRES_box[C_INDEX(n_x,n_y,n_z)] *= I * k_y/k_mag * sqrt(p_vcb/p) * C_KMS;
                                }
                                if(ii==2) {
                                    HIRES_box[C_INDEX(n_x,n_y,n_z)] *= I * k_z/k_mag * sqrt(p_vcb/p) * C_KMS;
                                }
                            }
                        }
                    }
                }
            }
            //we only care about the lowres vcb box, so we filter it directly.
            if (user_params->DIM != user_params->HII_DIM) {
                filter_box(HIRES_box, 0, 0, L_FACTOR*user_params->BOX_LEN/(user_params->HII_DIM+0.0));
            }
            //fft each velocity component back to real space
            dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, HIRES_box);
            // NOTE(review): this loop runs to `dimension` but writes through
            // HII_R_INDEX into lowres_vcb; if PERTURB_ON_HIGH_RES is set,
            // dimension==DIM > HII_DIM and this would index out of bounds —
            // presumably that combination is forbidden upstream; verify.
#pragma omp parallel shared(boxes,HIRES_box,f_pixel_factor,ii,dimension) private(i,j,k,vcb_i) num_threads(user_params->N_THREADS)
            {
#pragma omp for
                for (i=0; i<dimension; i++){
                    for (j=0; j<dimension; j++){
                        for (k=0; k<dimension; k++){
                            vcb_i = *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5),
                                                                       (unsigned long long)(j*f_pixel_factor+0.5),
                                                                       (unsigned long long)(k*f_pixel_factor+0.5)));
                            boxes->lowres_vcb[HII_R_INDEX(i,j,k)] += vcb_i*vcb_i;
                        }
                    }
                }
            }
        }
        //now we take the sqrt of that and normalize the FFT
        for (i=0; i<dimension; i++){
            for (j=0; j<dimension; j++){
                for (k=0; k<dimension; k++){
                    boxes->lowres_vcb[HII_R_INDEX(i,j,k)] = sqrt(boxes->lowres_vcb[HII_R_INDEX(i,j,k)])/VOLUME;
                }
            }
        }
    }
    LOG_DEBUG("Completed Relative velocities.");
    // ******* End of Relative Velocity part ******* //
    // Now look at the velocities
    // Zel'dovich displacement/velocity field: v_i(k) = i k_i delta(k)/k^2.
    for(ii=0;ii<3;ii++) {
        memcpy(HIRES_box, HIRES_box_saved, sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
        // Now let's set the velocity field/dD/dt (in comoving Mpc)
#pragma omp parallel shared(HIRES_box,ii) private(n_x,n_y,n_z,k_x,k_y,k_z,k_sq) num_threads(user_params->N_THREADS)
        {
#pragma omp for
            for (n_x=0; n_x<user_params->DIM; n_x++){
                if (n_x>MIDDLE)
                    k_x =(n_x-user_params->DIM) * DELTA_K;  // wrap around for FFT convention
                else
                    k_x = n_x * DELTA_K;
                for (n_y=0; n_y<user_params->DIM; n_y++){
                    if (n_y>MIDDLE)
                        k_y =(n_y-user_params->DIM) * DELTA_K;
                    else
                        k_y = n_y * DELTA_K;
                    for (n_z=0; n_z<=MIDDLE; n_z++){
                        k_z = n_z * DELTA_K;
                        k_sq = k_x*k_x + k_y*k_y + k_z*k_z;
                        // now set the velocities
                        if ((n_x==0) && (n_y==0) && (n_z==0)){ // DC mode
                            HIRES_box[0] = 0;
                        }
                        else{
                            // the 1/VOLUME here is the normalisation of the upcoming c2r FFT
                            if(ii==0) {
                                HIRES_box[C_INDEX(n_x,n_y,n_z)] *= k_x*I/k_sq/VOLUME;
                            }
                            if(ii==1) {
                                HIRES_box[C_INDEX(n_x,n_y,n_z)] *= k_y*I/k_sq/VOLUME;
                            }
                            if(ii==2) {
                                HIRES_box[C_INDEX(n_x,n_y,n_z)] *= k_z*I/k_sq/VOLUME;
                            }
                        }
                    }
                }
            }
        }
        // Filter only if we require perturbing on the low-res grid
        if(!user_params->PERTURB_ON_HIGH_RES) {
            if (user_params->DIM != user_params->HII_DIM) {
                filter_box(HIRES_box, 0, 0, L_FACTOR*user_params->BOX_LEN/(user_params->HII_DIM+0.0));
            }
        }
        dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, HIRES_box);
        // now sample to lower res
        // now sample the filtered box
        // High-res path copies pixel-for-pixel; low-res path samples with NGP.
#pragma omp parallel shared(boxes,HIRES_box,f_pixel_factor,ii,dimension) private(i,j,k) num_threads(user_params->N_THREADS)
        {
#pragma omp for
            for (i=0; i<dimension; i++){
                for (j=0; j<dimension; j++){
                    for (k=0; k<dimension; k++){
                        if(user_params->PERTURB_ON_HIGH_RES) {
                            if(ii==0) {
                                boxes->hires_vx[R_INDEX(i,j,k)] =
                                    *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i),
                                                                       (unsigned long long)(j),
                                                                       (unsigned long long)(k)));
                            }
                            if(ii==1) {
                                boxes->hires_vy[R_INDEX(i,j,k)] =
                                    *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i),
                                                                       (unsigned long long)(j),
                                                                       (unsigned long long)(k)));
                            }
                            if(ii==2) {
                                boxes->hires_vz[R_INDEX(i,j,k)] =
                                    *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i),
                                                                       (unsigned long long)(j),
                                                                       (unsigned long long)(k)));
                            }
                        }
                        else {
                            if(ii==0) {
                                boxes->lowres_vx[HII_R_INDEX(i,j,k)] =
                                    *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5),
                                                                       (unsigned long long)(j*f_pixel_factor+0.5),
                                                                       (unsigned long long)(k*f_pixel_factor+0.5)));
                            }
                            if(ii==1) {
                                boxes->lowres_vy[HII_R_INDEX(i,j,k)] =
                                    *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5),
                                                                       (unsigned long long)(j*f_pixel_factor+0.5),
                                                                       (unsigned long long)(k*f_pixel_factor+0.5)));
                            }
                            if(ii==2) {
                                boxes->lowres_vz[HII_R_INDEX(i,j,k)] =
                                    *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5),
                                                                       (unsigned long long)(j*f_pixel_factor+0.5),
                                                                       (unsigned long long)(k*f_pixel_factor+0.5)));
                            }
                        }
                    }
                }
            }
        }
    }
    LOG_DEBUG("Done Inverse FT.");
    // *  *************************************************** * //
    // *              BEGIN 2LPT PART                         * //
    // *  *************************************************** * //
    // Generation of the second order Lagrangian perturbation theory (2LPT) corrections to the ZA
    // reference: Scoccimarro R., 1998, MNRAS, 299, 1097-1118 Appendix D
    // Parameter set in ANAL_PARAMS.H
    if(global_params.SECOND_ORDER_LPT_CORRECTIONS){
        // use six supplementary boxes to store the gradients of phi_1 (eq. D13b)
        // Allocating the boxes
        // PHI_INDEX maps the symmetric pair (i,j), j<=i, onto 0..5:
#define PHI_INDEX(i, j) ((int) ((i) - (j)) + 3*((j)) - ((int)(j))/2 )
        // ij -> INDEX
        // 00 -> 0
        // 11 -> 3
        // 22 -> 5
        // 10 -> 1
        // 20 -> 2
        // 21 -> 4
        fftwf_complex *phi_1[6];
        for(i = 0; i < 3; ++i){
            for(j = 0; j <= i; ++j){
                phi_1[PHI_INDEX(i, j)] = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
            }
        }
        // Build each second derivative of phi_1: d_i d_j phi_1 = -k_i k_j delta(k)/k^2.
        for(i = 0; i < 3; ++i){
            for(j = 0; j <= i; ++j){
                // read in the box
                memcpy(HIRES_box, HIRES_box_saved, sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
                // generate the phi_1 boxes in Fourier transform
#pragma omp parallel shared(HIRES_box,phi_1) private(n_x,n_y,n_z,k_x,k_y,k_z,k_sq,k) num_threads(user_params->N_THREADS)
                {
#pragma omp for
                    for (n_x=0; n_x<user_params->DIM; n_x++){
                        if (n_x>MIDDLE)
                            k_x =(n_x-user_params->DIM) * DELTA_K;  // wrap around for FFT convention
                        else
                            k_x = n_x * DELTA_K;
                        for (n_y=0; n_y<user_params->DIM; n_y++){
                            if (n_y>MIDDLE)
                                k_y =(n_y-user_params->DIM) * DELTA_K;
                            else
                                k_y = n_y * DELTA_K;
                            for (n_z=0; n_z<=MIDDLE; n_z++){
                                k_z = n_z * DELTA_K;
                                k_sq = k_x*k_x + k_y*k_y + k_z*k_z;
                                // note: this local float array shadows the outer int loop variable k
                                float k[] = {k_x, k_y, k_z};
                                // now set the velocities
                                if ((n_x==0) && (n_y==0) && (n_z==0)){ // DC mode
                                    phi_1[PHI_INDEX(i, j)][0] = 0;
                                }
                                else{
                                    phi_1[PHI_INDEX(i, j)][C_INDEX(n_x,n_y,n_z)] = -k[i]*k[j]*HIRES_box[C_INDEX(n_x, n_y, n_z)]/k_sq/VOLUME;
                                    // note the last factor of 1/VOLUME accounts for the scaling in real-space, following the FFT
                                }
                            }
                        }
                    }
                }
                dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, phi_1[PHI_INDEX(i, j)]);
            }
        }
        // Then we will have the laplacian of phi_2 (eq. D13b)
        // After that we have to return in Fourier space and generate the Fourier transform of phi_2
        // Real-space source term: sum_{m<l} (phi_ll * phi_mm - phi_lm^2).
        int m, l;
#pragma omp parallel shared(HIRES_box,phi_1) private(i,j,k,m,l) num_threads(user_params->N_THREADS)
        {
#pragma omp for
            for (i=0; i<user_params->DIM; i++){
                for (j=0; j<user_params->DIM; j++){
                    for (k=0; k<user_params->DIM; k++){
                        *( (float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i),
                                                            (unsigned long long)(j),
                                                            (unsigned long long)(k))) = 0.0;
                        for(m = 0; m < 3; ++m){
                            for(l = m+1; l < 3; ++l){
                                *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i),
                                                                   (unsigned long long)(j),
                                                                   (unsigned long long)(k)) ) += \
                                    ( *((float *)(phi_1[PHI_INDEX(l, l)]) + R_FFT_INDEX((unsigned long long)(i),
                                                                                        (unsigned long long)(j),
                                                                                        (unsigned long long)(k))) ) * \
                                    ( *((float *)(phi_1[PHI_INDEX(m, m)]) + R_FFT_INDEX((unsigned long long)(i)
                                                                                        ,(unsigned long long)(j)
                                                                                        ,(unsigned long long)(k))) );
                                *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i),
                                                                   (unsigned long long)(j),
                                                                   (unsigned long long)(k)) ) -= \
                                    ( *((float *)(phi_1[PHI_INDEX(l, m)]) + R_FFT_INDEX((unsigned long long)(i),
                                                                                        (unsigned long long)(j),
                                                                                        (unsigned long long)(k))) ) * \
                                    ( *((float *)(phi_1[PHI_INDEX(l, m)]) + R_FFT_INDEX((unsigned long long)(i),
                                                                                        (unsigned long long)(j),
                                                                                        (unsigned long long)(k))) );
                            }
                        }
                        // 1/TOT_NUM_PIXELS normalises the upcoming r2c FFT
                        *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i),(unsigned long long)(j),(unsigned long long)(k)) ) /= TOT_NUM_PIXELS;
                    }
                }
            }
        }
        // Perform FFTs
        dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, HIRES_box);
        memcpy(HIRES_box_saved, HIRES_box, sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
        // Now we can store the content of box in a back-up array
        // Then we can generate the gradients of phi_2 (eq. D13b and D9)
        // ***** Store back-up k-box RHS eq. D13b ***** //
        // For each component, we generate the velocity field (same as the ZA part)
        // Now let's set the velocity field/dD/dt (in comoving Mpc)
        // read in the box
        // TODO correct free of phi_1
        for(ii=0;ii<3;ii++) {
            // ii==0 can reuse HIRES_box directly; it still holds the r2c result
            if(ii>0) {
                memcpy(HIRES_box, HIRES_box_saved, sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
            }
#pragma omp parallel shared(HIRES_box,ii) private(n_x,n_y,n_z,k_x,k_y,k_z,k_sq,k) num_threads(user_params->N_THREADS)
            {
#pragma omp for
                // set velocities/dD/dt
                for (n_x=0; n_x<user_params->DIM; n_x++){
                    if (n_x>MIDDLE)
                        k_x =(n_x-user_params->DIM) * DELTA_K;  // wrap around for FFT convention
                    else
                        k_x = n_x * DELTA_K;
                    for (n_y=0; n_y<user_params->DIM; n_y++){
                        if (n_y>MIDDLE)
                            k_y =(n_y-user_params->DIM) * DELTA_K;
                        else
                            k_y = n_y * DELTA_K;
                        for (n_z=0; n_z<=MIDDLE; n_z++){
                            k_z = n_z * DELTA_K;
                            k_sq = k_x*k_x + k_y*k_y + k_z*k_z;
                            // now set the velocities
                            if ((n_x==0) && (n_y==0) && (n_z==0)){ // DC mode
                                HIRES_box[0] = 0;
                            }
                            else{
                                if(ii==0) {
                                    HIRES_box[C_INDEX(n_x,n_y,n_z)] *= k_x*I/k_sq;
                                }
                                if(ii==1) {
                                    HIRES_box[C_INDEX(n_x,n_y,n_z)] *= k_y*I/k_sq;
                                }
                                if(ii==2) {
                                    HIRES_box[C_INDEX(n_x,n_y,n_z)] *= k_z*I/k_sq;
                                }
                            }
                        }
                        // note the last factor of 1/VOLUME accounts for the scaling in real-space, following the FFT
                    }
                }
            }
            // Filter only if we require perturbing on the low-res grid
            if(!user_params->PERTURB_ON_HIGH_RES) {
                if (user_params->DIM != user_params->HII_DIM) {
                    filter_box(HIRES_box, 0, 0, L_FACTOR*user_params->BOX_LEN/(user_params->HII_DIM+0.0));
                }
            }
            dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, HIRES_box);
            // now sample to lower res
            // now sample the filtered box
#pragma omp parallel shared(boxes,HIRES_box,f_pixel_factor,ii,dimension) private(i,j,k) num_threads(user_params->N_THREADS)
            {
#pragma omp for
                for (i=0; i<dimension; i++){
                    for (j=0; j<dimension; j++){
                        for (k=0; k<dimension; k++){
                            if(user_params->PERTURB_ON_HIGH_RES) {
                                if(ii==0) {
                                    boxes->hires_vx_2LPT[R_INDEX(i,j,k)] =
                                        *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i),
                                                                           (unsigned long long)(j),
                                                                           (unsigned long long)(k)));
                                }
                                if(ii==1) {
                                    boxes->hires_vy_2LPT[R_INDEX(i,j,k)] =
                                        *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i),
                                                                           (unsigned long long)(j),
                                                                           (unsigned long long)(k)));
                                }
                                if(ii==2) {
                                    boxes->hires_vz_2LPT[R_INDEX(i,j,k)] =
                                        *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i),
                                                                           (unsigned long long)(j),
                                                                           (unsigned long long)(k)));
                                }
                            }
                            else {
                                if(ii==0) {
                                    boxes->lowres_vx_2LPT[HII_R_INDEX(i,j,k)] =
                                        *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5),
                                                                           (unsigned long long)(j*f_pixel_factor+0.5),
                                                                           (unsigned long long)(k*f_pixel_factor+0.5)));
                                }
                                if(ii==1) {
                                    boxes->lowres_vy_2LPT[HII_R_INDEX(i,j,k)] =
                                        *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5),
                                                                           (unsigned long long)(j*f_pixel_factor+0.5),
                                                                           (unsigned long long)(k*f_pixel_factor+0.5)));
                                }
                                if(ii==2) {
                                    boxes->lowres_vz_2LPT[HII_R_INDEX(i,j,k)] =
                                        *((float *)HIRES_box + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5),
                                                                           (unsigned long long)(j*f_pixel_factor+0.5),
                                                                           (unsigned long long)(k*f_pixel_factor+0.5)));
                                }
                            }
                        }
                    }
                }
            }
        }
        // deallocate the supplementary boxes
        for(i = 0; i < 3; ++i){
            for(j = 0; j <= i; ++j){
                fftwf_free(phi_1[PHI_INDEX(i,j)]);
            }
        }
    }
    LOG_DEBUG("Done 2LPT.");
    // *  *********************************************** * //
    // *               END 2LPT PART                      * //
    // *  *********************************************** * //
    fftwf_cleanup_threads();
    fftwf_cleanup();
    fftwf_forget_wisdom();
    // deallocate
    fftwf_free(HIRES_box);
    fftwf_free(HIRES_box_saved);
    free_ps();
    for (i=0; i<user_params->N_THREADS; i++) {
        gsl_rng_free (r[i]);
    }
    gsl_rng_free(rseed);
    LOG_DEBUG("Cleaned Up.");
    } // End of Try{}
    Catch(status){
        return(status);
    }
    return(0);
}
|
formal-actual-args-1.c | #include <assert.h>
/* Simple aggregate used to test passing a structure to an offloaded function. */
struct Cube
{
  int x;
  int y;
  int z;
};
#pragma omp declare target
/* Map selected short values to fixed return codes; -1 for anything else.
   (Offloadable: declared inside the 'declare target' region.)  */
int
foo (short a)
{
  if (a == 1)
    return 11;
  if (a == 33)
    return 333;
  if (a == 55)
    return 55;
  return -1;
}
/* Overwrite the parameter through a pointer alias, then return the doubled
   value (always 200, regardless of the input).  Exercises address-taken
   arguments in offloaded code.  */
int
bar (int a)
{
  int *alias = &a;
  *alias = 100;
  /* a is 100 here, so this always yields 200.  */
  return *alias + a;
}
struct Cube
baz (struct Cube c)
{
c.x = 11;
return c;
}
#pragma omp end declare target
#define s 100
/* Driver: checks that offloaded calls work with several argument-passing
   styles (converted scalar, address-taken scalar, structure by value).  */
int
main (int argc)
{
  /* Test 1: argument types: char to short.  */
  int array[s];
#pragma omp target map(tofrom : array[ : s])
  {
    for (char i = 0; i < s; i++)
      array[i] = foo (i);
  }
  for (int i = 0; i < s; i++)
    assert (array[i] == foo (i));
  /* Test 2: argument address is taken.  */
  int v = 2;
#pragma omp target map(tofrom : v)
  v = bar (v);
  assert (v == 200);
  /* Test 3: passing a structure as a function argument.
     FIX: the map directions were swapped.  'c' is only read on the device,
     so it must be mapped 'to'; 'r' is produced on the device and checked on
     the host, so it must be mapped 'from'.  With the old clauses the device
     received an uninitialized 'c' and 'r' was never copied back.  */
  struct Cube r;
  struct Cube c = {.x = 1, .y = 2, .z = 3};
#pragma omp target map(from : r) map(to : c)
  r = baz (c);
  assert (r.x == 11);
  assert (r.y == c.y);
  assert (r.z == c.z);
}
|
GB_unop__identity_fc32_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_fc32_uint16
// op(A') function: GB_unop_tran__identity_fc32_uint16
// C type: GxB_FC32_t
// A type: uint16_t
// cast: GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity operator with a uint16 -> FC32 cast.
// Cx and Ax may be aliased; Ab (if non-NULL) is the bitmap of live entries.
GrB_Info GB_unop_apply__identity_fc32_uint16
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (uint16_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // cast each entry: Cx [p] = (FC32) Ax [p]
            Cx [p] = GxB_CMPLXF ((float) (Ax [p]), 0) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                // entry present: cast and copy it
                Cx [p] = GxB_CMPLXF ((float) (Ax [p]), 0) ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, casting uint16 -> FC32 with the identity op.
// The whole loop body lives in the shared template GB_unop_transpose.c, which
// is specialized here by the GB_* macros defined above.
GrB_Info GB_unop_tran__identity_fc32_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,       // per-thread workspaces for the bucket transpose
    const int64_t *GB_RESTRICT A_slice,     // how A's entries are partitioned across threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
residual_based_pseudo_static_displacement_scheme.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ \.
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_RESIDUAL_PSEUDO_STATIC_DISPLACEMENT_SCHEME )
#define KRATOS_RESIDUAL_PSEUDO_STATIC_DISPLACEMENT_SCHEME
/* System includes */
/* External includes */
/* Project includes */
#include "solving_strategies/schemes/residual_based_bossak_displacement_scheme.hpp"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedPseudoStaticDisplacementScheme
* @ingroup KratosCore
* @brief This is a pseudo-static scheme
* @details For pseudo–static strategy: calculate the constant matrices D = Beta * M, "set" M = 0 after initializing the damping matrix
* @note Based on Riccardo Rossi PhD Thesis: Light weight Structures: Structural Analysis and Coupling Issues
* @author Vicente Mataix Ferrandiz
*/
template<class TSparseSpace, class TDenseSpace >
class ResidualBasedPseudoStaticDisplacementScheme
: public ResidualBasedBossakDisplacementScheme<TSparseSpace,TDenseSpace>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedPseudoStaticDisplacementScheme );
typedef Scheme<TSparseSpace,TDenseSpace> BaseType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename Element::DofsVectorType DofsVectorType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef ModelPart::ElementsContainerType ElementsArrayType;
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
typedef typename BaseType::Pointer BaseTypePointer;
typedef ResidualBasedBossakDisplacementScheme<TSparseSpace,TDenseSpace> DerivedBaseType;
typedef typename BaseType::LocalSystemComponents LocalSystemComponentsType;
static constexpr double ZeroTolerance = std::numeric_limits<double>::epsilon();
///@}
///@name Life Cycle
///@{
    /**
     * @brief Constructor. The pseudo static scheme (parameters)
     * @param ThisParameters Parameters with the Rayleigh variable
     */
    explicit ResidualBasedPseudoStaticDisplacementScheme(Parameters ThisParameters)
        : DerivedBaseType(0.0),    // Bossak scheme with alpha = 0
          // NODAL_MAUX is only a placeholder: Variable members need an
          // initializer, and the real variable is looked up below once the
          // parameters have been validated.
          mRayleighBeta(NODAL_MAUX)
    {
        // Validate default parameters
        Parameters default_parameters = Parameters(R"(
        {
            "name"                   : "ResidualBasedPseudoStaticDisplacementScheme",
            "rayleigh_beta_variable" : "RAYLEIGH_BETA"
        })" );
        ThisParameters.ValidateAndAssignDefaults(default_parameters);
        // Resolve the named variable from the Kratos components registry.
        mRayleighBeta = KratosComponents<Variable<double>>::Get(ThisParameters["rayleigh_beta_variable"].GetString());
    }
    /**
     * @brief Default constructor. The pseudo static scheme
     * @param RayleighBetaVariable The variable holding the Rayleigh damping beta coefficient
     */
    explicit ResidualBasedPseudoStaticDisplacementScheme(const Variable<double>& RayleighBetaVariable)
        :DerivedBaseType(0.0),   // Bossak scheme with alpha = 0
        mRayleighBeta(RayleighBetaVariable)
    {
    }
    /** Copy Constructor.
     * @note Takes a non-const reference — NOTE(review): a copy constructor
     * conventionally takes `const&`; confirm whether the base class requires
     * the non-const form before changing it.
     */
    explicit ResidualBasedPseudoStaticDisplacementScheme(ResidualBasedPseudoStaticDisplacementScheme& rOther)
        :DerivedBaseType(rOther),
        mRayleighBeta(rOther.mRayleighBeta)
    {
    }
    /**
     * @brief Clone: returns a base-class pointer owning a copy of this scheme.
     */
    BaseTypePointer Clone() override
    {
        // Ownership is transferred to the returned (Kratos) pointer type.
        return BaseTypePointer( new ResidualBasedPseudoStaticDisplacementScheme(*this) );
    }
    /** Destructor.
     * @note No resources are owned directly; members clean up themselves.
     */
    ~ResidualBasedPseudoStaticDisplacementScheme() override
    {
    }
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
    /**
     * @brief Performing the update of the solution
     * @details Incremental update within newton iteration. It updates the state variables at the end of the time step u_{n+1}^{k+1}= u_{n+1}^{k}+ \Delta u
     * @param rModelPart The model of the problem to solve
     * @param rDofSet Set of all primary variables
     * @param rA LHS matrix
     * @param rDx incremental update of primary variables
     * @param rb RHS Vector
     * @note Unlike the full Bossak update, only VELOCITY is recomputed here
     *       (pseudo-static: accelerations are not updated).
     */
    void Update(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        ) override
    {
        KRATOS_TRY;
        // Apply Dx to the displacement DOFs via the base scheme's DoF updater.
        DerivedBaseType::mpDofUpdater->UpdateDofs(rDofSet, rDx);
        // Updating time derivatives (nodally for efficiency)
        const int num_nodes = static_cast<int>( rModelPart.Nodes().size() );
        const auto it_node_begin = rModelPart.Nodes().begin();
        array_1d<double, 3 > delta_displacement;
        // delta_displacement is private per thread; nodes are independent.
        #pragma omp parallel for private(delta_displacement)
        for(int i = 0;  i < num_nodes; ++i) {
            auto it_node = it_node_begin + i;
            noalias(delta_displacement) = it_node->FastGetSolutionStepValue(DISPLACEMENT) - it_node->FastGetSolutionStepValue(DISPLACEMENT, 1);
            array_1d<double, 3>& r_current_velocity = it_node->FastGetSolutionStepValue(VELOCITY);
            const array_1d<double, 3>& r_previous_velocity = it_node->FastGetSolutionStepValue(VELOCITY, 1);
            // v_{n+1} = c1 * (u_{n+1} - u_n) - c4 * v_n, with the Bossak
            // integration coefficients held by the base scheme.
            noalias(r_current_velocity) = (DerivedBaseType::mBossak.c1 * delta_displacement - DerivedBaseType::mBossak.c4 * r_previous_velocity);
        }
        KRATOS_CATCH( "" );
    }
    /**
     * @brief Performing the prediction of the solution
     * @details It predicts the solution for the current step x = xold + vold * Dt
     * @param rModelPart The model of the problem to solve
     * @param rDofSet set of all primary variables
     * @param rA LHS matrix
     * @param rDx Incremental update of primary variables
     * @param rb RHS Vector
     */
    void Predict(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        ) override
    {
        KRATOS_TRY;

        // Process info
        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

        // Delta time
        const double delta_time = r_current_process_info[DELTA_TIME];

        // Updating time derivatives (nodally for efficiency)
        const auto it_node_begin = rModelPart.Nodes().begin();
        const int num_nodes = static_cast<int>(rModelPart.NumberOfNodes());

        // Auxiliar variables
        const array_1d<double, 3> zero_array = ZeroVector(3);
        array_1d<double, 3 > delta_displacement = zero_array;
        bool predicted_x, predicted_y, predicted_z;

        // DoF positions are looked up once on the first node (-1 when absent)
        // NOTE(review): assumes every node shares the first node's DoF layout -- confirm
        const int disppos_x = it_node_begin->HasDofFor(DISPLACEMENT_X) ? it_node_begin->GetDofPosition(DISPLACEMENT_X) : -1;
        const int velpos_x = it_node_begin->HasDofFor(VELOCITY_X) ? it_node_begin->GetDofPosition(VELOCITY_X) : -1;
        const int disppos_y = it_node_begin->HasDofFor(DISPLACEMENT_Y) ? it_node_begin->GetDofPosition(DISPLACEMENT_Y) : -1;
        const int velpos_y = it_node_begin->HasDofFor(VELOCITY_Y) ? it_node_begin->GetDofPosition(VELOCITY_Y) : -1;
        const int disppos_z = it_node_begin->HasDofFor(DISPLACEMENT_Z) ? it_node_begin->GetDofPosition(DISPLACEMENT_Z) : -1;
        const int velpos_z = it_node_begin->HasDofFor(VELOCITY_Z) ? it_node_begin->GetDofPosition(VELOCITY_Z) : -1;

        // Scratch variables are made thread-private for the parallel loop
        #pragma omp parallel for private(delta_displacement, predicted_x, predicted_y, predicted_z)
        for(int i = 0; i < num_nodes; ++i) {
            auto it_node = it_node_begin + i;

            predicted_x = false;
            predicted_y = false;
            predicted_z = false;

            //Predicting: r_current_displacement = r_previous_displacement + r_previous_velocity * delta_time;
            //ATTENTION::: the prediction is performed only on free nodes
            const array_1d<double, 3>& r_previous_velocity = it_node->FastGetSolutionStepValue(VELOCITY, 1);
            const array_1d<double, 3>& r_previous_displacement = it_node->FastGetSolutionStepValue(DISPLACEMENT, 1);
            array_1d<double, 3>& r_current_acceleration = it_node->FastGetSolutionStepValue(ACCELERATION);
            array_1d<double, 3>& r_current_velocity = it_node->FastGetSolutionStepValue(VELOCITY);
            array_1d<double, 3>& r_current_displacement = it_node->FastGetSolutionStepValue(DISPLACEMENT);

            // X component: a fixed velocity DoF drives the displacement by
            // inverting the pseudo-static update, du = (v + c4 * v_n) / c1;
            // otherwise a free displacement DoF is advanced as u_n + v_n * dt
            if (velpos_x > -1) {
                if (it_node->GetDof(VELOCITY_X, velpos_x).IsFixed()) {
                    delta_displacement[0] = (r_current_velocity[0] + DerivedBaseType::mBossak.c4 * r_previous_velocity[0])/DerivedBaseType::mBossak.c1;
                    r_current_displacement[0] = r_previous_displacement[0] + delta_displacement[0];
                    predicted_x = true;
                }
            }
            if (disppos_x > -1 && !predicted_x) {
                if (!it_node->GetDof(DISPLACEMENT_X, disppos_x).IsFixed() && !predicted_x) {
                    r_current_displacement[0] = r_previous_displacement[0] + delta_time * r_previous_velocity[0];
                }
            }

            // Y component (same logic as X)
            if (velpos_y > -1) {
                if (it_node->GetDof(VELOCITY_Y, velpos_y).IsFixed()) {
                    delta_displacement[1] = (r_current_velocity[1] + DerivedBaseType::mBossak.c4 * r_previous_velocity[1])/DerivedBaseType::mBossak.c1;
                    r_current_displacement[1] = r_previous_displacement[1] + delta_displacement[1];
                    predicted_y = true;
                }
            }
            if (disppos_y > -1 && !predicted_y) {
                if (!it_node->GetDof(DISPLACEMENT_Y, disppos_y).IsFixed() && !predicted_y) {
                    r_current_displacement[1] = r_previous_displacement[1] + delta_time * r_previous_velocity[1];
                }
            }

            // Z component (same logic as X)
            if (velpos_z > -1) {
                if (it_node->GetDof(VELOCITY_Z, velpos_z).IsFixed()) {
                    delta_displacement[2] = (r_current_velocity[2] + DerivedBaseType::mBossak.c4 * r_previous_velocity[2])/DerivedBaseType::mBossak.c1;
                    r_current_displacement[2] = r_previous_displacement[2] + delta_displacement[2];
                    predicted_z = true;
                }
            }
            if (disppos_z > -1 && !predicted_z) {
                if (!it_node->GetDof(DISPLACEMENT_Z, disppos_z).IsFixed() && !predicted_z) {
                    r_current_displacement[2] = r_previous_displacement[2] + delta_time * r_previous_velocity[2];
                }
            }

            // Updating time derivatives: start the step from zero acceleration
            // and carry over the previous step velocity
            noalias(r_current_acceleration) = zero_array;
            noalias(r_current_velocity) = r_previous_velocity;
        }

        KRATOS_CATCH( "" );
    }
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
return "ResidualBasedPseudoStaticDisplacementScheme";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
rOStream << Info() << ". Considering the following damping variable " << mRayleighBeta;
}
///@}
///@name Friends
///@{
protected:
///@}
///@name Static Member Variables
///@{
///@}
///@name Protected Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief It adds the dynamic LHS contribution of the elements D*c1 + K
* @param rLHSContribution The dynamic contribution for the LHS
* @param rD The damping matrix
* @param rM The mass matrix
* @param rCurrentProcessInfo The current process info instance
*/
void AddDynamicsToLHS(
LocalSystemMatrixType& rLHSContribution,
LocalSystemMatrixType& rD,
LocalSystemMatrixType& rM,
ProcessInfo& rCurrentProcessInfo
) override
{
// Adding damping contribution
if (rD.size1() != 0 && TDenseSpace::TwoNorm(rD) > ZeroTolerance) // if D matrix declared
noalias(rLHSContribution) += rD * DerivedBaseType::mBossak.c1;
else if (rM.size1() != 0) {
const double beta = rCurrentProcessInfo[mRayleighBeta];
noalias(rLHSContribution) += rM * beta * DerivedBaseType::mBossak.c1;
}
}
    /**
     * @brief It adds the dynamic RHS contribution of the elements b - D*v
     * @param pElement The element to compute
     * @param rRHSContribution The dynamic contribution for the RHS
     * @param rD The damping matrix
     * @param rM The mass matrix
     * @param rCurrentProcessInfo The current process info instance
     */
    void AddDynamicsToRHS(
        Element::Pointer pElement,
        LocalSystemVectorType& rRHSContribution,
        LocalSystemMatrixType& rD,
        LocalSystemMatrixType& rM,
        ProcessInfo& rCurrentProcessInfo
        ) override
    {
        // Per-thread scratch vector index
        const std::size_t this_thread = OpenMPUtils::ThisThread();

        // Adding damping contribution: b -= D * v, where D is either the
        // element damping matrix or (fallback) beta * M Rayleigh damping
        if (rD.size1() != 0 && TDenseSpace::TwoNorm(rD) > ZeroTolerance) {
            pElement->GetFirstDerivativesVector(DerivedBaseType::mVector.v[this_thread], 0);
            noalias(rRHSContribution) -= prod(rD, DerivedBaseType::mVector.v[this_thread]);
        } else if (rM.size1() != 0) {
            const double beta = rCurrentProcessInfo[mRayleighBeta];
            pElement->GetFirstDerivativesVector(DerivedBaseType::mVector.v[this_thread], 0);
            noalias(rRHSContribution) -= beta * prod(rM, DerivedBaseType::mVector.v[this_thread]);
        }
    }
    /**
     * @brief It adds the dynamic RHS contribution of the condition b - M*a - D*v
     * @param pCondition The condition to compute
     * @param rRHSContribution The dynamic contribution for the RHS
     * @param rD The damping matrix
     * @param rM The mass matrix
     * @param rCurrentProcessInfo The current process info instance
     */
    void AddDynamicsToRHS(
        Condition::Pointer pCondition,
        LocalSystemVectorType& rRHSContribution,
        LocalSystemMatrixType& rD,
        LocalSystemMatrixType& rM,
        ProcessInfo& rCurrentProcessInfo
        ) override
    {
        // Per-thread scratch vector index
        const std::size_t this_thread = OpenMPUtils::ThisThread();

        // Adding damping contribution: b -= D * v, where D is either the
        // condition damping matrix or (fallback) beta * M Rayleigh damping
        if (rD.size1() != 0 && TDenseSpace::TwoNorm(rD) > ZeroTolerance) {
            pCondition->GetFirstDerivativesVector(DerivedBaseType::mVector.v[this_thread], 0);
            noalias(rRHSContribution) -= prod(rD, DerivedBaseType::mVector.v[this_thread]);
        } else if (rM.size1() != 0) {
            const double beta = rCurrentProcessInfo[mRayleighBeta];
            pCondition->GetFirstDerivativesVector(DerivedBaseType::mVector.v[this_thread], 0);
            noalias(rRHSContribution) -= beta * prod(rM, DerivedBaseType::mVector.v[this_thread]);
        }
    }
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
    ///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
Variable<double> mRayleighBeta; /// The Rayleigh Beta variable
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@}
///@name Serialization
///@{
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedPseudoStaticDisplacementScheme */
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_PSEUDO_STATIC_DISPLACEMENT_SCHEME defined */
|
DRB048-firstprivate-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
/*
Adds the scalar g to every element of a[0..n-1] in parallel.
Iterations are independent; only the loop index is privatized.
*/
void foo(int * a, int n, int g)
{
  int idx;
#pragma omp parallel for private(idx)
  for (idx = 0; idx < n; idx++)
  {
    a[idx] = a[idx] + g;
  }
}
int a[100];
/* Initializes the global array a[0..99] to a[i] = i in parallel,
   adds 7 to every element via foo(), then prints the 100 results
   (7 .. 106), one per line. The exact pragma/loop shape is the point
   of this DataRaceBench case, so it is kept verbatim. */
int main()
{
  int i;
  int n = 100;
  /* independent iterations; loop index i is explicitly private */
#pragma omp parallel for private(i)
  for (i=0;i<n;i++)
  {
    a[i] = i;
  }
  foo(a, 100, 7);
  /* serial verification/output loop */
  for (i=0;i<n;i++)
  {
    printf("%d\n",a[i]);
  }
  return 0;
}
|
2pcf.c | // 2pcf: least squares two-point correlation function code
// ---
// author: Nicolas Tessore <nicolas.tessore@manchester.ac.uk>
// date: 19 Aug 2018
#define _XOPEN_SOURCE 600
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <math.h>
#include <complex.h>
#include <time.h>
#include <signal.h>
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#endif
// LAPACK
extern void dptsv_(int*, int*, double*, double*, double*, int*, int*);
// Returns the complex phasor exp(i*n*phi), where phi = atan2(y, x):
// w = (x+iy)/(x-iy) = exp(2*i*phi), so multiplying in w floor(n/2)
// times, plus one unit phasor (x+iy)/|x+iy| when n is odd, yields
// exp(i*n*phi). Used to rotate spin-n fields.
static inline double complex phase(unsigned n, double x, double y)
{
    double complex w = (x + I*y)/(x - I*y);
    double complex z = 1;
    // accumulate exp(2*i*phi) per step, two units of n at a time
    for(; n > 1; n -= 2)
        z *= w;
    // odd n: one extra factor of exp(i*phi)
    if(n == 1)
        z *= (x + I*y)/hypot(x, y);
    return z;
}
static const int DW = 8;
// qsort comparator for DW-wide point records: orders primarily by the
// grid-cell key stored in slot 7, then by z (slot 2), y (slot 1) and
// x (slot 0) to make the ordering deterministic within a cell.
int mapsort(const void* a, const void* b)
{
    const double* p = (const double*)a;
    const double* q = (const double*)b;
    static const int slots[4] = {7, 2, 1, 0};
    int s;
    for(s = 0; s < 4; ++s)
    {
        const int k = slots[s];
        if(p[k] < q[k])
            return -1;
        if(p[k] > q[k])
            return +1;
    }
    return 0;
}
// Maps a point (x,y,z), already offset to the grid origin, to its flat
// cell index in a grid of cell size s with w cells along x and h along y
// (x varies fastest, then y, then z).
static inline int index(double x, double y, double z, double s, int w, int h)
{
    const int cx = (int)(x/s);
    const int cy = (int)(y/s);
    const int cz = (int)(z/s);
    return cz*(w*h) + cy*w + cx;
}
// Collects, for grid cell q, the half-open index ranges [v[2m], v[2m+1])
// of points belonging to every cell within gr cells of q along each axis.
// ma holds cumulative cell start offsets (length gx*gy*gz + 1), so a row
// of consecutive cells collapses into a single range, and ranges that
// continue exactly where the previous one ended are merged. The number
// of ranges is stored in *c; returns q (the queried cell, for caching).
static inline int query(int q, const int ma[], int gx, int gy, int gz,
    int gr, int* c, int v[])
{
    int i, il, ih, j, jl, jh, k, kl, kh, l, m, n, p;

    // decompose the flat cell index: x fastest, then y, then z
    i = q/(gx*gy);
    j = (q/gx)%gy;
    k = q%gx;

    // clamp the (2*gr+1)^3 neighbourhood to the grid bounds
    il = i > gr ? i-gr : 0;
    ih = i+gr < gz ? i+gr+1 : gz;
    jl = j > gr ? j-gr : 0;
    jh = j+gr < gy ? j+gr+1 : gy;
    kl = k > gr ? k-gr : 0;
    kh = k+gr < gx ? k+gr+1 : gx;

    // n = number of ranges emitted, p = end of the last emitted range
    n = 0;
    p = -1;
    for(i = il; i < ih; ++i)
    {
        for(j = jl; j < jh; ++j)
        {
            // the row of cells [kl, kh) in this (i,j) slice is contiguous
            // in point index space: it spans [ma[k+kl], ma[k+kh])
            k = (i*gy + j)*gx;
            l = ma[k + kl];
            m = ma[k + kh];
            if(l == p)
                p = (v[2*n-1] = m);   // continues the previous range: extend it
            else
                p = (v[2*n+0] = l, v[2*n+1] = m), ++n;  // start a new range
        }
    }

    *c = n;
    return q;
}
#include "io.c" // yes, really
static const char* ANIM[] = {
"\033[34m\xe2\xa0\xb7\033[0m", "\033[34m\xe2\xa0\xaf\033[0m",
"\033[34m\xe2\xa0\x9f\033[0m", "\033[34m\xe2\xa0\xbb\033[0m",
"\033[34m\xe2\xa0\xbd\033[0m", "\033[34m\xe2\xa0\xbe\033[0m"
};
static const int NANIM = sizeof(ANIM)/sizeof(*ANIM);
volatile sig_atomic_t AL;
volatile sig_atomic_t QQ;
// Signal handler for SIGALRM (progress tick) and SIGQUIT (abort request):
// raises the matching flag and re-installs itself, since signal() may
// reset the disposition to default on delivery.
void handler(int s)
{
    AL = (s == SIGALRM) ? 1 : 0;
    QQ = (s == SIGQUIT) ? 1 : 0;
    signal(s, handler);
}
// Entry point: reads one or two point catalogs, bins all pairs within the
// configured separation range on a uniform grid, accumulates the least
// squares normal equations per separation bin, solves the tridiagonal
// system with LAPACK dptsv, and writes the correlation function output.
int main(int argc, char* argv[])
{
    char* cfgfile;
    struct config cfg;
    // flags: cross-correlation, log bin spacing, spherical coords,
    // thread-local data copies
    bool xc, ls, sc, tc;
    // number of separation bins and bin-edge bookkeeping
    int nd;
    double dl, dh, d0, dm, Dl, Dh;
    // unit conversion factors (input catalog, output angles)
    double ui, uo;
    // spins of the two fields
    int S1, S2;
    // catalogs: point counts and packed data (DW doubles per point)
    int n1, n2;
    double* c1;
    double* c2;
    // grid: cell size, search radius in cells, bounding box, dimensions
    double gs;
    int gr;
    double xl, xh, yl, yh, zl, zh;
    int gx, gy, gz, ng;
    int* ma;
    // accumulators: pair counts N, weight matrix W (tridiagonal,
    // stored as diagonal + off-diagonal), data vector Y (4 components)
    double* N;
    double* W;
    double* Y;
    // working copies for the in-place LAPACK solve
    double* A;
    double* X;
    time_t st;
    int dt;
    double nn;
    int i, j;
    // terminal decoration strings (bold/normal/check/cross)
    char* bf, *nf, *sv, *sx;

    // enable ANSI decoration only when writing to a terminal
    if(isatty(fileno(stdout)))
    {
        bf = "\033[1m";
        nf = "\033[0m";
        sv = "\033[32m\xe2\x9c\x94\033[0m";
        sx = "\033[31m\xe2\x9c\x98\033[0m";
    }
    else
    {
        bf = nf = "";
        sv = sx = ">";
    }

    cfgfile = NULL;
    memset(&cfg, 0, sizeof(cfg));

    if(argc > 5)
        goto err_usage;

    // positional arguments override config entries; "--" skips a slot
    if(argc > 1)
    {
        char** posarg[] = {
            NULL,
            &cfgfile,
            &cfg.catalog1,
            &cfg.catalog2,
            &cfg.output
        };
        for(int c = 1; c < argc; ++c)
            if(strcmp(argv[c], "--") != 0)
                *posarg[c] = strdup(argv[c]);
    }

    if(!cfgfile)
        cfgfile = strdup("2pcf.cfg");

    readcfg(cfgfile, &cfg);

    printf("\n");
    printf("%sconfiguration file %s%s\n", bf, cfgfile, nf);
    printf("\n");
    printcfg(&cfg);
    printf("\n");

    // derive mode flags and binning constants from the configuration
    xc = cfg.catalog2 != NULL;
    sc = cfg.coords >= COORDS_LONLAT;
    ls = cfg.spacing == SPACING_LOG;
    ui = UCONV[cfg.dunit];
    uo = UCONV[cfg.thunit];
    nd = cfg.nth;
    dl = cfg.thmin*uo;
    dh = cfg.thmax*uo;
    S1 = cfg.spin1;
    S2 = cfg.spin2;

    if(cfg.coords == COORDS_3D && (S1 != 0 || S2 != 0))
    {
        fprintf(stderr, "error: 3D fields must be spin 0\n");
        return EXIT_FAILURE;
    }

#ifdef _OPENMP
    if(cfg.num_threads)
        omp_set_num_threads(cfg.num_threads);
    tc = cfg.thread_data == TDATA_COPY;
#else
    tc = false;
#endif

    // spherical coordinates: convert angular separation to chord length
    if(sc)
    {
        dl = 2*sin(0.5*dl);
        dh = 2*sin(0.5*dh);
    }

    // d0/dm map a separation onto a fractional bin index
    if(ls)
    {
        d0 = log(dl);
        dm = (nd - 1)/(log(dh) - d0);
    }
    else
    {
        d0 = dl;
        dm = (nd - 1)/(dh - d0);
    }

    // squared separation limits, so the hot loop avoids sqrt
    Dl = dl*dl;
    Dh = dh*dh;

    printf("%sread catalog%s%s\n", bf, xc ? " 1" : "", nf);
    fflush(stdout);

    c1 = readc(cfg.catalog1, cfg.coords, ui, cfg.field1, cfg.signs1, &n1);

    printf("%s done with %d points\n", sv, n1);
    printf("\n");

    if(xc)
    {
        printf("%sread catalog 2%s\n", bf, nf);
        fflush(stdout);

        c2 = readc(cfg.catalog2, cfg.coords, ui, cfg.field2, cfg.signs2, &n2);

        printf("%s done with %d points\n", sv, n2);
        printf("\n");
    }
    else
    {
        // auto-correlation: second catalog aliases the first
        c2 = c1;
        n2 = n1;
        S2 = S1;
    }

    printf("%sbuild index%s\n", bf, nf);
    fflush(stdout);

    // cell size: a quarter of the maximum separation, searched gr cells out
    gs = 0.25*dh;
    gr = ceil(dh/gs);

    // bounding box over all points of both catalogs
    xl = xh = c1[0];
    yl = yh = c1[1];
    zl = zh = c1[2];
    for(i = 1; i < n1; ++i)
    {
        if(c1[i*DW+0] < xl) xl = c1[i*DW+0];
        if(c1[i*DW+0] > xh) xh = c1[i*DW+0];
        if(c1[i*DW+1] < yl) yl = c1[i*DW+1];
        if(c1[i*DW+1] > yh) yh = c1[i*DW+1];
        if(c1[i*DW+2] < zl) zl = c1[i*DW+2];
        if(c1[i*DW+2] > zh) zh = c1[i*DW+2];
    }
    if(xc)
    {
        for(i = 0; i < n2; ++i)
        {
            if(c2[i*DW+0] < xl) xl = c2[i*DW+0];
            if(c2[i*DW+0] > xh) xh = c2[i*DW+0];
            if(c2[i*DW+1] < yl) yl = c2[i*DW+1];
            if(c2[i*DW+1] > yh) yh = c2[i*DW+1];
            if(c2[i*DW+2] < zl) zl = c2[i*DW+2];
            if(c2[i*DW+2] > zh) zh = c2[i*DW+2];
        }
    }

    gx = floor((xh - xl)/gs) + 1;
    gy = floor((yh - yl)/gs) + 1;
    gz = floor((zh - zl)/gs) + 1;
    ng = gx*gy*gz;

    // store each point's cell index in slot 7, then sort points by cell
    for(i = 0; i < n1; ++i)
        c1[i*DW+7] =
            index(c1[i*DW+0]-xl, c1[i*DW+1]-yl, c1[i*DW+2]-zl, gs, gx, gy);
    qsort(c1, n1, DW*sizeof(double), mapsort);
    if(xc)
    {
        for(i = 0; i < n2; ++i)
            c2[i*DW+7] =
                index(c2[i*DW+0]-xl, c2[i*DW+1]-yl, c2[i*DW+2]-zl, gs, gx, gy);
        qsort(c2, n2, DW*sizeof(double), mapsort);
    }

    // ma[i] = index of the first catalog-2 point in cell i (cumulative map)
    ma = malloc((ng+1)*sizeof(int));
    if(!ma)
        goto err_alloc;
    for(i = 0, j = 0; i < ng; ++i)
    {
        while(j < n2 && c2[j*DW+7] < i)
            j += 1;
        ma[i] = j;
    }
    ma[ng] = n2;

    printf("%s done with %d x %d x %d grid cells\n", sv, gx, gy, gz);
    printf("\n");

    N = calloc(nd, sizeof(double));
    W = calloc(nd*2, sizeof(double));
    Y = calloc(nd*4, sizeof(double));
    if(!N || !W || !Y)
        goto err_alloc;

    // SIGALRM drives the progress display, SIGQUIT requests early stop
    signal(SIGALRM, handler);
    signal(SIGQUIT, handler);

    AL = QQ = 0;

    printf("%scorrelations%s\n", bf, nf);
    fflush(stdout);

    st = time(NULL);
    dt = 0;

    #pragma omp parallel default(none) shared(st, dt, N, W, Y, AL, QQ) \
        private(i, j) firstprivate(xc, ls, sc, tc, nd, d0, dm, Dl, Dh, gr, \
            gx, gy, gz, ng, n1, n2, c1, c2, ma, S1, S2, ANIM, NANIM, stdout)
    {
        int qc, jh;
        int q, nq;
        int* qr;
        double* c1_;
        double* c2_;
        int* ma_;
        // thread-local accumulators, merged at the end under a critical
        double* N_;
        double* W_;
        double* Y_;
        bool fb;

        nq = 0;
        // per-thread buffer of cell ranges returned by query()
        qr = malloc((2*gr+1)*(2*gr+1)*2*sizeof(int));
        if(!qr)
            perror(NULL), abort();

        // optionally copy the catalogs and index per thread (NUMA locality)
        if(tc)
        {
            c1_ = malloc(n1*DW*sizeof(double));
            if(xc)
                c2_ = malloc(n2*DW*sizeof(double));
            else
                c2_ = c1_;
            ma_ = malloc((ng+1)*sizeof(int));
            if(!c1_ || !c2_ || !ma_)
                perror(NULL), abort();
            memcpy(c1_, c1, n1*DW*sizeof(double));
            if(xc)
                memcpy(c2_, c2, n2*DW*sizeof(double));
            memcpy(ma_, ma, (ng+1)*sizeof(int));
        }
        else
        {
            c1_ = c1;
            c2_ = c2;
            ma_ = ma;
        }

        N_ = calloc(nd, sizeof(double));
        W_ = calloc(nd*2, sizeof(double));
        Y_ = calloc(nd*4, sizeof(double));
        if(!N_ || !W_ || !Y_)
            perror(NULL), abort();

        // only the master thread with a terminal animates progress
        fb = false;
        #pragma omp master
        if(isatty(fileno(stdout)))
        {
            fb = true;
            AL = false;
            alarm(1);
    #ifdef _OPENMP
            printf("\r%s %d thread(s) ", ANIM[0], omp_get_num_threads());
            fflush(stdout);
    #endif
        }

        // qc caches the last queried cell so consecutive points in the
        // same cell reuse the neighbour ranges
        qc = -1;
        #pragma omp for schedule(static, 1) nowait
        for(i = 0; i < n1; ++i)
        {
            const double xi = c1_[i*DW+0];
            const double yi = c1_[i*DW+1];
            const double zi = c1_[i*DW+2];
            // NOTE(review): this local ui shadows the outer unit factor ui
            const double ui = c1_[i*DW+3];
            const double vi = c1_[i*DW+4];
            const double wi = c1_[i*DW+5];
            const int qi = c1_[i*DW+7];

            // SIGQUIT: drain remaining iterations without work
            if(QQ)
                continue;

            // SIGALRM tick: refresh the progress line and re-arm
            if(AL && fb)
            {
                dt = difftime(time(NULL), st);
                printf("\r%s %.2f%%", ANIM[dt%NANIM], 100.*i/n1);
                printf(" in %02d:%02d:%02d ", dt/3600, (dt/60)%60, dt%60);
                fflush(stdout);
                AL = false;
                alarm(1);
            }

            if(qi != qc)
                qc = query(qi, ma, gx, gy, gz, gr, &nq, qr);

            for(q = 0; q < nq; ++q)
            {
                j = qr[2*q+0];
                jh = qr[2*q+1];

                // auto-correlation: count each pair once (j > i)
                if(!xc && j < i+1)
                    j = i+1;

                for(; j < jh; ++j)
                {
                    const double xj = c2_[j*DW+0];
                    const double yj = c2_[j*DW+1];
                    const double zj = c2_[j*DW+2];
                    const double uj = c2_[j*DW+3];
                    const double vj = c2_[j*DW+4];
                    const double wj = c2_[j*DW+5];

                    const double dx = xi - xj;
                    const double dy = yi - yj;
                    const double dz = zi - zj;

                    // squared separation, compared against [Dl, Dh)
                    const double D = dx*dx + dy*dy + dz*dz;

                    if(D >= Dl && D < Dh)
                    {
                        double complex gi, gj, xip, xim;
                        double fl, fh, ww;
                        int nl, nh;

                        const double dp = xi*xj + sc*(yi*yj+zi*zj);
                        const double cp = xj*yi - yj*xi;

                        // fractional bin index; the pair is linearly
                        // split between bins nl and nh with weights fl/fh
                        fl = dm*((ls ? 0.5*log(D) : sqrt(D)) - d0);
                        nl = floor(fl);
                        nh = nl + 1;
                        fl = nh - fl;
                        fh = 1 - fl;

                        ww = wi*wj;

                        // rotate both fields by their spin phase
                        // (arguments to phase() encode the tangent-plane
                        // rotation; presumably derived from dp/cp above --
                        // TODO confirm against the 2pcf paper/docs)
                        gi = (ui + I*vi)*phase(S1, zi*dp - zj, cp);
                        gj = (uj + I*vj)*phase(S2, zi - zj*dp, cp);

                        xip = gi*conj(gj);
                        xim = gi*gj;

                        // accumulate pair count, tridiagonal weights,
                        // and the four data-vector components
                        N_[nl] += fl;
                        N_[nh] += fh;

                        W_[0*nd+nl] += ww*fl*fl;
                        W_[0*nd+nh] += ww*fh*fh;
                        W_[1*nd+nl] += ww*fl*fh;

                        Y_[0*nd+nl] += ww*fl*creal(xip);
                        Y_[0*nd+nh] += ww*fh*creal(xip);
                        Y_[1*nd+nl] += ww*fl*creal(xim);
                        Y_[1*nd+nh] += ww*fh*creal(xim);
                        Y_[2*nd+nl] += ww*fl*cimag(xip);
                        Y_[2*nd+nh] += ww*fh*cimag(xip);
                        Y_[3*nd+nl] += ww*fl*cimag(xim);
                        Y_[3*nd+nh] += ww*fh*cimag(xim);
                    }
                }
            }
        }

        // merge thread-local accumulators into the shared totals
        #pragma omp critical
        {
            for(int n = 0; n < nd; ++n)
            {
                N[n] += N_[n];
                W[0*nd+n] += W_[0*nd+n];
                W[1*nd+n] += W_[1*nd+n];
                Y[0*nd+n] += Y_[0*nd+n];
                Y[1*nd+n] += Y_[1*nd+n];
                Y[2*nd+n] += Y_[2*nd+n];
                Y[3*nd+n] += Y_[3*nd+n];
            }
        }

        free(qr);
        free(N_);
        free(W_);
        free(Y_);
        if(tc)
        {
            free(c1_);
            if(xc)
                free(c2_);
            free(ma_);
        }
    }

    nn = 0;
    for(int n = 0; n < nd; ++n)
        nn += N[n];

    dt = difftime(time(NULL), st);

    // NOTE(review): checks stdin here, but stdout everywhere else for
    // terminal detection -- looks like this should be stdout; confirm
    if(isatty(fileno(stdin)))
        printf("\r");
    printf("%s done with %.0f pairs", sv, nn);
    printf(" in %02d:%02d:%02d \n", dt/3600, (dt/60)%60, dt%60);
    printf("\n");

    // dptsv overwrites its inputs, so solve on copies of W and Y
    A = calloc(nd*2, sizeof(double));
    X = calloc(nd*4, sizeof(double));
    if(!A || !X)
        goto err_alloc;

    memcpy(A, W, nd*2*sizeof(double));
    memcpy(X, Y, nd*4*sizeof(double));

    // solve A.X = Y
    {
        // symmetric positive-definite tridiagonal system, 4 right-hand
        // sides: A holds the diagonal (first nd) and off-diagonal (next nd)
        int n = nd, m = 4, err;

        printf("%sleast squares%s\n", bf, nf);
        fflush(stdout);

        dptsv_(&n, &m, A, A+n, X, &n, &err);

        if(!err)
            printf("%s success\n", sv);
        else
        {
            printf("%s error: ", sx);
            if(err > 0)
                printf("the %dx%d submatrix is not pos. def.", err, err);
            else
                printf("illegal argument");
            printf("\n");
        }
        printf("\n");
    }

    output(cfg.output, nd, dl, dh, uo, sc, ls, N, X, W, Y);

    free(N);
    free(W);
    free(Y);
    free(A);
    free(X);
    free(ma);
    free(c1);
    if(xc)
        free(c2);
    free(cfgfile);
    freecfg(&cfg);

    return EXIT_SUCCESS;

err_usage:
    fprintf(stderr, "usage: 2pcf [config] [catalog] [catalog2] [output]\n");
    return EXIT_FAILURE;

err_alloc:
    perror(NULL);
    return EXIT_FAILURE;
}
|
GB_AxB_dot3.c | //------------------------------------------------------------------------------
// GB_AxB_dot3: compute C<M> = A'*B in parallel
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// This function only computes C<M>=A'*B. The mask must be present, and not
// complemented, either valued or structural. The mask is always applied.
#include "GB_mxm.h"
#ifndef GBCOMPACT
#include "GB_AxB__include.h"
#endif
#define GB_FREE_WORK \
{ \
GB_FREE (TaskList) ; \
}
#define GB_FREE_ALL \
{ \
GB_FREE_WORK ; \
GB_MATRIX_FREE (Chandle) ; \
}
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_AxB_dot3 // C<M> = A'*B using dot product method
(
GrB_Matrix *Chandle, // output matrix
const GrB_Matrix M, // mask matrix
const bool Mask_struct, // if true, use the only structure of M
const GrB_Matrix A, // input matrix
const GrB_Matrix B, // input matrix
const GrB_Semiring semiring, // semiring that defines C=A*B
const bool flipxy, // if true, do z=fmult(b,a) vs fmult(a,b)
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GrB_Info info ;
ASSERT (Chandle != NULL) ;
ASSERT (*Chandle == NULL) ;
ASSERT_MATRIX_OK (M, "M for dot3 A'*B", GB0) ;
ASSERT_MATRIX_OK (A, "A for dot3 A'*B", GB0) ;
ASSERT_MATRIX_OK (B, "B for dot3 A'*B", GB0) ;
ASSERT (!GB_PENDING (M)) ; ASSERT (!GB_ZOMBIES (M)) ;
ASSERT (!GB_PENDING (A)) ; ASSERT (!GB_ZOMBIES (A)) ;
ASSERT (!GB_PENDING (B)) ; ASSERT (!GB_ZOMBIES (B)) ;
ASSERT_SEMIRING_OK (semiring, "semiring for numeric A'*B", GB0) ;
ASSERT (A->vlen == B->vlen) ;
int ntasks, max_ntasks = 0, nthreads ;
GB_task_struct *TaskList = NULL ;
//--------------------------------------------------------------------------
// get the semiring operators
//--------------------------------------------------------------------------
GrB_BinaryOp mult = semiring->multiply ;
GrB_Monoid add = semiring->add ;
ASSERT (mult->ztype == add->op->ztype) ;
bool op_is_first = mult->opcode == GB_FIRST_opcode ;
bool op_is_second = mult->opcode == GB_SECOND_opcode ;
bool op_is_pair = mult->opcode == GB_PAIR_opcode ;
bool A_is_pattern = false ;
bool B_is_pattern = false ;
if (flipxy)
{
// z = fmult (b,a) will be computed
A_is_pattern = op_is_first || op_is_pair ;
B_is_pattern = op_is_second || op_is_pair ;
ASSERT (GB_IMPLIES (!A_is_pattern,
GB_Type_compatible (A->type, mult->ytype))) ;
ASSERT (GB_IMPLIES (!B_is_pattern,
GB_Type_compatible (B->type, mult->xtype))) ;
}
else
{
// z = fmult (a,b) will be computed
A_is_pattern = op_is_second || op_is_pair ;
B_is_pattern = op_is_first || op_is_pair ;
ASSERT (GB_IMPLIES (!A_is_pattern,
GB_Type_compatible (A->type, mult->xtype))) ;
ASSERT (GB_IMPLIES (!B_is_pattern,
GB_Type_compatible (B->type, mult->ytype))) ;
}
(*Chandle) = NULL ;
//--------------------------------------------------------------------------
// get M, A, and B
//--------------------------------------------------------------------------
const int64_t *GB_RESTRICT Mp = M->p ;
const int64_t *GB_RESTRICT Mh = M->h ;
const int64_t *GB_RESTRICT Mi = M->i ;
const GB_void *GB_RESTRICT Mx = (GB_void *) (Mask_struct ? NULL : (M->x)) ;
const size_t msize = M->type->size ;
const int64_t mvlen = M->vlen ;
const int64_t mvdim = M->vdim ;
const int64_t mnz = GB_NNZ (M) ;
const int64_t mnvec = M->nvec ;
const bool M_is_hyper = M->is_hyper ;
const int64_t *GB_RESTRICT Ap = A->p ;
const int64_t *GB_RESTRICT Ah = A->h ;
// const int64_t *GB_RESTRICT Ai = A->i ;
// const int64_t avlen = A->vlen ;
// const int64_t avdim = A->vdim ;
// const int64_t anz = GB_NNZ (A) ;
const int64_t anvec = A->nvec ;
const bool A_is_hyper = A->is_hyper ;
const int64_t *GB_RESTRICT Bp = B->p ;
const int64_t *GB_RESTRICT Bh = B->h ;
// const int64_t *GB_RESTRICT Bi = B->i ;
// const int64_t bvlen = B->vlen ;
// const int64_t bvdim = B->vdim ;
// const int64_t bnz = GB_NNZ (B) ;
const int64_t bnvec = B->nvec ;
const bool B_is_hyper = B->is_hyper ;
//--------------------------------------------------------------------------
// allocate C, the same size and # of entries as M
//--------------------------------------------------------------------------
GrB_Type ctype = add->op->ztype ;
int64_t cvlen = mvlen ;
int64_t cvdim = mvdim ;
int64_t cnz = mnz ;
int64_t cnvec = mnvec ;
info = GB_create (Chandle, ctype, cvlen, cvdim, GB_Ap_malloc, true,
GB_SAME_HYPER_AS (M_is_hyper), M->hyper_ratio, cnvec,
cnz+1, // add one to cnz for GB_cumsum of Cwork in GB_AxB_dot3_slice
true, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
GB_FREE_ALL ;
return (info) ;
}
GrB_Matrix C = (*Chandle) ;
int64_t *GB_RESTRICT Cp = C->p ;
int64_t *GB_RESTRICT Ch = C->h ;
int64_t *GB_RESTRICT Cwork = C->i ; // use C->i as workspace
//--------------------------------------------------------------------------
// determine the # of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
//--------------------------------------------------------------------------
// copy Mp and Mh into C
//--------------------------------------------------------------------------
// FUTURE:: C->p and C->h could be shallow copies of M->p and M->h, which
// could same some time and memory if C is then, say, transposed by
// GB_accum_mask later on.
nthreads = GB_nthreads (cnvec, chunk, nthreads_max) ;
GB_memcpy (Cp, Mp, (cnvec+1) * sizeof (int64_t), nthreads) ;
if (M_is_hyper)
{
GB_memcpy (Ch, Mh, cnvec * sizeof (int64_t), nthreads) ;
}
C->magic = GB_MAGIC ;
C->nvec_nonempty = M->nvec_nonempty ;
C->nvec = M->nvec ;
//--------------------------------------------------------------------------
// construct the tasks for the first phase
//--------------------------------------------------------------------------
nthreads = GB_nthreads (cnz, chunk, nthreads_max) ;
GB_OK (GB_AxB_dot3_one_slice (&TaskList, &max_ntasks, &ntasks, &nthreads,
M, Context)) ;
//--------------------------------------------------------------------------
// phase1: estimate the work to compute each entry in C
//--------------------------------------------------------------------------
// The work to compute C(i,j) is held in Cwork [p], if C(i,j) appears in
// as the pth entry in C.
int taskid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
// GB_GET_TASK_DESCRIPTOR ;
int64_t kfirst = TaskList [taskid].kfirst ;
int64_t klast = TaskList [taskid].klast ;
bool fine_task = (klast == -1) ;
if (fine_task)
{
// a fine task operates on a slice of a single vector
klast = kfirst ;
}
int64_t bpleft = 0 ;
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// get j, the kth vector of C and M
//------------------------------------------------------------------
int64_t j = (Mh == NULL) ? k : Mh [k] ;
GB_GET_VECTOR (pM, pM_end, pM, pM_end, Mp, k) ;
//------------------------------------------------------------------
// get B(:,j)
//------------------------------------------------------------------
int64_t pB, pB_end ;
GB_lookup (B_is_hyper, Bh, Bp, &bpleft, bnvec-1, j, &pB, &pB_end) ;
int64_t bjnz = pB_end - pB ;
//------------------------------------------------------------------
// estimate the work to compute each entry of C(:,j)
//------------------------------------------------------------------
// A decent estimate of the work to compute the dot product C(i,j)
// = A(:,i)'*B(:,j) is min (|A(:,i)|, |B(:,j)|) + 1. This is a
// lower bound. The actual work could require a binary search of
// either A(:,i) or B(:,j), or a merge of the two vectors. Or it
// could require no work at all if all entries in A(:,i) appear
// before all entries in B(:,j), or visa versa. No work is done if
// M(i,j)=0. A more accurate estimate is possible to compute,
// following the different methods used in
// Template/GB_AxB_dot_cij.c.
if (bjnz == 0)
{
// B(:,j) is empty, so C(:,j) is empty as well. No work is to
// be done, but it still takes unit work to flag each C(:,j) as
// a zombie
for ( ; pM < pM_end ; pM++)
{
Cwork [pM] = 1 ;
}
}
else
{
int64_t apleft = 0 ;
for ( ; pM < pM_end ; pM++)
{
int64_t work = 1 ;
if (GB_mcast (Mx, pM, msize))
{
int64_t pA, pA_end, i = Mi [pM] ;
GB_lookup (A_is_hyper, Ah, Ap, &apleft, anvec-1, i,
&pA, &pA_end) ;
int64_t ajnz = pA_end - pA ;
work += GB_IMIN (ajnz, bjnz) ;
}
Cwork [pM] = work ;
}
}
}
}
//--------------------------------------------------------------------------
// free the current tasks and construct the tasks for the second phase
//--------------------------------------------------------------------------
GB_FREE (TaskList) ;
GB_OK (GB_AxB_dot3_slice (&TaskList, &max_ntasks, &ntasks, &nthreads,
C, Context)) ;
GBBURBLE ("nthreads %d ntasks %d ", nthreads, ntasks) ;
//--------------------------------------------------------------------------
// C<M> = A'*B, via masked dot product method and built-in semiring
//--------------------------------------------------------------------------
bool done = false ;
#ifndef GBCOMPACT
//----------------------------------------------------------------------
// define the worker for the switch factory
//----------------------------------------------------------------------
#define GB_Adot3B(add,mult,xname) GB_Adot3B_ ## add ## mult ## xname
#define GB_AxB_WORKER(add,mult,xname) \
{ \
info = GB_Adot3B (add,mult,xname) (C, M, Mask_struct, \
A, A_is_pattern, B, B_is_pattern, \
TaskList, ntasks, nthreads) ; \
done = (info != GrB_NO_VALUE) ; \
} \
break ;
//----------------------------------------------------------------------
// launch the switch factory
//----------------------------------------------------------------------
GB_Opcode mult_opcode, add_opcode ;
GB_Type_code xcode, ycode, zcode ;
if (GB_AxB_semiring_builtin (A, A_is_pattern, B, B_is_pattern, semiring,
flipxy, &mult_opcode, &add_opcode, &xcode, &ycode, &zcode))
{
#include "GB_AxB_factory.c"
}
#endif
//--------------------------------------------------------------------------
// C<M> = A'*B, via masked dot product method and typecasting
//--------------------------------------------------------------------------
if (!done)
{
GB_BURBLE_MATRIX (C, "generic ") ;
//----------------------------------------------------------------------
// get operators, functions, workspace, contents of A, B, C, and M
//----------------------------------------------------------------------
GxB_binary_function fmult = mult->function ;
GxB_binary_function fadd = add->op->function ;
size_t csize = C->type->size ;
size_t asize = A_is_pattern ? 0 : A->type->size ;
size_t bsize = B_is_pattern ? 0 : B->type->size ;
size_t xsize = mult->xtype->size ;
size_t ysize = mult->ytype->size ;
// scalar workspace: because of typecasting, the x/y types need not
// be the same as the size of the A and B types.
// flipxy false: aki = (xtype) A(k,i) and bkj = (ytype) B(k,j)
// flipxy true: aki = (ytype) A(k,i) and bkj = (xtype) B(k,j)
size_t aki_size = flipxy ? ysize : xsize ;
size_t bkj_size = flipxy ? xsize : ysize ;
GB_void *GB_RESTRICT terminal = (GB_void *) add->terminal ;
GB_cast_function cast_A, cast_B ;
if (flipxy)
{
// A is typecasted to y, and B is typecasted to x
cast_A = A_is_pattern ? NULL :
GB_cast_factory (mult->ytype->code, A->type->code) ;
cast_B = B_is_pattern ? NULL :
GB_cast_factory (mult->xtype->code, B->type->code) ;
}
else
{
// A is typecasted to x, and B is typecasted to y
cast_A = A_is_pattern ? NULL :
GB_cast_factory (mult->xtype->code, A->type->code) ;
cast_B = B_is_pattern ? NULL :
GB_cast_factory (mult->ytype->code, B->type->code) ;
}
//----------------------------------------------------------------------
// C<M> = A'*B via dot products, function pointers, and typecasting
//----------------------------------------------------------------------
// aki = A(k,i), located in Ax [pA]
#define GB_GETA(aki,Ax,pA) \
GB_void aki [GB_VLA(aki_size)] ; \
if (!A_is_pattern) cast_A (aki, Ax +((pA)*asize), asize)
// bkj = B(k,j), located in Bx [pB]
#define GB_GETB(bkj,Bx,pB) \
GB_void bkj [GB_VLA(bkj_size)] ; \
if (!B_is_pattern) cast_B (bkj, Bx +((pB)*bsize), bsize)
// break if cij reaches the terminal value
#define GB_DOT_TERMINAL(cij) \
if (terminal != NULL && memcmp (cij, terminal, csize) == 0) \
{ \
break ; \
}
// C(i,j) = A(i,k) * B(k,j)
#define GB_MULT(cij, aki, bkj) \
GB_FMULT (cij, aki, bkj)
// C(i,j) += A(i,k) * B(k,j)
#define GB_MULTADD(cij, aki, bkj) \
GB_void zwork [GB_VLA(csize)] ; \
GB_MULT (zwork, aki, bkj) ; \
fadd (cij, cij, zwork)
// define cij for each task
#define GB_CIJ_DECLARE(cij) \
GB_void cij [GB_VLA(csize)]
// address of Cx [p]
#define GB_CX(p) Cx +((p)*csize)
// save the value of C(i,j)
#define GB_CIJ_SAVE(cij,p) \
memcpy (GB_CX (p), cij, csize)
#define GB_ATYPE GB_void
#define GB_BTYPE GB_void
#define GB_CTYPE GB_void
// no vectorization
#define GB_PRAGMA_SIMD_VECTORIZE ;
#define GB_PRAGMA_SIMD_DOT(cij) ;
if (flipxy)
{
#define GB_FMULT(z,x,y) fmult (z,y,x)
#include "GB_AxB_dot3_template.c"
#undef GB_FMULT
}
else
{
#define GB_FMULT(z,x,y) fmult (z,x,y)
#include "GB_AxB_dot3_template.c"
#undef GB_FMULT
}
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
if (C->nzombies > 0) { if (!GB_queue_insert (C)) GB_PANIC ; } // TODO in 4.0: delete
GB_FREE_WORK ;
ASSERT_MATRIX_OK (C, "dot3: C<M> = A'*B output", GB0) ;
ASSERT (*Chandle == C) ;
ASSERT (GB_ZOMBIES_OK (C)) ;
ASSERT (!GB_PENDING (C)) ;
return (GrB_SUCCESS) ;
}
|
spmm_hicoo_sks_mat.c | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include <HiParTI.h>
/* Benchmark driver: sparse-matrix times dense-matrix (SpMM) in HiCOO format.
 * Reads a sparse matrix, multiplies it by a random dense matrix B (C = A * B),
 * and times the sequential or OpenMP kernels while sweeping the scheduler
 * kernel size (sk) to find the best-performing configuration. */
int main(int argc, char * const argv[]) {
    char * mm_filename = NULL;      /* path of the input matrix (for logging) */
    FILE *fi = NULL, *fo = NULL;    /* input matrix file / optional output dump */
    ptiSparseMatrix spA;            /* input sparse matrix (COO) */
    ptiSparseMatrixHiCOO hispA;     /* HiCOO-converted copy of spA */
    ptiMatrix B, C;                 /* dense factor B and result C = spA * B */
    ptiElementIndex sb_bits = 7; // 2^7 by default
    ptiIndex R = 16;                /* number of columns of B (the rank) */
    int niters = 5;                 /* timed repetitions per configuration */
    ptiTimer timer;
    ptiNewTimer(&timer, 0);

    /* OpenMP */
    int cuda_dev_id = -2;           /* -2: sequential, -1: OpenMP (no CUDA path in this driver) */
    int nthreads = 1; // get from OMP_NUM_THREADS environment
    ptiElementIndex sk_bits = sb_bits;  /* log2 of the scheduler kernel size */
    int use_schedule = 0; // privatization or not
    int par_iters = 0; // determine in the code
    ptiMatrix * Cbufs;              /* per-thread output buffers for the _Reduce variant */
    double min_time = 100000.0;     /* best elapsed time seen in the sk sweep */
    ptiElementIndex min_time_sk_bits;   /* sk_bits value that achieved min_time */

    static struct option long_options[] = {
        {"input", required_argument, 0, 'i'},
        {"output", optional_argument, 0, 'o'},
        {"bs", optional_argument, 0, 'b'},
        {"R", optional_argument, 0, 'r'},
        {"cuda-dev-id", optional_argument, 0, 'd'},
        {"use-schedule", optional_argument, 0, 'u'},
        {0, 0, 0, 0}
    };

    /* Parse command-line options. */
    for(;;) {
        int option_index = 0;
        int c = 1;
        c = getopt_long(argc, argv, "i:o:b:r:d:u:", long_options, &option_index);
        if(c == -1) {
            break;
        }
        switch(c) {
        case 'i':
            mm_filename = optarg;
            fi = fopen(optarg, "r");
            ptiAssert(fi != NULL);
            break;
        case 'o':
            fo = fopen(optarg, "w");
            ptiAssert(fo != NULL);
            break;
        case 'b':
            sscanf(optarg, "%"HIPARTI_SCN_ELEMENT_INDEX, &sb_bits);
            break;
        case 'r':
            sscanf(optarg, "%"HIPARTI_SCN_INDEX, &R);
            break;
        case 'd':
            sscanf(optarg, "%d", &cuda_dev_id);
            break;
        case 'u':
            sscanf(optarg, "%d", &use_schedule);
            break;
        default:
            abort();
        }
    }

    printf("B ncols: %d\n", R);
    printf("niters: %d\n", niters);
    printf("cuda_dev_id: %d\n", cuda_dev_id);
    if(cuda_dev_id == -1) {
        printf("use_schedule: %d\n", use_schedule);
#ifdef HIPARTI_USE_OPENMP
        /* Discover the OpenMP team size (set via OMP_NUM_THREADS). */
#pragma omp parallel
        nthreads = omp_get_num_threads();
#endif
        printf("nthreads: %d\n", nthreads);
    }

    if(optind > argc || argc < 2) {
        printf("Usage: %s\n", argv[0]);
        printf("Options: -i INPUT, --input=INPUT\n");
        /* NOTE(review): the help text below diverges from long_options: the long
         * forms are actually --bs and --use-schedule, not --blocksize / --ur.
         * Confirm intended option names before changing either side. */
        printf("         -o OUTPUT, --output=OUTPUT\n");
        printf("         -b BLOCKSIZE (bits), --blocksize=BLOCKSIZE (bits)\n");
        printf("         -R RANK\n");
        printf("         -d CUDA_DEV_ID, --cuda-dev-id=DEV_ID\n");
        printf("         -u use_schedule, --ur=use_schedule\n");
        printf("\n");
        return 1;
    }

    /* Load the sparse matrix (COO) from the input file. */
    printf("Reading sparse matrix from file (%s) ...",mm_filename);
    fflush(stdout);
    ptiAssert(ptiLoadSparseMatrix(&spA, 1, fi) == 0);
    fclose(fi);
    printf(" done\n");
    ptiSparseMatrixStatus(&spA, stdout);
    // ptiAssert(ptiDumpSparseMatrix(&spA, 0, stdout) == 0);

    /* B is a random dense (ncols x R) factor; C accumulates the (nrows x R) result. */
    ptiNewMatrix(&B, spA.ncols, R);
    ptiRandomizeMatrix(&B);
    ptiNewMatrix(&C, spA.nrows, R);
    ptiConstantMatrix(&C, 0);
    // ptiAssert(ptiDumpMatrix(&B, stdout) == 0);
    // ptiAssert(ptiDumpMatrix(&C, stdout) == 0);

    /* Set max_sk */
    /* Cap block/kernel sizes so they never exceed the smaller matrix dimension. */
    ptiIndex min_ndims = (spA.nrows > spA.ncols) ? spA.ncols : spA.nrows;
    ptiElementIndex max_bits = (ptiElementIndex)(log2(min_ndims));
    if(max_bits < sb_bits) {
        sb_bits = max_bits;
        sk_bits = max_bits;
    }
    if(cuda_dev_id == -2) max_bits = sk_bits; // only run one case for sequential code
    printf("sb: %ld\n", (long int)pow(2,sb_bits));

#ifdef HIPARTI_USE_OPENMP
    /* Sweep the scheduler kernel size from sb up to max_bits. */
    printf("max_bits: %d (%ld)\n", (int)(max_bits), (long int)pow(2,max_bits));
    for(ptiElementIndex sk_bits = sb_bits; sk_bits <= max_bits; sk_bits += 1) {
        printf("sk: %ld\n", (long int)pow(2,sk_bits));
#else
    /* Sequential build: run a single configuration. */
    {
#endif

        /* Convert to HiCOO */
        ptiNnzIndex max_nnzb = 0;
        ptiAssert(ptiSparseMatrixToHiCOO(&hispA, &max_nnzb, &spA, sb_bits, sk_bits) == 0);  // TODO
        ptiSparseMatrixStatusHiCOO(&hispA, stdout);
        // ptiAssert(ptiDumpSparseMatrixHiCOO(&hispA, stdout) == 0);

        /* determine niters or num_kernel_dim to be parallelized */
#ifdef HIPARTI_USE_OPENMP
        /* Few kernel rows but many iterations per row favors the reduce
         * (privatized-buffer) variant. */
        ptiIndex sk = (ptiIndex)pow(2, hispA.sk_bits);
        ptiIndex num_kernel_dim = (hispA.nrows + sk - 1) / sk;
        printf("num_kernel_dim: %u, hispA.nkiters / num_kernel_dim: %u\n", num_kernel_dim, hispA.nkiters/num_kernel_dim);
        if(num_kernel_dim <= NUM_CORES && hispA.nkiters / num_kernel_dim >= 20) {
            par_iters = 1;
        }

        /* Set zeros for temporary CBufs */
        /* NOTE(review): Cbufs is (re)allocated on every sk iteration once
         * par_iters == 1 but only freed once after the sweep — looks like the
         * earlier allocations leak; verify against upstream. */
        char * bytestr;
        if(cuda_dev_id == -1 && par_iters == 1) {
            Cbufs = (ptiMatrix *)malloc(nthreads * sizeof(ptiMatrix));
            for(int t=0; t<nthreads; ++t) {
                ptiAssert(ptiNewMatrix(&Cbufs[t], hispA.nrows, R) == 0);
                ptiAssert(ptiConstantMatrix(&Cbufs[t], 0) == 0);
            }
            ptiNnzIndex bytes = nthreads * hispA.nrows * R * sizeof(ptiValue);
            bytestr = ptiBytesString(bytes);
            printf("MATRIX BUFFER=%s\n\n", bytestr);
            free(bytestr);
        }
#endif

        // Warm-up
        if(cuda_dev_id == -2) {
            printf("Run ptiSparseMatrixMulMatrixHiCOO:\n");
            ptiSparseMatrixMulMatrixHiCOO(&C, &hispA, &B);
        } else if(cuda_dev_id == -1) {
            if(use_schedule == 1) {
                if(par_iters == 0) {
                    printf("Run ptiOmpSparseMatrixMulMatrixHiCOO_Schedule:\n");
                    ptiOmpSparseMatrixMulMatrixHiCOO_Schedule(&C, &hispA, &B);
                } else {
                    printf("Run ptiOmpSparseMatrixMulMatrixHiCOO_Schedule_Reduce:\n");
                    ptiOmpSparseMatrixMulMatrixHiCOO_Schedule_Reduce(&C, Cbufs, &hispA, &B);
                }
            } else {
                printf("Run ptiOmpSparseMatrixMulMatrixHiCOO:\n");
                ptiOmpSparseMatrixMulMatrixHiCOO(&C, &hispA, &B);
            }
        }

        /* Timed runs: same kernel selection as the warm-up above. */
        ptiStartTimer(timer);
        for(int i=0; i<niters; ++i) {
            if(cuda_dev_id == -2) {
                ptiSparseMatrixMulMatrixHiCOO(&C, &hispA, &B);
            } else if(cuda_dev_id == -1) {
                if(use_schedule == 1) {
                    if(par_iters == 0) {
                        ptiOmpSparseMatrixMulMatrixHiCOO_Schedule(&C, &hispA, &B);
                    } else {
                        ptiOmpSparseMatrixMulMatrixHiCOO_Schedule_Reduce(&C, Cbufs, &hispA, &B);
                    }
                } else {
                    ptiOmpSparseMatrixMulMatrixHiCOO(&C, &hispA, &B);
                }
            }
        }
        ptiStopTimer(timer);
        printf("\n");
        double elapsed_time = ptiPrintAverageElapsedTime(timer, niters, "HiCOO-SpMM");
        fflush(stdout);

        /* Get the best perf */
        if (min_time > elapsed_time) {
            min_time = elapsed_time;
            min_time_sk_bits = sk_bits;
        }

        ptiFreeSparseMatrixHiCOO(&hispA);
    }   // End Loop of sks

    /* Report GFLOPS for the best configuration (2 flops per nonzero per column). */
    ptiNnzIndex flops = 2 * spA.nnz * R;
    ptiPrintGFLOPS(min_time, flops, "HiCOO-SpMM");
    if(cuda_dev_id == -1)
        printf("min_time_sk_bits: %u (%ld) \n", (unsigned)min_time_sk_bits, (long int)pow(2,min_time_sk_bits));

    if(fo != NULL) {
        ptiAssert(ptiDumpMatrix(&C, fo) == 0);
        fclose(fo);
    }

#ifdef HIPARTI_USE_OPENMP
    if(cuda_dev_id == -1 && par_iters == 1) {
        for(int t=0; t<nthreads; ++t) {
            ptiFreeMatrix(&Cbufs[t]);
        }
        free(Cbufs);
    }
#endif

    ptiFreeSparseMatrix(&spA);
    ptiFreeMatrix(&B);
    ptiFreeMatrix(&C);
    ptiFreeTimer(timer);

    return 0;
}
|
interp1.c | /*
* File: interp1.c
*
* MATLAB Coder version : 3.0
* C/C++ source code generated on : 15-Nov-2015 19:51:15
*/
/* Include Files */
#include "rt_nonfinite.h"
#include "yaapt.h"
#include "interp1.h"
#include "yaapt_emxutil.h"
#include "pchip.h"
/* Function Definitions */
/*
* Arguments : const emxArray_real_T *varargin_1
* const emxArray_real_T *varargin_2
* const emxArray_real_T *varargin_3
* emxArray_real_T *Vq
* Return Type : void
*/
/*
 * 1-D interpolation of varargin_2 (sample values) over varargin_1 (sample
 * points) at the query points varargin_3, using a shape-preserving piecewise
 * cubic (PCHIP). Results are written to Vq (same length as the query vector).
 * If any sample point is NaN, Vq is left zero-filled. Descending sample
 * points are reversed into ascending order before fitting.
 *
 * Fix vs. the generated original: the inner `if (rtIsNaN(xi->data[b_k - 1]))`
 * inside the else-branch of the identical outer check was always false (the
 * outer branch already handled the NaN case), so its then-branch was dead
 * code. The redundant test has been removed; behavior is unchanged.
 *
 * Arguments    : const emxArray_real_T *varargin_1  -- sample points (1 x nx)
 *                const emxArray_real_T *varargin_2  -- sample values (1 x nx)
 *                const emxArray_real_T *varargin_3  -- query points
 *                emxArray_real_T *Vq                -- interpolated output
 * Return Type  : void
 */
void interp1(const emxArray_real_T *varargin_1, const emxArray_real_T
             *varargin_2, const emxArray_real_T *varargin_3, emxArray_real_T *Vq)
{
  emxArray_real_T *y;
  int j2;
  int nd2;
  emxArray_real_T *x;
  int nx;
  unsigned int outsize[2];
  emxArray_real_T *xi;
  struct_T pp;
  emxArray_real_T *b_y;
  int exitg1;
  int b_j1;
  double xtmp;
  int k;
  int b_k;
  double v;
  int low_i;
  int low_ip1;
  int high_i;
  int mid_i;
  double xloc;

  /* Working copies of the sample values (y) and sample points (x), so the
     inputs can be reversed in place if they are descending. */
  emxInit_real_T(&y, 2);
  j2 = y->size[0] * y->size[1];
  y->size[0] = 1;
  y->size[1] = varargin_2->size[1];
  emxEnsureCapacity((emxArray__common *)y, j2, (int)sizeof(double));
  nd2 = varargin_2->size[0] * varargin_2->size[1];
  for (j2 = 0; j2 < nd2; j2++) {
    y->data[j2] = varargin_2->data[j2];
  }

  emxInit_real_T(&x, 2);
  j2 = x->size[0] * x->size[1];
  x->size[0] = 1;
  x->size[1] = varargin_1->size[1];
  emxEnsureCapacity((emxArray__common *)x, j2, (int)sizeof(double));
  nd2 = varargin_1->size[0] * varargin_1->size[1];
  for (j2 = 0; j2 < nd2; j2++) {
    x->data[j2] = varargin_1->data[j2];
  }

  nx = varargin_1->size[1];

  /* Size the output like the query vector and zero-fill it. */
  for (j2 = 0; j2 < 2; j2++) {
    outsize[j2] = (unsigned int)varargin_3->size[j2];
  }

  j2 = Vq->size[0] * Vq->size[1];
  Vq->size[0] = 1;
  Vq->size[1] = (int)outsize[1];
  emxEnsureCapacity((emxArray__common *)Vq, j2, (int)sizeof(double));
  nd2 = (int)outsize[1];
  for (j2 = 0; j2 < nd2; j2++) {
    Vq->data[j2] = 0.0;
  }

  if (varargin_3->size[1] == 0) {
    /* No query points: Vq stays empty. */
  } else {
    nd2 = 1;
    emxInit_real_T(&xi, 2);
    emxInitStruct_struct_T(&pp);
    emxInit_real_T(&b_y, 2);
    do {
      exitg1 = 0;
      if (nd2 <= nx) {
        /* Scan the sample points; a NaN aborts the whole interpolation,
           leaving Vq zero-filled. */
        if (rtIsNaN(varargin_1->data[nd2 - 1])) {
          exitg1 = 1;
        } else {
          nd2++;
        }
      } else {
        /* All sample points are finite. If they are descending, reverse both
           x and y into ascending order (pchip requires ascending breaks).
           NOTE(review): assumes nx >= 2 here (reads varargin_1->data[1]);
           confirm callers never pass a single sample point. */
        if (varargin_1->data[1] < varargin_1->data[0]) {
          j2 = nx >> 1;
          for (b_j1 = 1; b_j1 <= j2; b_j1++) {
            xtmp = x->data[b_j1 - 1];
            x->data[b_j1 - 1] = x->data[nx - b_j1];
            x->data[nx - b_j1] = xtmp;
          }

          nd2 = varargin_2->size[1] >> 1;
          for (b_j1 = 1; b_j1 <= nd2; b_j1++) {
            j2 = varargin_2->size[1] - b_j1;
            xtmp = y->data[b_j1 - 1];
            y->data[b_j1 - 1] = y->data[j2];
            y->data[j2] = xtmp;
          }
        }

        /* Copy the query points and sample values into the layouts pchip
           expects, then build the piecewise polynomial pp. */
        j2 = xi->size[0] * xi->size[1];
        xi->size[0] = 1;
        xi->size[1] = varargin_3->size[1];
        emxEnsureCapacity((emxArray__common *)xi, j2, (int)sizeof(double));
        nd2 = varargin_3->size[0] * varargin_3->size[1];
        for (j2 = 0; j2 < nd2; j2++) {
          xi->data[j2] = varargin_3->data[j2];
        }

        nd2 = y->size[1];
        j2 = b_y->size[0] * b_y->size[1];
        b_y->size[0] = 1;
        b_y->size[1] = nd2;
        emxEnsureCapacity((emxArray__common *)b_y, j2, (int)sizeof(double));
        for (j2 = 0; j2 < nd2; j2++) {
          b_y->data[b_y->size[0] * j2] = y->data[j2];
        }

        pchip(x, b_y, pp.breaks, pp.coefs);
        nd2 = varargin_3->size[1];

        /* Evaluate the piecewise cubic at every query point in parallel.
           Each iteration touches only Vq->data[b_k - 1], so iterations are
           independent; all scratch variables are privatized. */
#pragma omp parallel for \
 num_threads(omp_get_max_threads()) \
 private(b_k,low_i,v,low_ip1,high_i,xloc,mid_i)

        for (k = 1; k <= nd2; k++) {
          b_k = k;
          if (rtIsNaN(xi->data[b_k - 1])) {
            /* NaN query maps to NaN output. */
            Vq->data[b_k - 1] = rtNaN;
          } else {
            /* Binary search for the breakpoint interval containing the
               query point (overflow-safe midpoint computation). */
            low_i = 1;
            low_ip1 = 2;
            high_i = pp.breaks->size[1];
            while (high_i > low_ip1) {
              mid_i = (low_i >> 1) + (high_i >> 1);
              if (((low_i & 1) == 1) && ((high_i & 1) == 1)) {
                mid_i++;
              }

              if (xi->data[b_k - 1] >= pp.breaks->data[mid_i - 1]) {
                low_i = mid_i;
                low_ip1 = mid_i + 1;
              } else {
                high_i = mid_i;
              }
            }

            /* Horner evaluation of the cubic on the found interval. */
            xloc = xi->data[b_k - 1] - pp.breaks->data[low_i - 1];
            v = pp.coefs->data[low_i - 1];
            for (low_ip1 = 0; low_ip1 < 3; low_ip1++) {
              v = xloc * v + pp.coefs->data[(low_i + (low_ip1 + 1) *
                (pp.breaks->size[1] - 1)) - 1];
            }

            Vq->data[b_k - 1] = v;
          }
        }

        exitg1 = 1;
      }
    } while (exitg1 == 0);

    emxFree_real_T(&b_y);
    emxFreeStruct_struct_T(&pp);
    emxFree_real_T(&xi);
  }

  emxFree_real_T(&x);
  emxFree_real_T(&y);
}
/*
* File trailer for interp1.c
*
* [EOF]
*/
|
DeclOpenMP.h | //===--- OpenMP.h - Classes for representing OpenMP directives ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief This file defines OpenMP nodes.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_OPENMP_H
#define LLVM_CLANG_AST_OPENMP_H
#include "clang/AST/DeclBase.h"
#include "llvm/ADT/ArrayRef.h"
namespace clang {
class DeclRefExpr;
/// \brief This represents '#pragma omp threadprivate ...' directive.
/// For example, in the following, both 'a' and 'A::b' are threadprivate:
///
/// \code
/// int a;
/// #pragma omp threadprivate(a)
/// struct A {
/// static int b;
/// #pragma omp threadprivate(b)
/// };
/// \endcode
///
class OMPThreadPrivateDecl : public Decl {
  friend class ASTDeclReader;

  /// \brief Number of variable references stored in the trailing storage
  /// that follows this object in memory.
  unsigned NumVars;

  // Anchors the vtable in a single translation unit.
  virtual void anchor();

  OMPThreadPrivateDecl(Kind DK, DeclContext *DC, SourceLocation L) :
    Decl(DK, DC, L), NumVars(0) { }

  /// \brief Returns a read-only view of the variable references.
  /// The DeclRefExpr pointers are tail-allocated immediately after this
  /// object (hence the 'this + 1' arithmetic).
  ArrayRef<const DeclRefExpr *> getVars() const {
    return ArrayRef<const DeclRefExpr *>(
             reinterpret_cast<const DeclRefExpr * const *>(this + 1),
             NumVars);
  }

  /// \brief Returns a mutable view of the tail-allocated variable references.
  llvm::MutableArrayRef<DeclRefExpr *> getVars() {
    return llvm::MutableArrayRef<DeclRefExpr *>(
             reinterpret_cast<DeclRefExpr **>(this + 1),
             NumVars);
  }

  // Copies VL into the tail-allocated storage (defined out of line).
  void setVars(ArrayRef<DeclRefExpr *> VL);

public:
  /// \brief Creates a threadprivate directive declaration for the variables VL.
  static OMPThreadPrivateDecl *Create(ASTContext &C, DeclContext *DC,
                                      SourceLocation L,
                                      ArrayRef<DeclRefExpr *> VL);
  /// \brief Creates an empty declaration with room for N variables,
  /// for AST deserialization.
  static OMPThreadPrivateDecl *CreateDeserialized(ASTContext &C,
                                                  unsigned ID, unsigned N);

  typedef llvm::MutableArrayRef<DeclRefExpr *>::iterator varlist_iterator;
  typedef ArrayRef<const DeclRefExpr *>::iterator varlist_const_iterator;

  unsigned varlist_size() const { return NumVars; }
  bool varlist_empty() const { return NumVars == 0; }
  varlist_iterator varlist_begin() { return getVars().begin(); }
  varlist_iterator varlist_end() { return getVars().end(); }
  varlist_const_iterator varlist_begin() const { return getVars().begin(); }
  varlist_const_iterator varlist_end() const { return getVars().end(); }

  // LLVM-style RTTI support.
  static bool classof(const Decl *D) { return classofKind(D->getKind()); }
  static bool classofKind(Kind K) { return K == OMPThreadPrivate; }
};
} // end namespace clang
#endif
|
GB_unaryop__identity_int8_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int8_fp64
// op(A') function: GB_tran__identity_int8_fp64
// C type: int8_t
// A type: double
// cast: int8_t cij ; GB_CAST_SIGNED(cij,aij,8)
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
int8_t z ; GB_CAST_SIGNED(z,x,8) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity operator with an fp64 -> int8
// typecast to every entry of Ax. Returns GrB_NO_VALUE when this kernel is
// compile-time disabled (see GB_DISABLE), GrB_SUCCESS otherwise.
GrB_Info GB_unop__identity_int8_fp64
(
    int8_t *restrict Cx,        // output array, anz entries, already allocated
    const double *restrict Ax,  // input array, anz entries
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // Entries are independent, so a static schedule divides them evenly.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // Cx [p] = (int8_t) Ax [p], via GB_CAST_SIGNED (see macros above)
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecasting fp64 -> int8 with the identity
// operator applied to each entry. The actual work is performed by the shared
// template GB_unaryop_transpose.c, specialized via the macros defined above.
GrB_Info GB_tran__identity_int8_fp64
(
    GrB_Matrix C,                       // output matrix, C = op (cast (A'))
    const GrB_Matrix A,                 // input matrix
    int64_t **Rowcounts,                // per-slice workspace used by the template
    GBI_single_iterator Iter,           // iterator over the vectors of A
    const int64_t *restrict A_slice,    // partition of A's work across slices
    int naslice                         // number of slices
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // Phase 2 of the two-phase transpose (phase 1 counted entries per row).
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
DenseMatrix.h | //=================================================================================================
/*!
// \file blaze/math/smp/openmp/DenseMatrix.h
// \brief Header file for the OpenMP-based dense matrix SMP implementation
//
// Copyright (C) 2012-2019 Klaus Iglberger - All Rights Reserved
//
// This file is part of the Blaze library. You can redistribute it and/or modify it under
// the terms of the New (Revised) BSD License. Redistribution and use in source and binary
// forms, with or without modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other materials
// provided with the distribution.
// 3. Neither the names of the Blaze development group nor the names of its contributors
// may be used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
*/
//=================================================================================================
#ifndef _BLAZE_MATH_SMP_OPENMP_DENSEMATRIX_H_
#define _BLAZE_MATH_SMP_OPENMP_DENSEMATRIX_H_
//*************************************************************************************************
// Includes
//*************************************************************************************************
#include <omp.h>
#include <blaze/math/Aliases.h>
#include <blaze/math/AlignmentFlag.h>
#include <blaze/math/constraints/SMPAssignable.h>
#include <blaze/math/expressions/DenseMatrix.h>
#include <blaze/math/expressions/SparseMatrix.h>
#include <blaze/math/simd/SIMDTrait.h>
#include <blaze/math/smp/ParallelSection.h>
#include <blaze/math/smp/SerialSection.h>
#include <blaze/math/smp/ThreadMapping.h>
#include <blaze/math/StorageOrder.h>
#include <blaze/math/typetraits/IsDenseMatrix.h>
#include <blaze/math/typetraits/IsSIMDCombinable.h>
#include <blaze/math/typetraits/IsSMPAssignable.h>
#include <blaze/math/views/Submatrix.h>
#include <blaze/system/SMP.h>
#include <blaze/util/algorithms/Min.h>
#include <blaze/util/Assert.h>
#include <blaze/util/EnableIf.h>
#include <blaze/util/FunctionTrace.h>
#include <blaze/util/StaticAssert.h>
#include <blaze/util/Types.h>
namespace blaze {
//=================================================================================================
//
// OPENMP-BASED ASSIGNMENT KERNELS
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP (compound) assignment of a dense matrix to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side dense matrix to be assigned.
// \param op The (compound) assignment operation.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a dense
// matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1   // Type of the left-hand side dense matrix
        , bool SO1       // Storage order of the left-hand side dense matrix
        , typename MT2   // Type of the right-hand side dense matrix
        , bool SO2       // Storage order of the right-hand side dense matrix
        , typename OP >  // Type of the assignment operation
void openmpAssign( DenseMatrix<MT1,SO1>& lhs, const DenseMatrix<MT2,SO2>& rhs, OP op )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   using ET1 = ElementType_t<MT1>;
   using ET2 = ElementType_t<MT2>;

   // Vectorized submatrix kernels are only usable if both element types combine via SIMD.
   constexpr bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && IsSIMDCombinable_v<ET1,ET2> );
   constexpr size_t SIMDSIZE( SIMDTrait< ElementType_t<MT1> >::size );

   const bool lhsAligned( (~lhs).isAligned() );
   const bool rhsAligned( (~rhs).isAligned() );

   const int threads( omp_get_num_threads() );
   const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );

   // Ceil-divide the rows over the row groups of the thread grid; when SIMD is
   // possible, pad each share up to the next multiple of SIMDSIZE.
   const size_t rowAddon    ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
   const size_t rawRowShare ( (~rhs).rows() / threadmap.first + rowAddon );
   const size_t rowRemainder( rawRowShare & ( SIMDSIZE - 1UL ) );
   const size_t rowsPerThread( ( simdEnabled && rowRemainder )
                               ?( rawRowShare - rowRemainder + SIMDSIZE )
                               :( rawRowShare ) );

   // Same partitioning scheme for the columns over the column groups.
   const size_t colAddon    ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
   const size_t rawColShare ( (~rhs).columns() / threadmap.second + colAddon );
   const size_t colRemainder( rawColShare & ( SIMDSIZE - 1UL ) );
   const size_t colsPerThread( ( simdEnabled && colRemainder )
                               ?( rawColShare - colRemainder + SIMDSIZE )
                               :( rawColShare ) );

   #pragma omp for schedule(dynamic,1) nowait
   for( int t=0; t<threads; ++t )
   {
      // Map the linear chunk index onto the 2D thread grid.
      const size_t row   ( ( t / threadmap.second ) * rowsPerThread );
      const size_t column( ( t % threadmap.second ) * colsPerThread );

      if( row >= (~rhs).rows() || column >= (~rhs).columns() )
         continue;

      // Clamp the final chunk in each direction to the matrix extents.
      const size_t m( min( rowsPerThread, (~rhs).rows()    - row    ) );
      const size_t n( min( colsPerThread, (~rhs).columns() - column ) );

      // Select the proper combination of (un)aligned submatrix views.
      if( simdEnabled && lhsAligned ) {
         auto target( submatrix<aligned>( ~lhs, row, column, m, n ) );
         if( rhsAligned ) {
            const auto source( submatrix<aligned>( ~rhs, row, column, m, n ) );
            op( target, source );
         }
         else {
            const auto source( submatrix<unaligned>( ~rhs, row, column, m, n ) );
            op( target, source );
         }
      }
      else if( simdEnabled && rhsAligned ) {
         auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
         const auto source( submatrix<aligned>( ~rhs, row, column, m, n ) );
         op( target, source );
      }
      else {
         auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
         const auto source( submatrix<unaligned>( ~rhs, row, column, m, n ) );
         op( target, source );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP (compound) assignment of a sparse matrix to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side sparse matrix to be assigned.
// \param op The (compound) assignment operation.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a sparse
// matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1   // Type of the left-hand side dense matrix
        , bool SO1       // Storage order of the left-hand side dense matrix
        , typename MT2   // Type of the right-hand side sparse matrix
        , bool SO2       // Storage order of the right-hand side sparse matrix
        , typename OP >  // Type of the assignment operation
void openmpAssign( DenseMatrix<MT1,SO1>& lhs, const SparseMatrix<MT2,SO2>& rhs, OP op )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   // Signed thread count and loop counter, consistent with the dense/dense
   // overload above and the most portable OpenMP canonical loop form.
   const int threads( omp_get_num_threads() );
   const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );

   // Ceil-divide the rows/columns over the 2D thread grid (no SIMD padding,
   // since the sparse source precludes vectorized kernels).
   const size_t addon1       ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
   const size_t rowsPerThread( (~rhs).rows() / threadmap.first + addon1 );

   const size_t addon2       ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
   const size_t colsPerThread( (~rhs).columns() / threadmap.second + addon2 );

   #pragma omp for schedule(dynamic,1) nowait
   for( int i=0; i<threads; ++i )
   {
      const size_t row   ( ( i / threadmap.second ) * rowsPerThread );
      const size_t column( ( i % threadmap.second ) * colsPerThread );

      if( row >= (~rhs).rows() || column >= (~rhs).columns() )
         continue;

      // Clamp against the right-hand side extents (lhs and rhs have identical
      // dimensions when this kernel is invoked), matching the dense overload.
      const size_t m( min( rowsPerThread, (~rhs).rows()    - row    ) );
      const size_t n( min( colsPerThread, (~rhs).columns() - column ) );

      auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
      const auto source( submatrix<unaligned>( ~rhs, row, column, m, n ) );
      op( target, source );
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// PLAIN ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be assigned.
// \return void
//
// This function implements the default OpenMP-based SMP assignment to a dense matrix. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
        , bool SO1 // Storage order of the left-hand side dense matrix
        , typename MT2 // Type of the right-hand side matrix
        , bool SO2 > // Storage order of the right-hand side matrix
inline auto smpAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
   // SFINAE: selected only when at least one operand is NOT SMP-assignable.
   -> EnableIf_t< IsDenseMatrix_v<MT1> && ( !IsSMPAssignable_v<MT1> || !IsSMPAssignable_v<MT2> ) >
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
   BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );

   // Serial fallback: forward directly to the non-SMP assignment kernel.
   assign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP assignment to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be assigned.
// \return void
//
// This function implements the OpenMP-based SMP assignment to a dense matrix. Due to the
// explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1  // Type of the left-hand side dense matrix
        , bool SO1      // Storage order of the left-hand side dense matrix
        , typename MT2  // Type of the right-hand side matrix
        , bool SO2 >    // Storage order of the right-hand side matrix
inline auto smpAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
   -> EnableIf_t< IsDenseMatrix_v<MT1> && IsSMPAssignable_v<MT1> && IsSMPAssignable_v<MT2> >
{
   BLAZE_FUNCTION_TRACE;

   // The element types themselves must be serially assignable.
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT2> );

   BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
   BLAZE_INTERNAL_ASSERT( (~lhs).rows()    == (~rhs).rows()   , "Invalid number of rows"    );

   BLAZE_PARALLEL_SECTION
   {
      // Parallelize only when no serial section is active and the right-hand side
      // expression supports an SMP assignment; otherwise run the serial kernel.
      if( !isSerialSectionActive() && (~rhs).canSMPAssign() ) {
#pragma omp parallel shared( lhs, rhs )
         openmpAssign( ~lhs, ~rhs, []( auto& a, const auto& b ){ assign( a, b ); } );
      }
      else {
         assign( ~lhs, ~rhs );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// ADDITION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP addition assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be added.
// \return void
//
// This function implements the default OpenMP-based SMP addition assignment to a dense matrix.
// Due to the explicit application of the SFINAE principle, this function can only be selected
// by the compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1  // Type of the left-hand side dense matrix
        , bool SO1      // Storage order of the left-hand side dense matrix
        , typename MT2  // Type of the right-hand side matrix
        , bool SO2 >    // Storage order of the right-hand side matrix
inline auto smpAddAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
   -> EnableIf_t< IsDenseMatrix_v<MT1> && ( !IsSMPAssignable_v<MT1> || !IsSMPAssignable_v<MT2> ) >
{
   BLAZE_FUNCTION_TRACE;

   // The caller guarantees matching dimensions; smpAddAssign() never resizes.
   BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
   BLAZE_INTERNAL_ASSERT( (~lhs).rows()    == (~rhs).rows()   , "Invalid number of rows"    );

   // At least one operand is not SMP-assignable: fall back to the serial kernel.
   addAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP addition assignment to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be added.
// \return void
//
// This function implements the OpenMP-based SMP addition assignment to a dense matrix. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1  // Type of the left-hand side dense matrix
        , bool SO1      // Storage order of the left-hand side dense matrix
        , typename MT2  // Type of the right-hand side matrix
        , bool SO2 >    // Storage order of the right-hand side matrix
inline auto smpAddAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
   -> EnableIf_t< IsDenseMatrix_v<MT1> && IsSMPAssignable_v<MT1> && IsSMPAssignable_v<MT2> >
{
   BLAZE_FUNCTION_TRACE;

   // The element types themselves must be serially assignable.
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT2> );

   BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
   BLAZE_INTERNAL_ASSERT( (~lhs).rows()    == (~rhs).rows()   , "Invalid number of rows"    );

   BLAZE_PARALLEL_SECTION
   {
      // Parallelize only when no serial section is active and the right-hand side
      // expression supports an SMP assignment; otherwise run the serial kernel.
      if( !isSerialSectionActive() && (~rhs).canSMPAssign() ) {
#pragma omp parallel shared( lhs, rhs )
         openmpAssign( ~lhs, ~rhs, []( auto& a, const auto& b ){ addAssign( a, b ); } );
      }
      else {
         addAssign( ~lhs, ~rhs );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// SUBTRACTION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP subtraction assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be subtracted.
// \return void
//
// This function implements the default OpenMP-based SMP subtraction assignment to a dense matrix.
// Due to the explicit application of the SFINAE principle, this function can only be selected by
// the compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1  // Type of the left-hand side dense matrix
        , bool SO1      // Storage order of the left-hand side dense matrix
        , typename MT2  // Type of the right-hand side matrix
        , bool SO2 >    // Storage order of the right-hand side matrix
inline auto smpSubAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
   -> EnableIf_t< IsDenseMatrix_v<MT1> && ( !IsSMPAssignable_v<MT1> || !IsSMPAssignable_v<MT2> ) >
{
   BLAZE_FUNCTION_TRACE;

   // The caller guarantees matching dimensions; smpSubAssign() never resizes.
   BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
   BLAZE_INTERNAL_ASSERT( (~lhs).rows()    == (~rhs).rows()   , "Invalid number of rows"    );

   // At least one operand is not SMP-assignable: fall back to the serial kernel.
   subAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP subtraction assignment to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be subtracted.
// \return void
//
// This function implements the OpenMP-based SMP subtraction assignment of a matrix to a
// dense matrix. Due to the explicit application of the SFINAE principle, this function can only
// be selected by the compiler in case both operands are SMP-assignable and the element types of
// both operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1  // Type of the left-hand side dense matrix
        , bool SO1      // Storage order of the left-hand side dense matrix
        , typename MT2  // Type of the right-hand side matrix
        , bool SO2 >    // Storage order of the right-hand side matrix
inline auto smpSubAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
   -> EnableIf_t< IsDenseMatrix_v<MT1> && IsSMPAssignable_v<MT1> && IsSMPAssignable_v<MT2> >
{
   BLAZE_FUNCTION_TRACE;

   // The element types themselves must be serially assignable.
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT2> );

   BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
   BLAZE_INTERNAL_ASSERT( (~lhs).rows()    == (~rhs).rows()   , "Invalid number of rows"    );

   BLAZE_PARALLEL_SECTION
   {
      // Parallelize only when no serial section is active and the right-hand side
      // expression supports an SMP assignment; otherwise run the serial kernel.
      if( !isSerialSectionActive() && (~rhs).canSMPAssign() ) {
#pragma omp parallel shared( lhs, rhs )
         openmpAssign( ~lhs, ~rhs, []( auto& a, const auto& b ){ subAssign( a, b ); } );
      }
      else {
         subAssign( ~lhs, ~rhs );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// SCHUR PRODUCT ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP Schur product assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix for the Schur product.
// \return void
//
// This function implements the default OpenMP-based SMP Schur product assignment to a dense
// matrix. Due to the explicit application of the SFINAE principle, this function can only be
// selected by the compiler in case both operands are SMP-assignable and the element types of
// both operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1  // Type of the left-hand side dense matrix
        , bool SO1      // Storage order of the left-hand side dense matrix
        , typename MT2  // Type of the right-hand side matrix
        , bool SO2 >    // Storage order of the right-hand side matrix
inline auto smpSchurAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
   -> EnableIf_t< IsDenseMatrix_v<MT1> && ( !IsSMPAssignable_v<MT1> || !IsSMPAssignable_v<MT2> ) >
{
   BLAZE_FUNCTION_TRACE;

   // The caller guarantees matching dimensions; smpSchurAssign() never resizes.
   BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
   BLAZE_INTERNAL_ASSERT( (~lhs).rows()    == (~rhs).rows()   , "Invalid number of rows"    );

   // At least one operand is not SMP-assignable: fall back to the serial kernel.
   schurAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP Schur product assignment to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix for the Schur product.
// \return void
//
// This function implements the OpenMP-based SMP Schur product assignment to a dense matrix. Due
// to the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1  // Type of the left-hand side dense matrix
        , bool SO1      // Storage order of the left-hand side dense matrix
        , typename MT2  // Type of the right-hand side matrix
        , bool SO2 >    // Storage order of the right-hand side matrix
inline auto smpSchurAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
   -> EnableIf_t< IsDenseMatrix_v<MT1> && IsSMPAssignable_v<MT1> && IsSMPAssignable_v<MT2> >
{
   BLAZE_FUNCTION_TRACE;

   // The element types themselves must be serially assignable.
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT2> );

   BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
   BLAZE_INTERNAL_ASSERT( (~lhs).rows()    == (~rhs).rows()   , "Invalid number of rows"    );

   BLAZE_PARALLEL_SECTION
   {
      // Parallelize only when no serial section is active and the right-hand side
      // expression supports an SMP assignment; otherwise run the serial kernel.
      if( !isSerialSectionActive() && (~rhs).canSMPAssign() ) {
#pragma omp parallel shared( lhs, rhs )
         openmpAssign( ~lhs, ~rhs, []( auto& a, const auto& b ){ schurAssign( a, b ); } );
      }
      else {
         schurAssign( ~lhs, ~rhs );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// MULTIPLICATION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP multiplication assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be multiplied.
// \return void
//
// This function implements the default OpenMP-based SMP multiplication assignment to a dense
// matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1  // Type of the left-hand side dense matrix
        , bool SO1      // Storage order of the left-hand side matrix
        , typename MT2  // Type of the right-hand side matrix
        , bool SO2 >    // Storage order of the right-hand side matrix
inline auto smpMultAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
   -> EnableIf_t< IsDenseMatrix_v<MT1> >
{
   BLAZE_FUNCTION_TRACE;

   // The caller guarantees matching dimensions; smpMultAssign() never resizes.
   BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
   BLAZE_INTERNAL_ASSERT( (~lhs).rows()    == (~rhs).rows()   , "Invalid number of rows"    );

   // Multiplication assignment is always delegated to the serial kernel.
   multAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// COMPILE TIME CONSTRAINT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
namespace {
// Compile-time guard: this OpenMP-specific header may only be included when the
// OpenMP parallelization mode is enabled in the Blaze configuration.
BLAZE_STATIC_ASSERT( BLAZE_OPENMP_PARALLEL_MODE );
}
/*! \endcond */
//*************************************************************************************************
} // namespace blaze
#endif
|
DenseMatrix.h | //=================================================================================================
/*!
// \file blaze/math/smp/openmp/DenseMatrix.h
// \brief Header file for the OpenMP-based dense matrix SMP implementation
//
// Copyright (C) 2012-2017 Klaus Iglberger - All Rights Reserved
//
// This file is part of the Blaze library. You can redistribute it and/or modify it under
// the terms of the New (Revised) BSD License. Redistribution and use in source and binary
// forms, with or without modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other materials
// provided with the distribution.
// 3. Neither the names of the Blaze development group nor the names of its contributors
// may be used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
*/
//=================================================================================================
#ifndef _BLAZE_MATH_SMP_OPENMP_DENSEMATRIX_H_
#define _BLAZE_MATH_SMP_OPENMP_DENSEMATRIX_H_
//*************************************************************************************************
// Includes
//*************************************************************************************************
#include <omp.h>
#include <blaze/math/Aliases.h>
#include <blaze/math/AlignmentFlag.h>
#include <blaze/math/constraints/SMPAssignable.h>
#include <blaze/math/expressions/DenseMatrix.h>
#include <blaze/math/expressions/SparseMatrix.h>
#include <blaze/math/simd/SIMDTrait.h>
#include <blaze/math/smp/ParallelSection.h>
#include <blaze/math/smp/SerialSection.h>
#include <blaze/math/smp/ThreadMapping.h>
#include <blaze/math/StorageOrder.h>
#include <blaze/math/typetraits/IsDenseMatrix.h>
#include <blaze/math/typetraits/IsSIMDCombinable.h>
#include <blaze/math/typetraits/IsSMPAssignable.h>
#include <blaze/math/views/Submatrix.h>
#include <blaze/system/SMP.h>
#include <blaze/util/algorithms/Min.h>
#include <blaze/util/Assert.h>
#include <blaze/util/EnableIf.h>
#include <blaze/util/FunctionTrace.h>
#include <blaze/util/mpl/And.h>
#include <blaze/util/mpl/Not.h>
#include <blaze/util/mpl/Or.h>
#include <blaze/util/StaticAssert.h>
#include <blaze/util/Types.h>
namespace blaze {
//=================================================================================================
//
// PLAIN ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP assignment of a dense matrix to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side dense matrix to be assigned.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a dense
// matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side dense matrix
, bool SO2 > // Storage order of the right-hand side dense matrix
void smpAssign_backend( DenseMatrix<MT1,SO1>& lhs, const DenseMatrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
// Must be executed by every thread of an already running OpenMP parallel region.
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
using ET1 = ElementType_<MT1>;
using ET2 = ElementType_<MT2>;
// Vectorization is possible only if both matrices are SIMD-enabled and their
// element types can be combined into SIMD operations.
constexpr bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && IsSIMDCombinable<ET1,ET2>::value );
constexpr size_t SIMDSIZE( SIMDTrait< ElementType_<MT1> >::size );
// Alignment is a runtime property of the concrete matrices and decides which
// submatrix flavor (aligned/unaligned) is used below.
const bool lhsAligned( (~lhs).isAligned() );
const bool rhsAligned( (~rhs).isAligned() );
// Partition the matrix into a threadmap.first-by-threadmap.second grid of tiles,
// one tile per thread of the enclosing parallel region.
const int threads( omp_get_num_threads() );
const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
// Rows per tile: ceiling division, then rounded up to a multiple of SIMDSIZE when
// vectorization is enabled so that tile boundaries stay SIMD-aligned (the bitmask
// assumes SIMDSIZE is a power of two).
const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
const size_t equalShare1( (~rhs).rows() / threadmap.first + addon1 );
const size_t rest1 ( equalShare1 & ( SIMDSIZE - 1UL ) );
const size_t rowsPerThread( ( simdEnabled && rest1 )?( equalShare1 - rest1 + SIMDSIZE ):( equalShare1 ) );
// Same computation for the column direction.
const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
const size_t equalShare2( (~rhs).columns() / threadmap.second + addon2 );
const size_t rest2 ( equalShare2 & ( SIMDSIZE - 1UL ) );
const size_t colsPerThread( ( simdEnabled && rest2 )?( equalShare2 - rest2 + SIMDSIZE ):( equalShare2 ) );
// Distribute the tiles across the threads of the surrounding parallel region.
#pragma omp for schedule(dynamic,1) nowait
for( int i=0; i<threads; ++i )
{
// Map the linear tile index to its (row,column) position in the tile grid.
const size_t row ( ( i / threadmap.second ) * rowsPerThread );
const size_t column( ( i % threadmap.second ) * colsPerThread );
// Rounding rowsPerThread/colsPerThread up can push trailing tiles past the end.
if( row >= (~rhs).rows() || column >= (~rhs).columns() )
continue;
// Clip the last tile in each direction to the actual matrix extent.
const size_t m( min( rowsPerThread, (~rhs).rows() - row ) );
const size_t n( min( colsPerThread, (~rhs).columns() - column ) );
// Dispatch on the runtime alignment of both operands so the fastest available
// (aligned) submatrix kernel is used where possible.
if( simdEnabled && lhsAligned && rhsAligned ) {
auto target( submatrix<aligned>( ~lhs, row, column, m, n ) );
assign( target, submatrix<aligned>( ~rhs, row, column, m, n ) );
}
else if( simdEnabled && lhsAligned ) {
auto target( submatrix<aligned>( ~lhs, row, column, m, n ) );
assign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
else if( simdEnabled && rhsAligned ) {
auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
assign( target, submatrix<aligned>( ~rhs, row, column, m, n ) );
}
else {
auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
assign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP assignment of a sparse matrix to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side sparse matrix to be assigned.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a sparse
// matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side sparse matrix
, bool SO2 > // Storage order of the right-hand side sparse matrix
void smpAssign_backend( DenseMatrix<MT1,SO1>& lhs, const SparseMatrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
// Must be executed by every thread of an already running OpenMP parallel region.
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
// Partition the matrix into a threadmap.first-by-threadmap.second grid of tiles,
// one tile per thread. No SIMD rounding here: sparse assignment is not vectorized.
const size_t threads( omp_get_num_threads() );
const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
// Rows/columns per tile via ceiling division.
const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
const size_t rowsPerThread( (~rhs).rows() / threadmap.first + addon1 );
const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
const size_t colsPerThread( (~rhs).columns() / threadmap.second + addon2 );
// Distribute the tiles across the threads of the surrounding parallel region.
#pragma omp for schedule(dynamic,1) nowait
for( size_t i=0; i<threads; ++i )
{
// Map the linear tile index to its (row,column) position in the tile grid.
const size_t row ( ( i / threadmap.second ) * rowsPerThread );
const size_t column( ( i % threadmap.second ) * colsPerThread );
if( row >= (~rhs).rows() || column >= (~rhs).columns() )
continue;
// Clip the last tile in each direction to the actual matrix extent.
// NOTE(review): the clipping uses (~lhs) dimensions while the guard above uses
// (~rhs); the caller asserts equal dimensions, so both are equivalent here.
const size_t m( min( rowsPerThread, (~lhs).rows() - row ) );
const size_t n( min( colsPerThread, (~lhs).columns() - column ) );
// Sparse sources always use the unaligned submatrix kernel.
auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
assign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be assigned.
// \return void
//
// This function implements the default OpenMP-based SMP assignment to a dense matrix. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1  // Type of the left-hand side dense matrix
        , bool SO1      // Storage order of the left-hand side dense matrix
        , typename MT2  // Type of the right-hand side matrix
        , bool SO2 >    // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>
                     , Or< Not< IsSMPAssignable<MT1> >
                         , Not< IsSMPAssignable<MT2> > > > >
   smpAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // The caller guarantees matching dimensions; smpAssign() never resizes.
   BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
   BLAZE_INTERNAL_ASSERT( (~lhs).rows()    == (~rhs).rows()   , "Invalid number of rows"    );

   // At least one operand is not SMP-assignable: fall back to the serial kernel.
   assign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP assignment to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be assigned.
// \return void
//
// This function implements the OpenMP-based SMP assignment to a dense matrix. Due to the
// explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1  // Type of the left-hand side dense matrix
        , bool SO1      // Storage order of the left-hand side dense matrix
        , typename MT2  // Type of the right-hand side matrix
        , bool SO2 >    // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > >
   smpAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // The element types themselves must be serially assignable.
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT2> );

   BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
   BLAZE_INTERNAL_ASSERT( (~lhs).rows()    == (~rhs).rows()   , "Invalid number of rows"    );

   BLAZE_PARALLEL_SECTION
   {
      // Parallelize only when no serial section is active and the right-hand side
      // expression supports an SMP assignment; otherwise run the serial kernel.
      if( !isSerialSectionActive() && (~rhs).canSMPAssign() ) {
#pragma omp parallel shared( lhs, rhs )
         smpAssign_backend( ~lhs, ~rhs );
      }
      else {
         assign( ~lhs, ~rhs );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// ADDITION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP addition assignment of a dense matrix to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side dense matrix to be added.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP addition assignment of a
// dense matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side dense matrix
, bool SO2 > // Storage order of the right-hand side dense matrix
void smpAddAssign_backend( DenseMatrix<MT1,SO1>& lhs, const DenseMatrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
// Must be executed by every thread of an already running OpenMP parallel region.
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
using ET1 = ElementType_<MT1>;
using ET2 = ElementType_<MT2>;
// Vectorization is possible only if both matrices are SIMD-enabled and their
// element types can be combined into SIMD operations.
constexpr bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && IsSIMDCombinable<ET1,ET2>::value );
constexpr size_t SIMDSIZE( SIMDTrait< ElementType_<MT1> >::size );
// Alignment is a runtime property of the concrete matrices and decides which
// submatrix flavor (aligned/unaligned) is used below.
const bool lhsAligned( (~lhs).isAligned() );
const bool rhsAligned( (~rhs).isAligned() );
// Partition the matrix into a threadmap.first-by-threadmap.second grid of tiles,
// one tile per thread of the enclosing parallel region.
const int threads( omp_get_num_threads() );
const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
// Rows per tile: ceiling division, then rounded up to a multiple of SIMDSIZE when
// vectorization is enabled so that tile boundaries stay SIMD-aligned (the bitmask
// assumes SIMDSIZE is a power of two).
const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
const size_t equalShare1( (~rhs).rows() / threadmap.first + addon1 );
const size_t rest1 ( equalShare1 & ( SIMDSIZE - 1UL ) );
const size_t rowsPerThread( ( simdEnabled && rest1 )?( equalShare1 - rest1 + SIMDSIZE ):( equalShare1 ) );
// Same computation for the column direction.
const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
const size_t equalShare2( (~rhs).columns() / threadmap.second + addon2 );
const size_t rest2 ( equalShare2 & ( SIMDSIZE - 1UL ) );
const size_t colsPerThread( ( simdEnabled && rest2 )?( equalShare2 - rest2 + SIMDSIZE ):( equalShare2 ) );
// Distribute the tiles across the threads of the surrounding parallel region.
#pragma omp for schedule(dynamic,1) nowait
for( int i=0; i<threads; ++i )
{
// Map the linear tile index to its (row,column) position in the tile grid.
const size_t row ( ( i / threadmap.second ) * rowsPerThread );
const size_t column( ( i % threadmap.second ) * colsPerThread );
// Rounding rowsPerThread/colsPerThread up can push trailing tiles past the end.
if( row >= (~rhs).rows() || column >= (~rhs).columns() )
continue;
// Clip the last tile in each direction to the actual matrix extent.
const size_t m( min( rowsPerThread, (~rhs).rows() - row ) );
const size_t n( min( colsPerThread, (~rhs).columns() - column ) );
// Dispatch on the runtime alignment of both operands so the fastest available
// (aligned) submatrix kernel is used where possible.
if( simdEnabled && lhsAligned && rhsAligned ) {
auto target( submatrix<aligned>( ~lhs, row, column, m, n ) );
addAssign( target, submatrix<aligned>( ~rhs, row, column, m, n ) );
}
else if( simdEnabled && lhsAligned ) {
auto target( submatrix<aligned>( ~lhs, row, column, m, n ) );
addAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
else if( simdEnabled && rhsAligned ) {
auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
addAssign( target, submatrix<aligned>( ~rhs, row, column, m, n ) );
}
else {
auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
addAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP addition assignment of a sparse matrix to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side sparse matrix to be added.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP addition assignment of a
// sparse matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side sparse matrix
, bool SO2 > // Storage order of the right-hand side sparse matrix
void smpAddAssign_backend( DenseMatrix<MT1,SO1>& lhs, const SparseMatrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
// Must run inside an active parallel section: the worksharing
// '#pragma omp for' below requires an enclosing OpenMP parallel region.
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
// Signed thread count / loop counter: OpenMP 2.x (e.g. MSVC) requires a signed
// integral loop variable in a worksharing 'for', and this matches the dense
// matrix backend above (which already uses 'int').
const int threads( omp_get_num_threads() );
// Partition the matrix into a (threadmap.first x threadmap.second) grid of tiles,
// one tile per thread. No SIMD rounding: the right-hand side is sparse.
const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
const size_t rowsPerThread( (~rhs).rows() / threadmap.first + addon1 );
const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
const size_t colsPerThread( (~rhs).columns() / threadmap.second + addon2 );
#pragma omp for schedule(dynamic,1) nowait
for( int i=0; i<threads; ++i )
{
// Map the linear tile index to the tile origin; skip tiles outside the matrix.
const size_t row ( ( i / threadmap.second ) * rowsPerThread );
const size_t column( ( i % threadmap.second ) * colsPerThread );
if( row >= (~rhs).rows() || column >= (~rhs).columns() )
continue;
// Clip the tile extents to the matrix bounds.
const size_t m( min( rowsPerThread, (~lhs).rows() - row ) );
const size_t n( min( colsPerThread, (~lhs).columns() - column ) );
auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
addAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP addition assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be added.
// \return void
//
// This function implements the default OpenMP-based SMP addition assignment to a dense matrix.
// Due to the explicit application of the SFINAE principle, this function can only be selected
// by the compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>
, Or< Not< IsSMPAssignable<MT1> >
, Not< IsSMPAssignable<MT2> > > > >
smpAddAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
// At least one operand is not SMP-assignable (see the EnableIf condition),
// so simply delegate to the serial addition assignment.
addAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP addition assignment to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be added.
// \return void
//
// This function implements the OpenMP-based SMP addition assignment to a dense matrix. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > >
smpAddAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
// Nested SMP assignments are not supported: the element types themselves must
// not be SMP-assignable.
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT1> );
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT2> );
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
BLAZE_PARALLEL_SECTION
{
// Fall back to the serial addition assignment if a serial section is active
// or if the right-hand side expression cannot be SMP-assigned.
if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
addAssign( ~lhs, ~rhs );
}
else {
// Open the OpenMP parallel region; the backend performs the worksharing.
#pragma omp parallel shared( lhs, rhs )
smpAddAssign_backend( ~lhs, ~rhs );
}
}
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// SUBTRACTION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP subtraction assignment of a dense matrix to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side dense matrix to be subtracted.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP subtraction assignment
// of a dense matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side dense matrix
, bool SO2 > // Storage order of the right-hand side dense matrix
void smpSubAssign_backend( DenseMatrix<MT1,SO1>& lhs, const DenseMatrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
// Must run inside an active parallel section: the worksharing
// '#pragma omp for' below requires an enclosing OpenMP parallel region.
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
using ET1 = ElementType_<MT1>;
using ET2 = ElementType_<MT2>;
// SIMD is only usable if both matrix types are vectorizable and their element
// types can be combined in a single SIMD operation.
constexpr bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && IsSIMDCombinable<ET1,ET2>::value );
constexpr size_t SIMDSIZE( SIMDTrait< ElementType_<MT1> >::size );
const bool lhsAligned( (~lhs).isAligned() );
const bool rhsAligned( (~rhs).isAligned() );
const int threads( omp_get_num_threads() );
// Partition the matrix into a (threadmap.first x threadmap.second) grid of tiles,
// one tile per thread.
const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
// Rows per tile: ceiling division, rounded up to a multiple of SIMDSIZE when
// SIMD is in use (the bitmask assumes SIMDSIZE is a power of two).
const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
const size_t equalShare1( (~rhs).rows() / threadmap.first + addon1 );
const size_t rest1 ( equalShare1 & ( SIMDSIZE - 1UL ) );
const size_t rowsPerThread( ( simdEnabled && rest1 )?( equalShare1 - rest1 + SIMDSIZE ):( equalShare1 ) );
// Columns per tile, computed analogously.
const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
const size_t equalShare2( (~rhs).columns() / threadmap.second + addon2 );
const size_t rest2 ( equalShare2 & ( SIMDSIZE - 1UL ) );
const size_t colsPerThread( ( simdEnabled && rest2 )?( equalShare2 - rest2 + SIMDSIZE ):( equalShare2 ) );
#pragma omp for schedule(dynamic,1) nowait
for( int i=0; i<threads; ++i )
{
// Map the linear tile index to the tile origin; skip tiles outside the matrix.
const size_t row ( ( i / threadmap.second ) * rowsPerThread );
const size_t column( ( i % threadmap.second ) * colsPerThread );
if( row >= (~rhs).rows() || column >= (~rhs).columns() )
continue;
// Clip the tile extents to the matrix bounds.
const size_t m( min( rowsPerThread, (~rhs).rows() - row ) );
const size_t n( min( colsPerThread, (~rhs).columns() - column ) );
// Dispatch on operand alignment so aligned submatrix views are used when possible.
if( simdEnabled && lhsAligned && rhsAligned ) {
auto target( submatrix<aligned>( ~lhs, row, column, m, n ) );
subAssign( target, submatrix<aligned>( ~rhs, row, column, m, n ) );
}
else if( simdEnabled && lhsAligned ) {
auto target( submatrix<aligned>( ~lhs, row, column, m, n ) );
subAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
else if( simdEnabled && rhsAligned ) {
auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
subAssign( target, submatrix<aligned>( ~rhs, row, column, m, n ) );
}
else {
auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
subAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP subtraction assignment of a sparse matrix to a dense
// matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side sparse matrix to be subtracted.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP subtraction assignment
// of a sparse matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side sparse matrix
, bool SO2 > // Storage order of the right-hand side sparse matrix
void smpSubAssign_backend( DenseMatrix<MT1,SO1>& lhs, const SparseMatrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
// Must run inside an active parallel section: the worksharing
// '#pragma omp for' below requires an enclosing OpenMP parallel region.
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
// Signed thread count / loop counter: OpenMP 2.x (e.g. MSVC) requires a signed
// integral loop variable in a worksharing 'for', and this matches the dense
// matrix backend above (which already uses 'int').
const int threads( omp_get_num_threads() );
// Partition the matrix into a (threadmap.first x threadmap.second) grid of tiles,
// one tile per thread. No SIMD rounding: the right-hand side is sparse.
const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
const size_t rowsPerThread( (~rhs).rows() / threadmap.first + addon1 );
const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
const size_t colsPerThread( (~rhs).columns() / threadmap.second + addon2 );
#pragma omp for schedule(dynamic,1) nowait
for( int i=0; i<threads; ++i )
{
// Map the linear tile index to the tile origin; skip tiles outside the matrix.
const size_t row ( ( i / threadmap.second ) * rowsPerThread );
const size_t column( ( i % threadmap.second ) * colsPerThread );
if( row >= (~rhs).rows() || column >= (~rhs).columns() )
continue;
// Clip the tile extents to the matrix bounds.
const size_t m( min( rowsPerThread, (~lhs).rows() - row ) );
const size_t n( min( colsPerThread, (~lhs).columns() - column ) );
auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
subAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP subtraction assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be subtracted.
// \return void
//
// This function implements the default OpenMP-based SMP subtraction assignment to a dense matrix.
// Due to the explicit application of the SFINAE principle, this function can only be selected by
// the compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>
, Or< Not< IsSMPAssignable<MT1> >
, Not< IsSMPAssignable<MT2> > > > >
smpSubAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
// At least one operand is not SMP-assignable (see the EnableIf condition),
// so simply delegate to the serial subtraction assignment.
subAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP subtraction assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be subtracted.
// \return void
//
// This function implements the OpenMP-based SMP subtraction assignment of a matrix to a
// dense matrix. Due to the explicit application of the SFINAE principle, this function can only
// be selected by the compiler in case both operands are SMP-assignable and the element types of
// both operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > >
smpSubAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
// Nested SMP assignments are not supported: the element types themselves must
// not be SMP-assignable.
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT1> );
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT2> );
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
BLAZE_PARALLEL_SECTION
{
// Fall back to the serial subtraction assignment if a serial section is active
// or if the right-hand side expression cannot be SMP-assigned.
if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
subAssign( ~lhs, ~rhs );
}
else {
// Open the OpenMP parallel region; the backend performs the worksharing.
#pragma omp parallel shared( lhs, rhs )
smpSubAssign_backend( ~lhs, ~rhs );
}
}
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// SCHUR PRODUCT ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP Schur product assignment of a dense matrix to a dense
// matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side dense matrix for the Schur product.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP Schur product assignment
// of a dense matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side dense matrix
, bool SO2 > // Storage order of the right-hand side dense matrix
void smpSchurAssign_backend( DenseMatrix<MT1,SO1>& lhs, const DenseMatrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
// Must run inside an active parallel section: the worksharing
// '#pragma omp for' below requires an enclosing OpenMP parallel region.
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
using ET1 = ElementType_<MT1>;
using ET2 = ElementType_<MT2>;
// SIMD is only usable if both matrix types are vectorizable and their element
// types can be combined in a single SIMD operation.
constexpr bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && IsSIMDCombinable<ET1,ET2>::value );
constexpr size_t SIMDSIZE( SIMDTrait< ElementType_<MT1> >::size );
const bool lhsAligned( (~lhs).isAligned() );
const bool rhsAligned( (~rhs).isAligned() );
const int threads( omp_get_num_threads() );
// Partition the matrix into a (threadmap.first x threadmap.second) grid of tiles,
// one tile per thread.
const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
// Rows per tile: ceiling division, rounded up to a multiple of SIMDSIZE when
// SIMD is in use (the bitmask assumes SIMDSIZE is a power of two).
const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
const size_t equalShare1( (~rhs).rows() / threadmap.first + addon1 );
const size_t rest1 ( equalShare1 & ( SIMDSIZE - 1UL ) );
const size_t rowsPerThread( ( simdEnabled && rest1 )?( equalShare1 - rest1 + SIMDSIZE ):( equalShare1 ) );
// Columns per tile, computed analogously.
const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
const size_t equalShare2( (~rhs).columns() / threadmap.second + addon2 );
const size_t rest2 ( equalShare2 & ( SIMDSIZE - 1UL ) );
const size_t colsPerThread( ( simdEnabled && rest2 )?( equalShare2 - rest2 + SIMDSIZE ):( equalShare2 ) );
#pragma omp for schedule(dynamic,1) nowait
for( int i=0; i<threads; ++i )
{
// Map the linear tile index to the tile origin; skip tiles outside the matrix.
const size_t row ( ( i / threadmap.second ) * rowsPerThread );
const size_t column( ( i % threadmap.second ) * colsPerThread );
if( row >= (~rhs).rows() || column >= (~rhs).columns() )
continue;
// Clip the tile extents to the matrix bounds.
const size_t m( min( rowsPerThread, (~rhs).rows() - row ) );
const size_t n( min( colsPerThread, (~rhs).columns() - column ) );
// Dispatch on operand alignment so aligned submatrix views are used when possible.
if( simdEnabled && lhsAligned && rhsAligned ) {
auto target( submatrix<aligned>( ~lhs, row, column, m, n ) );
schurAssign( target, submatrix<aligned>( ~rhs, row, column, m, n ) );
}
else if( simdEnabled && lhsAligned ) {
auto target( submatrix<aligned>( ~lhs, row, column, m, n ) );
schurAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
else if( simdEnabled && rhsAligned ) {
auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
schurAssign( target, submatrix<aligned>( ~rhs, row, column, m, n ) );
}
else {
auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
schurAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP Schur product assignment of a sparse matrix to a dense
// matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side sparse matrix for the Schur product.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP Schur product assignment
// of a sparse matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side sparse matrix
, bool SO2 > // Storage order of the right-hand side sparse matrix
void smpSchurAssign_backend( DenseMatrix<MT1,SO1>& lhs, const SparseMatrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
// Must run inside an active parallel section: the worksharing
// '#pragma omp for' below requires an enclosing OpenMP parallel region.
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
// Signed thread count / loop counter: OpenMP 2.x (e.g. MSVC) requires a signed
// integral loop variable in a worksharing 'for', and this matches the dense
// matrix backend above (which already uses 'int').
const int threads( omp_get_num_threads() );
// Partition the matrix into a (threadmap.first x threadmap.second) grid of tiles,
// one tile per thread. No SIMD rounding: the right-hand side is sparse.
const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
const size_t rowsPerThread( (~rhs).rows() / threadmap.first + addon1 );
const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
const size_t colsPerThread( (~rhs).columns() / threadmap.second + addon2 );
#pragma omp for schedule(dynamic,1) nowait
for( int i=0; i<threads; ++i )
{
// Map the linear tile index to the tile origin; skip tiles outside the matrix.
const size_t row ( ( i / threadmap.second ) * rowsPerThread );
const size_t column( ( i % threadmap.second ) * colsPerThread );
if( row >= (~rhs).rows() || column >= (~rhs).columns() )
continue;
// Clip the tile extents to the matrix bounds.
const size_t m( min( rowsPerThread, (~lhs).rows() - row ) );
const size_t n( min( colsPerThread, (~lhs).columns() - column ) );
auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
schurAssign( target, submatrix<unaligned>( ~rhs, row, column, m, n ) );
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP Schur product assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix for the Schur product.
// \return void
//
// This function implements the default OpenMP-based SMP Schur product assignment to a dense
// matrix. Due to the explicit application of the SFINAE principle, this function can only be
// selected by the compiler in case both operands are SMP-assignable and the element types of
// both operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>
, Or< Not< IsSMPAssignable<MT1> >
, Not< IsSMPAssignable<MT2> > > > >
smpSchurAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
// At least one operand is not SMP-assignable (see the EnableIf condition),
// so simply delegate to the serial Schur product assignment.
schurAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP Schur product assignment to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix for the Schur product.
// \return void
//
// This function implements the OpenMP-based SMP Schur product assignment to a dense matrix. Due
// to the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > >
smpSchurAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
// Nested SMP assignments are not supported: the element types themselves must
// not be SMP-assignable.
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT1> );
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT2> );
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
BLAZE_PARALLEL_SECTION
{
// Fall back to the serial Schur product assignment if a serial section is
// active or if the right-hand side expression cannot be SMP-assigned.
if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
schurAssign( ~lhs, ~rhs );
}
else {
// Open the OpenMP parallel region; the backend performs the worksharing.
#pragma omp parallel shared( lhs, rhs )
smpSchurAssign_backend( ~lhs, ~rhs );
}
}
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// MULTIPLICATION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP multiplication assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be multiplied.
// \return void
//
// This function implements the default OpenMP-based SMP multiplication assignment to a dense
// matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_< IsDenseMatrix<MT1> >
smpMultAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
// Multiplication assignment is not tiled here; delegate to the serial
// multAssign (no OpenMP worksharing is performed at this level).
multAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// COMPILE TIME CONSTRAINT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
namespace {
BLAZE_STATIC_ASSERT( BLAZE_OPENMP_PARALLEL_MODE );
}
/*! \endcond */
//*************************************************************************************************
} // namespace blaze
#endif
|
SCAPE.h | #pragma once
#include "MatrixnD.h"
#include "SCAPE_structs.h"
#include "PCA_basis.h"
#include <umfpack/umfpack.h>
#include "operators/alignPointClouds.h"
#include "RandAccSparseMatrix.h"
#include <omp.h>
//#define SAVE_MATRIX 0
typedef SquareMatrixND<vec3d> SMat3D;  // 3x3 matrix over vec3d entries (project math type)
// Fixed SCAPE model dimensions: number of body parts, skeleton joints, and mesh triangles.
static const int numParts = 16;
static const int numJoints = 15;
static const int numT = 25000;
//! Can be used to store the rigid per part rotations.
struct PartRotations {
SMat3D R[numParts];  // One rotation matrix per body part, indexed by part ID.
};
//! Can be used to store the rigid per joint rotation.
struct JointRotations {
vec3d rv[numJoints];  // One rotation vector per joint (presumably axis-angle -- TODO confirm).
};
class SCAPE_Model {
public:
//! Initializes all UMFPACK factorization handles to null so the destructor only
//! frees what was actually allocated. (Modernized: nullptr instead of NULL.)
SCAPE_Model() {
NumericQ = nullptr;
SymbolicQ = nullptr;
NumericDGrad = nullptr;
SymbolicDGrad = nullptr;
}
//! Frees any UMFPACK symbolic/numeric factorizations that were allocated.
//! NOTE(review): the class holds raw UMFPACK handles but does not delete its copy
//! constructor/assignment -- copying an instance would cause a double free; confirm
//! that callers never copy SCAPE_Model objects.
~SCAPE_Model() {
if (NumericQ) umfpack_di_free_numeric(&NumericQ);
if (SymbolicQ) umfpack_di_free_symbolic(&SymbolicQ);
if (NumericDGrad) umfpack_di_free_numeric(&NumericDGrad);
if (SymbolicDGrad) umfpack_di_free_symbolic(&SymbolicDGrad);
}
//! Returns the two joints that define the non-rigid pose deformation of the triangle 'id'. If there is only one joint, the second joint ID will be set to -1!
vec2i getClosestJointsForTriangle(UInt id) const {
return triToJoints[id];  // No bounds check: 'id' must be a valid triangle index (< numT).
}
//! Returns the non-rigid pose deformation of the triangle 'id' for joint rotations
//! 'j0_rot' and 'j1_rot'. Each entry of the resulting 3x3 matrix Q is an affine
//! function of the six joint-rotation components, using the learned per-triangle
//! coefficients (six linear terms followed by one constant term).
SMat3D estimateQfromJoints(UInt id, const vec3d& j0_rot, const vec3d& j1_rot) const {
SMat3D Q;
for (int r = 0; r < 3; r++) {
for (int c = 0; c < 3; c++) {
// Seven learned coefficients for entry (r, c) of this triangle.
const double* coeff = perTriangleParameters[id].vals[r][c];
Q(r, c) = j0_rot.x * coeff[0] + j0_rot.y * coeff[1] + j0_rot.z * coeff[2]
        + j1_rot.x * coeff[3] + j1_rot.y * coeff[4] + j1_rot.z * coeff[5]
        + coeff[6];
}
}
return Q;
}
//! Computes the rigid transformation that aligns the pose vertices of 'part' onto the
//! corresponding template vertices, using only the vertices belonging to that part.
//! NOTE(review): if part.verts is empty, &pts_pose[0] dereferences an empty vector
//! (undefined behavior) -- confirm parts are never empty.
RigidTransform<vec3d> computePartAlignment(const Part& part, const std::vector<vec3d>& template_verts, const std::vector<vec3d>& pose_verts) const {
std::vector<vec3d> pts_template;
std::vector<vec3d> pts_pose;
// Reserve up front: one point pair per part vertex (avoids repeated reallocations).
pts_template.reserve(part.verts.size());
pts_pose.reserve(part.verts.size());
for (UInt i1 = 0; i1 < part.verts.size(); i1++) {
pts_template.push_back(template_verts[part.verts[i1]]);
pts_pose.push_back(pose_verts[part.verts[i1]]);
}
RigidTransform<vec3d> trans = rigidAlignPointClouds(&pts_pose[0], &pts_template[0], pts_template.size());
return trans;
}
std::vector<double> projectDToPCABasis(std::vector<SMat3D>& D, int cropToNBasisVectors = 0) const {
std::vector<double> dlin(numT * 9);
double* Dpoint = (double*)(&D[0]);
for (int i1 = 0; i1 < numT * 9; i1++) {
dlin[i1] = Dpoint[i1] - avg_defgrad[i1];
}
std::vector<double> inpca = pcabasis.projectIntoBasis(dlin);
if (cropToNBasisVectors != 0) {
for (UInt i1 = cropToNBasisVectors; i1 < inpca.size(); i1++) inpca[i1] = 0;
}
std::vector<double> reproj = pcabasis.projectFromBasis(inpca);
for (int i1 = 0; i1 < numT * 9; i1++) {
reproj[i1] += avg_defgrad[i1];
}
for (int i1 = 0; i1 < numT * 9; i1++) {
Dpoint[i1] = reproj[i1];
}
return inpca;
}
/// <summary>
/// Create a new instance with a given pose and shape.
///
/// The pose consists of part rotations R_l and per-triangle deformations Q_k.
/// The shape consists of per-triangle deformations S_k.
///
/// The preliminary resulting vertices are given by equation (6) in [1]:
///
/// v_{k, j} = R_l[k] S_k Q_k \hat{v}_{k, j}
///
/// where k ranges over the set of triangles and j over {1, 2, 3} (three points per triangle).
///
/// TODO: Really R_l[k] and not part rotations?
///
/// Note that for a complete consistent mesh multiple triangles must share common vertices,
/// i.e. they must agree upon them. Therefore, the final vertices are determined in such a way
/// that they minimize the compromise needed by every triangle, cf. equation (9) in [1].
/// </summary>
/// <param name="pr">The part rotations R_l for each part l.</param>
/// <param name="Q">The 3x3 linear transformation matrices Q_k deforming every triangle k.</param>
/// <param name="D">The 3x3 shape deformation matrices S_k deforming every triangle k.</param>
/// <returns>TODO</returns>
///
/// <seealso cref="decodeDefGradMesh"/>
std::vector<vec3d> reconstructModel(const PartRotations* pr, const std::vector<SMat3D>* Q, const std::vector<SMat3D>* D) const {
UInt num;
if (Q != NULL) num = Q->size();
else num = D->size();
std::vector< SquareMatrixND<vec3d> > Rots(num);
if (pr == NULL || Q == NULL) { // Only body shape change
std::cerr << "Only shape" << std::endl;
#pragma omp parallel
for (UInt i2 = 0; i2 < numT; i2++) Rots[i2] = (*D)[i2];
} else if (D == NULL) { // default body shape in input pose
#pragma omp parallel
// TODO: Why part rotations and not join rotations as per equation (6) in [1]
for (UInt i2 = 0; i2 < numT; i2++) Rots[i2] = pr->R[triIDtoPartID[i2]] * (*Q)[i2];
} else { // transfer pose and shape
#pragma omp parallel
for (UInt i2 = 0; i2 < numT; i2++) Rots[i2] = (*D)[i2] * pr->R[triIDtoPartID[i2]] * (*Q)[i2];
}
return decodeDefGradMesh(Rots);
}
//! Computes and returns the rigid rotations of the mesh parts.
PartRotations getRigidPartRotations(const std::vector<vec3d>& pose_verts) const {
    // Align every rigid part of the posed mesh against the default mesh
    // and keep only the rotational component of each transform.
    PartRotations rotations;
    for (UInt p = 0; p < numParts; p++) {
        const RigidTransform<vec3d> alignment = computePartAlignment(rigidParts[p], default_mesh_vertices, pose_verts);
        rotations.R[p] = alignment.rotate;
    }
    return rotations;
}
//! Returns the joint rotations as rotation vectors. Note that the rotation vectors are already projected into their PCA basis.
JointRotations getJointRotationsFromPartRotations(const PartRotations& pr) const {
    JointRotations result;
    for (UInt j = 0; j < numJoints; j++) {
        const Joint& joint = joints[j];
        // Relative rotation between the two parts that meet at this joint.
        const SquareMatrixND<vec3d> rel = pr.R[joint.part1].getTransposed() * pr.R[joint.part0];
        vec3d rv = rel.getRotVec();
        rv *= -1;
        // Express the (negated) rotation vector in the joint's PCA basis.
        result.rv[j] = joint_PCA_bases[j].TransposedVecTrans(rv);
    }
    return result;
}
std::vector<SMat3D> getAllQs(const JointRotations& jr) const {
std::vector<SMat3D> Qs(default_mesh_triangles.size());
#pragma omp parallel
for (UInt i1 = 0; i1 < default_mesh_triangles.size(); i1++) {
vec2i js = triToJoints[i1];
const vec3d& j0_rot = jr.rv[js.x];
vec3d j1_rot(0, 0, 0);
if (js.y > -1) j1_rot = jr.rv[js.y];
SMat3D Q = estimateQfromJoints(i1, j0_rot, j1_rot);
Qs[i1] = Q;
}
return Qs;
}
//! Projects the input model to SCAPE Space.
std::vector<vec3d> projectToSCAPESpace(const std::vector<vec3d>& verts, bool poseOnly = false, const std::vector<double>& shapeParams = std::vector<double>()) const {
    // Estimate the pose: rigid part rotations -> joint rotations -> per-
    // triangle pose deformations Q.
    PartRotations pr = getRigidPartRotations(verts);
    JointRotations jr = getJointRotationsFromPartRotations(pr);
    // pose
    std::vector<SquareMatrixND<vec3d>> Q = getAllQs(jr);
    std::vector<SquareMatrixND<vec3d>> Rots;
    for (UInt i2 = 0; i2 < numT; i2++) {
        SquareMatrixND<vec3d> R = pr.R[triIDtoPartID[i2]] * Q[i2];
        Rots.push_back(R);
    }
    // shape: solve for the D matrices and project them into the PCA basis.
    std::vector<SquareMatrixND<vec3d>> D;
    computeDMatrices(verts, Rots, D);
    std::vector<double> inpca = projectDToPCABasis(D, 0);
    // Optionally override the estimated shape with caller-supplied PCA
    // coefficients (explicit cast avoids a signed/unsigned comparison).
    if ((int)shapeParams.size() == pcabasis.numVecs) {
        double* Dp = (double*)&D[0];
        std::vector<double> Dlin = pcabasis.projectFromBasis(shapeParams);
        // FIX: was '#pragma omp parallel' without 'for' -- every thread ran
        // the whole loop. 'parallel for' distributes the iterations.
        #pragma omp parallel for
        for (int i1 = 0; i1 < numT * 9; i1++) Dp[i1] = Dlin[i1] + avg_defgrad[i1];
    }
    if (poseOnly) {
        return reconstructModel(&pr, &Q, NULL); // Reconstruct pose only
    } else {
        return reconstructModel(&pr, &Q, &D); // Reconstruct pose and shape
    }
}
//! Projects the input model to SCAPE Space.
std::vector<vec3d> projectToSCAPESpace(const std::vector<vec3d>& verts, std::vector<double>* params = NULL, int crop = 0) const {
PartRotations pr = getRigidPartRotations(verts);
JointRotations jr = getJointRotationsFromPartRotations(pr);
std::vector<SquareMatrixND<vec3d>> Q = getAllQs(jr);
std::vector<SquareMatrixND<vec3d>> Rots;
for (UInt i2 = 0; i2 < numT; i2++) {
SquareMatrixND<vec3d> R = pr.R[triIDtoPartID[i2]] * Q[i2];
Rots.push_back(R);
}
std::vector<SquareMatrixND<vec3d>> D;
computeDMatrices(verts, Rots, D);
std::vector<double> inpca = projectDToPCABasis(D, crop);
if (params != NULL) {
params->resize(3 * numParts + pcabasis.numVecs);
for (int i1 = 0; i1 < numParts; i1++) {
vec3d partRot = pr.R[i1].getRotVec();
(*params)[i1 * 3 + 0] = partRot.x;
(*params)[i1 * 3 + 1] = partRot.y;
(*params)[i1 * 3 + 2] = partRot.z;
}
for (int i1 = 0; i1 < pcabasis.numVecs; i1++) {
(*params)[3 * numParts + i1] = inpca[i1];
}
}
return reconstructModel(&pr, &Q, &D); // Reconstruct pose and shape
}
/// <summary>
/// Create a vertices model from a (pose|shape) vector.
/// </summary>
/// <param name="params">
/// A combined (pose|shape) vector where pose = (twist_1, twist_2, ..., twist_{numParts}) and
/// shape = (pcaCoeff_1, ..., pcaCoeff_{pcabasis.numVecs}).
///
/// The pose subvector shall contain <see cref="numParts"/> many three-dimensional twist subvectors for every part rotation.
/// A twist vector represents the rotation in an axis-angle fashion: The axis is determined by the vector's direction and
/// the angle is determined by the vector's magnitude.
///
/// The shape subvector shall contain <see cref="pcabasis.numvecs"/> many scalar PCA coefficients.
/// </param>
/// <returns>
/// Vertices in such an order that combining them with <see cref="topologically_correct_mesh_triangles"/>
/// leads to the desired triangle mesh model.
/// </returns>
std::vector<vec3d> reconstructFromSCAPEParams(const std::vector<double>& params) const {
    // Decode pose: one axis-angle (twist) vector per rigid part.
    PartRotations pr;
    for (int i1 = 0; i1 < numParts; i1++) {
        vec3d rv(params[i1 * 3 + 0], params[i1 * 3 + 1], params[i1 * 3 + 2]);
        pr.R[i1].setToRotationMatrixNew(rv);
    }
    JointRotations jr = getJointRotationsFromPartRotations(pr);
    std::vector<SquareMatrixND<vec3d>> Q = getAllQs(jr);
    std::vector<SquareMatrixND<vec3d>> Rots(Q.size());
    // FIX: was '#pragma omp parallel' without 'for' -- every thread executed
    // the entire loop (redundant work / technical data race). 'parallel for'
    // distributes iterations; signed index keeps OpenMP 2.0 (MSVC) compat.
    #pragma omp parallel for
    for (int i2 = 0; i2 < (int)numT; i2++) {
        Rots[i2] = pr.R[triIDtoPartID[i2]] * Q[i2];
    }
    // Decode shape: PCA coefficients -> deformation gradients plus the mean.
    std::vector<SquareMatrixND<vec3d>> D(numT);
    double* Dp = (double*)&D[0];
    std::vector<double> inpca(pcabasis.numVecs);
    for (int i1 = 0; i1 < pcabasis.numVecs; i1++) inpca[i1] = params[3 * numParts + i1];
    std::vector<double> Dlin = pcabasis.projectFromBasis(inpca);
    // FIX: same broken pragma as above.
    #pragma omp parallel for
    for (int i1 = 0; i1 < numT * 9; i1++) Dp[i1] = Dlin[i1] + avg_defgrad[i1];
    return reconstructModel(&pr, &Q, &D); // Reconstruct pose and shape
}
//! Rotates the model to match the orientation of the base mesh
//! Rigidly aligns 'verts' (in place) to the default mesh and returns the
//! applied transform.
RigidTransform<vec3d> rigidlyAlignModel(std::vector<vec3d>& verts) const {
    const RigidTransform<vec3d> alignment = rigidAlignPointClouds(&default_mesh_vertices[0], &verts[0], verts.size());
    for (UInt k = 0; k < verts.size(); k++) {
        verts[k] = alignment.vecTrans(verts[k]);
    }
    return alignment;
}
//! Rotates the model to match the orientation of the base mesh
void rigidlyAlignModel(std::vector<vec3d>& verts, const std::vector<vec3d>& verts_reference, std::vector<double>* weights = NULL) const {
RigidTransform<vec3d> rtrans;
if (weights == NULL) rtrans = rigidAlignPointClouds(&verts_reference[0], &verts[0], verts.size());
else rtrans = rigidAlignPointCloudsWeighted(&verts_reference[0], &verts[0], verts.size(), &(*weights)[0]);
for (UInt i1 = 0; i1 < verts.size(); i1++) verts[i1] = rtrans.vecTrans(verts[i1]);
}
// Serializes the complete SCAPE model to the binary file 'fn'.
// Layout (in order): header string, default mesh vertices + triangles,
// rigid parts, triangle->joint map, per-triangle regression parameters,
// joint PCA bases, joints, triangle->part map, average deformation gradient,
// triangle neighbors, shape PCA basis, manifold topology, average joint
// angles, pose PCA basis, average SCAPE coefficients. Each array is preceded
// by an int element count so readFromFile() can size its buffers.
// Inconsistent container sizes abort the process via exit(1).
void saveToFile(const char* fn) const {
std::ofstream fout(fn, std::ios::binary);
// NOTE(review): this writes "scape model container " (22 characters) via
// operator<<, but readFromFile() consumes a 24-byte header -- confirm the
// intended header length before relying on round-tripping via this writer.
fout << "scape model container ";
int numV = (int)default_mesh_vertices.size();
fout.write((char*)&numV, sizeof(int));
fout.write((char*)&default_mesh_vertices[0], sizeof(vec3d) * numV);
// NOTE: this local numT shadows the class-scope triangle count.
int numT = (int)default_mesh_triangles.size();
fout.write((char*)&numT, sizeof(int));
fout.write((char*)&default_mesh_triangles[0], sizeof(vec3i) * numT);
fout.write((char*)&numParts, sizeof(int));
for (int i1 = 0; i1 < numParts; i1++) rigidParts[i1].savePart(fout);
// Consistency check: every triangle must have a joint pair.
if (triToJoints.size() != numT) {
std::cerr << "size of triToJoints doesn't match number of triangles!" << std::endl;
std::cerr << triToJoints.size() << std::endl;
std::cerr << numT << std::endl;
exit(1);
}
fout.write((char*)&numT, sizeof(int));
fout.write((char*)&triToJoints[0], sizeof(vec2i) * numT);
if (perTriangleParameters.size() != numT) {
std::cerr << "size of perTriangleParams doesn't match number of triangles!" << std::endl;
exit(1);
}
fout.write((char*)&numT, sizeof(int));
// Raw struct dump: the on-disk format depends on the in-memory layout of
// perTriangleParams (sizes are echoed to stderr for debugging).
fout.write((char*)&perTriangleParameters[0], sizeof(perTriangleParams) * numT);
std::cerr << "sizeof(perTriangleParams): " << sizeof(perTriangleParams) << std::endl;
if (joint_PCA_bases.size() != numJoints) {
std::cerr << "size of joint_PCA_bases doesn't match number of joints!" << std::endl;
exit(1);
}
fout.write((char*)&numJoints, sizeof(int));
fout.write((char*)&joint_PCA_bases[0], sizeof(SMat3D) * numJoints);
std::cerr << "sizeof(SMat3D): " << sizeof(SMat3D) << std::endl;
if (joints.size() != numJoints) {
std::cerr << "size of joints doesn't match number of joints!" << std::endl;
exit(1);
}
fout.write((char*)&numJoints, sizeof(int));
fout.write((char*)&joints[0], sizeof(Joint) * numJoints);
std::cerr << "sizeof(Joint): " << sizeof(Joint) << std::endl;
if (triIDtoPartID.size() != numT) {
std::cerr << "size of triIDtoPartID doesn't match number of triangles!" << std::endl;
exit(1);
}
fout.write((char*)&numT, sizeof(int));
fout.write((char*)&triIDtoPartID[0], sizeof(int) * numT);
int numX = (int)avg_defgrad.size();
fout.write((char*)&numX, sizeof(int));
fout.write((char*)&avg_defgrad[0], numX * sizeof(double));
// Save tri_neighbors:
numX = tri_neighbors.size();
fout.write((char*)&numX, sizeof(int));
fout.write((char*)&tri_neighbors[0], sizeof(vec3i) * numX);
// Shape PCA basis serializes itself.
pcabasis.saveToBinaryStream(fout);
// Manifold (topologically correct) triangle list.
int numTcorrect = (int)topologically_correct_mesh_triangles.size();
fout.write((char*)&numTcorrect, sizeof(int));
fout.write((char*)&topologically_correct_mesh_triangles[0], sizeof(vec3i) * numTcorrect);
numX = (int)avg_jointangles.size();
fout.write((char*)&numX, sizeof(int));
fout.write((char*)&avg_jointangles[0], numX * sizeof(double));
pose_pcabasis.saveToBinaryStream(fout);
numX = (int)avg_SCAPE_coeffs.size();
fout.write((char*)&numX, sizeof(int));
fout.write((char*)&avg_SCAPE_coeffs[0], numX * sizeof(double));
fout.close();
}
//
// init code
//
// Deserializes a SCAPE model written by saveToFile() and then loads the
// prefactorized UMFPACK matrices from 'pathToMatrices'.
// Each array on disk is preceded by an int count; counts that disagree with
// the compile-time model dimensions (numParts, numJoints, triangle count)
// abort the process via exit(1).
void readFromFile(const char* fn, const char* pathToMatrices, bool outputStatusMessagesToStderr = false) {
std::ifstream fin(fn, std::ios::binary);
if (!fin.good()) {
std::cerr << "Error reading file " << fn << " in SCAPE_Model.readFromFile()" << std::endl;
exit(1);
}
// NOTE(review): 24 header bytes are consumed here, while saveToFile()
// writes a 22-character header via operator<< -- confirm intended length.
char header[24];
fin.read(header, sizeof(char) * 24);
//std::cerr << "SCAPE Header: " << header << std::endl;
int numV;
fin.read((char*)&numV, sizeof(int));
if (outputStatusMessagesToStderr) {
std::cerr << "numV: " << numV << std::endl;
}
default_mesh_vertices.resize(numV);
fin.read((char*)&default_mesh_vertices[0], sizeof(vec3d) * numV);
int numT;
fin.read((char*)&numT, sizeof(int));
if (outputStatusMessagesToStderr) {
std::cerr << "numT: " << numT << std::endl;
}
default_mesh_triangles.resize(numT);
fin.read((char*)&default_mesh_triangles[0], sizeof(vec3i) * numT);
// Rigid parts (count must match the compile-time numParts).
int numP;
fin.read((char*)&numP, sizeof(int));
if (numP != numParts) {
std::cerr << "number of parts doesn't match numParts" << std::endl;
exit(1);
}
for (int i1 = 0; i1 < numParts; i1++) rigidParts[i1].readPart(fin);
// Triangle -> joint pair map.
int numTTJ;
fin.read((char*)&numTTJ, sizeof(int));
if (numTTJ != numT) {
std::cerr << "numTTJ doesn't match numTriangles" << std::endl;
exit(1);
}
triToJoints.resize(numT);
fin.read((char*)&triToJoints[0], sizeof(vec2i) * numT);
// Per-triangle regression parameters (raw struct layout, see saveToFile).
int numTP;
fin.read((char*)&numTP, sizeof(int));
if (numTP != numT) {
std::cerr << "numTP doesn't match numTriangles" << std::endl;
exit(1);
}
perTriangleParameters.resize(numT);
fin.read((char*)&perTriangleParameters[0], sizeof(perTriangleParams) * numT);
int numJB;
fin.read((char*)&numJB, sizeof(int));
if (numJB != numJoints) {
std::cerr << "numJB doesn't match numJoints" << std::endl;
exit(1);
}
joint_PCA_bases.resize(numJoints);
fin.read((char*)&joint_PCA_bases[0], sizeof(SMat3D) * numJoints);
int numJ;
fin.read((char*)&numJ, sizeof(int));
if (numJ != numJoints) {
std::cerr << "numJ doesn't match numJoints" << std::endl;
exit(1);
}
joints.resize(numJoints);
fin.read((char*)&joints[0], sizeof(Joint) * numJoints);
int numTTtP;
fin.read((char*)&numTTtP, sizeof(int));
if (numTTtP != numT) {
std::cerr << "numTTtP doesn't match numTriangles" << std::endl;
exit(1);
}
triIDtoPartID.resize(numT);
fin.read((char*)&triIDtoPartID[0], sizeof(int) * numT);
int numX;
fin.read((char*)&numX, sizeof(int));
avg_defgrad.resize(numX);
fin.read((char*)&avg_defgrad[0], numX * sizeof(double));
// Read tri_neighbors:
int numTriN;
fin.read((char*)&numTriN, sizeof(int));
tri_neighbors.resize(numTriN);
fin.read((char*)&tri_neighbors[0], sizeof(vec3i) * numTriN);
// Read PCA Basis
int rnv = 0;
pcabasis.loadFromStream(fin, rnv);
// Read manifold topology
int numTcorrect;
fin.read((char*)&numTcorrect, sizeof(int));
topologically_correct_mesh_triangles.resize(numTcorrect);
fin.read((char*)&topologically_correct_mesh_triangles[0], sizeof(vec3i) * numTcorrect);
// Read pose pca basis -- optional trailing section; older files may end here.
if (fin.peek() != EOF) {
if (outputStatusMessagesToStderr) {
std::cerr << "Reading pose PCA basis" << std::endl;
}
fin.read((char*)&numX, sizeof(int));
avg_jointangles.resize(numX);
fin.read((char*)&avg_jointangles[0], numX * sizeof(double));
int rnv = 0;
pose_pcabasis.loadFromStream(fin, rnv);
}
// Read average SCAPE coefficients -- also optional.
if (fin.peek() != EOF) {
if (outputStatusMessagesToStderr) {
std::cerr << "Reading avg coeffs" << std::endl;
}
fin.read((char*)&numX, sizeof(int));
avg_SCAPE_coeffs.resize(numX);
fin.read((char*)&avg_SCAPE_coeffs[0], numX * sizeof(double));
}
fin.close();
// Load the prefactorized system needed by decodeDefGradMesh().
loadUMFPACKMatrices(pathToMatrices);
}
//! Solves a sparse least-squares system for the per-triangle shape
//! deformation matrices D, given the posed vertices and the per-triangle
//! pose transforms 'Rots'. The weight 'w' softly couples each triangle's
//! solution to its three neighbors (smoothness regularization).
//! FIX: 'Rots' is now taken by const reference; it was passed by value,
//! copying numT 3x3 matrices on every call. Call sites are unchanged.
void computeDMatrices(const std::vector<vec3d>& pose_verts, const std::vector< SquareMatrixND<vec3d> >& Rots, std::vector< SquareMatrixND<vec3d> >& D) const {
    //double w = 0.000000001;
    double w = 0.00001;
    D.clear();
    D.resize(numT);
    // NOTE(review): the *_template / *_pose local names look swapped (the
    // "template" edge vectors are built from pose_verts and vice versa).
    // Behavior is preserved exactly from the original code; confirm the
    // naming intent before renaming.
    std::vector<EdgePair> edges_template(numT);
    std::vector<EdgePair> edges_pose(numT);
    for (UInt i1 = 0; i1 < numT; i1++) {
        const vec3i& t = default_mesh_triangles[i1];
        vec3d v0_template = pose_verts[t.y] - pose_verts[t.x];
        vec3d v1_template = pose_verts[t.z] - pose_verts[t.x];
        vec3d v0_pose = default_mesh_vertices[t.y] - default_mesh_vertices[t.x];
        vec3d v1_pose = default_mesh_vertices[t.z] - default_mesh_vertices[t.x];
        const SquareMatrixND<vec3d>& M = Rots[i1];
        edges_template[i1] = EdgePair(M.vecTrans(v0_pose), M.vecTrans(v1_pose));
        edges_pose[i1] = EdgePair(v0_template, v1_template);
    }
    // Build the normal-equation system matrix and one right-hand side per
    // output coordinate.
    RandAccessCompRowMatrix<double> A_123(3 * numT, 3 * numT);
    std::vector<double> rhs_123[3];
    rhs_123[0].resize(3 * numT);
    rhs_123[1].resize(3 * numT);
    rhs_123[2].resize(3 * numT);
    for (UInt i1 = 0; i1 < numT; i1++) {
        // Insert the three rows for the i-th triangle.
        const EdgePair& ep0 = edges_template[i1];
        const EdgePair& ep1 = edges_pose[i1];
        SquareMatrixND<vec3d> mat(ep0.v0);
        mat.addFromTensorProduct(ep0.v1, ep0.v1);
        for (int x = 0; x < 3; x++) {
            for (int y = 0; y < 3; y++) {
                A_123.add(3 * i1 + x, 3 * i1 + y, mat(x, y));
            }
        }
        rhs_123[0][3 * i1 + 0] = ep0.v0.x * ep1.v0.x + ep0.v1.x * ep1.v1.x;
        rhs_123[0][3 * i1 + 1] = ep0.v0.y * ep1.v0.x + ep0.v1.y * ep1.v1.x;
        rhs_123[0][3 * i1 + 2] = ep0.v0.z * ep1.v0.x + ep0.v1.z * ep1.v1.x;
        rhs_123[1][3 * i1 + 0] = ep0.v0.x * ep1.v0.y + ep0.v1.x * ep1.v1.y;
        rhs_123[1][3 * i1 + 1] = ep0.v0.y * ep1.v0.y + ep0.v1.y * ep1.v1.y;
        rhs_123[1][3 * i1 + 2] = ep0.v0.z * ep1.v0.y + ep0.v1.z * ep1.v1.y;
        rhs_123[2][3 * i1 + 0] = ep0.v0.x * ep1.v0.z + ep0.v1.x * ep1.v1.z;
        rhs_123[2][3 * i1 + 1] = ep0.v0.y * ep1.v0.z + ep0.v1.y * ep1.v1.z;
        rhs_123[2][3 * i1 + 2] = ep0.v0.z * ep1.v0.z + ep0.v1.z * ep1.v1.z;
        // Smoothness constraints against the three neighboring triangles.
        for (UInt x = 0; x < 3; x++) {
            const int& other = tri_neighbors[i1][x];
            A_123.add(3 * i1 + 0, 3 * i1 + 0, w); A_123.add(3 * i1 + 0, 3 * other + 0, -w);
            A_123.add(3 * i1 + 1, 3 * i1 + 1, w); A_123.add(3 * i1 + 1, 3 * other + 1, -w);
            A_123.add(3 * i1 + 2, 3 * i1 + 2, w); A_123.add(3 * i1 + 2, 3 * other + 2, -w);
        }
    }
    // Factor with UMFPACK and solve for the three coordinate blocks.
    std::vector<double> entries;
    std::vector<int> row_index;
    std::vector<int> col_ptr;
    A_123.getMatrix(entries, row_index, col_ptr);
    void* Symbolic, * Numeric;
    // Return codes intentionally unchecked (matches original behavior);
    // consider testing against UMFPACK_OK.
    int result1 = umfpack_di_symbolic(3 * numT, 3 * numT, &col_ptr[0], &row_index[0], &entries[0], &Symbolic, NULL, NULL);
    int result2 = umfpack_di_numeric(&col_ptr[0], &row_index[0], &entries[0], Symbolic, &Numeric, NULL, NULL);
    std::vector<double> Qs(numT * 9);
    for (int c = 0; c < 3; c++) {
        double* b = &rhs_123[c][0];
        double* x = &Qs[c * 3 * numT];
        int result3 = umfpack_di_solve(UMFPACK_A, &col_ptr[0], &row_index[0], &entries[0], x, b, Numeric, NULL, NULL);
    }
    // Unpack the solution rows into the 3x3 D matrices.
    for (UInt i1 = 0; i1 < numT; i1++) {
        double* r0 = &Qs[3 * i1];
        double* r1 = &Qs[3 * i1 + 3 * numT];
        double* r2 = &Qs[3 * i1 + 6 * numT];
        D[i1].setRowI(vec3d(r0), 0);
        D[i1].setRowI(vec3d(r1), 1);
        D[i1].setRowI(vec3d(r2), 2);
    }
    // Release the factorization (no leak on the normal path).
    umfpack_di_free_numeric(&Numeric);
    umfpack_di_free_symbolic(&Symbolic);
}
/// <summary>
/// Compute the final vertices of a new instance given preliminiary vertices where the resulting triangle mesh would not be consistent.
///
/// See equation (9) in [1].
/// </summary>
/// <param name="rots">
/// The matrix "R_l[k] S_k Q_k" for every triangle k.
/// R_l[k] is the joint rotation of the join associated with the triangle k.
/// </param>
/// <seealso cref="reconstructModel"/>
/// <returns>
/// Final vertices of the new instance, so that the triangle mesh given by the computed vertices
/// and the ambient topology from the loaded SCAPE model is consistent.
/// </returns>
std::vector<vec3d> decodeDefGradMesh(std::vector<SMat3D>& rots) const {
    int numV = (int)default_mesh_vertices.size();
    // Build the right-hand side of the linear system of eq. (9) in [1].
    // Vertex 0 is pinned: the solve below writes only indices 1..numV-1,
    // so res[0], res[numV], res[2*numV] stay zero-initialized.
    #ifdef SAVE_MATRIX
    RandAccessCompRowMatrix<double> A(numV - 1, numV - 1);
    #endif
    std::vector<double> rhs_x(numV); // zero-initialized by std::vector
    std::vector<double> rhs_y(numV);
    std::vector<double> rhs_z(numV);
    for (int i1 = 0; i1 < numT; i1++) {
        const int& v0 = default_mesh_triangles[i1].x;
        const int& v1 = default_mesh_triangles[i1].y;
        const int& v2 = default_mesh_triangles[i1].z;
        const SMat3D& R = rots[i1];
        // Edge vectors of the default mesh, transformed by this triangle's
        // combined deformation.
        vec3d vdash1 = default_mesh_vertices[v1] - default_mesh_vertices[v0];
        vec3d vdash2 = default_mesh_vertices[v2] - default_mesh_vertices[v0];
        vec3d Rv1 = R.vecTrans(vdash1);
        vec3d Rv2 = R.vecTrans(vdash2);
        #ifdef SAVE_MATRIX
        if (v0 > 0) {
            A.add(v0 - 1, v0 - 1, 2);
            if (v1 > 0) A.add(v0 - 1, v1 - 1, -1);
            if (v2 > 0) A.add(v0 - 1, v2 - 1, -1);
        }
        if (v1 > 0) {
            A.add(v1 - 1, v1 - 1, 1);
            if (v0 > 0) A.add(v1 - 1, v0 - 1, -1);
        }
        if (v2 > 0) {
            A.add(v2 - 1, v2 - 1, 1);
            if (v0 > 0) A.add(v2 - 1, v0 - 1, -1);
        }
        #endif
        rhs_x[v0] -= (Rv1.x + Rv2.x);
        rhs_y[v0] -= (Rv1.y + Rv2.y);
        rhs_z[v0] -= (Rv1.z + Rv2.z);
        rhs_x[v1] += Rv1.x;
        rhs_y[v1] += Rv1.y;
        rhs_z[v1] += Rv1.z;
        rhs_x[v2] += Rv2.x;
        rhs_y[v2] += Rv2.y;
        rhs_z[v2] += Rv2.z;
    }
    #ifdef SAVE_MATRIX
    std::vector<double> entries;
    std::vector<int> row_index;
    std::vector<int> col_ptr;
    A.getMatrix(entries, row_index, col_ptr);
    void* Symbolic, * Numeric;
    int result1 = umfpack_di_symbolic(numV - 1, numV - 1, &col_ptr[0], &row_index[0], &entries[0], &Symbolic, NULL, NULL);
    int result2 = umfpack_di_numeric(&col_ptr[0], &row_index[0], &entries[0], Symbolic, &Numeric, NULL, NULL);
    // FIX: the numeric/symbolic factorizations were saved TWICE (a second,
    // redundant pair of umfpack_di_save_* calls followed the matrix dump);
    // the duplicates were removed.
    umfpack_di_save_numeric(Numeric, "SCAPE_DGrad_numeric.bin");
    umfpack_di_save_symbolic(Symbolic, "SCAPE_DGrad_symbolic.bin");
    std::ofstream fout("matrixDGrad.bin", std::ios::binary);
    int numE = (int)entries.size();
    fout.write((char*)&numE, sizeof(int));
    fout.write((char*)&entries[0], sizeof(double) * numE);
    int numR = (int)row_index.size();
    fout.write((char*)&numR, sizeof(int));
    fout.write((char*)&row_index[0], sizeof(int) * numR);
    int numC = (int)col_ptr.size();
    fout.write((char*)&numC, sizeof(int));
    fout.write((char*)&col_ptr[0], sizeof(int) * numC);
    fout.close();
    #endif
    std::vector<double> res(3 * numV);
    // Solve the three coordinate systems with the prefactorized matrix
    // loaded in loadUMFPACKMatrices(); offsets of +1 skip the pinned vertex.
    for (int c = 0; c < 3; c++) {
        double* b;
        if (c == 0) b = &rhs_x[1];
        else if (c == 1) b = &rhs_y[1];
        else b = &rhs_z[1];
        double* x = &res[c * numV + 1];
        int result3 = umfpack_di_solve(UMFPACK_A, &col_ptrDGrad[0], &row_indexDGrad[0], &entriesDGrad[0], x, b, NumericDGrad, NULL, NULL);
    }
    #ifdef SAVE_MATRIX
    umfpack_di_free_numeric(&Numeric);
    umfpack_di_free_symbolic(&Symbolic);
    #endif
    std::vector<vec3d> ret(numV);
    // FIX: was '#pragma omp parallel' without 'for' -- every thread executed
    // the entire loop (redundant work / technical data race on ret).
    #pragma omp parallel for
    for (int i1 = 0; i1 < numV; i1++) {
        vec3d p(res[i1], res[numV + i1], res[2 * numV + i1]);
        ret[i1] = p;
    }
    return ret;
}
//private:
//! Loads the prefactorized UMFPACK data (numeric + symbolic factorization
//! and the compressed-column matrix) for the deformation-gradient system
//! solved in decodeDefGradMesh(). Any failure terminates via exit(1).
//! Note: loading of the separate Q-system factorization was already
//! disabled in the original code and remains so.
void loadUMFPACKMatrices(const char* pathToMatrices) {
    const std::string base(pathToMatrices);
    const std::string numericFile = base + "SCAPE_DGrad_numeric.bin";
    const std::string symbolicFile = base + "SCAPE_DGrad_symbolic.bin";
    const std::string matrixFile = base + "matrixDGrad.bin";
    // const_cast is required by the UMFPACK C API (it takes char*).
    if (umfpack_di_load_numeric(&NumericDGrad, const_cast<char*>(numericFile.c_str())) != 0) {
        std::cerr << "Error reading SCAPE_DGrad_numeric.bin" << std::endl;
        exit(1);
    }
    if (umfpack_di_load_symbolic(&SymbolicDGrad, const_cast<char*>(symbolicFile.c_str())) != 0) {
        std::cerr << "Error reading SCAPE_DGrad_symbolic.bin" << std::endl;
        exit(1);
    }
    std::ifstream matrixStream(matrixFile.c_str(), std::ios::binary);
    if (!matrixStream.good()) {
        std::cerr << "Error reading matrixDGrad.bin" << std::endl;
        exit(1);
    }
    // Each array is preceded by its int element count.
    int count = 0;
    matrixStream.read((char*)&count, sizeof(int));
    entriesDGrad.resize(count);
    matrixStream.read((char*)&entriesDGrad[0], sizeof(double) * count);
    matrixStream.read((char*)&count, sizeof(int));
    row_indexDGrad.resize(count);
    matrixStream.read((char*)&row_indexDGrad[0], sizeof(int) * count);
    matrixStream.read((char*)&count, sizeof(int));
    col_ptrDGrad.resize(count);
    matrixStream.read((char*)&col_ptrDGrad[0], sizeof(int) * count);
    matrixStream.close();
}
// --- UMFPACK data for the (currently unused) Q system; loading is disabled
// in loadUMFPACKMatrices() but the handles are still freed by the dtor. ---
void* SymbolicQ;
void* NumericQ;
std::vector<double> entriesQ;
std::vector<int> row_indexQ;
std::vector<int> col_ptrQ;
// --- UMFPACK data for the deformation-gradient system; filled by
// loadUMFPACKMatrices() and consumed by decodeDefGradMesh(). ---
void* SymbolicDGrad;
void* NumericDGrad;
std::vector<double> entriesDGrad;
std::vector<int> row_indexDGrad;
std::vector<int> col_ptrDGrad;
//! Vertices of default mesh
std::vector<vec3d> default_mesh_vertices;
//! Triangles of default mesh
std::vector<vec3i> default_mesh_triangles;
//! The rigid parts of the model (stores for each part the triangles and the vertices that belong to the part).
Part rigidParts[numParts];
//! The non-rigid pose deformation of a triangle depends on these two joints.
std::vector< vec2i > triToJoints;
//! The regressed linear functions for reconstructing the affine matrices Qi from the closest joint rotations.
std::vector<perTriangleParams> perTriangleParameters;
//! The PCA bases of the joint rotation vectors.
std::vector< SMat3D > joint_PCA_bases;
//! The joints of the skeleton.
std::vector<Joint> joints;
//! Specifies the part ID for each triangle
std::vector<int> triIDtoPartID;
//! The average deformation gradient for the pca basis.
std::vector<double> avg_defgrad;
//! Reconstructing the model from these SCAPE parameters will yield the average person in the average pose.
std::vector<double> avg_SCAPE_coeffs;
//! The pca basis for body shapes (Encoded as D matrices (deformation gradients)).
PCA_Basis pcabasis;
//! The pca basis for the joint angles.
PCA_Basis pose_pcabasis;
//! the average joint angles.
std::vector<double> avg_jointangles;
//! Lists for each triangle the 3 neighboring triangles.
std::vector<vec3i> tri_neighbors;
/// <summary>
/// Triangles of the topologically correct mesh. Use these when saving the mesh with <see cref="SimpleMesh"/>.
/// This vector links three vertex indices together to one triangle in every element.
/// </summary>
///
/// <remarks>
/// All models (whether the default ['average'] one or any deformed derivative) consist of the same amount of vertices,
/// which are on top of that also semantically identical.
/// For example, if the vertex with index 1234 in the average model is the tip of the nose, then the vertex with index
/// 1234 in any derived model will also represent the tip of the nose.
/// This vector links three vertices together to one triangle in every element.
/// In other words, every vec3i element contains three indices i1, i2, i3, so that the vertices with indices i1, i2, i3
/// form a triangle.
/// </remarks>
///
/// <see cref="SimpleMesh"/>
std::vector<vec3i> topologically_correct_mesh_triangles;
};
|
GB_binop__max_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__max_uint32)
// A.*B function (eWiseMult): GB (_AemultB_01__max_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__max_uint32)
// A.*B function (eWiseMult): GB (_AemultB_03__max_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__max_uint32)
// A*D function (colscale): GB (_AxD__max_uint32)
// D*A function (rowscale): GB (_DxB__max_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__max_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__max_uint32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_uint32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_uint32)
// C=scalar+B GB (_bind1st__max_uint32)
// C=scalar+B' GB (_bind1st_tran__max_uint32)
// C=A+scalar GB (_bind2nd__max_uint32)
// C=A'+scalar GB (_bind2nd_tran__max_uint32)
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = GB_IMAX (aij, bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IMAX (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_UINT32 || GxB_NO_MAX_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; the MAX(uint32) operator is
// baked in via the GB_* macros defined above. The body is the shared
// template instantiated for this type/op combination.
// Auto-generated code: do not hand-edit (see the file header).
void GB (_Cdense_ewise3_accum__max_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (MAX over uint32_t).
// Returns GrB_NO_VALUE when this operator is compiled out via GB_DISABLE,
// signalling the caller to fall back to the generic kernel.
GrB_Info GB (_Cdense_ewise3_noaccum__max_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C using MAX.
// B_ek_slicing/B_ntasks/B_nthreads describe the parallel slicing of B.
GrB_Info GB (_Cdense_accumB__max_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b (passed type-erased via p_bwork) into a
// dense matrix C using MAX.
GrB_Info GB (_Cdense_accumb__max_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE: unreachable (the braces above always return); harmless artifact of
// the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D (op = MAX).
GrB_Info GB (_AxD__max_uint32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D (op = MAX).
GrB_Info GB (_DxB__max_uint32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__max_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
// eWiseAdd: C = A+B or C<M> = A+B (see banner above), with the uint32_t
// MAX operator applied where both entries are present.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// temporary slicing workspaces, used by the template and freed below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__max_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
// eWiseMult, general case: C = A.*B or C<M> = A.*B (see banner above),
// with optional structural and/or complemented mask handling done by the
// included meta template.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__max_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
// eWiseMult special case (see banner above): A is sparse/hyper and B is
// bitmap/full; the work is sliced over A (A_ek_slicing / A_ntasks).
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__max_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
// eWiseMult special case (see banner above): M is sparse/hyper, A and B
// are bitmap/full; the work is sliced over the mask M.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__max_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
// eWiseMult producing a bitmap C (see banner above); ewise_method selects
// among C=A.*B, C<M>=A.*B and C<!M>=A.*B inside the template.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__max_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx[p] = max (x, Bx[p]) with the scalar bound to the first argument.
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Bx = (uint32_t *) Bx_input ;
uint32_t x = (*((uint32_t *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
    // positions absent from the bitmap Bb are left untouched
    if (GBB (Bb, p))
    {
        uint32_t bval = GBX (Bx, p, false) ;
        Cx [p] = GB_IMAX (x, bval) ;
    }
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__max_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx[p] = max (Ax[p], y) with the scalar bound to the second argument.
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
    // positions absent from the bitmap Ab are left untouched
    if (GBB (Ab, p))
    {
        uint32_t aval = GBX (Ax, p, false) ;
        Cx [p] = GB_IMAX (aval, y) ;
    }
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMAX (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__max_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
// the transpose template applies GB_CAST_OP (defined above) to each entry
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// NOTE(review): the redefinition below follows both returns and is dead
// code inside this function; retained verbatim (machine-generated file).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMAX (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__max_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// C = op (A', y): the transpose template applies GB_CAST_OP (defined
// above) to every entry of A while transposing into C.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Sync.c | /* Filename: Sync.c
* Author: Mohammed Sourouri <mohamso@simula.no>
*
* Synchronous Multi-GPU code where the number of threads spawned
* equals the number of GPUs. All memory transfers
* are synchronous. This code corresponds to "OpenMP" results in Figure-8 in the
* SC'14 paper.
*
*
* Copyright [2014] [Mohammed Sourouri]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "Sync.h"
#define DEBUG
#define checkCuda(error) __checkCuda(error, __FILE__, __LINE__)
////////////////////////////////////////////////////////////////////////////////
// A method for checking error in CUDA calls
////////////////////////////////////////////////////////////////////////////////
inline void __checkCuda(cudaError_t error, const char *file, const int line)
{
    // Abort with a diagnostic if a CUDA runtime call returned an error.
    // Active only in debug builds (DEBUG/_DEBUG).
#if defined(DEBUG) || defined(_DEBUG)
    if (error != cudaSuccess)
    {
        // BUG FIX: report the error code that was actually passed in.
        // The old code called cudaGetErrorString(cudaGetLastError()), which
        // reports (and clears) the runtime's sticky error instead of 'error',
        // and can therefore print a different, misleading message.
        printf("checkCuda error at %s:%i: %s\n", file, line,
               cudaGetErrorString(error));
        exit(-1);
    }
#endif
    return;
}
////////////////////////////////////////////////////////////////////////////////
// Program Main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char *argv[])
{
    // Two-GPU synchronous 3D heat solver: parse grid size, iteration count
    // and CUDA block dimensions from the command line.
    int Nx, Ny, Nz, max_iters;
    int blockX, blockY, blockZ;
    if (argc == 8) {
        Nx = atoi(argv[1]);
        Ny = atoi(argv[2]);
        Nz = atoi(argv[3]);
        max_iters = atoi(argv[4]);
        blockX = atoi(argv[5]);
        blockY = atoi(argv[6]);
        blockZ = atoi(argv[7]);
    }
    else
    {
        printf("Usage: %s nx ny nz i block_x block_y block_z number_of_threads\n",
               argv[0]);
        exit(1);
    }
    // Get the number of GPUS
    int number_of_devices;
    checkCuda(cudaGetDeviceCount(&number_of_devices));
    if (number_of_devices < 2) {
        printf("Less than two devices were found.\n");
        printf("Exiting...\n");
        return -1;
    }
    // Decompose along the Z-axis: each device owns a slab of _Nz planes
    int _Nz = Nz/number_of_devices;
    // Define constants of the explicit scheme
    const _DOUBLE_ L = 1.0;
    const _DOUBLE_ h = L/(Nx+1);
    const _DOUBLE_ dt = h*h/6.0;
    const _DOUBLE_ beta = dt/(h*h);
    const _DOUBLE_ c0 = beta;
    const _DOUBLE_ c1 = (1-6*beta);
    // Check if ECC is turned on
    ECCCheck(number_of_devices);
    // Set the number of OpenMP threads (one host thread per GPU)
    omp_set_num_threads(number_of_devices);
#pragma omp parallel
    {
        // renamed from 'tid': omp_get_num_threads() returns the team size,
        // not a thread id
        unsigned int team_size = omp_get_num_threads();
#pragma omp single
        {
            printf("Number of OpenMP threads: %d\n", team_size);
        }
    }
    // CPU memory operations
    int dt_size = sizeof(_DOUBLE_);
    _DOUBLE_ *u_new, *u_old;
    u_new = (_DOUBLE_ *)malloc(sizeof(_DOUBLE_)*(Nx+2)*(Ny+2)*(Nz+2));
    u_old = (_DOUBLE_ *)malloc(sizeof(_DOUBLE_)*(Nx+2)*(Ny+2)*(Nz+2));
    init(u_old, u_new, h, Nx, Ny, Nz);
    // Allocate and generate arrays on the host
    size_t pitch_bytes;
    size_t pitch_gc_bytes;
    _DOUBLE_ *h_Unew, *h_Uold;
    _DOUBLE_ *h_s_Uolds[number_of_devices], *h_s_Unews[number_of_devices];
    _DOUBLE_ *left_send_buffer[number_of_devices], *left_receive_buffer[number_of_devices];
    _DOUBLE_ *right_send_buffer[number_of_devices], *right_receive_buffer[number_of_devices];
    h_Unew = (_DOUBLE_ *)malloc(sizeof(_DOUBLE_)*(Nx+2)*(Ny+2)*(Nz+2));
    h_Uold = (_DOUBLE_ *)malloc(sizeof(_DOUBLE_)*(Nx+2)*(Ny+2)*(Nz+2));
    init(h_Uold, h_Unew, h, Nx, Ny, Nz);
#pragma omp parallel
    {
        unsigned int tid = omp_get_thread_num();
        // BUG FIX: these six buffers were previously malloc()'d and the
        // pointers then immediately overwritten by cudaHostAlloc(), leaking
        // every malloc'd block. The pinned allocations alone are sufficient.
        checkCuda(cudaHostAlloc((void**)&h_s_Unews[tid], dt_size*(Nx+2)*(Ny+2)*(_Nz+2), cudaHostAllocPortable));
        checkCuda(cudaHostAlloc((void**)&h_s_Uolds[tid], dt_size*(Nx+2)*(Ny+2)*(_Nz+2), cudaHostAllocPortable));
        checkCuda(cudaHostAlloc((void**)&right_send_buffer[tid], dt_size*(Nx+2)*(Ny+2)*(_GC_DEPTH), cudaHostAllocPortable));
        checkCuda(cudaHostAlloc((void**)&left_send_buffer[tid], dt_size*(Nx+2)*(Ny+2)*(_GC_DEPTH), cudaHostAllocPortable));
        checkCuda(cudaHostAlloc((void**)&right_receive_buffer[tid], dt_size*(Nx+2)*(Ny+2)*(_GC_DEPTH), cudaHostAllocPortable));
        checkCuda(cudaHostAlloc((void**)&left_receive_buffer[tid], dt_size*(Nx+2)*(Ny+2)*(_GC_DEPTH), cudaHostAllocPortable));
        init_subdomain(h_s_Uolds[tid], h_Uold, Nx, Ny, _Nz, tid);
    }
    // GPU memory operations
    _DOUBLE_ *d_s_Unews[number_of_devices], *d_s_Uolds[number_of_devices];
    _DOUBLE_ *d_right_send_buffer[number_of_devices], *d_left_send_buffer[number_of_devices];
    _DOUBLE_ *d_right_receive_buffer[number_of_devices], *d_left_receive_buffer[number_of_devices];
#pragma omp parallel
    {
        unsigned int tid = omp_get_thread_num();
        checkCuda(cudaSetDevice(tid));
        CopyToConstantMemory(c0, c1);
        // NOTE(review): pitch_bytes/pitch_gc_bytes are written concurrently by
        // all threads; this is benign only if every device reports the same
        // pitch for identical allocation sizes — confirm on mixed-GPU systems.
        checkCuda(cudaMallocPitch((void**)&d_s_Uolds[tid], &pitch_bytes, dt_size*(Nx+2), (Ny+2)*(_Nz+2)));
        checkCuda(cudaMallocPitch((void**)&d_s_Unews[tid], &pitch_bytes, dt_size*(Nx+2), (Ny+2)*(_Nz+2)));
        checkCuda(cudaMallocPitch((void**)&d_left_receive_buffer[tid], &pitch_gc_bytes, dt_size*(Nx+2), (Ny+2)*(_GC_DEPTH)));
        checkCuda(cudaMallocPitch((void**)&d_right_receive_buffer[tid], &pitch_gc_bytes, dt_size*(Nx+2), (Ny+2)*(_GC_DEPTH)));
        checkCuda(cudaMallocPitch((void**)&d_left_send_buffer[tid], &pitch_gc_bytes, dt_size*(Nx+2), (Ny+2)*(_GC_DEPTH)));
        checkCuda(cudaMallocPitch((void**)&d_right_send_buffer[tid], &pitch_gc_bytes, dt_size*(Nx+2), (Ny+2)*(_GC_DEPTH)));
    }
    // Copy data from host to the device
    double HtD_timer = 0.;
    HtD_timer -= omp_get_wtime();
#pragma omp parallel
    {
        unsigned int tid = omp_get_thread_num();
        checkCuda(cudaSetDevice(tid));
        checkCuda(cudaMemcpy2D(d_s_Uolds[tid], pitch_bytes, h_s_Uolds[tid], dt_size*(Nx+2), dt_size*(Nx+2), ((Ny+2)*(_Nz+2)), cudaMemcpyDefault));
        checkCuda(cudaMemcpy2D(d_s_Unews[tid], pitch_bytes, h_s_Unews[tid], dt_size*(Nx+2), dt_size*(Nx+2), ((Ny+2)*(_Nz+2)), cudaMemcpyDefault));
    }
    HtD_timer += omp_get_wtime();
    int pitch = pitch_bytes/dt_size;
    int gc_pitch = pitch_gc_bytes/dt_size;
    // GPU kernel launch parameters
    dim3 threads_per_block(blockX, blockY, blockZ);
    unsigned int blocksInX = getBlock(Nx, blockX);
    unsigned int blocksInY = getBlock(Ny, blockY);
    unsigned int blocksInZ = getBlock(_Nz-2, k_loop);
    dim3 thread_blocks(blocksInX, blocksInY, blocksInZ);
    dim3 thread_blocks_halo(blocksInX, blocksInY);
    double compute_timer = 0.;
    compute_timer -= omp_get_wtime();
#pragma omp parallel
    {
        unsigned int tid = omp_get_thread_num();
        for (int iterations = 0; iterations < max_iters; iterations++)
        {
            // Compute inner nodes
            checkCuda(cudaSetDevice(tid));
            ComputeInnerPoints(thread_blocks, threads_per_block, d_s_Unews[tid], d_s_Uolds[tid], pitch, Nx, Ny, _Nz);
            // Copy right boundary data to host
            if (tid == 0)
            {
                checkCuda(cudaSetDevice(tid));
                CopyBoundaryRegionToGhostCell(thread_blocks_halo, threads_per_block, d_s_Unews[tid], d_right_send_buffer[tid], Nx, Ny, _Nz, pitch, gc_pitch, 0);
                checkCuda(cudaMemcpy2D(right_send_buffer[tid], dt_size*(Nx+2), d_right_send_buffer[tid], pitch_gc_bytes, dt_size*(Nx+2), (Ny+2)*(_GC_DEPTH), cudaMemcpyDefault));
            }
            // Copy left boundary data to host
            if (tid == 1)
            {
                checkCuda(cudaSetDevice(tid));
                CopyBoundaryRegionToGhostCell(thread_blocks_halo, threads_per_block, d_s_Unews[tid], d_left_send_buffer[tid], Nx, Ny, _Nz, pitch, gc_pitch, 1);
                checkCuda(cudaMemcpy2D(left_send_buffer[tid], dt_size*(Nx+2), d_left_send_buffer[tid], pitch_gc_bytes, dt_size*(Nx+2), (Ny+2)*(_GC_DEPTH), cudaMemcpyDefault));
            }
            // both halos must be staged on the host before the exchange
#pragma omp barrier
            // Copy right boundary data to device 1
            if (tid == 1)
            {
                checkCuda(cudaSetDevice(tid));
                checkCuda(cudaMemcpy2D(d_left_receive_buffer[tid], pitch_gc_bytes, right_send_buffer[tid-1], dt_size*(Nx+2), dt_size*(Nx+2), ((Ny+2)*(_GC_DEPTH)), cudaMemcpyDefault));
                CopyGhostCellToBoundaryRegion(thread_blocks_halo, threads_per_block, d_s_Unews[tid], d_left_receive_buffer[tid], Nx, Ny, _Nz, pitch, gc_pitch, 1);
            }
            // Copy left boundary data to device 0
            if (tid == 0)
            {
                checkCuda(cudaSetDevice(tid));
                checkCuda(cudaMemcpy2D(d_right_receive_buffer[tid], pitch_gc_bytes, left_send_buffer[tid+1], dt_size*(Nx+2), dt_size*(Nx+2), ((Ny+2)*(_GC_DEPTH)), cudaMemcpyDefault));
                CopyGhostCellToBoundaryRegion(thread_blocks_halo, threads_per_block, d_s_Unews[tid], d_right_receive_buffer[tid], Nx, Ny, _Nz, pitch, gc_pitch, 0);
            }
            // Swap pointers on the host
#pragma omp barrier
            checkCuda(cudaSetDevice(tid));
            checkCuda(cudaDeviceSynchronize());
            swap(_DOUBLE_*, d_s_Unews[tid], d_s_Uolds[tid]);
        }
    }
    compute_timer += omp_get_wtime();
    // Copy data from device to host
    double DtH_timer = 0;
    DtH_timer -= omp_get_wtime();
#pragma omp parallel
    {
        unsigned int tid = omp_get_thread_num();
        checkCuda(cudaSetDevice(tid));
        checkCuda(cudaMemcpy2D(h_s_Uolds[tid], dt_size*(Nx+2), d_s_Uolds[tid], pitch_bytes, dt_size*(Nx+2), (Ny+2)*(_Nz+2), cudaMemcpyDeviceToHost));
    }
    DtH_timer += omp_get_wtime();
    // Merge sub-domains into a one big domain
#pragma omp parallel
    {
        unsigned int tid = omp_get_thread_num();
        merge_domains(h_s_Uolds[tid], h_Uold, Nx, Ny, _Nz, tid);
    }
    // Calculate on host
#if defined(DEBUG) || defined(_DEBUG)
    cpu_heat3D(u_new, u_old, c0, c1, max_iters, Nx, Ny, Nz);
#endif
    float gflops = CalcGflops(compute_timer, max_iters, Nx, Ny, Nz);
    PrintSummary("3D Heat (7-pt)", "Plane sweeping", compute_timer, HtD_timer, DtH_timer, gflops, max_iters, Nx);
    _DOUBLE_ t = max_iters * dt;
    CalcError(h_Uold, u_old, t, h, Nx, Ny, Nz);
#if defined(DEBUG) || defined(_DEBUG)
    //exportToVTK(h_Uold, h, "heat3D.vtk", Nx, Ny, Nz);
#endif
#pragma omp parallel
    {
        unsigned int tid = omp_get_thread_num();
        checkCuda(cudaSetDevice(tid));
        checkCuda(cudaFree(d_s_Unews[tid]));
        checkCuda(cudaFree(d_s_Uolds[tid]));
        checkCuda(cudaFree(d_right_send_buffer[tid]));
        checkCuda(cudaFree(d_left_send_buffer[tid]));
        checkCuda(cudaFree(d_right_receive_buffer[tid]));
        checkCuda(cudaFree(d_left_receive_buffer[tid]));
        checkCuda(cudaFreeHost(h_s_Unews[tid]));
        checkCuda(cudaFreeHost(h_s_Uolds[tid]));
        checkCuda(cudaFreeHost(left_send_buffer[tid]));
        checkCuda(cudaFreeHost(right_send_buffer[tid]));
        checkCuda(cudaFreeHost(left_receive_buffer[tid]));
        checkCuda(cudaFreeHost(right_receive_buffer[tid]));
        checkCuda(cudaDeviceReset());
    }
    // BUG FIX: h_Unew/h_Uold were allocated but never released
    free(h_Unew);
    free(h_Uold);
    free(u_old);
    free(u_new);
    return 0;
}
|
jacobi_omp.c | /*
* Copyright (c) 2008, BSC (Barcelona Supercomputing Center)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the <organization> nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY BSC ''AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <copyright holder> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <math.h>
#include <time.h>
#define NB 256
#define B 16
#define FALSE (0)
#define TRUE (1)
typedef double fp_type;
typedef fp_type *vin;
typedef fp_type *vout;
typedef fp_type *bin;
typedef fp_type *binout;
fp_type *A[NB][NB];
fp_type *A_new[NB][NB];
fp_type *tmp[NB][NB];
void alloc_and_genmat()
{
int init_val, i, j, ii, jj;
fp_type *p, *p_new;
init_val = 1325;
for (ii = 0; ii < NB; ii++)
{
for (jj = 0; jj < NB; jj++)
{
A[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type));
A_new[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type));
tmp[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type));
if (A[ii][jj] == NULL || A_new[ii][jj] == NULL || tmp[ii][jj] == NULL)
{
printf("Out of memory\n");
exit(1);
}
p = A[ii][jj];
p_new = A_new[ii][jj];
for (i = 0; i < B; i++)
{
for (j = 0; j < B; j++)
{
init_val = (3125 * init_val) % 65536;
(*p) = (fp_type)((init_val - 32768.0) / 16384.0);
(*p_new) = (*p);
p++;
p_new++;
}
}
}
}
}
long usecs(void)
{
    // Wall-clock time in microseconds since the Unix epoch.
    struct timeval now;
    gettimeofday(&now, NULL);
    return now.tv_usec + now.tv_sec * 1000000;
}
void clear(vout v)
{
    // Zero a halo vector of length B; used for tiles on the domain
    // boundary, which have no neighboring tile to supply halo values.
    // (Removed unused locals j and k from the original.)
    int i;
    for (i = 0; i < B; i++)
        v[i] = (fp_type)0.0;
}
void getlastrow(bin A, vout v)
{
    // Copy the bottom row (row B-1) of tile A into halo vector v,
    // indexed by column.
    for (int col = 0; col < B; col++)
        v[col] = A[(B - 1) * B + col];
}
void getlastcol(bin A, vout v)
{
    // Copy the rightmost column (column B-1) of tile A into halo vector v,
    // indexed by row.
    for (int row = 0; row < B; row++)
        v[row] = A[row * B + B - 1];
}
void getfirstrow(bin A, vout v)
{
    // Copy the top row (row 0) of tile A into halo vector v,
    // indexed by column.
    for (int col = 0; col < B; col++)
        v[col] = A[0 * B + col];
}
void getfirstcol(bin A, vout v)
{
    // Copy the leftmost column (column 0) of tile A into halo vector v,
    // indexed by row.
    for (int row = 0; row < B; row++)
        v[row] = A[row * B + 0];
}
void jacobi(vin lefthalo, vin tophalo, vin righthalo, vin bottomhalo, bin A, binout A_new)
{
    // One damped-Jacobi sweep over a single B x B tile:
    //   A_new(i,j) = 0.2 * (A(i,j) + left + top + right + bottom)
    // The halo vectors supply the neighbors stored in adjacent tiles:
    // lefthalo/righthalo are columns (filled by getlastcol/getfirstcol, so
    // indexed by row i); tophalo/bottomhalo are rows (filled by
    // getlastrow/getfirstrow, so indexed by column j).
    int i, j;
    fp_type left, top, right, bottom;
    for (i = 0; (i < B); i++)
    {
        for (j = 0; j < B; j++)
        {
            // BUG FIX: the halos were indexed with the wrong loop variable
            // (lefthalo[j], tophalo[i], bottomhalo[i]), which pinned the
            // boundary neighbors to element 0 of each halo. Columns must be
            // indexed by i and rows by j. Also removed the unused local
            // 'tmp' that shadowed the global tile array of the same name.
            left = (j == 0 ? lefthalo[i] : A[i * B + j - 1]);
            top = (i == 0 ? tophalo[j] : A[(i - 1) * B + j]);
            right = (j == B - 1 ? righthalo[i] : A[i * B + j + 1]);
            bottom = (i == B - 1 ? bottomhalo[j] : A[(i + 1) * B + j]);
            A_new[i * B + j] = 0.2 * (A[i * B + j] + left + top + right + bottom);
        }
    }
}
double maxdelta()
{
    // Maximum absolute difference between A_new and A over all tiles.
    // Initialized to 0.0 instead of the GCC-specific -__DBL_MAX__: every
    // candidate is a fabs() result, so 0.0 is a safe (and portable) floor.
    double dmax = 0.0;
    int ii;
    // BUG FIX: jj, i and j were declared at function scope and therefore
    // shared between threads of the parallel loop — a data race. Declaring
    // them inside the loop makes each thread's copies private.
#pragma omp parallel for schedule(static) reduction(max: dmax)
    for (ii = 0; ii < NB; ii++)
    {
        for (int jj = 0; jj < NB; jj++)
        {
            for (int i = 0; i < B; i++)
            {
                for (int j = 0; j < B; j++)
                {
                    double diff = fabs(A_new[ii][jj][i * B + j] - A[ii][jj][i * B + j]);
                    if (diff > dmax) dmax = diff;
                }
            }
        }
    }
    return dmax;
}
void compute(int niters)
{
    // Run niters damped-Jacobi sweeps over the NB x NB grid of tiles.
    // Each sweep gathers the four halos of every tile from its neighbors
    // (zero halos on the domain boundary), updates A_new tile by tile,
    // reports the max change, and copies A_new back into A.
    int iters;
    int ii, jj;
    fp_type lefthalo[B], tophalo[B], righthalo[B], bottomhalo[B];
    double delta = 2.0;
    // (removed the unused 'epsilon' local — no convergence test is done)
    iters = 0;
    // for (iters = 0; iters < niters; iters++)
    while (iters < niters)
    {
        ++iters;
#pragma omp parallel \
private(ii, jj, lefthalo, tophalo, righthalo, bottomhalo) \
shared(A, A_new)
        {
#pragma omp for schedule(static)
            for (ii = 0; ii < NB; ii++)
            {
                for (jj = 0; jj < NB; jj++)
                {
                    if (ii > 0)
                        getlastrow(A[ii - 1][jj], tophalo);
                    else
                        clear(tophalo);
                    if (jj > 0)
                        getlastcol(A[ii][jj - 1], lefthalo);
                    else
                        clear(lefthalo);
                    if (ii < NB - 1)
                        getfirstrow(A[ii + 1][jj], bottomhalo);
                    else
                        clear(bottomhalo);
                    if (jj < NB - 1)
                        getfirstcol(A[ii][jj + 1], righthalo);
                    else
                        // BUG FIX: this branch cleared lefthalo again,
                        // leaving righthalo stale for tiles on the right
                        // domain boundary.
                        clear(righthalo);
                    jacobi(lefthalo, tophalo, righthalo, bottomhalo, A[ii][jj], A_new[ii][jj]);
                } // jj
            } // ii
        } // end parallel
        delta = maxdelta();
        printf("iteration %d: delta = %e\n", iters, delta);
        // yes, this is an inefficient copy
        // however, the library version requires you to do a copy in this way
        // on all of the component parts to avoid segmentation fault
#pragma omp parallel for schedule(static) shared(A, A_new)
        for (int i = 0; i < NB; ++i)
        {
            for (int j = 0; j < NB; ++j)
            {
                for (int k = 0; k < B; ++k)
                    for (int l = 0; l < B; ++l)
                        A[i][j][k * B + l] = A_new[i][j][k * B + l];
            }
        }
    } // iter
}
int main(int argc, char *argv[])
{
    // Driver: optional argv[1] gives the iteration count (default 1);
    // allocate and seed the matrices, run the solver, report wall time.
    int niters = 1;
    // pp_time_t tm;
    // memset( &tm, 0, sizeof(tm) );
    struct timespec start, end;
    if (argc > 1)
        niters = atoi(argv[1]);
    alloc_and_genmat();
    clock_gettime(CLOCK_MONOTONIC, &start);
    compute(niters);
    clock_gettime(CLOCK_MONOTONIC, &end);
    // seconds + nanoseconds, folded into a single figure in seconds
    double time_taken = (double)(end.tv_sec - start.tv_sec)
                      + (double)(end.tv_nsec - start.tv_nsec) * 1e-9;
    printf("Running time = %g %s\n", time_taken, "s");
    /* FILE *outFile;
    outFile = fopen("./jacobi_omp_values.txt", "w");
    if (outFile == NULL)
    {
    fprintf(stderr, "Error writing to file\n");
    }
    else
    {
    int ii, jj, i, j;
    for (ii = 0; ii < NB; ++ii)
    for (jj = 0; jj < NB; ++jj)
    for (i = 0; i < B; ++i)
    for (j = 0; j < B; ++j)
    fprintf(outFile, "%.15f\n", A[ii][jj][i * B + j]);
    fclose(outFile);
    } */
    return 0;
}
par_interp.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildInterp
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildInterp( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P;
HYPRE_Int *tmp_map_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data = NULL;
HYPRE_Int *A_ext_i = NULL;
HYPRE_BigInt *A_ext_j = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int strong_f_marker;
HYPRE_Int *fine_to_coarse;
//HYPRE_Int *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_BigInt total_global_cpts;
//HYPRE_BigInt my_first_cpt;
HYPRE_Int num_cols_P_offd;
HYPRE_Int i,i1,i2;
HYPRE_Int j,jl,jj,jj1;
HYPRE_Int kc;
HYPRE_BigInt big_k;
HYPRE_Int start;
HYPRE_Int sgn;
HYPRE_Int c_num;
HYPRE_Real diagonal;
HYPRE_Real sum;
HYPRE_Real distribute;
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int print_level = 0;
HYPRE_Int *int_buf_data;
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
//my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag < 0)
{
debug_flag = -debug_flag;
print_level = 1;
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1 && num_cols_A_offd)
{
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
}
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*----------------------------------------------------------------------
* Get the ghost rows of A
*---------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_procs > 1)
{
A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1);
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
}
index = 0;
for (i=0; i < num_cols_A_offd; i++)
{
for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++)
{
big_k = A_ext_j[j];
if (big_k >= col_1 && big_k < col_n)
{
A_ext_j[index] = big_k - col_1;
A_ext_data[index++] = A_ext_data[j];
}
else
{
kc = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_A_offd);
if (kc > -1)
{
A_ext_j[index] = (HYPRE_BigInt)(-kc-1);
A_ext_data[index++] = A_ext_data[j];
}
}
}
A_ext_i[i] = index;
}
for (i = num_cols_A_offd; i > 0; i--)
A_ext_i[i] = A_ext_i[i-1];
if (num_procs > 1) A_ext_i[0] = 0;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 2 Get A_ext = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i.
*--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);
   /*-----------------------------------------------------------------------
    * Initialize some stuff.
    *-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
//fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
fine_to_coarse[i] += coarse_shift;
}
//fine_to_coarse[i] += my_first_cpt+coarse_shift;
}
/*index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/*#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; */
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
strong_f_marker = -2;
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
jj_begin_row = jj_counter;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
/*--------------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*--------------------------------------------------------------*/
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
}
}
jj_end_row = jj_counter;
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
jj_begin_row_offd = jj_counter_offd;
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
/*-----------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
}
}
}
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
/* Loop over ith row of A. First, the diagonal part of A */
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
/*--------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and strongly influences i,
             * distribute a_{i,i1} to C-points that strongly influence i.
* Note: currently no distribution to the diagonal in this case.
*--------------------------------------------------------------*/
else if (P_marker[i1] == strong_f_marker)
{
sum = zero;
/*-----------------------------------------------------------
* Loop over row of A for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*-----------------------------------------------------------*/
sgn = 1;
if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row &&
(sgn*A_diag_data[jj1]) < 0)
{
sum += A_diag_data[jj1];
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
sum += A_offd_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_diag_data[jj] / sum;
/*-----------------------------------------------------------
* Loop over row of A for point i1 and do the distribution.
*-----------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_diag_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_diag_data[jj1];
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
P_offd_data[P_marker_offd[i2]]
+= distribute * A_offd_data[jj1];
}
}
}
}
else
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
{
diagonal += A_diag_data[jj];
}
}
}
/*--------------------------------------------------------------
* Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
* into the diagonal.
*--------------------------------------------------------------*/
else if (CF_marker[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
{
diagonal += A_diag_data[jj];
}
}
}
/*----------------------------------------------------------------
* Still looping over ith row of A. Next, loop over the
* off-diagonal part of A
*---------------------------------------------------------------*/
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
}
/*------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and strongly influences i,
                * distribute a_{i,i1} to C-points that strongly influence i.
* Note: currently no distribution to the diagonal in this case.
*-----------------------------------------------------------*/
else if (P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
/*---------------------------------------------------------
* Loop over row of A_ext for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*---------------------------------------------------------*/
/* find row number */
c_num = A_offd_j[jj];
sgn = 1;
if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = (HYPRE_Int)A_ext_j[jj1];
if (i2 > -1)
{
/* in the diagonal block */
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_offd_data[jj] / sum;
/*---------------------------------------------------------
* Loop over row of A_ext for point i1 and do
* the distribution.
*--------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = (HYPRE_Int)A_ext_j[jj1];
if (i2 > -1) /* in the diagonal block */
{
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
P_offd_data[P_marker_offd[-i2-1]]
+= distribute * A_ext_data[jj1];
}
}
}
else
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
{
diagonal += A_offd_data[jj];
}
}
}
/*-----------------------------------------------------------
* Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
* into the diagonal.
*-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
{
diagonal += A_offd_data[jj];
}
}
}
}
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
if (diagonal == 0.0)
{
if (print_level)
{
hypre_printf(" Warning! zero diagonal! Proc id %d row %d\n", my_id,i);
}
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] = 0.0;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] = 0.0;
}
}
else
{
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] /= -diagonal;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] /= -diagonal;
}
}
}
strong_f_marker--;
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_A_offd; i++)
{
P_marker[i] = 0;
}
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
tmp_map_offd[i] = index++;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
for (i=0; i < n_fine; i++)
{
if (CF_marker[i] == -3) CF_marker[i] = -1;
}
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd);
*P_ptr = P;
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
//hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
if (num_procs > 1) hypre_CSRMatrixDestroy(A_ext);
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildInterpHE
* interpolation routine for hyperbolic PDEs
* treats weak fine connections like strong fine connections
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildInterpHE( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P;
HYPRE_Int *tmp_map_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data = NULL;
HYPRE_Int *A_ext_i = NULL;
HYPRE_BigInt *A_ext_j = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int *fine_to_coarse;
//HYPRE_Int *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_BigInt total_global_cpts;
//HYPRE_BigInt my_first_cpt;
HYPRE_Int num_cols_P_offd;
HYPRE_Int i,i1,i2;
HYPRE_Int j,jl,jj,jj1;
HYPRE_Int kc;
HYPRE_BigInt big_k;
HYPRE_Int start;
HYPRE_Int sgn;
HYPRE_Int c_num;
HYPRE_Real diagonal;
HYPRE_Real sum;
HYPRE_Real distribute;
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + local_numrows;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
//my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1 && num_cols_A_offd)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*----------------------------------------------------------------------
* Get the ghost rows of A
*---------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_procs > 1)
{
A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1);
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
}
index = 0;
for (i=0; i < num_cols_A_offd; i++)
{
for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++)
{
big_k = A_ext_j[j];
if (big_k >= col_1 && big_k < col_n)
{
A_ext_j[index] = big_k - col_1;
A_ext_data[index++] = A_ext_data[j];
}
else
{
kc = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_A_offd);
if (kc > -1)
{
A_ext_j[index] = (HYPRE_BigInt)(-kc-1);
A_ext_data[index++] = A_ext_data[j];
}
}
}
A_ext_i[i] = index;
}
for (i = num_cols_A_offd; i > 0; i--)
A_ext_i[i] = A_ext_i[i-1];
if (num_procs > 1) A_ext_i[0] = 0;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 2 Get A_ext = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
   /*-----------------------------------------------------------------------
    * Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i.
*--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
   /*-----------------------------------------------------------------------
    * Initialize some stuff.
    *-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
//fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
fine_to_coarse[i] += coarse_shift;
}
/*index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/*#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;*/
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
jj_begin_row = jj_counter;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
jj_end_row = jj_counter;
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
jj_begin_row_offd = jj_counter_offd;
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
/* Loop over ith row of A. First, the diagonal part of A */
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
/*--------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and influences i,
* distribute a_{i,i1} to C-points that strongly influence i.
* Note: currently no distribution to the diagonal in this case.
*--------------------------------------------------------------*/
else
{
sum = zero;
/*-----------------------------------------------------------
* Loop over row of A for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*-----------------------------------------------------------*/
sgn = 1;
if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row &&
(sgn*A_diag_data[jj1]) < 0)
{
sum += A_diag_data[jj1];
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
sum += A_offd_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_diag_data[jj] / sum;
/*-----------------------------------------------------------
* Loop over row of A for point i1 and do the distribution.
*-----------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_diag_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_diag_data[jj1];
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
P_offd_data[P_marker_offd[i2]]
+= distribute * A_offd_data[jj1];
}
}
}
}
else
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
}
/*----------------------------------------------------------------
* Still looping over ith row of A. Next, loop over the
* off-diagonal part of A
*---------------------------------------------------------------*/
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
}
/*------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and influences i,
* distribute a_{i,i1} to C-points that strongly influence i.
* Note: currently no distribution to the diagonal in this case.
*-----------------------------------------------------------*/
else
{
sum = zero;
/*---------------------------------------------------------
* Loop over row of A_ext for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*---------------------------------------------------------*/
/* find row number */
c_num = A_offd_j[jj];
sgn = 1;
if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = (HYPRE_Int)A_ext_j[jj1];
if (i2 > -1)
{
/* in the diagonal block */
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_offd_data[jj] / sum;
/*---------------------------------------------------------
* Loop over row of A_ext for point i1 and do
* the distribution.
*--------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = (HYPRE_Int)A_ext_j[jj1];
if (i2 > -1) /* in the diagonal block */
{
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
P_offd_data[P_marker_offd[-i2-1]]
+= distribute * A_ext_data[jj1];
}
}
}
else
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
}
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] /= -diagonal;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] /= -diagonal;
}
}
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_A_offd; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
tmp_map_offd[i] = index++;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,A,fine_to_coarse, tmp_map_offd);
*P_ptr = P;
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(A_ext);
}
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildDirInterpHost, hypre_BoomerAMGBuildDirInterp
*--------------------------------------------------------------------------*/
/*
 * Host (CPU) implementation of BoomerAMG direct interpolation.
 *
 * Builds the prolongation matrix P from
 *   A                - fine-grid ParCSR operator,
 *   CF_marker        - coarse/fine splitting per fine row (>= 0 marks a C-point;
 *                      -3 entries are reset to -1 before returning),
 *   S                - strength-of-connection matrix (same row partition as A),
 *   num_cpts_global  - global coarse-point partition (num_cpts_global[1] on the
 *                      last rank gives the global number of C-points),
 *   num_functions / dof_func - unknown-based systems information,
 *   debug_flag       - 4 enables timing printouts,
 *   trunc_factor / max_elmts - truncation threshold and per-row nnz cap.
 * On return *P_ptr holds the new matrix; the hypre error flag is returned.
 *
 * C-point rows of P are the identity.  Each F-point row interpolates from the
 * C-points that strongly influence it; the weights are formed by scaling the
 * strong couplings so that the (sign-separated) row sums of A are preserved:
 * negative entries are scaled by alfa, positive entries by beta.
 */
HYPRE_Int
hypre_BoomerAMGBuildDirInterpHost( hypre_ParCSRMatrix *A,
                                   HYPRE_Int *CF_marker,
                                   hypre_ParCSRMatrix *S,
                                   HYPRE_BigInt *num_cpts_global,
                                   HYPRE_Int num_functions,
                                   HYPRE_Int *dof_func,
                                   HYPRE_Int debug_flag,
                                   HYPRE_Real trunc_factor,
                                   HYPRE_Int max_elmts,
                                   hypre_ParCSRMatrix **P_ptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   /* CSR views of the local (diag) and off-processor (offd) parts of A */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   /* Strength matrix: only its sparsity pattern is used (no data arrays) */
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
   hypre_ParCSRMatrix *P;
   HYPRE_BigInt *col_map_offd_P;
   HYPRE_Int *tmp_map_offd = NULL;
   HYPRE_Int *CF_marker_offd = NULL;  /* CF splitting of off-processor columns */
   HYPRE_Int *dof_func_offd = NULL;   /* dof_func of off-processor columns (systems only) */
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int *P_offd_i;
   HYPRE_Int *P_offd_j;
   HYPRE_Int P_diag_size, P_offd_size;
   HYPRE_Int jj_counter,jj_counter_offd;
   HYPRE_Int *jj_count, *jj_count_offd;     /* per-thread nnz counts for P */
   HYPRE_Int jj_begin_row,jj_begin_row_offd;
   HYPRE_Int jj_end_row,jj_end_row_offd;
   HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int *fine_to_coarse;     /* fine-row -> local coarse index map */
   HYPRE_Int *coarse_counter;     /* per-thread C-point counts */
   HYPRE_Int coarse_shift;
   HYPRE_BigInt total_global_cpts;
   HYPRE_Int num_cols_P_offd;
   //HYPRE_BigInt my_first_cpt;
   HYPRE_Int i,i1;
   HYPRE_Int j,jl,jj;
   HYPRE_Int start;
   HYPRE_Real diagonal;
   /* Row sums split by sign: N = all same-function couplings in the row,
    * P = couplings to strong interpolatory C-points only. */
   HYPRE_Real sum_N_pos, sum_P_pos;
   HYPRE_Real sum_N_neg, sum_P_neg;
   HYPRE_Real alfa = 1.0;
   HYPRE_Real beta = 1.0;
   HYPRE_Real zero = 0.0;
   HYPRE_Real one = 1.0;
   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   HYPRE_Int num_threads;
   HYPRE_Int num_sends;
   HYPRE_Int index;
   HYPRE_Int ns, ne, size, rest;  /* per-thread row-range bookkeeping */
   HYPRE_Int *int_buf_data;
   HYPRE_Real wall_time; /* for debugging instrumentation */

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   num_threads = hypre_NumThreads();
   //my_first_cpt = num_cpts_global[0];
   /* Last rank knows the global C-point count; broadcast it to everyone. */
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/
   if (debug_flag==4) wall_time = time_getWallclockSeconds();
   if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   if (num_functions > 1 && num_cols_A_offd)
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   if (!comm_pkg)
   {
      /* Communication package may not exist yet; build it on demand. */
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                                num_sends), HYPRE_MEMORY_HOST);
   /* Pack and exchange CF_marker for columns owned by other ranks. */
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++]
            = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }
   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                               CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   if (num_functions > 1)
   {
      /* Same exchange for dof_func in the systems (multi-function) case. */
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++]
               = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                                  dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }
   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Comm 1 CF_marker =    %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }
   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/
   /*-----------------------------------------------------------------------
    * Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/
   /* Rows are split into num_threads contiguous chunks; the first 'rest'
    * chunks get one extra row.  Each thread counts its own nnz of P. */
   /* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a C-point, interpolation is the identity. Also set up
          * mapping vector.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            jj_count[j]++;
            fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, interpolation is from the C-points that
          * strongly influence i.
          *--------------------------------------------------------------------*/
         else
         {
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               if (CF_marker[i1] > 0)
               {
                  jj_count[j]++;
               }
            }
            if (num_procs > 1)
            {
               for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
               {
                  i1 = S_offd_j[jj];
                  if (CF_marker_offd[i1] > 0)
                  {
                     jj_count_offd[j]++;
                  }
               }
            }
         }
      }
   }
   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/
   /* Prefix-sum the per-thread counts so each thread knows its offsets. */
   for (i=0; i < num_threads-1; i++)
   {
      coarse_counter[i+1] += coarse_counter[i];
      jj_count[i+1] += jj_count[i];
      jj_count_offd[i+1] += jj_count_offd[i];
   }
   i = num_threads-1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];
   P_diag_size = jj_counter;
   /* P's data arrays use HYPRE_MEMORY_DEVICE (hypre's active memory
    * location; host memory in CPU-only builds). */
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
   P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);
   P_diag_i[n_fine] = jj_counter;
   P_offd_size = jj_counter_offd;
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
   P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);
   /*-----------------------------------------------------------------------
    * Initialize some stuff.
    *-----------------------------------------------------------------------*/
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Internal work 1 =     %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }
   /*-----------------------------------------------------------------------
    * Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/
   if (debug_flag==4) wall_time = time_getWallclockSeconds();
   //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   /* Convert each thread's local coarse numbering into the process-wide
    * numbering by adding the preceding threads' C-point total. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      coarse_shift = 0;
      if (j > 0) coarse_shift = coarse_counter[j-1];
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
      {
         fine_to_coarse[i] += coarse_shift;
      }
   }
   /*index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++]
            = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }
   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                               fine_to_coarse_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Comm 4 FineToCoarse = %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }*/
   if (debug_flag==4) wall_time = time_getWallclockSeconds();
   /*#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;*/
   /*-----------------------------------------------------------------------
    * Loop over fine grid points.
    *-----------------------------------------------------------------------*/
   /* NOTE(review): alfa and beta are listed 'private' in the pragma below,
    * so each thread enters the region with them uninitialized (the = 1.0
    * initializers above do not carry into the parallel region).  If the
    * first F-row a thread visits has sum_P_neg == 0 (or sum_P_pos == 0),
    * the stale value is used -- TODO confirm this is intended. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,diagonal,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd,sum_P_pos,sum_P_neg,sum_N_pos,sum_N_neg,alfa,beta) HYPRE_SMP_SCHEDULE
#endif
   for (jl = 0; jl < num_threads; jl++)
   {
      HYPRE_Int *P_marker, *P_marker_offd;
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (jl < rest)
      {
         ns = jl*size+jl;
         ne = (jl+1)*size+jl+1;
      }
      else
      {
         ns = jl*size+rest;
         ne = (jl+1)*size+rest;
      }
      /* Resume this thread's nnz counters at the offsets computed above. */
      jj_counter = 0;
      if (jl > 0) jj_counter = jj_count[jl-1];
      jj_counter_offd = 0;
      if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
      /* P_marker maps a column index to its position in the current row of
       * P (>= jj_begin_row) or -1 if not interpolatory. */
      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      if (num_cols_A_offd)
         P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
      else
         P_marker_offd = NULL;
      for (i = 0; i < n_fine; i++)
      {
         P_marker[i] = -1;
      }
      for (i = 0; i < num_cols_A_offd; i++)
      {
         P_marker_offd[i] = -1;
      }
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter] = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/
         else
         {
            /* Diagonal part of P */
            P_diag_i[i] = jj_counter;
            jj_begin_row = jj_counter;
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/
               if (CF_marker[i1] >= 0)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter] = fine_to_coarse[i1];
                  P_diag_data[jj_counter] = zero;
                  jj_counter++;
               }
            }
            jj_end_row = jj_counter;
            /* Off-Diagonal part of P */
            P_offd_i[i] = jj_counter_offd;
            jj_begin_row_offd = jj_counter_offd;
            if (num_procs > 1)
            {
               for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
               {
                  i1 = S_offd_j[jj];
                  /*-----------------------------------------------------------
                   * If neighbor i1 is a C-point, set column number in P_offd_j
                   * and initialize interpolation weight to zero.
                   *-----------------------------------------------------------*/
                  if (CF_marker_offd[i1] >= 0)
                  {
                     P_marker_offd[i1] = jj_counter_offd;
                     /* Stores the local offd column; remapped to the
                      * compressed numbering after truncation below. */
                     P_offd_j[jj_counter_offd] = i1;
                     P_offd_data[jj_counter_offd] = zero;
                     jj_counter_offd++;
                  }
               }
            }
            jj_end_row_offd = jj_counter_offd;
            diagonal = A_diag_data[A_diag_i[i]];
            /* Loop over ith row of A.  First, the diagonal part of A */
            sum_N_pos = 0;
            sum_N_neg = 0;
            sum_P_pos = 0;
            sum_P_neg = 0;
            for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
            {
               i1 = A_diag_j[jj];
               /* Accumulate sign-split sums of all couplings within the
                * same function (unknown). */
               if (num_functions == 1 || dof_func[i1] == dof_func[i])
               {
                  if (A_diag_data[jj] > 0)
                     sum_N_pos += A_diag_data[jj];
                  else
                     sum_N_neg += A_diag_data[jj];
               }
               /*--------------------------------------------------------------
                * Case 1: neighbor i1 is a C-point and strongly influences i,
                * accumulate a_{i,i1} into the interpolation weight.
                *--------------------------------------------------------------*/
               if (P_marker[i1] >= jj_begin_row)
               {
                  P_diag_data[P_marker[i1]] += A_diag_data[jj];
                  if (A_diag_data[jj] > 0)
                     sum_P_pos += A_diag_data[jj];
                  else
                     sum_P_neg += A_diag_data[jj];
               }
            }
            /*----------------------------------------------------------------
             * Still looping over ith row of A. Next, loop over the
             * off-diagonal part of A
             *---------------------------------------------------------------*/
            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  i1 = A_offd_j[jj];
                  if (num_functions == 1 || dof_func_offd[i1] == dof_func[i])
                  {
                     if (A_offd_data[jj] > 0)
                        sum_N_pos += A_offd_data[jj];
                     else
                        sum_N_neg += A_offd_data[jj];
                  }
                  /*--------------------------------------------------------------
                   * Case 1: neighbor i1 is a C-point and strongly influences i,
                   * accumulate a_{i,i1} into the interpolation weight.
                   *--------------------------------------------------------------*/
                  if (P_marker_offd[i1] >= jj_begin_row_offd)
                  {
                     P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
                     if (A_offd_data[jj] > 0)
                        sum_P_pos += A_offd_data[jj];
                     else
                        sum_P_neg += A_offd_data[jj];
                  }
               }
            }
            /* Scaling factors: ratio of total to interpolatory couplings
             * of each sign, divided by the diagonal of A. */
            if (sum_P_neg) alfa = sum_N_neg/sum_P_neg/diagonal;
            if (sum_P_pos) beta = sum_N_pos/sum_P_pos/diagonal;
            /*-----------------------------------------------------------------
             * Set interpolation weight by dividing by the diagonal.
             *-----------------------------------------------------------------*/
            for (jj = jj_begin_row; jj < jj_end_row; jj++)
            {
               if (P_diag_data[jj]> 0)
                  P_diag_data[jj] *= -beta;
               else
                  P_diag_data[jj] *= -alfa;
            }
            for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
            {
               if (P_offd_data[jj]> 0)
                  P_offd_data[jj] *= -beta;
               else
                  P_offd_data[jj] *= -alfa;
            }
         }
         P_offd_i[i+1] = jj_counter_offd;
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
   }
   /* Wrap the assembled CSR arrays in a ParCSR matrix object. */
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      /* Truncation may reallocate the CSR arrays; refresh local pointers. */
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }
   /* Compress the offd column space of P to only the columns actually
    * referenced, building tmp_map_offd (new -> old A-offd index). */
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      HYPRE_Int *P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < num_cols_A_offd; i++)
         P_marker[i] = 0;
      num_cols_P_offd = 0;
      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            num_cols_P_offd++;
            P_marker[index] = 1;
         }
      }
      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
      index = 0;
      for (i=0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index]==0) index++;
         tmp_map_offd[i] = index++;
      }
      /* Remap P_offd_j from A-offd indices to the compressed numbering. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < P_offd_size; i++)
      {
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                          P_offd_j[i],
                                          num_cols_P_offd);
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }
   /* Restore CF_marker entries flagged as -3 back to ordinary F-points. */
   for (i=0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;
   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }
   hypre_GetCommPkgRTFromCommPkgA(P, A, fine_to_coarse, tmp_map_offd);
   *P_ptr = P;
   /* Release all scratch arrays. */
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}
/*
 * Public entry point for direct interpolation: selects the device or the
 * host implementation based on where A's data lives, wrapping the call in
 * a GPU profiling range when a GPU build is active.  Returns the status
 * of the chosen implementation.
 */
HYPRE_Int
hypre_BoomerAMGBuildDirInterp( hypre_ParCSRMatrix *A,
                               HYPRE_Int *CF_marker,
                               hypre_ParCSRMatrix *S,
                               HYPRE_BigInt *num_cpts_global,
                               HYPRE_Int num_functions,
                               HYPRE_Int *dof_func,
                               HYPRE_Int debug_flag,
                               HYPRE_Real trunc_factor,
                               HYPRE_Int max_elmts,
                               HYPRE_Int interp_type,
                               hypre_ParCSRMatrix **P_ptr)
{
   HYPRE_Int status = 0;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPushRange("DirInterp");

   if ( hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) ) == HYPRE_EXEC_DEVICE )
   {
      /* interp_type selects among the device variants */
      status = hypre_BoomerAMGBuildDirInterpDevice(A, CF_marker, S, num_cpts_global,
                                                   num_functions, dof_func, debug_flag,
                                                   trunc_factor, max_elmts,
                                                   interp_type, P_ptr);
   }
   else
   {
      status = hypre_BoomerAMGBuildDirInterpHost(A, CF_marker, S, num_cpts_global,
                                                 num_functions, dof_func, debug_flag,
                                                 trunc_factor, max_elmts, P_ptr);
   }

   hypre_GpuProfilingPopRange();
#else
   /* CPU-only build: interp_type is unused; the host kernel is the only path */
   status = hypre_BoomerAMGBuildDirInterpHost(A, CF_marker, S, num_cpts_global,
                                              num_functions, dof_func, debug_flag,
                                              trunc_factor, max_elmts, P_ptr);
#endif

   return status;
}
/*------------------------------------------------
* Drop entries in interpolation matrix P
* max_elmts == 0 means no limit on rownnz
*------------------------------------------------*/
/*
 * Truncate the interpolation matrix P: drop entries below the relative
 * threshold trunc_factor and cap each row at max_elmts nonzeros
 * (max_elmts == 0 means no cap).  Dispatches to the device routine when
 * P lives on the device; otherwise uses the generic host truncation.
 */
HYPRE_Int
hypre_BoomerAMGInterpTruncation( hypre_ParCSRMatrix *P,
                                 HYPRE_Real trunc_factor,
                                 HYPRE_Int max_elmts)
{
   /* Fast path: no threshold and no row cap means there is nothing to drop. */
   if (trunc_factor <= 0.0 && max_elmts == 0)
   {
      return 0;
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if ( hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(P) ) == HYPRE_EXEC_DEVICE )
   {
      return hypre_BoomerAMGInterpTruncationDevice(P, trunc_factor, max_elmts);
   }
#endif

   {
      const HYPRE_Int rescale  = 1; /* rescale surviving rows of P */
      const HYPRE_Int nrm_type = 0; /* threshold against the infinity norm of each row */
      return hypre_ParCSRMatrixTruncate(P, trunc_factor, max_elmts, rescale, nrm_type);
   }
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildInterpModUnk - this is a modified interpolation for the unknown approach.
* here we need to pass in a strength matrix built on the entire matrix.
*
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildInterpModUnk( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P;
HYPRE_Int *tmp_map_offd;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data = NULL;
HYPRE_Int *A_ext_i = NULL;
HYPRE_BigInt *A_ext_j = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int strong_f_marker;
HYPRE_Int *fine_to_coarse;
//HYPRE_Int *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_BigInt total_global_cpts;
HYPRE_Int num_cols_P_offd;
//HYPRE_BigInt my_first_cpt;
HYPRE_Int i,i1,i2;
HYPRE_Int j,jl,jj,jj1;
HYPRE_Int kc;
HYPRE_BigInt big_k;
HYPRE_Int start;
HYPRE_Int sgn;
HYPRE_Int c_num;
HYPRE_Real diagonal;
HYPRE_Real sum;
HYPRE_Real distribute;
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int print_level = 0;
HYPRE_Int *int_buf_data;
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + local_numrows;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
//my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag < 0)
{
debug_flag = -debug_flag;
print_level = 1;
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1 && num_cols_A_offd)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends), HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*----------------------------------------------------------------------
* Get the ghost rows of A
*---------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_procs > 1)
{
A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1);
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
}
index = 0;
for (i=0; i < num_cols_A_offd; i++)
{
for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++)
{
big_k = A_ext_j[j];
if (big_k >= col_1 && big_k < col_n)
{
A_ext_j[index] = big_k - col_1;
A_ext_data[index++] = A_ext_data[j];
}
else
{
kc = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_A_offd);
if (kc > -1)
{
A_ext_j[index] = (HYPRE_BigInt)(-kc-1);
A_ext_data[index++] = A_ext_data[j];
}
}
}
A_ext_i[i] = index;
}
for (i = num_cols_A_offd; i > 0; i--)
A_ext_i[i] = A_ext_i[i-1];
if (num_procs > 1) A_ext_i[0] = 0;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 2 Get A_ext = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i.
*--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------------
* Intialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
//fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
fine_to_coarse[i] += coarse_shift;
}
/*index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/*#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;*/
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
strong_f_marker = -2;
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
jj_begin_row = jj_counter;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
/*--------------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*--------------------------------------------------------------*/
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
}
}
jj_end_row = jj_counter;
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
jj_begin_row_offd = jj_counter_offd;
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
/*-----------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
}
}
}
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
/* Loop over ith row of A. First, the diagonal part of A */
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
/*--------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and strongly influences i,
* distribute a_{i,i1} to C-points that strongly infuence i.
* Note: currently no distribution to the diagonal in this case.
HERE, we only want to distribut to points of the SAME function type
*--------------------------------------------------------------*/
else if (P_marker[i1] == strong_f_marker)
{
sum = zero;
/*-----------------------------------------------------------
* Loop over row of A for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*-----------------------------------------------------------*/
sgn = 1;
if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (P_marker[i2] >= jj_begin_row &&
(sgn*A_diag_data[jj1]) < 0 )
{
sum += A_diag_data[jj1];
}
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
sum += A_offd_data[jj1];
}
}
}
}
if (sum != 0)
{
distribute = A_diag_data[jj] / sum;
/*-----------------------------------------------------------
* Loop over row of A for point i1 and do the distribution.
*-----------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_diag_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_diag_data[jj1];
}
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
P_offd_data[P_marker_offd[i2]]
+= distribute * A_offd_data[jj1];
}
}
}
}
}
else /* sum = 0 - only add to diag if the same function type */
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
/*--------------------------------------------------------------
* Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
* into the diagonal. (only if the same function type)
*--------------------------------------------------------------*/
else if (CF_marker[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
/*----------------------------------------------------------------
* Still looping over ith row of A. Next, loop over the
* off-diagonal part of A
*---------------------------------------------------------------*/
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
}
/*------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and strongly influences i,
* distribute a_{i,i1} to C-points that strongly infuence i.
* Note: currently no distribution to the diagonal in this case.
AGAIN, we only want to distribut to points of the SAME function type
*-----------------------------------------------------------*/
else if (P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
/*---------------------------------------------------------
* Loop over row of A_ext for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*---------------------------------------------------------*/
/* find row number */
c_num = A_offd_j[jj];
sgn = 1;
if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = (HYPRE_Int)A_ext_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (i2 > -1)
{
/* in the diagonal block */
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
}
}
if (sum != 0)
{
distribute = A_offd_data[jj] / sum;
/*---------------------------------------------------------
* Loop over row of A_ext for point i1 and do
* the distribution.
*--------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = (HYPRE_Int)A_ext_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (i2 > -1) /* in the diagonal block */
{
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
P_offd_data[P_marker_offd[-i2-1]]
+= distribute * A_ext_data[jj1];
}
}
}
}
else /* sum = 0 */
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
/*-----------------------------------------------------------
* Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
* into the diagonal.
*-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
if (diagonal == 0.0)
{
if (print_level)
hypre_printf(" Warning! zero diagonal! Proc id %d row %d\n", my_id,i);
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] = 0.0;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] = 0.0;
}
}
else
{
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] /= -diagonal;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] /= -diagonal;
}
}
}
strong_f_marker--;
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_A_offd; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
tmp_map_offd[i] = index++;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P, A, fine_to_coarse, tmp_map_offd);
*P_ptr = P;
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
if (num_procs > 1) hypre_CSRMatrixDestroy(A_ext);
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGTruncandBuild
 *
 * Truncates the interpolation matrix P (dropping entries below
 * trunc_factor * row-max and/or limiting each row to max_elmts entries)
 * and then compresses the off-diagonal part of P: off-processor columns
 * that are no longer referenced are removed, P_offd_j is renumbered to the
 * compressed local indexing, and a new (smaller) column map is installed.
 * Finally, the matvec communication package of P is rebuilt to match.
 *
 * Returns hypre_error_flag.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGTruncandBuild( hypre_ParCSRMatrix *P,
                              HYPRE_Real trunc_factor,
                              HYPRE_Int max_elmts)
{
   hypre_CSRMatrix *P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_ParCSRCommPkg *commpkg_P = hypre_ParCSRMatrixCommPkg(P);
   HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(P);
   HYPRE_Int *P_offd_i = hypre_CSRMatrixI(P_offd);
   HYPRE_Int *P_offd_j = hypre_CSRMatrixJ(P_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(P_offd);
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(P_offd);
   HYPRE_BigInt *new_col_map_offd;          /* compressed column map (global ids) */
   HYPRE_Int *tmp_map_offd = NULL;          /* kept old local index, in ascending order */
   HYPRE_Int P_offd_size=0, new_num_cols_offd;
   HYPRE_Int *P_marker;                     /* 0/1 flag per old offd column: referenced? */
   HYPRE_Int i;
   HYPRE_Int index;
   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      /* truncation may have reallocated the CSR arrays — refetch them */
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_size = P_offd_i[n_fine];
   }
   new_num_cols_offd = 0;
   if (P_offd_size)
   {
      /* Pass 1: mark every off-processor column still referenced by P_offd
       * and count how many distinct columns survive. */
      P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      /*#define HYPRE_SMP_PRIVATE i
      #include "../utilities/hypre_smp_forloop.h"*/
      for (i=0; i < num_cols_offd; i++)
         P_marker[i] = 0;
      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            new_num_cols_offd++;
            P_marker[index] = 1;
         }
      }
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST);
      new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, new_num_cols_offd, HYPRE_MEMORY_HOST);
      /* Pass 2: build tmp_map_offd = sorted list of the surviving old local
       * column indices (P_marker is read but NOT modified here). */
      index = 0;
      for (i=0; i < new_num_cols_offd; i++)
      {
         while (P_marker[index]==0) index++;
         tmp_map_offd[i] = index++;
      }
      /*#define HYPRE_SMP_PRIVATE i
      #include "../utilities/hypre_smp_forloop.h"*/
      /* Renumber P_offd_j from old local indices to compressed ones;
       * binary search works because tmp_map_offd is ascending. */
      for (i=0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                          P_offd_j[i],
                                          new_num_cols_offd);
   }
   /* Pass 3: re-scan P_marker (still intact from Pass 1) to translate the
    * surviving columns to their global ids in the new column map.
    * NOTE(review): when P_offd_size == 0 this loop runs zero times, so the
    * unallocated P_marker/new_col_map_offd are never touched. */
   index = 0;
   for (i = 0; i < new_num_cols_offd; i++)
   {
      while (P_marker[index] == 0) index++;
      new_col_map_offd[i] = col_map_offd[index];
      index++;
   }
   if (P_offd_size) hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   if (new_num_cols_offd)
   {
      /* Swap in the compressed column map; the old one is owned by P. */
      hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(col_map_offd, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixColMapOffd(P) = new_col_map_offd;
      hypre_CSRMatrixNumCols(P_offd) = new_num_cols_offd;
   }
   /* The old communication package no longer matches the compressed map. */
   if (commpkg_P != NULL) hypre_MatvecCommPkgDestroy(commpkg_P);
   hypre_MatvecCommPkgCreate(P);
   return hypre_error_flag;
}
/*---------------------------------------------------------------------------
 * hypre_CreateC
 *
 * Builds the iteration matrix C = I - w * D^{-1} * A, where D is the
 * diagonal of A.  C has exactly the same sparsity pattern, row
 * partitioning, and off-processor column map as A.  When w == 0, each row
 * instead uses an adaptive weight equal to the l1-norm of that row (the
 * sum of absolute values over both the diag and offd parts).
 *
 * Row starts are shared with A (C does not own them).  Returns C.
 *--------------------------------------------------------------------------*/
hypre_ParCSRMatrix *hypre_CreateC( hypre_ParCSRMatrix *A,
                                   HYPRE_Real w)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);

   /* CSR pieces of A */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int  *A_diag_i    = hypre_CSRMatrixI(A_diag);
   HYPRE_Int  *A_diag_j    = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int  *A_offd_i    = hypre_CSRMatrixI(A_offd);
   HYPRE_Int  *A_offd_j    = hypre_CSRMatrixJ(A_offd);

   HYPRE_BigInt *row_starts      = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_BigInt *col_map_offd_A  = hypre_ParCSRMatrixColMapOffd(A);
   HYPRE_Int     num_rows        = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int     num_cols_offd   = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt  global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);

   hypre_ParCSRMatrix *C;
   hypre_CSRMatrix    *C_diag;
   hypre_CSRMatrix    *C_offd;
   HYPRE_Real   *C_diag_data;
   HYPRE_Int    *C_diag_i;
   HYPRE_Int    *C_diag_j;
   HYPRE_Real   *C_offd_data;
   HYPRE_Int    *C_offd_i;
   HYPRE_Int    *C_offd_j;
   HYPRE_BigInt *col_map_offd_C;

   HYPRE_Int  row, k, dpos;
   HYPRE_Real scale;      /* multiplier for the off-diagonal entries of the row */
   HYPRE_Real omega = w;  /* effective weight used for the current row */

   /* C inherits the full pattern and partitioning of A */
   C = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_rows, row_starts,
                                row_starts, num_cols_offd, A_diag_i[num_rows], A_offd_i[num_rows]);
   hypre_ParCSRMatrixInitialize(C);

   C_diag      = hypre_ParCSRMatrixDiag(C);
   C_offd      = hypre_ParCSRMatrixOffd(C);
   C_diag_i    = hypre_CSRMatrixI(C_diag);
   C_diag_j    = hypre_CSRMatrixJ(C_diag);
   C_diag_data = hypre_CSRMatrixData(C_diag);
   C_offd_i    = hypre_CSRMatrixI(C_offd);
   C_offd_j    = hypre_CSRMatrixJ(C_offd);
   C_offd_data = hypre_CSRMatrixData(C_offd);
   col_map_offd_C = hypre_ParCSRMatrixColMapOffd(C);

   /* row/col starts belong to A; C must not free them */
   hypre_ParCSRMatrixOwnsRowStarts(C) = 0;
   hypre_ParCSRMatrixOwnsColStarts(C) = 0;

   /* same off-processor columns as A */
   for (k = 0; k < num_cols_offd; k++)
   {
      col_map_offd_C[k] = col_map_offd_A[k];
   }

   for (row = 0; row < num_rows; row++)
   {
      /* the diagonal entry of each row is stored first in A_diag */
      dpos = A_diag_i[row];
      C_diag_j[dpos] = A_diag_j[dpos];

      if (w == 0)
      {
         /* adaptive weight: l1-norm of the entire row of A */
         omega = fabs(A_diag_data[dpos]);
         for (k = dpos + 1; k < A_diag_i[row+1]; k++)
         {
            omega += fabs(A_diag_data[k]);
         }
         for (k = A_offd_i[row]; k < A_offd_i[row+1]; k++)
         {
            omega += fabs(A_offd_data[k]);
         }
         scale = -1/omega;
         C_diag_data[dpos] = 1.0 - A_diag_data[dpos]/omega;
      }
      else
      {
         scale = -w/A_diag_data[dpos];
         C_diag_data[dpos] = 1.0 - w;
      }

      C_diag_i[row] = dpos;
      C_offd_i[row] = A_offd_i[row];

      /* scale the remaining (off-diagonal) entries of the diag part */
      for (k = dpos + 1; k < A_diag_i[row+1]; k++)
      {
         C_diag_data[k] = A_diag_data[k]*scale;
         C_diag_j[k] = A_diag_j[k];
      }
      /* scale the offd part of the row */
      for (k = A_offd_i[row]; k < A_offd_i[row+1]; k++)
      {
         C_offd_data[k] = A_offd_data[k]*scale;
         C_offd_j[k] = A_offd_j[k];
      }
   }

   C_diag_i[num_rows] = A_diag_i[num_rows];
   C_offd_i[num_rows] = A_offd_i[num_rows];
   return C;
}
/* RL */
HYPRE_Int
hypre_BoomerAMGBuildInterpOnePnt( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
//HYPRE_Int *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
/* csr's */
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
/* arrays */
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int num_cols_offd_P;
HYPRE_Int *tmp_map_offd = NULL;
HYPRE_BigInt *col_map_offd_P = NULL;
/* CF marker off-diag part */
HYPRE_Int *CF_marker_offd = NULL;
/* func type off-diag part */
HYPRE_Int *dof_func_offd = NULL;
/* nnz */
HYPRE_Int nnz_diag, nnz_offd, cnt_diag, cnt_offd;
HYPRE_Int *marker_diag, *marker_offd = NULL;
/* local size */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
/* number of C-pts */
HYPRE_Int n_cpts = 0;
/* fine to coarse mapping: diag part and offd part */
HYPRE_Int *fine_to_coarse;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_BigInt total_global_cpts, my_first_cpt;
HYPRE_Int my_id, num_procs;
HYPRE_Int num_sends;
HYPRE_Int *int_buf_data = NULL;
HYPRE_BigInt *big_int_buf_data = NULL;
//HYPRE_Int col_start = hypre_ParCSRMatrixFirstRowIndex(A);
//HYPRE_Int col_end = col_start + n_fine;
HYPRE_Int i, j, i1, j1, k1, index, start;
HYPRE_Int *max_abs_cij;
char *max_abs_diag_offd;
HYPRE_Real max_abs_aij, vv;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
/* CF marker for the off-diag columns */
if (num_cols_A_offd)
{
CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd,HYPRE_MEMORY_HOST);
}
/* function type indicator for the off-diag columns */
if (num_functions > 1 && num_cols_A_offd)
{
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd,HYPRE_MEMORY_HOST);
}
/* if CommPkg of A is not present, create it */
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* number of sends to do (number of procs) */
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
/* send buffer, of size send_map_starts[num_sends]),
* i.e., number of entries to send */
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),HYPRE_MEMORY_HOST);
/* copy CF markers of elements to send to buffer
* RL: why copy them with two for loops? Why not just loop through all in one */
index = 0;
for (i = 0; i < num_sends; i++)
{
/* start pos of elements sent to send_proc[i] */
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
/* loop through all elems to send_proc[i] */
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
/* CF marker of send_map_elemts[j] */
int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
/* create a handle to start communication. 11: for integer */
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd);
/* destroy the handle to finish communication */
hypre_ParCSRCommHandleDestroy(comm_handle);
/* do a similar communication for dof_func */
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
hypre_TFree(int_buf_data,HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping,
* and find the most strongly influencing C-pt for each F-pt
*-----------------------------------------------------------------------*/
/* nnz in diag and offd parts */
cnt_diag = 0;
cnt_offd = 0;
max_abs_cij = hypre_CTAlloc(HYPRE_Int, n_fine,HYPRE_MEMORY_HOST);
max_abs_diag_offd = hypre_CTAlloc(char, n_fine,HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine,HYPRE_MEMORY_HOST);
/* markers initialized as zeros */
marker_diag = hypre_CTAlloc(HYPRE_Int, n_fine,HYPRE_MEMORY_HOST);
marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd,HYPRE_MEMORY_HOST);
for (i = 0; i < n_fine; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
//fine_to_coarse[i] = my_first_cpt + n_cpts;
fine_to_coarse[i] = n_cpts;
n_cpts++;
continue;
}
/* mark all the strong connections: in S */
HYPRE_Int MARK = i + 1;
/* loop through row i of S, diag part */
for (j = S_diag_i[i]; j < S_diag_i[i+1]; j++)
{
marker_diag[S_diag_j[j]] = MARK;
}
/* loop through row i of S, offd part */
if (num_procs > 1)
{
for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++)
{
j1 = S_offd_j[j];
marker_offd[j1] = MARK;
}
}
fine_to_coarse[i] = -1;
/*---------------------------------------------------------------------------
* If i is an F-pt, interpolation is from the most strongly influencing C-pt
* Find this C-pt and save it
*--------------------------------------------------------------------------*/
/* if we failed to find any strong C-pt, mark this point as an 'n' */
char marker = 'n';
/* max abs val */
max_abs_aij = -1.0;
/* loop through row i of A, diag part */
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
i1 = A_diag_j[j];
vv = fabs(A_diag_data[j]);
#if 0
/* !!! this is a hack just for code verification purpose !!!
it basically says:
1. if we see |a_ij| < 1e-14, force it to be 1e-14
2. if we see |a_ij| == the max(|a_ij|) so far exactly,
replace it if the j idx is smaller
Reasons:
1. numerical round-off for eps-level values
2. entries in CSR rows may be listed in different orders
*/
vv = vv < 1e-14 ? 1e-14 : vv;
if (CF_marker[i1] >= 0 && marker_diag[i1] == MARK &&
vv == max_abs_aij && i1 < max_abs_cij[i])
{
/* mark it as a 'd' */
marker = 'd';
max_abs_cij[i] = i1;
max_abs_aij = vv;
continue;
}
#endif
/* it is a strong C-pt and has abs val larger than what have seen */
if (CF_marker[i1] >= 0 && marker_diag[i1] == MARK && vv > max_abs_aij)
{
/* mark it as a 'd' */
marker = 'd';
max_abs_cij[i] = i1;
max_abs_aij = vv;
}
}
/* offd part */
if (num_procs > 1)
{
for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
i1 = A_offd_j[j];
vv = fabs(A_offd_data[j]);
if (CF_marker_offd[i1] >= 0 && marker_offd[i1] == MARK && vv > max_abs_aij)
{
/* mark it as an 'o' */
marker = 'o';
max_abs_cij[i] = i1;
max_abs_aij = vv;
}
}
}
max_abs_diag_offd[i] = marker;
if (marker == 'd')
{
cnt_diag ++;
}
else if (marker == 'o')
{
cnt_offd ++;
}
}
nnz_diag = cnt_diag + n_cpts;
nnz_offd = cnt_offd;
/*------------- allocate arrays */
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1,HYPRE_MEMORY_HOST);
P_diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag,HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, nnz_diag,HYPRE_MEMORY_HOST);
/* not in ``if num_procs > 1'',
* allocation needed even for empty CSR */
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1,HYPRE_MEMORY_HOST);
P_offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd,HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, nnz_offd,HYPRE_MEMORY_HOST);
/* redundant */
P_diag_i[0] = 0;
P_offd_i[0] = 0;
/* reset counters */
cnt_diag = 0;
cnt_offd = 0;
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd,HYPRE_MEMORY_HOST);
big_int_buf_data = hypre_CTAlloc(HYPRE_BigInt, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
big_int_buf_data[index++] = my_first_cpt
+(HYPRE_BigInt)fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate(21, comm_pkg, big_int_buf_data, fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
/*-----------------------------------------------------------------------
* Second Pass: Populate P
*-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
if (CF_marker[i] >= 0)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity.
*--------------------------------------------------------------------*/
//P_diag_j[cnt_diag] = fine_to_coarse[i] - my_first_cpt;
P_diag_j[cnt_diag] = fine_to_coarse[i];
P_diag_data[cnt_diag++] = 1.0;
}
else
{
/*---------------------------------------------------------------------------
* If i is an F-pt, interpolation is from the most strongly influencing C-pt
*--------------------------------------------------------------------------*/
if (max_abs_diag_offd[i] == 'd')
{
/* on diag part of P */
j = max_abs_cij[i];
//P_diag_j[cnt_diag] = fine_to_coarse[j] - my_first_cpt;
P_diag_j[cnt_diag] = fine_to_coarse[j];
P_diag_data[cnt_diag++] = 1.0;
}
else if (max_abs_diag_offd[i] == 'o')
{
/* on offd part of P */
j = max_abs_cij[i];
P_offd_j[cnt_offd] = j;
P_offd_data[cnt_offd++] = 1.0;
}
}
P_diag_i[i+1] = cnt_diag;
P_offd_i[i+1] = cnt_offd;
}
hypre_assert(cnt_diag == nnz_diag);
hypre_assert(cnt_offd == nnz_offd);
/* num of cols in the offd part of P */
num_cols_offd_P = 0;
/* marker_offd: all -1 */
for (i = 0; i < num_cols_A_offd; i++)
{
marker_offd[i] = -1;
}
for (i = 0; i < nnz_offd; i++)
{
i1 = P_offd_j[i];
if (marker_offd[i1] == -1)
{
num_cols_offd_P++;
marker_offd[i1] = 1;
}
}
/* col_map_offd_P: the col indices of the offd of P
* we first keep them be the offd-idx of A */
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_P,HYPRE_MEMORY_HOST);
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_P,HYPRE_MEMORY_HOST);
for (i = 0, i1 = 0; i < num_cols_A_offd; i++)
{
if (marker_offd[i] == 1)
{
tmp_map_offd[i1++] = i;
}
}
hypre_assert(i1 == num_cols_offd_P);
/* now, adjust P_offd_j to local idx w.r.t col_map_offd_R
* by searching */
for (i = 0; i < nnz_offd; i++)
{
i1 = P_offd_j[i];
k1 = hypre_BinarySearch(tmp_map_offd, i1, num_cols_offd_P);
/* search must succeed */
hypre_assert(k1 >= 0 && k1 < num_cols_offd_P);
P_offd_j[i] = k1;
}
/* change col_map_offd_P to global coarse ids */
for (i = 0; i < num_cols_offd_P; i++)
{
col_map_offd_P[i] = fine_to_coarse_offd[tmp_map_offd[i]];
}
/* Now, we should have everything of Parcsr matrix P */
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumCols(A), /* global num of rows */
total_global_cpts, /* global num of cols */
hypre_ParCSRMatrixColStarts(A), /* row_starts */
num_cpts_global, /* col_starts */
num_cols_offd_P, /* num cols offd */
nnz_diag,
nnz_offd);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
/* P does not own ColStarts, since A does */
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
/* create CommPkg of P */
hypre_MatvecCommPkgCreate(P);
*P_ptr = P;
/* free workspace */
hypre_TFree(CF_marker_offd,HYPRE_MEMORY_HOST);
hypre_TFree(dof_func_offd,HYPRE_MEMORY_HOST);
hypre_TFree(tmp_map_offd,HYPRE_MEMORY_HOST);
hypre_TFree(big_int_buf_data,HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse,HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse_offd,HYPRE_MEMORY_HOST);
hypre_TFree(marker_diag,HYPRE_MEMORY_HOST);
hypre_TFree(marker_offd,HYPRE_MEMORY_HOST);
hypre_TFree(max_abs_cij,HYPRE_MEMORY_HOST);
hypre_TFree(max_abs_diag_offd,HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
|
ParallelHelper.h | /**
* This file contains (modified) code from the Eigen library.
* Eigen License:
*
* Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
* Copyright (C) 2007-2011 Benoit Jacob <jacob.benoit.1@gmail.com>
*
* This Source Code Form is subject to the terms of the Mozilla
* Public License v. 2.0. If a copy of the MPL was not distributed
* with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
*
*
* ======================
*
* The modifications are part of the Eigen Recursive Matrix Extension (ERME).
* ERME License:
*
* Copyright (c) 2019 Darius Rückert
* Licensed under the MIT License.
*/
#pragma once
#include "MatrixScalar.h"
#include <numeric>
namespace Eigen
{
namespace Recursive
{
// Sums the squared norms of all blocks of the recursive vector 'v' into
// 'result'.  Each entry v(i) is a MatrixScalar wrapper; .get() unwraps the
// inner matrix whose squaredNorm() contributes to the sum.
//
// NOTE(review): this uses an orphaned '#pragma omp for', so it must be
// called from inside an enclosing OpenMP parallel region.  Every thread
// executes 'result = 0' and then accumulates into 'result' with no
// reduction/atomic visible here -- presumably each thread passes its own
// (thread-private) 'result' and the caller combines them; TODO confirm
// against call sites.
template <typename T, typename T2>
inline void squaredNorm_omp_local(const T& v, T2& result)
{
// using Scalar = typename BaseScalar<T>::type;
result = 0;
#pragma omp for
for (int i = 0; i < v.rows(); ++i)
{
result += v(i).get().squaredNorm();
}
}
// Computes the dot product of the recursive vectors 'a' and 'b' into
// 'result', summing the per-block dot products a(i).get().dot(b(i).get()).
//
// NOTE(review): like squaredNorm_omp_local, this is an orphaned
// '#pragma omp for' meant to run inside an existing parallel region, and
// 'result' is zeroed and accumulated by every thread without a reduction
// clause -- assumes 'result' is thread-private per caller; TODO confirm.
template <typename T, typename T2>
inline void dot_omp_local(const T& a, const T& b, T2& result)
{
// using Scalar = typename BaseScalar<T>::type;
result = 0;
#pragma omp for
for (int i = 0; i < a.rows(); ++i)
{
result += a(i).get().dot(b(i).get());
}
}
// Sparse matrix * dense vector product, res = lhs * rhs, for matrices of
// recursive (MatrixScalar-wrapped) blocks, parallelized over the outer
// dimension with an orphaned '#pragma omp for' (callers must already be
// inside a parallel region).
//
// Each iteration i owns row/outer-slot i of 'res' exclusively: it zeroes
// res(i) and accumulates the inner products of lhs's i-th outer vector
// with the corresponding rhs blocks, so no synchronization on 'res' is
// needed.  Adapted from Eigen's sparse_time_dense_product_impl.
//
// NOTE(review): iterating LhsInnerIterator over outer index i treats rows
// of a row-major lhs (or columns of a col-major lhs) -- correctness
// assumes lhs is row-major here; TODO confirm the instantiation.
template <typename SparseLhsType, typename DenseRhsType, typename DenseResType>
inline void sparse_mv_omp(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res)
{
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef Eigen::internal::evaluator<Lhs> LhsEval;
typedef typename Eigen::internal::evaluator<Lhs>::InnerIterator LhsInnerIterator;
//#pragma omp single
{
LhsEval lhsEval(lhs);
Index n = lhs.outerSize();
// for (Index c = 0; c < rhs.cols(); ++c)
{
#pragma omp for
for (Index i = 0; i < n; ++i)
{
// each thread exclusively owns res(i): zero it, then accumulate
res.coeffRef(i).get().setZero();
for (LhsInnerIterator it(lhs, i); it; ++it)
{
auto& vlhs = it.value().get();
auto& vrhs = rhs.coeff(it.index()).get();
res.coeffRef(i).get() += vlhs * vrhs;
}
}
}
}
}
} // namespace Recursive
} // namespace Eigen
|
GB_binop__plus_fc64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__plus_fc64
// A.*B function (eWiseMult): GB_AemultB__plus_fc64
// A*D function (colscale): GB_AxD__plus_fc64
// D*A function (rowscale): GB_DxB__plus_fc64
// C+=B function (dense accum): GB_Cdense_accumB__plus_fc64
// C+=b function (dense accum): GB_Cdense_accumb__plus_fc64
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__plus_fc64
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__plus_fc64
// C=scalar+B GB_bind1st__plus_fc64
// C=scalar+B' GB_bind1st_tran__plus_fc64
// C=A+scalar GB_bind2nd__plus_fc64
// C=A'+scalar GB_bind2nd_tran__plus_fc64
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// B,b type: GxB_FC64_t
// BinaryOp: cij = GB_FC64_add (aij, bij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
GxB_FC64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_FC64_add (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_FC64 || GxB_NO_PLUS_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense, using the PLUS_FC64 binary
// op.  The whole body lives in the included template, driven by the GB_*
// macros defined above.  Note: unlike the other kernels in this file there
// is no GB_DISABLE guard here; the caller is responsible for not invoking
// this kernel when the operator is disabled.
void GB_Cdense_ewise3_accum__plus_fc64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, using the PLUS_FC64 binary
// op.  Returns GrB_NO_VALUE when this hard-coded kernel is compiled out
// (GB_DISABLE), signalling the caller to fall back to the generic path.
GrB_Info GB_Cdense_ewise3_noaccum__plus_fc64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with the
// PLUS_FC64 op.  The *_slice arrays and ntasks describe the precomputed
// parallel partition of B's entries (ek-slice scheme); the template body
// consumes them.  Returns GrB_NO_VALUE if compiled out (GB_DISABLE).
GrB_Info GB_Cdense_accumB__plus_fc64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C with the PLUS_FC64
// op.  p_bwork points at the scalar, passed as GB_void* and cast to the
// operator's type here.  Returns GrB_NO_VALUE if compiled out
// (GB_DISABLE), signalling the caller to use the generic kernel.
//
// Fix: the original returned GrB_SUCCESS inside the inner block AND again
// after it; the second return was unreachable.  Collapsed to the single
// exit after the template, matching GB_Cdense_accumB above.
GrB_Info GB_Cdense_accumb__plus_fc64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC64_t
GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, applying the
// PLUS_FC64 op entrywise.  C has the same pattern as A; only C->x is
// written here, via the colscale template.  The *_is_pattern flags tell
// the template whether the values of A or D are needed at all.
GrB_Info GB_AxD__plus_fc64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx aliases C's value array; the template fills it in place
GxB_FC64_t *GB_RESTRICT Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, applying the
// PLUS_FC64 op entrywise.  Mirror image of GB_AxD above; only C->x is
// written, via the rowscale template.
GrB_Info GB_DxB__plus_fc64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx aliases C's value array; the template fills it in place
GxB_FC64_t *GB_RESTRICT Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B with the PLUS_FC64 op.  The pattern of
// C is the set union of A and B; where only one matrix has an entry, its
// value is copied through.  TaskList/C_ntasks describe the precomputed
// parallel schedule; the C_to_* arrays map C's vectors to those of M, A,
// and B.  The three *slice pointer triples are workspace the template may
// allocate; GB_FREE_ALL (defined just above) releases them on exit.
GrB_Info GB_AaddB__plus_fc64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with the PLUS_FC64 op.  The pattern
// of C is the set intersection of A and B; the op is applied only where
// both have entries.  Same task-list / slice-workspace conventions as
// GB_AaddB above; GB_FREE_ALL releases the slice workspace on exit.
GrB_Info GB_AemultB__plus_fc64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the PLUS_FC64 operator with the scalar x bound
// as the first argument, over every entry of B present in the bitmap Bb
// (Bb == NULL means all entries present).  Cx and Bx may alias; each
// position is read before it is written, so aliasing is safe.
GrB_Info GB_bind1st__plus_fc64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
    GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // apply the op only where the bitmap says B has an entry
        if (GBB (Bb, p))
        {
            Cx [p] = GB_FC64_add (x, Bx [p]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the PLUS_FC64 operator with the scalar y bound
// as the second argument, over every entry of A present in the bitmap Ab
// (Ab == NULL means all entries present).  Cx and Ax may alias; each
// position is read before it is written, so aliasing is safe.
GrB_Info GB_bind2nd__plus_fc64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
    GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // apply the op only where the bitmap says A has an entry
        if (GBB (Ab, p))
        {
            Cx [p] = GB_FC64_add (Ax [p], y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = Ax [pA] ; \
Cx [pC] = GB_FC64_add (x, aij) ; \
}
// C = op (x, A'): transpose A and apply the PLUS_FC64 operator with the
// scalar x bound first, via the GB_CAST_OP macro defined just above and
// the generic transpose template.  GB_ATYPE is temporarily redefined
// because the transpose template reads A through GB_ATYPE, but here A is
// the second operand of z=f(x,y); it is restored after the #include so
// later code in this file sees the original definition.
GrB_Info GB_bind1st_tran__plus_fc64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the remainder of the file (preprocessor-only;
// no executable code follows the returns above)
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = Ax [pA] ; \
Cx [pC] = GB_FC64_add (aij, y) ; \
}
// C = op (A', y): transpose A and apply the PLUS_FC64 operator with the
// scalar y bound second, via the GB_CAST_OP macro defined just above and
// the generic transpose template.  No GB_ATYPE juggling is needed here
// since A is already the first operand of z=f(x,y).
GrB_Info GB_bind2nd_tran__plus_fc64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
profile.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP RRRR OOO FFFFF IIIII L EEEEE %
% P P R R O O F I L E %
% PPPP RRRR O O FFF I L EEE %
% P R R O O F I L E %
% P R R OOO F IIIII LLLLL EEEEE %
% %
% %
% MagickCore Image Profile Methods %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/cache.h"
#include "magick/color.h"
#include "magick/colorspace-private.h"
#include "magick/configure.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/utility.h"
#if defined(MAGICKCORE_LCMS_DELEGATE)
#if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H)
#include <wchar.h>
#include <lcms/lcms2.h>
#elif defined(MAGICKCORE_HAVE_LCMS2_H)
#include <wchar.h>
#include "lcms2.h"
#elif defined(MAGICKCORE_HAVE_LCMS_LCMS_H)
#include <lcms/lcms.h>
#else
#include "lcms.h"
#endif
#endif
/*
Define declarations.
*/
#if !defined(LCMS_VERSION) || (LCMS_VERSION < 2000)
#define cmsSigCmykData icSigCmykData
#define cmsSigGrayData icSigGrayData
#define cmsSigLabData icSigLabData
#define cmsSigLuvData icSigLuvData
#define cmsSigRgbData icSigRgbData
#define cmsSigXYZData icSigXYZData
#define cmsSigYCbCrData icSigYCbCrData
#define cmsSigLinkClass icSigLinkClass
#define cmsColorSpaceSignature icColorSpaceSignature
#define cmsUInt32Number DWORD
#define cmsSetLogErrorHandler(handler) cmsSetErrorHandler(handler)
#define cmsCreateTransformTHR(context,source_profile,source_type, \
target_profile,target_type,intent,flags) cmsCreateTransform(source_profile, \
source_type,target_profile,target_type,intent,flags);
#define cmsOpenProfileFromMemTHR(context,profile,length) \
cmsOpenProfileFromMem(profile,length)
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageProfiles() clones one or more image profiles.
%
% The format of the CloneImageProfiles method is:
%
% MagickBooleanType CloneImageProfiles(Image *image,
% const Image *clone_image)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone_image: the clone image.
%
*/
/*
  CloneImageProfiles() copies the profile map of clone_image into image.
  The splay tree of generic profiles is deep-cloned (keys via
  ConstantString, values via CloneStringInfo), but note that the
  deprecated color_profile/iptc_profile members only copy the info
  POINTERS, so image and clone_image share those buffers afterwards --
  presumably ownership stays with the splay tree entries; TODO(review)
  confirm no double free is possible through the deprecated members.
  Always returns MagickTrue.
*/
MagickExport MagickBooleanType CloneImageProfiles(Image *image,
const Image *clone_image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(clone_image != (const Image *) NULL);
assert(clone_image->signature == MagickSignature);
/* shallow copy of the deprecated direct profile members (see note above) */
image->color_profile.length=clone_image->color_profile.length;
image->color_profile.info=clone_image->color_profile.info;
image->iptc_profile.length=clone_image->iptc_profile.length;
image->iptc_profile.info=clone_image->iptc_profile.info;
if (clone_image->profiles != (void *) NULL)
image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles,
(void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e l e t e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeleteImageProfile() deletes a profile from the image by its name.
%
% The format of the DeleteImageProfile method is:
%
% MagickBooleanTyupe DeleteImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
/*
  DeleteImageProfile() removes the named profile from the image's profile
  map, returning the result of the splay-tree deletion (MagickFalse when
  the image has no profile map at all).  The deprecated direct members
  color_profile ("icc") and iptc_profile ("iptc") are cleared alongside
  the map entry to keep them in sync.
*/
MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return(MagickFalse);
  /*
    Keep the deprecated direct profile members in sync ("icc" and "iptc"
    cannot both match, so an else-if chain suffices).
  */
  if (LocaleCompare(name,"icc") == 0)
    {
      image->color_profile.length=0;
      image->color_profile.info=(unsigned char *) NULL;
    }
  else
    if (LocaleCompare(name,"iptc") == 0)
      {
        image->iptc_profile.length=0;
        image->iptc_profile.info=(unsigned char *) NULL;
      }
  return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageProfiles() releases memory associated with an image profile map.
%
% The format of the DestroyProfiles method is:
%
% void DestroyImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  DestroyImageProfiles() releases the image's profile splay tree (if any)
  and resets the pointer to NULL via DestroySplayTree's return value.
*/
MagickExport void DestroyImageProfiles(Image *image)
{
  if (image->profiles == (SplayTreeInfo *) NULL)
    return;
  image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageProfile() gets a profile associated with an image by name.
%
% The format of the GetImageProfile method is:
%
% const StringInfo *GetImageProfile(const Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
/*
  GetImageProfile() looks up a profile by name in the image's profile map
  and returns it, or NULL when the image has no profile map or no profile
  under that name.  The name is first copied into a bounded key buffer
  (truncated at MaxTextExtent, matching the keys stored by SetImageProfile).
*/
MagickExport const StringInfo *GetImageProfile(const Image *image,
const char *name)
{
  char
    key[MaxTextExtent];

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  (void) CopyMagickString(key,name,MaxTextExtent);
  return((const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
    image->profiles,key));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t N e x t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNextImageProfile() gets the next profile name for an image.
%
% The format of the GetNextImageProfile method is:
%
% char *GetNextImageProfile(const Image *image)
%
% A description of each parameter follows:
%
% o hash_info: the hash info.
%
*/
/*
  GetNextImageProfile() returns the next profile name in the image's
  profile map iteration (advanced by the splay tree's internal cursor),
  or NULL when the image has no profile map or iteration is exhausted.
  Use ResetImageProfileIterator() to restart the traversal.
*/
MagickExport char *GetNextImageProfile(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles != (SplayTreeInfo *) NULL)
    return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles));
  return((char *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P r o f i l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ProfileImage() associates, applies, or removes an ICM, IPTC, or generic
% profile with / to / from an image. If the profile is NULL, it is removed
% from the image otherwise added or applied. Use a name of '*' and a profile
% of NULL to remove all profiles from the image.
%
% ICC and ICM profiles are handled as follows: If the image does not have
% an associated color profile, the one you provide is associated with the
% image and the image pixels are not transformed. Otherwise, the colorspace
% transform defined by the existing and new profile are applied to the image
% pixels and the new profile is associated with the image.
%
% The format of the ProfileImage method is:
%
% MagickBooleanType ProfileImage(Image *image,const char *name,
% const void *datum,const size_t length,const MagickBooleanType clone)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: Name of profile to add or remove: ICC, IPTC, or generic profile.
%
% o datum: the profile data.
%
% o length: the length of the profile.
%
% o clone: should be MagickFalse.
%
*/
#if defined(MAGICKCORE_LCMS_DELEGATE)
/*
  DestroyPixelThreadSet() releases the per-thread pixel buffers allocated
  by AcquirePixelThreadSet() and then the pointer array itself.  Safe on
  a partially-filled set: unallocated slots are NULL (the array is zeroed
  on acquisition) and are skipped.  Always returns NULL for convenient
  assignment back to the caller's pointer.
*/
static unsigned short **DestroyPixelThreadSet(unsigned short **pixels)
{
register ssize_t
i;

assert(pixels != (unsigned short **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (pixels[i] != (unsigned short *) NULL)
pixels[i]=(unsigned short *) RelinquishMagickMemory(pixels[i]);
pixels=(unsigned short **) RelinquishMagickMemory(pixels);
return(pixels);
}
/*
  AcquirePixelThreadSet() allocates one pixel scanline buffer per worker
  thread (columns*channels unsigned shorts each), so each OpenMP thread in
  the color transform has private scratch space.  The pointer array is
  zeroed before the per-thread allocations, which lets
  DestroyPixelThreadSet() clean up correctly if an allocation fails
  midway.  Returns the array, or NULL on any allocation failure (with all
  partial allocations released).
*/
static unsigned short **AcquirePixelThreadSet(const size_t columns,
const size_t channels)
{
register ssize_t
i;

unsigned short
**pixels;

size_t
number_threads;

number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixels=(unsigned short **) AcquireQuantumMemory(number_threads,
sizeof(*pixels));
if (pixels == (unsigned short **) NULL)
return((unsigned short **) NULL);
/* zero the slots so a partial-failure cleanup only frees what exists */
(void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixels[i]=(unsigned short *) AcquireQuantumMemory(columns,channels*
sizeof(**pixels));
if (pixels[i] == (unsigned short *) NULL)
return(DestroyPixelThreadSet(pixels));
}
return(pixels);
}
/*
  DestroyTransformThreadSet() deletes the per-thread LCMS color transforms
  created by AcquireTransformThreadSet() and releases the pointer array.
  Safe on a partially-filled set (NULL slots are skipped).  Always returns
  NULL for convenient assignment back to the caller's pointer.
*/
static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform)
{
register ssize_t
i;

assert(transform != (cmsHTRANSFORM *) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (transform[i] != (cmsHTRANSFORM) NULL)
cmsDeleteTransform(transform[i]);
transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform);
return(transform);
}
/*
  AcquireTransformThreadSet() creates one LCMS color transform per worker
  thread, since a cmsHTRANSFORM is not safe to share across threads.  The
  image is passed as the LCMS context so LCMSExceptionHandler() can route
  errors back to the image's exception.  The array is zeroed before the
  per-thread creations so DestroyTransformThreadSet() can clean up after a
  midway failure.  Returns the array, or NULL on any failure (with all
  partial transforms deleted).
*/
static cmsHTRANSFORM *AcquireTransformThreadSet(Image *image,
const cmsHPROFILE source_profile,const cmsUInt32Number source_type,
const cmsHPROFILE target_profile,const cmsUInt32Number target_type,
const int intent,const cmsUInt32Number flags)
{
cmsHTRANSFORM
*transform;

register ssize_t
i;

size_t
number_threads;

number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads,
sizeof(*transform));
if (transform == (cmsHTRANSFORM *) NULL)
return((cmsHTRANSFORM *) NULL);
/* zero the slots so a partial-failure cleanup only deletes what exists */
(void) ResetMagickMemory(transform,0,number_threads*sizeof(*transform));
for (i=0; i < (ssize_t) number_threads; i++)
{
transform[i]=cmsCreateTransformTHR(image,source_profile,source_type,
target_profile,target_type,intent,flags);
if (transform[i] == (cmsHTRANSFORM) NULL)
return(DestroyTransformThreadSet(transform));
}
return(transform);
}
#endif
#if defined(MAGICKCORE_LCMS_DELEGATE)
#if defined(LCMS_VERSION) && (LCMS_VERSION >= 2000)
/*
  lcms (>= 2.0) error callback: log the lcms diagnostic and, when a
  cmsContext was supplied, record a transform warning on the image's
  exception.  The context is the Image pointer passed to the *THR() lcms
  entry points (see cmsOpenProfileFromMemTHR/cmsCreateTransformTHR callers).
*/
static void LCMSExceptionHandler(cmsContext context,cmsUInt32Number severity,
  const char *message)
{
  Image
    *image;

  (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s",
    severity,message != (char *) NULL ? message : "no message");
  image=(Image *) context;
  if (image != (Image *) NULL)
    (void) ThrowMagickException(&image->exception,GetMagickModule(),
      ImageWarning,"UnableToTransformColorspace","`%s'",image->filename);
}
#else
/*
  lcms (< 2.0) error callback: log the diagnostic only.  Returning
  non-zero tells lcms the error was handled (presumably suppressing its
  default abort behavior -- confirm against the lcms1 docs).
*/
static int LCMSExceptionHandler(int severity,const char *message)
{
  (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%d, %s",
    severity,message != (char *) NULL ? message : "no message");
  return(1);
}
#endif
#endif
MagickExport MagickBooleanType ProfileImage(Image *image,const char *name,
  const void *datum,const size_t length,
  const MagickBooleanType magick_unused(clone))
{
#define ProfileImageTag  "Profile/Image"
/* Close any lcms profile handles opened so far before throwing. */
#define ThrowProfileException(severity,tag,context) \
{ \
  if (source_profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(source_profile); \
  if (target_profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(target_profile); \
  ThrowBinaryException(severity,tag,context); \
}

  MagickBooleanType
    status;

  StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(name != (const char *) NULL);
  if ((datum == (const void *) NULL) || (length == 0))
    {
      char
        **arguments,
        *names;

      int
        number_arguments;

      register ssize_t
        i;

      /*
        Delete image profile(s).  "name" is a comma-separated list of glob
        patterns; a pattern prefixed with '!' protects exactly-matching
        profile names from deletion.
      */
      names=ConstantString(name);
      (void) SubstituteString(&names,","," ");
      arguments=StringToArgv(names,&number_arguments);
      names=DestroyString(names);
      if (arguments == (char **) NULL)
        return(MagickTrue);
      ResetImageProfileIterator(image);
      for (name=GetNextImageProfile(image); name != (const char *) NULL; )
      {
        for (i=1; i < (ssize_t) number_arguments; i++)
        {
          /* '!' exclusion: stop scanning patterns, keeping this profile. */
          if ((*arguments[i] == '!') &&
              (LocaleCompare(name,arguments[i]+1) == 0))
            break;
          if (GlobExpression(name,arguments[i],MagickTrue) != MagickFalse)
            {
              /* Deleting invalidates the iterator; restart it. */
              (void) DeleteImageProfile(image,name);
              ResetImageProfileIterator(image);
              break;
            }
        }
        name=GetNextImageProfile(image);
      }
      for (i=0; i < (ssize_t) number_arguments; i++)
        arguments[i]=DestroyString(arguments[i]);
      arguments=(char **) RelinquishMagickMemory(arguments);
      return(MagickTrue);
    }
  /*
    Add an ICC, IPTC, or generic profile to the image.
  */
  status=MagickTrue;
  profile=AcquireStringInfo((size_t) length);
  SetStringInfoDatum(profile,(unsigned char *) datum);
  /* Non-color profiles are simply attached; ICC/ICM profiles may trigger a
     CMS pixel transform below. */
  if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
    status=SetImageProfile(image,name,profile);
  else
    {
      const StringInfo
        *icc_profile;

      icc_profile=GetImageProfile(image,"icc");
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          const char
            *value;

          value=GetImageProperty(image,"exif:ColorSpace");
          (void) value;
          /* Future.
          if (LocaleCompare(value,"1") != 0)
            (void) SetsRGBImageProfile(image);
          value=GetImageProperty(image,"exif:InteroperabilityIndex");
          if (LocaleCompare(value,"R98.") != 0)
            (void) SetsRGBImageProfile(image);
          value=GetImageProperty(image,"exif:InteroperabilityIndex");
          if (LocaleCompare(value,"R03.") != 0)
            (void) SetAdobeRGB1998ImageProfile(image);
          */
          icc_profile=GetImageProfile(image,"icc");
        }
      /* If the incoming profile is identical to the attached one there is
         nothing to transform. */
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          profile=DestroyStringInfo(profile);
          return(MagickTrue);
        }
#if !defined(MAGICKCORE_LCMS_DELEGATE)
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (LCMS)",
        image->filename);
#else
      {
        cmsHPROFILE
          source_profile;

        /*
          Transform pixel colors as defined by the color profiles.
        */
        cmsSetLogErrorHandler(LCMSExceptionHandler);
        source_profile=cmsOpenProfileFromMemTHR(image,
          GetStringInfoDatum(profile),(cmsUInt32Number)
          GetStringInfoLength(profile));
        if (source_profile == (cmsHPROFILE) NULL)
          ThrowBinaryException(ResourceLimitError,
            "ColorspaceColorProfileMismatch",name);
        /* With no prior ICC profile (and no device-link profile) there is
           no source/target pair: just attach the profile. */
        if ((cmsGetDeviceClass(source_profile) != cmsSigLinkClass) &&
            (icc_profile == (StringInfo *) NULL))
          status=SetImageProfile(image,name,profile);
        else
          {
            CacheView
              *image_view;

            ColorspaceType
              source_colorspace,
              target_colorspace;

            cmsColorSpaceSignature
              signature;

            cmsHPROFILE
              target_profile;

            cmsHTRANSFORM
              *restrict transform;

            cmsUInt32Number
              flags,
              source_type,
              target_type;

            ExceptionInfo
              *exception;

            int
              intent;

            /* NOTE(review): this inner "status" shadows the function-level
               status; the outer value (returned at the end) stays MagickTrue
               on this path -- verify this is intentional. */
            MagickBooleanType
              status;

            MagickOffsetType
              progress;

            size_t
              source_channels,
              target_channels;

            ssize_t
              y;

            unsigned short
              **restrict source_pixels,
              **restrict target_pixels;

            exception=(&image->exception);
            target_profile=(cmsHPROFILE) NULL;
            /* When an ICC profile is already attached, transform FROM it
               (source) TO the newly supplied profile (target). */
            if (icc_profile != (StringInfo *) NULL)
              {
                target_profile=source_profile;
                source_profile=cmsOpenProfileFromMemTHR(image,
                  GetStringInfoDatum(icc_profile),(cmsUInt32Number)
                  GetStringInfoLength(icc_profile));
                if (source_profile == (cmsHPROFILE) NULL)
                  ThrowProfileException(ResourceLimitError,
                    "ColorspaceColorProfileMismatch",name);
              }
            /* Map the lcms source color space onto an ImageMagick
               colorspace, a 16-bit lcms buffer layout, and a channel count. */
            switch (cmsGetColorSpace(source_profile))
            {
              case cmsSigCmykData:
              {
                source_colorspace=CMYKColorspace;
                source_type=(cmsUInt32Number) TYPE_CMYK_16;
                source_channels=4;
                break;
              }
              case cmsSigGrayData:
              {
                source_colorspace=GRAYColorspace;
                source_type=(cmsUInt32Number) TYPE_GRAY_16;
                source_channels=1;
                break;
              }
              case cmsSigLabData:
              {
                source_colorspace=LabColorspace;
                source_type=(cmsUInt32Number) TYPE_Lab_16;
                source_channels=3;
                break;
              }
              case cmsSigLuvData:
              {
                source_colorspace=YUVColorspace;
                source_type=(cmsUInt32Number) TYPE_YUV_16;
                source_channels=3;
                break;
              }
              case cmsSigRgbData:
              {
                source_colorspace=sRGBColorspace;
                source_type=(cmsUInt32Number) TYPE_RGB_16;
                source_channels=3;
                break;
              }
              case cmsSigXYZData:
              {
                source_colorspace=XYZColorspace;
                source_type=(cmsUInt32Number) TYPE_XYZ_16;
                source_channels=3;
                break;
              }
              case cmsSigYCbCrData:
              {
                source_colorspace=YCbCrColorspace;
                source_type=(cmsUInt32Number) TYPE_YCbCr_16;
                source_channels=3;
                break;
              }
              default:
              {
                source_colorspace=UndefinedColorspace;
                source_type=(cmsUInt32Number) TYPE_RGB_16;
                source_channels=3;
                break;
              }
            }
            /* Without a target profile the target space is the source
               profile's profile-connection space (PCS). */
            signature=cmsGetPCS(source_profile);
            if (target_profile != (cmsHPROFILE) NULL)
              signature=cmsGetColorSpace(target_profile);
            switch (signature)
            {
              case cmsSigCmykData:
              {
                target_colorspace=CMYKColorspace;
                target_type=(cmsUInt32Number) TYPE_CMYK_16;
                target_channels=4;
                break;
              }
              case cmsSigLabData:
              {
                target_colorspace=LabColorspace;
                target_type=(cmsUInt32Number) TYPE_Lab_16;
                target_channels=3;
                break;
              }
              case cmsSigGrayData:
              {
                target_colorspace=GRAYColorspace;
                target_type=(cmsUInt32Number) TYPE_GRAY_16;
                target_channels=1;
                break;
              }
              case cmsSigLuvData:
              {
                target_colorspace=YUVColorspace;
                target_type=(cmsUInt32Number) TYPE_YUV_16;
                target_channels=3;
                break;
              }
              case cmsSigRgbData:
              {
                target_colorspace=sRGBColorspace;
                target_type=(cmsUInt32Number) TYPE_RGB_16;
                target_channels=3;
                break;
              }
              case cmsSigXYZData:
              {
                target_colorspace=XYZColorspace;
                target_type=(cmsUInt32Number) TYPE_XYZ_16;
                target_channels=3;
                break;
              }
              case cmsSigYCbCrData:
              {
                target_colorspace=YCbCrColorspace;
                target_type=(cmsUInt32Number) TYPE_YCbCr_16;
                target_channels=3;
                break;
              }
              default:
              {
                target_colorspace=UndefinedColorspace;
                target_type=(cmsUInt32Number) TYPE_RGB_16;
                target_channels=3;
                break;
              }
            }
            /*
              Sanity checks: the source profile's color space must match the
              image's current colorspace (or be sRGB-compatible).
            */
            if ((source_colorspace == UndefinedColorspace) ||
                (target_colorspace == UndefinedColorspace))
              ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
                name);
            if ((source_colorspace == GRAYColorspace) &&
                (IsGrayImage(image,exception) == MagickFalse))
              ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
                name);
            if ((source_colorspace == CMYKColorspace) &&
                (image->colorspace != CMYKColorspace))
              ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
                name);
            if ((source_colorspace == XYZColorspace) &&
                (image->colorspace != XYZColorspace))
              ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
                name);
            if ((source_colorspace == YCbCrColorspace) &&
                (image->colorspace != YCbCrColorspace))
              ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
                name);
            if ((source_colorspace != CMYKColorspace) &&
                (source_colorspace != GRAYColorspace) &&
                (source_colorspace != LabColorspace) &&
                (source_colorspace != XYZColorspace) &&
                (source_colorspace != YCbCrColorspace) &&
                (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse))
              ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
                name);
            /* Translate the image's rendering intent to the lcms constant. */
            switch (image->rendering_intent)
            {
              case AbsoluteIntent: intent=INTENT_ABSOLUTE_COLORIMETRIC; break;
              case PerceptualIntent: intent=INTENT_PERCEPTUAL; break;
              case RelativeIntent: intent=INTENT_RELATIVE_COLORIMETRIC; break;
              case SaturationIntent: intent=INTENT_SATURATION; break;
              default: intent=INTENT_PERCEPTUAL; break;
            }
            flags=cmsFLAGS_HIGHRESPRECALC;
#if defined(cmsFLAGS_BLACKPOINTCOMPENSATION)
            if (image->black_point_compensation != MagickFalse)
              flags|=cmsFLAGS_BLACKPOINTCOMPENSATION;
#endif
            transform=AcquireTransformThreadSet(image,source_profile,
              source_type,target_profile,target_type,intent,flags);
            if (transform == (cmsHTRANSFORM *) NULL)
              ThrowProfileException(ImageError,"UnableToCreateColorTransform",
                name);
            /*
              Transform image as dictated by the source & target image
              profiles: one staging row per thread in each direction.
            */
            source_pixels=AcquirePixelThreadSet(image->columns,source_channels);
            target_pixels=AcquirePixelThreadSet(image->columns,target_channels);
            if ((source_pixels == (unsigned short **) NULL) ||
                (target_pixels == (unsigned short **) NULL))
              {
                transform=DestroyTransformThreadSet(transform);
                ThrowProfileException(ResourceLimitError,
                  "MemoryAllocationFailed",image->filename);
              }
            if (SetImageStorageClass(image,DirectClass) == MagickFalse)
              {
                target_pixels=DestroyPixelThreadSet(target_pixels);
                source_pixels=DestroyPixelThreadSet(source_pixels);
                transform=DestroyTransformThreadSet(transform);
                if (source_profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(source_profile);
                if (target_profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(target_profile);
                return(MagickFalse);
              }
            /* CMYK output needs the index channel available before writes. */
            if (target_colorspace == CMYKColorspace)
              (void) SetImageColorspace(image,target_colorspace);
            status=MagickTrue;
            progress=0;
            image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp parallel for schedule(static,4) shared(status) \
              magick_threads(image,image,image->rows,1)
#endif
            for (y=0; y < (ssize_t) image->rows; y++)
            {
              const int
                id = GetOpenMPThreadId();

              MagickBooleanType
                sync;

              register IndexPacket
                *restrict indexes;

              register ssize_t
                x;

              register PixelPacket
                *restrict q;

              register unsigned short
                *p;

              if (status == MagickFalse)
                continue;
              q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
                exception);
              if (q == (PixelPacket *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              indexes=GetCacheViewAuthenticIndexQueue(image_view);
              /* Pack the row into this thread's 16-bit source buffer. */
              p=source_pixels[id];
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                *p++=ScaleQuantumToShort(GetPixelRed(q));
                if (source_channels > 1)
                  {
                    *p++=ScaleQuantumToShort(GetPixelGreen(q));
                    *p++=ScaleQuantumToShort(GetPixelBlue(q));
                  }
                if (source_channels > 3)
                  *p++=ScaleQuantumToShort(GetPixelIndex(indexes+x));
                q++;
              }
              cmsDoTransform(transform[id],source_pixels[id],target_pixels[id],
                (unsigned int) image->columns);
              /* Unpack the transformed buffer back over the same row. */
              p=target_pixels[id];
              q-=image->columns;
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                /* Mirror the first channel into green/blue so 1-channel
                   (gray) output fills RGB; overwritten when >1 channel. */
                SetPixelRed(q,ScaleShortToQuantum(*p));
                SetPixelGreen(q,GetPixelRed(q));
                SetPixelBlue(q,GetPixelRed(q));
                p++;
                if (target_channels > 1)
                  {
                    SetPixelGreen(q,ScaleShortToQuantum(*p));
                    p++;
                    SetPixelBlue(q,ScaleShortToQuantum(*p));
                    p++;
                  }
                if (target_channels > 3)
                  {
                    SetPixelIndex(indexes+x,ScaleShortToQuantum(*p));
                    p++;
                  }
                q++;
              }
              sync=SyncCacheViewAuthenticPixels(image_view,exception);
              if (sync == MagickFalse)
                status=MagickFalse;
              if (image->progress_monitor != (MagickProgressMonitor) NULL)
                {
                  MagickBooleanType
                    proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
                  #pragma omp critical (MagickCore_ProfileImage)
#endif
                  proceed=SetImageProgress(image,ProfileImageTag,progress++,
                    image->rows);
                  if (proceed == MagickFalse)
                    status=MagickFalse;
                }
            }
            image_view=DestroyCacheView(image_view);
            (void) SetImageColorspace(image,target_colorspace);
            /* Adjust the image type to match the transformed channels. */
            switch (signature)
            {
              case cmsSigRgbData:
              {
                image->type=image->matte == MagickFalse ? TrueColorType :
                  TrueColorMatteType;
                break;
              }
              case cmsSigCmykData:
              {
                image->type=image->matte == MagickFalse ? ColorSeparationType :
                  ColorSeparationMatteType;
                break;
              }
              case cmsSigGrayData:
              {
                image->type=image->matte == MagickFalse ? GrayscaleType :
                  GrayscaleMatteType;
                break;
              }
              default:
                break;
            }
            target_pixels=DestroyPixelThreadSet(target_pixels);
            source_pixels=DestroyPixelThreadSet(source_pixels);
            transform=DestroyTransformThreadSet(transform);
            if (cmsGetDeviceClass(source_profile) != cmsSigLinkClass)
              status=SetImageProfile(image,name,profile);
            if (target_profile != (cmsHPROFILE) NULL)
              (void) cmsCloseProfile(target_profile);
          }
        (void) cmsCloseProfile(source_profile);
      }
#endif
    }
  profile=DestroyStringInfo(profile);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m o v e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemoveImageProfile() removes a named profile from the image and returns its
% value.
%
% The format of the RemoveImageProfile method is:
%
% void *RemoveImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
/*
  Detach the named profile from the image's profile tree and hand ownership
  of its StringInfo to the caller; returns NULL when the image has no
  profiles (or the name is absent).
*/
MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  /*
    Keep the deprecated direct-access profile members in sync; "icc" and
    "iptc" are mutually exclusive names so an else-if chain suffices.
  */
  if (LocaleCompare(name,"icc") == 0)
    {
      image->color_profile.length=0;
      image->color_profile.info=(unsigned char *) NULL;
    }
  else
    if (LocaleCompare(name,"iptc") == 0)
      {
        image->iptc_profile.length=0;
        image->iptc_profile.info=(unsigned char *) NULL;
      }
  return((StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *)
    image->profiles,name));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t P r o f i l e I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImageProfileIterator() resets the image profile iterator. Use it in
% conjunction with GetNextImageProfile() to iterate over all the profiles
% associated with an image.
%
% The format of the ResetImageProfileIterator method is:
%
% ResetImageProfileIterator(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Rewind the profile iterator so GetNextImageProfile() starts from the
  first profile again; a no-op when the image has no profile tree.
*/
MagickExport void ResetImageProfileIterator(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles != (SplayTreeInfo *) NULL)
    ResetSplayTreeIterator((SplayTreeInfo *) image->profiles);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageProfile() adds a named profile to the image. If a profile with the
% same name already exists, it is replaced. This method differs from the
% ProfileImage() method in that it does not apply CMS color profiles.
%
% The format of the SetImageProfile method is:
%
% MagickBooleanType SetImageProfile(Image *image,const char *name,
% const StringInfo *profile)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name, for example icc, exif, and 8bim (8bim is the
% Photoshop wrapper for iptc profiles).
%
% o profile: A StringInfo structure that contains the named profile.
%
*/
static void *DestroyProfile(void *profile)
{
return((void *) DestroyStringInfo((StringInfo *) profile));
}
/*
  Copy one byte from the resource stream into *quantum and return the
  advanced read pointer.  No bounds checking: the caller guarantees at
  least one readable byte.
*/
static inline const unsigned char *ReadResourceByte(const unsigned char *p,
  unsigned char *quantum)
{
  *quantum=(*p);
  return(p+1);
}
/*
  Copy "count" bytes from the resource stream into quantum[] and return the
  advanced read pointer.  A non-positive count copies nothing.  No bounds
  checking: the caller guarantees the bytes are readable.
*/
static inline const unsigned char *ReadResourceBytes(const unsigned char *p,
  const ssize_t count,unsigned char *quantum)
{
  ssize_t
    remaining;

  for (remaining=count; remaining > 0; remaining--)
    *quantum++=(*p++);
  return(p);
}
/*
  Read a big-endian 32-bit value from the resource stream into *quantum and
  return the advanced read pointer.

  Each byte is widened to size_t BEFORE shifting: the previous form
  (size_t) (*p++ << 24) performed the shift in (signed) int, which is
  undefined for bytes >= 0x80 and, on 64-bit size_t, sign-extended so the
  high 32 bits of *quantum were set and later |= steps could not clear them.
*/
static inline const unsigned char *ReadResourceLong(const unsigned char *p,
  size_t *quantum)
{
  *quantum=(size_t) *p++ << 24;
  *quantum|=(size_t) *p++ << 16;
  *quantum|=(size_t) *p++ << 8;
  *quantum|=(size_t) *p++;
  return(p);
}
/*
  Read a big-endian 16-bit value from the resource stream into *quantum and
  return the advanced read pointer.
*/
static inline const unsigned char *ReadResourceShort(const unsigned char *p,
  unsigned short *quantum)
{
  unsigned short
    value;

  value=(unsigned short) (p[0] << 8);
  value=(unsigned short) (value | p[1]);
  *quantum=value;
  return(p+2);
}
/*
  Walk a Photoshop "8BIM" image-resource block and lift embedded profiles
  (IPTC, ICC, EXIF, XMP) out into their own image profile entries; also
  picks up the document resolution.  Each resource record is laid out as:
  "8BIM" signature, 16-bit resource id, Pascal-style name (length byte +
  bytes, padded to even), 32-bit data length, then the data (padded to
  even).  Input is untrusted; the pointer comparisons below are the only
  bounds checks.
*/
static MagickBooleanType GetProfilesFromResourceBlock(Image *image,
  const StringInfo *resource_block)
{
  const unsigned char
    *datum;

  register const unsigned char
    *p;

  size_t
    length;

  StringInfo
    *profile;

  unsigned char
    length_byte;

  size_t
    count;

  unsigned short
    id;

  datum=GetStringInfoDatum(resource_block);
  length=GetStringInfoLength(resource_block);
  /* 16 = minimum bytes for signature+id+empty name+count+pad. */
  for (p=datum; p < (datum+length-16); )
  {
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    /* Skip the Pascal-style resource name, padded to an even length
       (length byte included in the pad computation). */
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    if (((length_byte+1) & 0x01) != 0)
      p++;
    /* NOTE(review): a huge length_byte can push p past the 4-byte check
       below only via this single guard -- verify it cannot be skipped. */
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&count);
    if ((p > (datum+length-count)) || (count > length))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        unsigned short
          resolution;

        /*
          Resolution (Photoshop ResolutionInfo).  Each axis is a 16-bit
          integer part followed by 6 bytes (fraction + display units) that
          are skipped.
        */
        p=ReadResourceShort(p,&resolution)+6;
        image->x_resolution=(double) resolution;
        p=ReadResourceShort(p,&resolution)+6;
        image->y_resolution=(double) resolution;
        break;
      }
      case 0x0404:
      {
        /*
          IPTC Profile
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfile(image,"iptc",profile);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x040c:
      {
        /*
          Thumbnail: ignored.
        */
        p+=count;
        break;
      }
      case 0x040f:
      {
        /*
          ICC Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfile(image,"icc",profile);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0422:
      {
        /*
          EXIF Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfile(image,"exif",profile);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0424:
      {
        /*
          XMP Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfile(image,"xmp",profile);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      default:
      {
        p+=count;
        break;
      }
    }
    /* Resource data is padded to an even byte count. */
    if ((count & 0x01) != 0)
      p++;
  }
  return(MagickTrue);
}
MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name,
  const StringInfo *profile)
{
  char
    key[MaxTextExtent],
    property[MaxTextExtent];

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Lazily create the profile splay-tree; keys are copied names and values
    are cloned StringInfo structures, both freed by the tree's destructors.
  */
  if (image->profiles == (SplayTreeInfo *) NULL)
    image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
      DestroyProfile);
  (void) CopyMagickString(key,name,MaxTextExtent);
  status=AddValueToSplayTree((SplayTreeInfo *) image->profiles,
    ConstantString(key),CloneStringInfo(profile));
  if ((status != MagickFalse) &&
      ((LocaleCompare(name,"icc") == 0) || (LocaleCompare(name,"icm") == 0)))
    {
      const StringInfo
        *icc_profile;

      /*
        Continue to support deprecated color profile member: it aliases the
        tree-owned datum (no separate allocation).
      */
      icc_profile=GetImageProfile(image,name);
      if (icc_profile != (const StringInfo *) NULL)
        {
          image->color_profile.length=GetStringInfoLength(icc_profile);
          image->color_profile.info=GetStringInfoDatum(icc_profile);
        }
    }
  if ((status != MagickFalse) &&
      ((LocaleCompare(name,"iptc") == 0) || (LocaleCompare(name,"8bim") == 0)))
    {
      const StringInfo
        *iptc_profile;

      /*
        Continue to support deprecated IPTC profile member.
      */
      iptc_profile=GetImageProfile(image,name);
      if (iptc_profile != (const StringInfo *) NULL)
        {
          image->iptc_profile.length=GetStringInfoLength(iptc_profile);
          image->iptc_profile.info=GetStringInfoDatum(iptc_profile);
        }
      /* 8BIM/IPTC blocks may embed further profiles (ICC, EXIF, XMP);
         extract them into their own entries. */
      (void) GetProfilesFromResourceBlock(image,profile);
    }
  /*
    Inject profile into image properties (the "<name>:sans" lookup primes
    the property cache as a side effect).
  */
  (void) FormatLocaleString(property,MaxTextExtent,"%s:sans",name);
  (void) GetImageProperty(image,property);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageProfiles() synchronizes image properties with the image profiles.
% Currently we only support updating the EXIF resolution and orientation.
%
% The format of the SyncImageProfiles method is:
%
% MagickBooleanType SyncImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Consume one byte from the profile stream: advance *p, decrement *length,
  and return the byte as an int, or EOF when the stream is exhausted.
*/
static inline int ReadProfileByte(unsigned char **p,size_t *length)
{
  int
    c;

  if (*length == 0)
    return(EOF);
  c=(int) **p;
  (*p)++;
  (*length)--;
  return(c);
}
/*
  Decode a 16-bit value from the buffer with the requested byte order.
  The cast to unsigned short already truncates to 16 bits, so the old
  explicit "& 0xffff" masks (and redundant unsigned char casts) are gone.
*/
static inline unsigned short ReadProfileShort(const EndianType endian,
  unsigned char *buffer)
{
  if (endian == LSBEndian)
    return((unsigned short) ((buffer[1] << 8) | buffer[0]));
  return((unsigned short) ((buffer[0] << 8) | buffer[1]));
}
/*
  Decode a 32-bit value from the buffer with the requested byte order.

  Each byte is widened to size_t BEFORE shifting: the previous
  (buffer[i] << 24) was evaluated in (signed) int, which is undefined
  behavior for bytes >= 0x80 (the sign-extension it produced was only
  repaired afterwards by the 0xffffffff mask).  The mask is kept so the
  result is always a clean 32-bit value.
*/
static inline size_t ReadProfileLong(const EndianType endian,
  unsigned char *buffer)
{
  size_t
    value;

  if (endian == LSBEndian)
    value=((size_t) buffer[3] << 24) | ((size_t) buffer[2] << 16) |
      ((size_t) buffer[1] << 8) | (size_t) buffer[0];
  else
    value=((size_t) buffer[0] << 24) | ((size_t) buffer[1] << 16) |
      ((size_t) buffer[2] << 8) | (size_t) buffer[3];
  return(value & 0xffffffff);
}
/*
  Encode a 32-bit value into the profile at p with the requested byte
  order.  The bytes are staged in a local buffer and copied in one shot
  (p may be unaligned).
*/
static inline void WriteProfileLong(const EndianType endian,
  const size_t value,unsigned char *p)
{
  unsigned char
    buffer[4];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
      buffer[2]=(unsigned char) (value >> 16);
      buffer[3]=(unsigned char) (value >> 24);
    }
  else
    {
      buffer[0]=(unsigned char) (value >> 24);
      buffer[1]=(unsigned char) (value >> 16);
      buffer[2]=(unsigned char) (value >> 8);
      buffer[3]=(unsigned char) value;
    }
  (void) CopyMagickMemory(p,buffer,4);
}
/*
  Encode a 16-bit value into the profile at p with the requested byte
  order, via a staging buffer so p may be unaligned.
*/
static void WriteProfileShort(const EndianType endian,
  const unsigned short value,unsigned char *p)
{
  unsigned char
    buffer[2];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
    }
  else
    {
      buffer[0]=(unsigned char) (value >> 8);
      buffer[1]=(unsigned char) value;
    }
  (void) CopyMagickMemory(p,buffer,2);
}
MagickExport MagickBooleanType SyncImageProfiles(Image *image)
{
#define MaxDirectoryStack  16
#define EXIF_DELIMITER  "\n"
#define EXIF_NUM_FORMATS  12
#define TAG_EXIF_OFFSET  0x8769
#define TAG_INTEROP_OFFSET  0xa005

  typedef struct _DirectoryInfo
  {
    unsigned char
      *directory;

    size_t
      entry;
  } DirectoryInfo;

  DirectoryInfo
    directory_stack[MaxDirectoryStack];

  EndianType
    endian;

  size_t
    entry,
    length,
    number_entries;

  SplayTreeInfo
    *exif_resources;

  ssize_t
    id,
    level,
    offset;

  /* Bytes per component for each EXIF/TIFF data format code (1..12);
     index 0 is a placeholder. */
  static int
    format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};

  StringInfo
    *profile;

  unsigned char
    *directory,
    *exif;

  /*
    Set EXIF resolution tag.
  */
  profile=(StringInfo *) GetImageProfile(image,"EXIF");
  if (profile == (StringInfo *) NULL)
    return(MagickTrue);
  length=GetStringInfoLength(profile);
  exif=GetStringInfoDatum(profile);
  /* Scan forward for the "Exif\0\0" marker; exif/length end up just past
     it (i.e. at the TIFF header). */
  while (length != 0)
  {
    if (ReadProfileByte(&exif,&length) != 0x45)
      continue;
    if (ReadProfileByte(&exif,&length) != 0x78)
      continue;
    if (ReadProfileByte(&exif,&length) != 0x69)
      continue;
    if (ReadProfileByte(&exif,&length) != 0x66)
      continue;
    if (ReadProfileByte(&exif,&length) != 0x00)
      continue;
    if (ReadProfileByte(&exif,&length) != 0x00)
      continue;
    break;
  }
  if (length < 16)
    return(MagickFalse);
  /* TIFF header: "II" (0x4949) = little endian, "MM" (0x4D4D) = big
     endian, followed by the 0x002a magic. */
  id=(ssize_t) ReadProfileShort(LSBEndian,exif);
  endian=LSBEndian;
  if (id == 0x4949)
    endian=LSBEndian;
  else
    if (id == 0x4D4D)
      endian=MSBEndian;
    else
      return(MagickFalse);
  if (ReadProfileShort(endian,exif+2) != 0x002a)
    return(MagickFalse);
  /*
    This is the offset to the first IFD.
  */
  offset=(ssize_t) ((int) ReadProfileLong(endian,exif+4));
  if ((offset < 0) || ((size_t) offset >= length))
    return(MagickFalse);
  directory=exif+offset;
  level=0;
  entry=0;
  /* Identity splay-tree of visited entry addresses: detects cyclic IFD
     offset chains in hostile profiles and stops the walk. */
  exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
    (void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
  do
  {
    /* Pop the next pending IFD pushed by a sub-IFD tag below. */
    if (level > 0)
      {
        level--;
        directory=directory_stack[level].directory;
        entry=directory_stack[level].entry;
      }
    /*
      Determine how many entries there are in the current IFD.
    */
    number_entries=ReadProfileShort(endian,directory);
    for ( ; entry < number_entries; entry++)
    {
      register unsigned char
        *p,
        *q;

      size_t
        number_bytes;

      ssize_t
        components,
        format,
        tag_value;

      /* Each IFD entry is 12 bytes: tag(2), format(2), components(4),
         value-or-offset(4). */
      q=(unsigned char *) (directory+2+(12*entry));
      if (GetValueFromSplayTree(exif_resources,q) == q)
        break;
      (void) AddValueToSplayTree(exif_resources,q,q);
      tag_value=(ssize_t) ReadProfileShort(endian,q);
      format=(ssize_t) ReadProfileShort(endian,q+2);
      if ((format-1) >= EXIF_NUM_FORMATS)
        break;
      components=(ssize_t) ((int) ReadProfileLong(endian,q+4));
      number_bytes=(size_t) components*format_bytes[format];
      if ((ssize_t) number_bytes < components)
        break;  /* prevent overflow */
      /* Values of <= 4 bytes are stored inline; larger ones indirectly. */
      if (number_bytes <= 4)
        p=q+8;
      else
        {
          ssize_t
            offset;

          /*
            The directory entry contains an offset.
          */
          offset=(ssize_t) ((int) ReadProfileLong(endian,q+8));
          if ((ssize_t) (offset+number_bytes) < offset)
            continue;  /* prevent overflow */
          if ((size_t) (offset+number_bytes) > length)
            continue;
          p=(unsigned char *) (exif+offset);
        }
      /* Overwrite the tags we keep in sync with Image members:
         0x011a XResolution, 0x011b YResolution, 0x0112 Orientation,
         0x0128 ResolutionUnit. */
      switch (tag_value)
      {
        case 0x011a:
        {
          (void) WriteProfileLong(endian,(size_t) (image->x_resolution+0.5),p);
          (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x011b:
        {
          (void) WriteProfileLong(endian,(size_t) (image->y_resolution+0.5),p);
          (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x0112:
        {
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) image->orientation,p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) image->orientation,
            p);
          break;
        }
        case 0x0128:
        {
          /* EXIF unit codes are ImageMagick's ResolutionType value + 1. */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) (image->units+1),p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) (image->units+1),p);
          break;
        }
        default:
          break;
      }
      /* Recurse into the EXIF and Interoperability sub-IFDs (and the
         chained next-IFD offset after this IFD's entries). */
      if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET))
        {
          ssize_t
            offset;

          offset=(ssize_t) ((int) ReadProfileLong(endian,p));
          if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
            {
              /* Save our continuation point, then push the sub-IFD. */
              directory_stack[level].directory=directory;
              entry++;
              directory_stack[level].entry=entry;
              level++;
              directory_stack[level].directory=exif+offset;
              directory_stack[level].entry=0;
              level++;
              if ((directory+2+(12*number_entries)) > (exif+length))
                break;
              offset=(ssize_t) ((int) ReadProfileLong(endian,directory+2+(12*
                number_entries)));
              if ((offset != 0) && ((size_t) offset < length) &&
                  (level < (MaxDirectoryStack-2)))
                {
                  directory_stack[level].directory=exif+offset;
                  directory_stack[level].entry=0;
                  level++;
                }
            }
          break;
        }
    }
  } while (level > 0);
  exif_resources=DestroySplayTree(exif_resources);
  return(MagickTrue);
}
|
Repulsion.c | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <omp.h>
// Print the occupied grid cells as a row of '*' characters.
static void show_grid(const float *grid, int ncells){
  for(int ix=0; ix<ncells; ix++){
    putchar(grid[ix]>0.0 ? '*' : ' ');
  }
  putchar('\n');
}

// Simulate M mutually repelling "blobs" on a 1-D grid of N cells for T
// timesteps.  Fixed unit charges at cells 0 and N+1 confine the blobs.
// Returns 0 on success, 1 on allocation failure.
int main(){
  const int N=10000, T=50, M=5;
  float *grid, *forces;
  float *posns;
  float force_retention = 0.0; // 0 => forces reset each step (pure minimisation);
                               // >0 damps previous forces and allows oscillation
  int show = 0;                // non-zero: print grid before and after
  // Allocate some 1-D arrays, zero-initialised.
  // Main grid has extra edge cells; forces[] mirrors that layout so the
  // indices line up, and the edge entries of forces[] are simply ignored.
  grid = (float *)calloc(N+2, sizeof(float));
  forces = (float *)calloc(N+2, sizeof(float));
  // BUG FIX: posns holds floats, so allocate float-sized elements (the
  // original used M*sizeof(int), which only works by accident on platforms
  // where sizeof(int) == sizeof(float)).
  posns = (float *)malloc(M*sizeof(float));
  if(grid == NULL || forces == NULL || posns == NULL){
    fprintf(stderr, "allocation failed\n");
    free(grid); free(forces); free(posns);
    return 1;
  }
  // End values set to 1.0: fixed boundary charges.
  grid[0] = 1.0;
  grid[N+1] = 1.0;
  // Initialise blobs in a group roughly in the middle of the grid and mark
  // their cells.
  for(int ib=0; ib < M; ib++){
    posns[ib] = ((float)N/2.0) - ((float)M/2.0) + (float)ib;
    grid[(int)round(posns[ib])] = 1.0;
  }
  if(show) show_grid(grid, N+2);
  // Iterate over time
  for(int it = 1; it<= T; it++){
    // Damp out previous forces. If we reset to 0 we get a minimisation;
    // if we just damp, we get some oscillations.
    #pragma omp parallel for
    for(int ix = 1; ix <N+1; ix++){
      forces[ix] = forces[ix] * force_retention;
    }
    // Loop over interior cells and accumulate the force at each one from
    // every occupied cell, including the fixed edge charges.
    #pragma omp parallel for
    for(int ix=1; ix< N+1; ix++){
      for(int ix_f=0; ix_f< N+2; ix_f++){
        if(ix == ix_f) continue;
        // Inverse-square repulsion, signed to point away from the source.
        // dist*dist replaces pow(dist, 2) in this O(N^2) inner loop.
        float dist = (float)(ix-ix_f);
        forces[ix] = forces[ix] + grid[ix_f]/(dist*dist) * copysign(1.0, ix-ix_f);
        // NOTE: calculating all the forces like this is inefficient, but
        // suppose for this example that we have a good reason for it (e.g.
        // we really want the persistence of the forces).
      }
    }
    // Zero out grid (interior only) before moving and re-placing blobs
    memset(grid+1, 0, N * sizeof(float));
    // Push blobs according to forces; 0.9 scales how rapidly the forces
    // move the blobs.
    for(int ib=0; ib< M; ib++){
      posns[ib] = posns[ib] + forces[(int)round(posns[ib])]*0.9;
      // Forcibly keep in grid
      if(posns[ib] < 1 ) posns[ib] = 1;
      if(posns[ib] > N-1 ) posns[ib] = N-1;
      // Place blob in new position
      grid[(int)round(posns[ib])] = 1.0;
    }
  }
  // Show final state to screen
  if(show) show_grid(grid, N+2);
  free(grid);
  free(forces);
  free(posns);
  return 0;
}
|
avx512_gemm.h | #pragma once
#include "intgemm/intgemm_config.h"
#ifdef INTGEMM_COMPILER_SUPPORTS_AVX512BW
#include "interleave.h"
#include "kernels.h"
#include "multiply.h"
#include "types.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
/* AVX512 implementation.
* This uses INTGEMM_AVX512BW, INTGEMM_AVX512DQ, and might use AVX512VL
* That means it supports mainstream CPUs with AVX512, starting with Skylake
* Xeons.
* It does not support any Knights / Xeon Phi processors.
*
* All memory must be 64-byte aligned.
*/
namespace intgemm {
// AVX512 has combined collapse and store instructions:
// _mm512_mask_cvtsepi32_storeu_epi16
// _mm512_mask_cvtsepi32_storeu_epi8
// So conversion in memory uses these, but I also implement a wider version for
// rearranging B.
namespace AVX512BW {
// Load from memory, multiply, and convert to int32_t.
/* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
// Load 16 floats from input and quantize them against quant_mult_reg.
/* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
INTGEMM_AVX512BW inline __m512i QuantizerGrab(const float *input, const __m512 quant_mult_reg) {
  const __m512 loaded = loadu_ps<__m512>(input);
  return kernels::quantize(loaded, quant_mult_reg);
}
/* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
INTGEMM_SELECT_COL_B(INTGEMM_AVX512BW, __m512i)
// For PrepareB we want to read 8 columns at a time. When converting 32-bit
// floats to 8-bit values, that's 32 bytes of floats. But AVX512 is 64 bytes
// wide so it reads off the edge of the tile. We could expand the tile size
// but then the memory written to won't be contiguous anyway so we'd be doing a
// scatter anyway. Easier to just read the 8 columns we wanted as 256 bits
// concatenate.
// Glue two 256-bit float registers into one 512-bit register:
// `first` becomes the low half, `second` the high half.
INTGEMM_AVX512DQ inline __m512 Concat(const __m256 first, const __m256 second) {
  // _mm512_insertf32x8 requires INTGEMM_AVX512DQ, but that ships alongside INTGEMM_AVX512BW anyway.
  const __m512 low_half = _mm512_castps256_ps512(first);
  return _mm512_insertf32x8(low_half, second, 1);
}
// Like QuantizerGrab, except the low and high 32-byte halves (8 columns
// each) are loaded from two independently-specified addresses.
/* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
INTGEMM_AVX512BW inline __m512i QuantizerGrabHalves(const float *input0, const float *input1, const __m512 quant_mult_reg) {
  const __m512 both_halves = Concat(loadu_ps<__m256>(input0), loadu_ps<__m256>(input1));
  // Scale by the quantization multiplier, then round-convert to int32.
  return _mm512_cvtps_epi32(_mm512_mul_ps(both_halves, quant_mult_reg));
}
// These are only used for reshaping due to the AVX512 instructions
// _mm512_mask_cvtsepi32_storeu_epi16 and _mm512_mask_cvtsepi32_storeu_epi8
// being used for the quantizer.
class QuantizeTile16 {
public:
  // Quantize 32 consecutive floats to int16_t, wrapping into the next row
  // group when fewer than 16 columns remain in the current one.
  INTGEMM_AVX512BW static inline Register ConsecutiveWithWrapping(FRegister quant_mult, const float *input, Index cols_left, Index cols, Index row_step) {
    auto input0 = input;
    // If the second 16-float half would run past the current row group,
    // advance it by (row_step - 1) rows so it reads from the next group.
    auto input1 = input + 16 + (cols_left <= 16 ? cols * (row_step - 1) : 0);
    auto g0 = QuantizerGrabHalves(input0, input1, quant_mult);
    auto g1 = QuantizerGrabHalves(input0 + 8, input1 + 8, quant_mult);
    // Saturating pack of 32-bit values down to 16-bit.
    auto packed = packs_epi32(g0, g1);
    // packs_epi32 interleaves per 128-bit lane; reorder 64-bit chunks
    // within each 256-bit lane to restore consecutive order.
    return _mm512_permutex_epi64(packed, 0xd8 /* 0, 2, 1, 3 */);
  }

  // Quantize a tile of B (strided by `cols` floats per row) into the
  // register layout that the PrepareB macros expect.
  INTGEMM_AVX512BW static inline Register ForReshape(FRegister quant_mult, const float *input, Index cols) {
    __m512i g0 = QuantizerGrabHalves(input, input + 16 * cols, quant_mult);
    __m512i g1 = QuantizerGrabHalves(input + 8 * cols, input + 24 * cols, quant_mult);
    __m512i packed = packs_epi32(g0, g1);
    // Permute within 256-bit lanes, so same as INTGEMM_AVX2
    return _mm512_permutex_epi64(packed, 0xd8 /* 0, 2, 1, 3 */);
  }
};
class QuantizeTile8 {
public:
  // Quantize 64 consecutive floats to int8_t, wrapping into the next row
  // group whenever the current row runs out of columns.
  INTGEMM_AVX512BW static inline Register ConsecutiveWithWrapping(FRegister quant_mult, const float *input, Index cols_left, Index cols, Index row_step) {
    // Lower clamp at -127: -128 is excluded (see "Ban -128" in ForReshape).
    static const __m512i neg127 = _mm512_set1_epi8(-127);
    static const __m512i shuffle_param = _mm512_set_epi32(15, 11, 7, 3, 14, 10, 6, 2, 13, 9, 5, 1, 12, 8, 4, 0);
    // Gather four register-widths (16 floats each) of input, jumping to
    // the next row group whenever fewer than 16 columns remain.
    const float* inputs[4];
    for (Index i = 0; i < sizeof(inputs) / sizeof(inputs[0]); ++i) {
      while (cols_left < sizeof(Register) / sizeof(float)) {
        input += cols * (row_step - 1);
        cols_left += cols;
      }
      inputs[i] = input;
      input += sizeof(Register) / sizeof(float);
      cols_left -= sizeof(Register) / sizeof(float);
    }
    auto g0 = QuantizerGrab(inputs[0], quant_mult);
    auto g1 = QuantizerGrab(inputs[1], quant_mult);
    auto g2 = QuantizerGrab(inputs[2], quant_mult);
    auto g3 = QuantizerGrab(inputs[3], quant_mult);
    // Saturating packs: 32-bit -> 16-bit -> 8-bit.
    auto packed0 = packs_epi32(g0, g1);
    auto packed1 = packs_epi32(g2, g3);
    auto packed = _mm512_packs_epi16(packed0, packed1);
    packed = _mm512_max_epi8(packed, neg127);
    // The packs interleave per 128-bit lane; this permute restores order.
    return _mm512_permutexvar_epi32(shuffle_param, packed);
  }

  // Quantize a tile of B (strided by `cols` floats per row) into the
  // register layout that the PrepareB macros expect.
  INTGEMM_AVX512BW static inline __m512i ForReshape(FRegister quant_mult, const float *input, Index cols) {
    // TODO: try alternative: _mm512_cvtsepi32_epi8 ?
    const __m512i neg127 = _mm512_set1_epi8(-127);
    // In reverse order: grabbing the first 32-bit values from each 128-bit register, then the second 32-bit values, etc.
    const __m512i shuffle_param = _mm512_set_epi32(15, 11, 7, 3, 14, 10, 6, 2, 13, 9, 5, 1, 12, 8, 4, 0);
    // 32-bit format.
    __m512i g0 = QuantizerGrabHalves(input, input + 2 * cols, quant_mult);
    __m512i g1 = QuantizerGrabHalves(input + 16 * cols, input + 18 * cols, quant_mult);
    __m512i g2 = QuantizerGrabHalves(input + 32 * cols, input + 34 * cols, quant_mult);
    __m512i g3 = QuantizerGrabHalves(input + 48 * cols, input + 50 * cols, quant_mult);
    // Pack 32-bit to 16-bit.
    __m512i packed0 = packs_epi32(g0, g1);
    __m512i packed1 = packs_epi32(g2, g3);
    // Pack 16-bit to 8-bit.
    __m512i packed = _mm512_packs_epi16(packed0, packed1);
    // Ban -128.
    packed = _mm512_max_epi8(packed, neg127);
    // 0 1 2 3 16 17 18 19 32 33 34 35 48 49 50 51 4 5 6 7 20 21 22 23 36 37 38 39 52 53 54 55 8 9 10 11 24 25 26 27 40 41 42 43 56 57 58 59 12 13 14 15 28 29 30 31 44 45 46 47 60 61 62 63
    return _mm512_permutexvar_epi32(shuffle_param, packed);
  }
};
// 16-bit integer GEMM kernels for AVX512BW.
struct Kernels16 {
  typedef int16_t Integer;

  // Currently A is prepared by quantization but this could theoretically change.
  // rows * cols must be a multiple of 16.
  /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
  INTGEMM_AVX512BW static inline void PrepareA(const float *input, int16_t *output, float quant_mult, Index rows, Index cols) {
    Quantize(input, output, quant_mult, rows * cols);
  }

  // Technically output can be unaligned in Quantize.
  // But then it will need to be aligned for Multiply.
  // size must be a multiple of 16.
  // Convert to 16-bit signed integers.
  /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
  INTGEMM_AVX512BW static void Quantize(const float *input, int16_t *output, float quant_mult, Index size) {
    assert(size % 16 == 0);
    assert(reinterpret_cast<uintptr_t>(input) % 64 == 0);
    // Fill with the quantization multiplier.
    const __m512 quant_mult_reg = _mm512_set1_ps(quant_mult);
    const float *end = input + size;
    for (; input != end; input += 16, output += 16) {
      // There doesn't seem to be an unmasked version.
      // Saturating convert of 16 int32 lanes to int16, stored through an
      // all-ones mask.
      _mm512_mask_cvtsepi32_storeu_epi16(output, 0xffff, QuantizerGrab(input, quant_mult_reg));
    }
  }

  // Tile size for B; B must be a multiple of this block size.
  static const Index kBTileRow = 32;
  static const Index kBTileCol = 8;

  /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
  INTGEMM_PREPARE_B_16(INTGEMM_AVX512BW, QuantizeTile16)
  INTGEMM_PREPARE_B_QUANTIZED_TRANSPOSED(INTGEMM_AVX512BW, int16_t)
  INTGEMM_PREPARE_B_TRANSPOSED(INTGEMM_AVX512BW, QuantizeTile16, int16_t)

  // Copy the columns listed in [cols_begin, cols_end) of prepared B into
  // contiguous output.
  /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
  INTGEMM_AVX512BW static void SelectColumnsB(const int16_t *input, int16_t *output, Index rows, const Index *cols_begin, const Index *cols_end) {
    // rows * 2 presumably scales the int16_t row count into bytes for the
    // generic register-based copy -- TODO confirm against SelectColumnsOfB.
    SelectColumnsOfB((const __m512i*)input, (__m512i*)output, rows * 2, cols_begin, cols_end);
  }

  /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
  INTGEMM_MULTIPLY16(__m512i, INTGEMM_AVX512BW, CPUType::AVX2)

  constexpr static const char *const kName = "16-bit AVX512";

  static const CPUType kUses = CPUType::AVX512BW;
};
// 8-bit integer GEMM kernels for AVX512BW.
struct Kernels8 {
  typedef int8_t Integer;

  // Currently A is prepared by quantization but this could theoretically change.
  /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
  INTGEMM_AVX512BW static inline void PrepareA(const float *input, int8_t *output, float quant_mult, Index rows, Index cols) {
    Quantize(input, output, quant_mult, rows * cols);
  }

private:
  /* g++ (Ubuntu 7.4.0-1ubuntu1~18.04.1) 7.4.0 does not carry target attributes
   * to the hidden function it creates in implementing #pragma omp parallel for.
   * So intrinsics were not working inside the for loop when compiled with
   * OMP. Also, passing register types across #pragma omp parallel for
   * generated an internal compiler error.
   * The problem does not occur in g++-8 (Ubuntu 8.3.0-6ubuntu1~18.04.1) 8.3.0.
   * As a workaround, I split into #pragma omp parallel with boring types
   * passed across the boundary then call this function with target attributes.
   */
  // Worksharing body of Quantize: each OMP thread quantizes its share of
  // 16-float batches. `count` must be a multiple of kBatch.
  INTGEMM_AVX512BW static void QuantizeThread(const float *input, int8_t *output, float quant_mult, std::size_t count) {
    // Lower clamp at -127: -128 is excluded (see "Ban -128" in QuantizeTile8).
    const __m512i neg127 = _mm512_set1_epi32(-127);
    const __m512 quant_mult_reg = _mm512_set1_ps(quant_mult);
    const std::size_t kBatch = sizeof(__m512i) / sizeof(float);
    #pragma omp for
    for (std::size_t i = 0; i < count; i += kBatch) {
      __m512i asint = QuantizerGrab(input + i, quant_mult_reg);
      asint = _mm512_max_epi32(asint, neg127);
      // There doesn't seem to be an unmasked version.
      _mm512_mask_cvtsepi32_storeu_epi8(output + i, 0xffff, asint);
    }
  }

public:
  // Technically output can be unaligned in Quantize.
  // But then it will need to be aligned for Multiply.
  // Convert to 8-bit signed integers.
  /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
  INTGEMM_AVX512BW static void Quantize(const float *input, int8_t *output, float quant_mult, Index size) {
    assert(reinterpret_cast<uintptr_t>(input) % sizeof(__m512i) == 0);
    const std::size_t kBatch = sizeof(__m512i) / sizeof(float);
    // Round size down to whole 16-float batches; the parallel region
    // handles that part.
    std::size_t fast_size = (size & ~(kBatch - 1));
    const float *fast_input_end = input + fast_size;
    int8_t *fast_output_end = output + fast_size;
    #pragma omp parallel
    {
      QuantizeThread(input, output, quant_mult, fast_size);
    }
    // Serially handle the remaining (size % 16) values with a partial
    // store mask.
    std::size_t overhang = size & (kBatch - 1);
    if (!overhang) return; // We needed a branch anyway for the empty case.
    const __m512i neg127 = _mm512_set1_epi32(-127);
    const __m512 quant_mult_reg = _mm512_set1_ps(quant_mult);
    // NOTE(review): this loads a full 64 bytes at fast_input_end even
    // though only `overhang` floats are consumed -- assumes the input
    // buffer is readable there; confirm against callers.
    __m512i asint = QuantizerGrab(fast_input_end, quant_mult_reg);
    asint = _mm512_max_epi32(asint, neg127);
    // Only the low `overhang` bytes are written.
    _mm512_mask_cvtsepi32_storeu_epi8(fast_output_end, (1 << overhang) - 1, asint);
  }

  // Preparing A for the signed/unsigned multiplication. Using add 127
  /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
  INTGEMM_AVX512BW static inline void PrepareA(const float *input, uint8_t *output, float quant_mult, Index rows, Index cols) {
    QuantizeU(input, output, quant_mult, rows * cols);
  }

  // Technically output can be unaligned in Quantize.
  // But then it will need to be aligned for Multiply.
  // Convert to 8-bit unsigned integers: quantize, clamp to [-127, 127],
  // then add 127 to shift into [0, 254].
  /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
  INTGEMM_AVX512BW static void QuantizeU(const float *input, uint8_t *output, float quant_mult, Index size) {
    assert(size % 16 == 0);
    assert(reinterpret_cast<uintptr_t>(input) % 64 == 0);
    const __m512i pos127 = _mm512_set1_epi32(127);
    const __m512i zero = _mm512_setzero_si512();
    const __m512 quant_mult_reg = _mm512_set1_ps(quant_mult);
    const float *end = input + size;
    for (; input < end; input += 16, output += 16) {
      __m512i asint = QuantizerGrab(input, quant_mult_reg);
      asint = _mm512_min_epi32(asint, pos127);
      asint = _mm512_add_epi32(asint, pos127);
      asint = _mm512_max_epi32(asint, zero);
      // Unsigned saturating convert to bytes and store.
      _mm512_mask_cvtusepi32_storeu_epi8(output, 0xffff, asint);
    }
  }

  // Tile size for B; B must be a multiple of this block size.
  static const Index kBTileRow = 64;
  static const Index kBTileCol = 8;

  /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
  INTGEMM_PREPARE_B_8(INTGEMM_AVX512BW, QuantizeTile8)
  INTGEMM_PREPARE_B_QUANTIZED_TRANSPOSED(INTGEMM_AVX512BW, int8_t)
  INTGEMM_PREPARE_B_TRANSPOSED(INTGEMM_AVX512BW, QuantizeTile8, int8_t)

  // Copy the columns listed in [cols_begin, cols_end) of prepared B into
  // contiguous output.
  /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
  INTGEMM_AVX512BW static void SelectColumnsB(const int8_t *input, int8_t *output, Index rows, const Index *cols_begin, const Index *cols_end) {
    SelectColumnsOfB((const __m512i*)input, (__m512i*)output, rows, cols_begin, cols_end);
  }

  // Special AVX512 implementation due to having 32 registers (so I don't have to
  // allocate registers manually) and no sign instruction.
  //
  // Signed 8-bit multiply: _mm512_maddubs_epi16 multiplies unsigned-by-signed,
  // so each chunk of A is split into |a| (unsigned) and a sign mask, and the
  // sign is pushed onto the B operand by conditionally negating it.
  template <typename Callback>
  INTGEMM_AVX512BW static void Multiply(const int8_t *A, const int8_t *B, Index A_rows, Index width, Index B_cols, Callback callback) {
    // This is copy-paste from Multiply8_SSE2OrAVX2.
    assert(width % sizeof(Register) == 0);
    assert(B_cols % 8 == 0);
    assert(reinterpret_cast<uintptr_t>(A) % sizeof(Register) == 0);
    assert(reinterpret_cast<uintptr_t>(B) % sizeof(Register) == 0);
    // There's 8 results for INTGEMM_AVX2 to handle.
    auto callback_impl = callbacks::CallbackImpl<CPUType::AVX2, Callback>(callback);
    const Index simd_width = width / sizeof(Register);
    // Added for AVX512.
    Register zeros = setzero_si<Register>();
    // Go over 8 columns of B at a time.
    #pragma omp for
    for (Index B0_colidx = 0; B0_colidx < B_cols; B0_colidx += 8) {
      const Register *B0_col = reinterpret_cast<const Register*>(B) + B0_colidx * simd_width;
      // Process one row of A at a time. Doesn't seem to be faster to do multiple rows of A at once.
      for (Index A_rowidx = 0; A_rowidx < A_rows; ++A_rowidx) {
        // Iterate over shared (inner) dimension.
        const Register *A_live = reinterpret_cast<const Register *>(A + A_rowidx * width);
        const Register *A_end = A_live + simd_width;
        const Register *B_live = B0_col;
        // Do the first iteration to initialize the sums.
        __m512i a = *A_live;
        // Per-byte mask of where a has its sign bit set (i.e. is negative).
        __mmask64 neg_mask = _mm512_test_epi8_mask(a, _mm512_set1_epi8(-128));
        __m512i a_positive = _mm512_abs_epi8(a);
        // These will be packed 16-bit integers containing sums for each column of B multiplied by the row of A.
        Register sum0 = maddubs_epi16(a_positive, _mm512_mask_sub_epi8(B_live[0], neg_mask, zeros, B_live[0]));
        Register sum1 = maddubs_epi16(a_positive, _mm512_mask_sub_epi8(B_live[1], neg_mask, zeros, B_live[1]));
        Register sum2 = maddubs_epi16(a_positive, _mm512_mask_sub_epi8(B_live[2], neg_mask, zeros, B_live[2]));
        Register sum3 = maddubs_epi16(a_positive, _mm512_mask_sub_epi8(B_live[3], neg_mask, zeros, B_live[3]));
        Register sum4 = maddubs_epi16(a_positive, _mm512_mask_sub_epi8(B_live[4], neg_mask, zeros, B_live[4]));
        Register sum5 = maddubs_epi16(a_positive, _mm512_mask_sub_epi8(B_live[5], neg_mask, zeros, B_live[5]));
        Register sum6 = maddubs_epi16(a_positive, _mm512_mask_sub_epi8(B_live[6], neg_mask, zeros, B_live[6]));
        Register sum7 = maddubs_epi16(a_positive, _mm512_mask_sub_epi8(B_live[7], neg_mask, zeros, B_live[7]));
        ++A_live;
        B_live += 8;
        // Use A as the loop variable so the add can be done where gcc likes it
        // for branch prediction.
        for (; A_live != A_end; ++A_live, B_live += 8) {
          // Unique code here: can we do an inline function?
          // Retrieve a. We will use this as the unsigned part.
          a = *A_live;
          // Retrieve the conveniently consecutive values of B.
          __m512i b0 = *B_live;
          __m512i b1 = *(B_live + 1);
          __m512i b2 = *(B_live + 2);
          __m512i b3 = *(B_live + 3);
          __m512i b4 = *(B_live + 4);
          __m512i b5 = *(B_live + 5);
          __m512i b6 = *(B_live + 6);
          __m512i b7 = *(B_live + 7);
          // Get a mask where a is negative.
          // Didn't seem to make a difference defining sign bits here vs at top
          neg_mask = _mm512_test_epi8_mask(a, _mm512_set1_epi8(-128));
          a_positive = _mm512_abs_epi8(a);
          // Negate by subtracting from zero with a mask.
          b0 = _mm512_mask_sub_epi8(b0, neg_mask, zeros, b0);
          b1 = _mm512_mask_sub_epi8(b1, neg_mask, zeros, b1);
          b2 = _mm512_mask_sub_epi8(b2, neg_mask, zeros, b2);
          b3 = _mm512_mask_sub_epi8(b3, neg_mask, zeros, b3);
          b4 = _mm512_mask_sub_epi8(b4, neg_mask, zeros, b4);
          b5 = _mm512_mask_sub_epi8(b5, neg_mask, zeros, b5);
          b6 = _mm512_mask_sub_epi8(b6, neg_mask, zeros, b6);
          b7 = _mm512_mask_sub_epi8(b7, neg_mask, zeros, b7);
          // The magic 8-bit multiply then horizontal sum into 16-bit.
          b0 = _mm512_maddubs_epi16(a_positive, b0);
          b1 = _mm512_maddubs_epi16(a_positive, b1);
          b2 = _mm512_maddubs_epi16(a_positive, b2);
          b3 = _mm512_maddubs_epi16(a_positive, b3);
          b4 = _mm512_maddubs_epi16(a_positive, b4);
          b5 = _mm512_maddubs_epi16(a_positive, b5);
          b6 = _mm512_maddubs_epi16(a_positive, b6);
          b7 = _mm512_maddubs_epi16(a_positive, b7);
          // Now we have 16-bit results that are the sum of two multiplies.
          // Choosing to approximate and do adds (saturating, so extreme
          // values may clip).
          // Perhaps every so often we could accumulate by upcasting.
          sum0 = _mm512_adds_epi16(sum0, b0);
          sum1 = _mm512_adds_epi16(sum1, b1);
          sum2 = _mm512_adds_epi16(sum2, b2);
          sum3 = _mm512_adds_epi16(sum3, b3);
          sum4 = _mm512_adds_epi16(sum4, b4);
          sum5 = _mm512_adds_epi16(sum5, b5);
          sum6 = _mm512_adds_epi16(sum6, b6);
          sum7 = _mm512_adds_epi16(sum7, b7);
          // Unique code ends: can we do an inline function?
        }
        // Upcast to 32-bit and horizontally add.
        Register ones = set1_epi16<Register>(1);
        sum0 = madd_epi16(sum0, ones);
        sum1 = madd_epi16(sum1, ones);
        sum2 = madd_epi16(sum2, ones);
        sum3 = madd_epi16(sum3, ones);
        sum4 = madd_epi16(sum4, ones);
        sum5 = madd_epi16(sum5, ones);
        sum6 = madd_epi16(sum6, ones);
        sum7 = madd_epi16(sum7, ones);
        Register pack0123 = Pack0123(sum0, sum1, sum2, sum3);
        Register pack4567 = Pack0123(sum4, sum5, sum6, sum7);
        auto total = PermuteSummer(pack0123, pack4567);
        // Hand the 8 int32 column results for this (row, column-block) to the callback.
        callback_impl.Run(total, callbacks::OutputBufferInfo(A_rowidx, B0_colidx, A_rows, B_cols));
      }
    }
  }

  INTGEMM_MULTIPLY8SHIFT(__m512i, INTGEMM_AVX512BW, CPUType::AVX2)
  INTGEMM_PREPAREBIASFOR8(__m512i, INTGEMM_AVX512BW, CPUType::AVX2)

  constexpr static const char *const kName = "8-bit AVX512BW";

  static const CPUType kUses = CPUType::AVX512BW;
};
} // namespace AVX512BW
} // namespace intgemm
#endif
|
segment.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS EEEEE GGGG M M EEEEE N N TTTTT %
% SS E G MM MM E NN N T %
% SSS EEE G GGG M M M EEE N N N T %
% SS E G G M M E N NN T %
% SSSSS EEEEE GGGG M M EEEEE N N T %
% %
% %
% MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means %
% %
% Software Design %
% Cristy %
% April 1993 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Segment segments an image by analyzing the histograms of the color
% components and identifying units that are homogeneous with the fuzzy
% c-means technique. The scale-space filter analyzes the histograms of
% the three color components of the image and identifies a set of
% classes. The extents of each class are used to coarsely segment the
% image with thresholding. The color associated with each class is
% determined by the mean color of all pixels within the extents of a
% particular class. Finally, any unclassified pixels are assigned to
% the closest class with the fuzzy c-means technique.
%
% The fuzzy c-Means algorithm can be summarized as follows:
%
% o Build a histogram, one for each color component of the image.
%
% o For each histogram, successively apply the scale-space filter and
% build an interval tree of zero crossings in the second derivative
% at each scale. Analyze this scale-space ''fingerprint'' to
% determine which peaks and valleys in the histogram are most
% predominant.
%
% o The fingerprint defines intervals on the axis of the histogram.
% Each interval contains either a minima or a maxima in the original
% signal. If each color component lies within the maxima interval,
% that pixel is considered ''classified'' and is assigned a unique
% class number.
%
% o Any pixel that fails to be classified in the above thresholding
% pass is classified using the fuzzy c-Means technique. It is
% assigned to one of the classes discovered in the histogram analysis
% phase.
%
% The fuzzy c-Means technique attempts to cluster a pixel by finding
% the local minima of the generalized within group sum of squared error
% objective function. A pixel is assigned to the closest class of
% which the fuzzy membership has a maximum value.
%
% Segment is strongly based on software written by Andy Gallo,
% University of Delaware.
%
% The following reference was used in creating this program:
%
% Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation
% Algorithm Based on the Thresholding and the Fuzzy c-Means
% Techniques", Pattern Recognition, Volume 23, Number 9, pages
% 935-952, 1990.
%
%
*/
#include "MagickCore/studio.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
/*
  Define declarations.
*/
/* Number of color components per pixel vector (red, green, blue). */
#define MaxDimension 3
/* Step between successive tau values in the scale-space search -- used by
   OptimalTau (defined later in this file); TODO confirm. */
#define DeltaTau 0.5f
#if defined(FastClassify)
#define WeightingExponent 2.0
#define SegmentPower(ratio) (ratio)
#else
#define WeightingExponent 2.5
/* NOTE(review): the trailing semicolon is part of this macro, so call sites
   like sum+=SegmentPower(ratio); expand to a double semicolon (harmless but
   surprising).  The macro also references the caller's local
   weighting_exponent variable (see Classify). */
#define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0)));
#endif
/* Starting tau for the scale-space filter -- presumably the largest scale
   tried; verify against OptimalTau. */
#define Tau 5.2f
/*
  Typedef declarations.
*/
/*
  Extent of one color channel for a class: the inclusive [left, right]
  interval of histogram bins, a scan index used while walking the extrema
  array (see DefineRegion), and the channel's center value, which Classify
  accumulates and then averages over the class's pixels.
*/
typedef struct _ExtentPacket
{
  double
    center;

  ssize_t
    index,
    left,
    right;
} ExtentPacket;

/*
  One class discovered by the histogram analysis: a node in a singly linked
  list holding the red/green/blue extents, the number of pixels assigned to
  the class, and the class id used as the colormap index.
*/
typedef struct _Cluster
{
  struct _Cluster
    *next;

  ExtentPacket
    red,
    green,
    blue;

  ssize_t
    count,
    id;
} Cluster;

/*
  Node of the interval tree of second-derivative zero crossings across
  scales: tau is the scale, [left, right] the histogram interval, and
  stability/mean_stability rate the interval's persistence -- exact
  semantics defined by the tree-building code later in this file.
*/
typedef struct _IntervalTree
{
  double
    tau;

  ssize_t
    left,
    right;

  double
    mean_stability,
    stability;

  struct _IntervalTree
    *sibling,
    *child;
} IntervalTree;

/*
  Zero crossings at one scale tau: the smoothed 256-bin histogram plus
  per-bin crossing markers (filled in by ZeroCrossHistogram, not in view).
*/
typedef struct _ZeroCrossing
{
  double
    tau,
    histogram[256];

  short
    crossings[256];
} ZeroCrossing;
/*
  Constant declarations.
*/
static const int
  Blue = 2,       /* channel indices into the extrema[]/histogram arrays */
  Green = 1,
  Red = 0,
  SafeMargin = 3, /* slack (in histogram bins) when matching a pixel
                     against a cluster's extents */
  TreeLength = 600; /* capacity of the interval-tree node list -- used by
                       the tree code later in this file; TODO confirm */
/*
  Method prototypes.
*/
/* Forward declarations for the static helpers defined later in this file. */
static double
  OptimalTau(const ssize_t *,const double,const double,const double,
    const double,short *);

static ssize_t
  DefineRegion(const short *,ExtentPacket *);

static void
  FreeNodes(IntervalTree *),
  InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *),
  ScaleSpace(const ssize_t *,const double,double *),
  ZeroCrossHistogram(double *,const double,short *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Classify() defines one or more classes. Each pixel is thresholded to
% determine which class it belongs to. If the class is not identified it is
% assigned to the closest class based on the fuzzy c-Means technique.
%
% The format of the Classify method is:
%
% MagickBooleanType Classify(Image *image,short **extrema,
% const double cluster_threshold,const double weighting_exponent,
% const MagickBooleanType verbose,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
% o cluster_threshold: This double represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o weighting_exponent: Specifies the membership weighting exponent.
%
% o verbose: A value greater than zero prints detailed information about
% the identified classes.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType Classify(Image *image,short **extrema,
const double cluster_threshold,const double weighting_exponent,
const MagickBooleanType verbose,ExceptionInfo *exception)
{
#define SegmentImageTag "Segment/Image"
#define ThrowClassifyException(severity,tag,label) \
{\
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) \
{ \
next_cluster=cluster->next; \
cluster=(Cluster *) RelinquishMagickMemory(cluster); \
} \
if (squares != (double *) NULL) \
{ \
squares-=255; \
free_squares=squares; \
free_squares=(double *) RelinquishMagickMemory(free_squares); \
} \
ThrowBinaryException(severity,tag,label); \
}
CacheView
*image_view;
Cluster
*cluster,
*head,
*last_cluster,
*next_cluster;
double
*free_squares;
ExtentPacket
blue,
green,
red;
MagickOffsetType
progress;
MagickStatusType
status;
ssize_t
i;
double
*squares;
size_t
number_clusters;
ssize_t
count,
y;
/*
Form clusters.
*/
cluster=(Cluster *) NULL;
head=(Cluster *) NULL;
squares=(double *) NULL;
(void) memset(&red,0,sizeof(red));
(void) memset(&green,0,sizeof(green));
(void) memset(&blue,0,sizeof(blue));
while (DefineRegion(extrema[Red],&red) != 0)
{
green.index=0;
while (DefineRegion(extrema[Green],&green) != 0)
{
blue.index=0;
while (DefineRegion(extrema[Blue],&blue) != 0)
{
/*
Allocate a new class.
*/
if (head != (Cluster *) NULL)
{
cluster->next=(Cluster *) AcquireQuantumMemory(1,
sizeof(*cluster->next));
cluster=cluster->next;
}
else
{
cluster=(Cluster *) AcquireQuantumMemory(1,sizeof(*cluster));
head=cluster;
}
if (cluster == (Cluster *) NULL)
ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Initialize a new class.
*/
(void) memset(cluster,0,sizeof(*cluster));
cluster->red=red;
cluster->green=green;
cluster->blue=blue;
}
}
}
if (head == (Cluster *) NULL)
{
/*
No classes were identified-- create one.
*/
cluster=(Cluster *) AcquireQuantumMemory(1,sizeof(*cluster));
if (cluster == (Cluster *) NULL)
ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Initialize a new class.
*/
(void) memset(cluster,0,sizeof(*cluster));
cluster->red=red;
cluster->green=green;
cluster->blue=blue;
head=cluster;
}
/*
Count the pixels for each cluster.
*/
status=MagickTrue;
count=0;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*p;
ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
PixelInfo
pixel;
pixel.red=(double) ScaleQuantumToChar(GetPixelRed(image,p));
pixel.green=(double) ScaleQuantumToChar(GetPixelGreen(image,p));
pixel.blue=(double) ScaleQuantumToChar(GetPixelBlue(image,p));
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
if ((pixel.red >= (double) (cluster->red.left-SafeMargin)) &&
(pixel.red <= (double) (cluster->red.right+SafeMargin)) &&
(pixel.green >= (double) (cluster->green.left-SafeMargin)) &&
(pixel.green <= (double) (cluster->green.right+SafeMargin)) &&
(pixel.blue >= (double) (cluster->blue.left-SafeMargin)) &&
(pixel.blue <= (double) (cluster->blue.right+SafeMargin)))
{
/*
Count this pixel.
*/
count++;
cluster->red.center+=pixel.red;
cluster->green.center+=pixel.green;
cluster->blue.center+=pixel.blue;
cluster->count++;
break;
}
p+=GetPixelChannels(image);
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SegmentImageTag,progress,2*image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
/*
Remove clusters that do not meet minimum cluster threshold.
*/
count=0;
last_cluster=head;
next_cluster=head;
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
{
next_cluster=cluster->next;
if ((cluster->count > 0) &&
(cluster->count >= (count*cluster_threshold/100.0)))
{
/*
Initialize cluster.
*/
cluster->id=count;
cluster->red.center/=cluster->count;
cluster->green.center/=cluster->count;
cluster->blue.center/=cluster->count;
count++;
last_cluster=cluster;
continue;
}
/*
Delete cluster.
*/
if (cluster == head)
head=next_cluster;
else
last_cluster->next=next_cluster;
cluster=(Cluster *) RelinquishMagickMemory(cluster);
}
number_clusters=(size_t) count;
if (verbose != MagickFalse)
{
/*
Print cluster statistics.
*/
(void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n");
(void) FormatLocaleFile(stdout,"===================\n\n");
(void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double)
cluster_threshold);
(void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double)
weighting_exponent);
(void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n",
(double) number_clusters);
/*
Print the total number of points per cluster.
*/
(void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n");
(void) FormatLocaleFile(stdout,"=============================\n\n");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
(void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double)
cluster->id,(double) cluster->count);
/*
Print the cluster extents.
*/
(void) FormatLocaleFile(stdout,
"\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension);
(void) FormatLocaleFile(stdout,"================");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
(void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
cluster->id);
(void) FormatLocaleFile(stdout,
"%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double)
cluster->red.left,(double) cluster->red.right,(double)
cluster->green.left,(double) cluster->green.right,(double)
cluster->blue.left,(double) cluster->blue.right);
}
/*
Print the cluster center values.
*/
(void) FormatLocaleFile(stdout,
"\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension);
(void) FormatLocaleFile(stdout,"=====================");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
(void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
cluster->id);
(void) FormatLocaleFile(stdout,"%g %g %g\n",(double)
cluster->red.center,(double) cluster->green.center,(double)
cluster->blue.center);
}
(void) FormatLocaleFile(stdout,"\n");
}
if (number_clusters > 256)
ThrowClassifyException(ImageError,"TooManyClusters",image->filename);
/*
Speed up distance calculations.
*/
squares=(double *) AcquireQuantumMemory(513UL,sizeof(*squares));
if (squares == (double *) NULL)
ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
squares+=255;
for (i=(-255); i <= 255; i++)
squares[i]=(double) i*(double) i;
/*
Allocate image colormap.
*/
if (AcquireImageColormap(image,number_clusters,exception) == MagickFalse)
ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
i=0;
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
image->colormap[i].red=(double) ScaleCharToQuantum((unsigned char)
(cluster->red.center+0.5));
image->colormap[i].green=(double) ScaleCharToQuantum((unsigned char)
(cluster->green.center+0.5));
image->colormap[i].blue=(double) ScaleCharToQuantum((unsigned char)
(cluster->blue.center+0.5));
i++;
}
/*
Do coarse-grain classification.
*/
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Cluster
*cluster;
const PixelInfo
*magick_restrict p;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
PixelInfo
pixel;
SetPixelIndex(image,(Quantum) 0,q);
pixel.red=(double) ScaleQuantumToChar(GetPixelRed(image,q));
pixel.green=(double) ScaleQuantumToChar(GetPixelGreen(image,q));
pixel.blue=(double) ScaleQuantumToChar(GetPixelBlue(image,q));
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
if ((pixel.red >= (double) (cluster->red.left-SafeMargin)) &&
(pixel.red <= (double) (cluster->red.right+SafeMargin)) &&
(pixel.green >= (double) (cluster->green.left-SafeMargin)) &&
(pixel.green <= (double) (cluster->green.right+SafeMargin)) &&
(pixel.blue >= (double) (cluster->blue.left-SafeMargin)) &&
(pixel.blue <= (double) (cluster->blue.right+SafeMargin)))
{
/*
Classify this pixel.
*/
SetPixelIndex(image,(Quantum) cluster->id,q);
break;
}
}
if (cluster == (Cluster *) NULL)
{
double
distance_squared,
local_minima,
numerator,
ratio,
sum;
ssize_t
j,
k;
/*
Compute fuzzy membership.
*/
local_minima=0.0;
for (j=0; j < (ssize_t) image->colors; j++)
{
sum=0.0;
p=image->colormap+j;
distance_squared=
squares[(ssize_t) (pixel.red-ScaleQuantumToChar(p->red))]+
squares[(ssize_t) (pixel.green-ScaleQuantumToChar(p->green))]+
squares[(ssize_t) (pixel.blue-ScaleQuantumToChar(p->blue))];
numerator=distance_squared;
for (k=0; k < (ssize_t) image->colors; k++)
{
p=image->colormap+k;
distance_squared=
squares[(ssize_t) (pixel.red-ScaleQuantumToChar(p->red))]+
squares[(ssize_t) (pixel.green-ScaleQuantumToChar(p->green))]+
squares[(ssize_t) (pixel.blue-ScaleQuantumToChar(p->blue))];
ratio=numerator/distance_squared;
sum+=SegmentPower(ratio);
}
if ((sum != 0.0) && ((1.0/sum) > local_minima))
{
/*
Classify this pixel.
*/
local_minima=1.0/sum;
SetPixelIndex(image,(Quantum) j,q);
}
}
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SegmentImageTag,progress,2*image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
status&=SyncImage(image,exception);
/*
Relinquish resources.
*/
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
{
next_cluster=cluster->next;
cluster=(Cluster *) RelinquishMagickMemory(cluster);
}
squares-=255;
free_squares=squares;
free_squares=(double *) RelinquishMagickMemory(free_squares);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C r o s s i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConsolidateCrossings() guarantees that an even number of zero crossings
% always lie between two crossings.
%
% The format of the ConsolidateCrossings method is:
%
% ConsolidateCrossings(ZeroCrossing *zero_crossing,
% const size_t number_crossings)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
static void ConsolidateCrossings(ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  ssize_t
    i,
    j,
    k,
    l;

  ssize_t
    center,
    correct,
    count,
    left,
    right;

  /*
    Consolidate zero crossings: walk the scale-space levels from coarsest
    (high tau) to finest, re-anchoring each crossing at level i against the
    crossings of the finer level i+1 so the fingerprints form continuous
    lines rather than loops.
  */
  for (i=(ssize_t) number_crossings-1; i >= 0; i--)
    for (j=0; j <= 255; j++)
    {
      if (zero_crossing[i].crossings[j] == 0)
        continue;
      /*
        Find the entry that is closest to j and still preserves the
        property that there are an even number of crossings between
        intervals.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      left=MagickMax(k,0);
      center=j;
      for (k=j+1; k < 255; k++)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      right=MagickMin(k,255);
      /*
        K is the zero crossing just left of j (at this same level i).
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i].crossings[k] != 0)
          break;
      if (k < 0)
        k=0;
      /*
        Check center for an even number of crossings between k and j.
        correct stays -1 until an acceptable anchor position is found.
      */
      correct=(-1);
      if (zero_crossing[i+1].crossings[j] != 0)
        {
          count=0;
          for (l=k+1; l < center; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (center != k))
            correct=center;
        }
      /*
        Check left for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < left; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (left != k))
            correct=left;
        }
      /*
        Check right for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < right; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (right != k))
            correct=right;
        }
      /*
        Move the crossing to the validated position, or drop it entirely
        when no candidate preserves the even-crossing invariant.
      */
      l=(ssize_t) zero_crossing[i].crossings[j];
      zero_crossing[i].crossings[j]=0;
      if (correct != -1)
        zero_crossing[i].crossings[correct]=(short) l;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e R e g i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineRegion() defines the left and right boundaries of a peak region.
%
% The format of the DefineRegion method is:
%
% ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
%
% A description of each parameter follows.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
%    o extents:  This pointer to an ExtentPacket represents the extents
%      of a particular peak or valley of a color component.
%
*/
static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
{
  /*
    Locate the next peak region in the extrema array, starting the scan at
    extents->index.  On success the region's bounds are stored in
    extents->left/right and MagickTrue is returned; MagickFalse means the
    band is exhausted.  (The boolean is returned through the historical
    ssize_t signature.)
  */
  extents->left=0;
  extents->center=0.0;
  extents->right=255;
  /*
    Advance to the first positive entry: the left side (maxima).
  */
  while ((extents->index <= 255) && (extrema[extents->index] <= 0))
    extents->index++;
  if (extents->index > 255)
    return(MagickFalse);  /* no left side - no region exists */
  extents->left=extents->index;
  /*
    Advance to the first negative entry: the right side (minima) lies just
    before it.
  */
  while ((extents->index <= 255) && (extrema[extents->index] >= 0))
    extents->index++;
  extents->right=extents->index-1;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e r i v a t i v e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DerivativeHistogram() determines the derivative of the histogram using
% central differencing.
%
% The format of the DerivativeHistogram method is:
%
% DerivativeHistogram(const double *histogram,
% double *derivative)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of doubles representing the number
% of pixels for each intensity of a particular color component.
%
% o derivative: This array of doubles is initialized by
% DerivativeHistogram to the derivative of the histogram using central
% differencing.
%
*/
static void DerivativeHistogram(const double *histogram,
  double *derivative)
{
  ssize_t
    i,
    n;

  /*
    Differentiate a 256-bin histogram.  Interior samples use central
    differencing; the two endpoints use one-sided second-order polynomial
    interpolation.
  */
  n=255;
  for (i=1; i < n; i++)
    derivative[i]=(histogram[i+1]-histogram[i-1])/2.0;
  /*
    Endpoints via second-order polynomial interpolation.
  */
  derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]);
  derivative[n]=(0.5*histogram[n-2]-2.0*histogram[n-1]+1.5*histogram[n]);
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e D y n a m i c T h r e s h o l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDynamicThreshold() returns the dynamic threshold for an image.
%
% The format of the GetImageDynamicThreshold method is:
%
% MagickBooleanType GetImageDynamicThreshold(const Image *image,
% const double cluster_threshold,const double smooth_threshold,
% PixelInfo *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cluster_threshold: This double represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
% o pixel: return the dynamic threshold here.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image,
  const double cluster_threshold,const double smooth_threshold,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  Cluster
    *background,
    *cluster,
    *object,
    *head,
    *last_cluster,
    *next_cluster;

  ExtentPacket
    blue,
    green,
    red;

  MagickBooleanType
    proceed;

  double
    threshold;

  const Quantum
    *p;

  ssize_t
    i,
    x;

  short
    *extrema[MaxDimension];

  ssize_t
    count,
    *histogram[MaxDimension],
    y;

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  GetPixelInfo(image,pixel);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    /*
      NOTE(review): sizeof(**histogram) over-sizes the short extrema buffer;
      harmless, but sizeof(**extrema) was probably intended -- confirm.
    */
    extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /*
          Free the channels allocated before the failure, then bail.
        */
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
        return(MagickFalse);
      }
  }
  /*
    Initialize histogram, then derive per-channel extrema at the optimal
    tau; a zero smoothing threshold falls back to 1.0f.
  */
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]);
  /*
    Form clusters: one candidate cluster per combination of red, green and
    blue peak regions.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) memset(&red,0,sizeof(red));
  (void) memset(&green,0,sizeof(green));
  (void) memset(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireQuantumMemory(1,
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireQuantumMemory(1,sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          {
            /*
              NOTE(review): this error path leaks the histogram/extrema
              buffers and any clusters already linked -- confirm and fix.
            */
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              image->filename);
            return(MagickFalse);
          }
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified -- create one covering the defaults.
      */
      cluster=(Cluster *) AcquireQuantumMemory(1,sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
          return(MagickFalse);
        }
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster: a pixel joins the first cluster whose
    red/green/blue intervals (padded by SafeMargin) contain it, and its
    channel values are accumulated into the cluster centers.
  */
  count=0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelInfo
        pixel;

      pixel.red=(double) ScaleQuantumToChar(GetPixelRed(image,p));
      pixel.green=(double) ScaleQuantumToChar(GetPixelGreen(image,p));
      pixel.blue=(double) ScaleQuantumToChar(GetPixelBlue(image,p));
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if ((pixel.red >= (double) (cluster->red.left-SafeMargin)) &&
            (pixel.red <= (double) (cluster->red.right+SafeMargin)) &&
            (pixel.green >= (double) (cluster->green.left-SafeMargin)) &&
            (pixel.green <= (double) (cluster->green.right+SafeMargin)) &&
            (pixel.blue >= (double) (cluster->blue.left-SafeMargin)) &&
            (pixel.blue <= (double) (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel.
            */
            count++;
            cluster->red.center+=pixel.red;
            cluster->green.center+=pixel.green;
            cluster->blue.center+=pixel.blue;
            cluster->count++;
            break;
          }
      p+=GetPixelChannels(image);
    }
    proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y,
      2*image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Remove clusters that do not meet minimum cluster threshold.
    NOTE(review): count is reset here and becomes the number of clusters
    kept so far, so the acceptance bar rises as clusters accumulate --
    verify this matches the intended cluster_threshold semantics.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster: assign its id and convert the accumulated
          channel sums into mean center values.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster (unlink, then free).
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  /*
    Pick the smallest surviving cluster as the object and the largest as
    the background; with a single cluster both are the head.
  */
  object=head;
  background=head;
  if (count > 1)
    {
      object=head->next;
      for (cluster=object; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count < object->count)
          object=cluster;
        cluster=cluster->next;
      }
      background=head->next;
      for (cluster=background; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count > background->count)
          background=cluster;
        cluster=cluster->next;
      }
    }
  /*
    The dynamic threshold is the per-channel midpoint between the object
    and background cluster centers, scaled back to Quantum range.
  */
  if (background != (Cluster *) NULL)
    {
      threshold=(background->red.center+object->red.center)/2.0;
      pixel->red=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->green.center+object->green.center)/2.0;
      pixel->green=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->blue.center+object->blue.center)/2.0;
      pixel->blue=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
    }
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeHistogram() computes the histogram for an image.
%
% The format of the InitializeHistogram method is:
%
% InitializeHistogram(const Image *image,ssize_t **histogram)
%
% A description of each parameter follows.
%
% o image: Specifies a pointer to an Image structure; returned from
% ReadImage.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
*/
static void InitializeHistogram(const Image *image,ssize_t **histogram,
  ExceptionInfo *exception)
{
  const Quantum
    *q;

  ssize_t
    bin,
    column;

  ssize_t
    row;

  /*
    Zero each per-channel intensity bin before accumulating.
  */
  for (bin=0; bin <= 255; bin++)
  {
    histogram[Red][bin]=0;
    histogram[Green][bin]=0;
    histogram[Blue][bin]=0;
  }
  /*
    Accumulate one count per pixel in each channel's 8-bit bin.
  */
  for (row=0; row < (ssize_t) image->rows; row++)
  {
    q=GetVirtualPixels(image,0,row,image->columns,1,exception);
    if (q == (const Quantum *) NULL)
      break;
    for (column=0; column < (ssize_t) image->columns; column++)
    {
      histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(image,q))]++;
      histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q))]++;
      histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q))]++;
      q+=GetPixelChannels(image);
    }
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e I n t e r v a l T r e e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeIntervalTree() initializes an interval tree from the lists of
% zero crossings.
%
% The format of the InitializeIntervalTree method is:
%
%      IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
%        const size_t number_crossings)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
static void InitializeList(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  /*
    Append every childless (leaf) node of the subtree rooted at node to
    list, visiting a node before its siblings and siblings before children.
  */
  if (node == (IntervalTree *) NULL)
    return;
  if (node->child == (IntervalTree *) NULL)
    {
      list[*number_nodes]=node;
      (*number_nodes)++;
    }
  InitializeList(list,number_nodes,node->sibling);
  InitializeList(list,number_nodes,node->child);
}
static void MeanStability(IntervalTree *node)
{
  /*
    For every node in the subtree, record the arithmetic mean of its
    immediate children's stability values (0.0 for a leaf).
  */
  if (node == (IntervalTree *) NULL)
    return;
  node->mean_stability=0.0;
  if (node->child != (IntervalTree *) NULL)
    {
      IntervalTree
        *child;

      double
        total;

      ssize_t
        children;

      total=0.0;
      children=0;
      for (child=node->child; child != (IntervalTree *) NULL; )
      {
        total+=child->stability;
        children++;
        child=child->sibling;
      }
      node->mean_stability=total/(double) children;
    }
  MeanStability(node->sibling);
  MeanStability(node->child);
}
static void Stability(IntervalTree *node)
{
  /*
    A leaf is perfectly stable (0.0); otherwise stability is the tau gap
    between the node and its first child.
  */
  if (node == (IntervalTree *) NULL)
    return;
  node->stability=(node->child == (IntervalTree *) NULL) ? 0.0 :
    node->tau-(node->child)->tau;
  Stability(node->sibling);
  Stability(node->child);
}
static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  IntervalTree
    *head,
    **list,
    *node,
    *root;

  ssize_t
    i;

  ssize_t
    j,
    k,
    left,
    number_nodes;

  /*
    Allocate interval tree work list (scratch array of leaf pointers).
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return((IntervalTree *) NULL);
  /*
    The root is the entire histogram.
  */
  root=(IntervalTree *) AcquireCriticalMemory(sizeof(*root));
  root->child=(IntervalTree *) NULL;
  root->sibling=(IntervalTree *) NULL;
  root->tau=0.0;
  root->left=0;
  root->right=255;
  root->mean_stability=0.0;
  root->stability=0.0;
  (void) memset(list,0,TreeLength*sizeof(*list));
  /* i starts at -1 so the first pass splits the root against level 0. */
  for (i=(-1); i < (ssize_t) number_crossings; i++)
  {
    /*
      Initialize list with all nodes with no children.
    */
    number_nodes=0;
    InitializeList(list,&number_nodes,root);
    /*
      Split list: subdivide each current leaf at the zero-crossing
      positions of the next (finer) scale-space level.
    */
    for (j=0; j < number_nodes; j++)
    {
      head=list[j];
      left=head->left;
      node=head;
      for (k=head->left+1; k < head->right; k++)
      {
        if (zero_crossing[i+1].crossings[k] != 0)
          {
            /*
              The first split becomes the child; later splits chain on as
              siblings of that child.
            */
            if (node == head)
              {
                node->child=(IntervalTree *) AcquireQuantumMemory(1,
                  sizeof(*node->child));
                node=node->child;
              }
            else
              {
                node->sibling=(IntervalTree *) AcquireQuantumMemory(1,
                  sizeof(*node->sibling));
                node=node->sibling;
              }
            if (node == (IntervalTree *) NULL)
              {
                list=(IntervalTree **) RelinquishMagickMemory(list);
                FreeNodes(root);
                return((IntervalTree *) NULL);
              }
            node->tau=zero_crossing[i+1].tau;
            node->child=(IntervalTree *) NULL;
            node->sibling=(IntervalTree *) NULL;
            node->left=left;
            node->right=k;
            left=k;
          }
      }
      /*
        Close the final sub-interval (up to head->right) when at least one
        split occurred.
      */
      if (left != head->left)
        {
          node->sibling=(IntervalTree *) AcquireQuantumMemory(1,
            sizeof(*node->sibling));
          node=node->sibling;
          if (node == (IntervalTree *) NULL)
            {
              list=(IntervalTree **) RelinquishMagickMemory(list);
              FreeNodes(root);
              return((IntervalTree *) NULL);
            }
          node->tau=zero_crossing[i+1].tau;
          node->child=(IntervalTree *) NULL;
          node->sibling=(IntervalTree *) NULL;
          node->left=left;
          node->right=head->right;
        }
    }
  }
  /*
    Determine the stability: difference between a nodes tau and its child.
  */
  Stability(root->child);
  MeanStability(root->child);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(root);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p t i m a l T a u %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OptimalTau() finds the optimal tau for each band of the histogram.
%
% The format of the OptimalTau method is:
%
% double OptimalTau(const ssize_t *histogram,const double max_tau,
% const double min_tau,const double delta_tau,
% const double smooth_threshold,short *extrema)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
*/
static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  /*
    Collect the "active" nodes: a node at least as stable as the mean
    stability of its children is recorded and its children are pruned;
    otherwise the search descends into the children.
  */
  if (node == (IntervalTree *) NULL)
    return;
  if (node->stability >= node->mean_stability)
    {
      list[(*number_nodes)++]=node;
      ActiveNodes(list,number_nodes,node->sibling);
      return;
    }
  ActiveNodes(list,number_nodes,node->sibling);
  ActiveNodes(list,number_nodes,node->child);
}
static void FreeNodes(IntervalTree *node)
{
  /*
    Release an interval (sub)tree: siblings and children first, then the
    node itself.
  */
  if (node == (IntervalTree *) NULL)
    return;
  FreeNodes(node->sibling);
  FreeNodes(node->child);
  (void) RelinquishMagickMemory(node);
}
static double OptimalTau(const ssize_t *histogram,const double max_tau,
  const double min_tau,const double delta_tau,const double smooth_threshold,
  short *extrema)
{
  double
    average_tau,
    *derivative,
    *second_derivative,
    tau,
    value;

  IntervalTree
    **list,
    *node,
    *root;

  MagickBooleanType
    peak;

  ssize_t
    i,
    x;

  size_t
    count,
    number_crossings;

  ssize_t
    index,
    j,
    k,
    number_nodes;

  ZeroCrossing
    *zero_crossing;

  /*
    Allocate interval tree work list.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return(0.0);
  /*
    Allocate zero crossing list: one entry per tau step plus one for the
    unsmoothed histogram.
  */
  count=(size_t) ((max_tau-min_tau)/delta_tau)+2;
  zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count,
    sizeof(*zero_crossing));
  if (zero_crossing == (ZeroCrossing *) NULL)
    {
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  for (i=0; i < (ssize_t) count; i++)
    zero_crossing[i].tau=(-1.0);
  /*
    Initialize zero crossing list: smooth the histogram at each tau and
    record where its second derivative changes sign.
  */
  derivative=(double *) AcquireCriticalMemory(256*sizeof(*derivative));
  second_derivative=(double *) AcquireCriticalMemory(256*
    sizeof(*second_derivative));
  i=0;
  for (tau=max_tau; tau >= min_tau; tau-=delta_tau)
  {
    zero_crossing[i].tau=tau;
    ScaleSpace(histogram,tau,zero_crossing[i].histogram);
    DerivativeHistogram(zero_crossing[i].histogram,derivative);
    DerivativeHistogram(derivative,second_derivative);
    ZeroCrossHistogram(second_derivative,smooth_threshold,
      zero_crossing[i].crossings);
    i++;
  }
  /*
    Add an entry for the original histogram.
  */
  zero_crossing[i].tau=0.0;
  for (j=0; j <= 255; j++)
    zero_crossing[i].histogram[j]=(double) histogram[j];
  DerivativeHistogram(zero_crossing[i].histogram,derivative);
  DerivativeHistogram(derivative,second_derivative);
  ZeroCrossHistogram(second_derivative,smooth_threshold,
    zero_crossing[i].crossings);
  number_crossings=(size_t) i;
  derivative=(double *) RelinquishMagickMemory(derivative);
  second_derivative=(double *) RelinquishMagickMemory(second_derivative);
  /*
    Ensure the scale-space fingerprints form lines in scale-space, not loops.
  */
  ConsolidateCrossings(zero_crossing,number_crossings);
  /*
    Force endpoints to be included in the interval: bins 0 and 255 get the
    opposite sign of the nearest interior crossing.
  */
  for (i=0; i <= (ssize_t) number_crossings; i++)
  {
    for (j=0; j < 255; j++)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]);
    for (j=255; j > 0; j--)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]);
  }
  /*
    Initialize interval tree.
  */
  root=InitializeIntervalTree(zero_crossing,number_crossings);
  if (root == (IntervalTree *) NULL)
    {
      zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  /*
    Find active nodes: Stability is greater (or equal) to the mean stability of
    its children.
  */
  number_nodes=0;
  ActiveNodes(list,&number_nodes,root->child);
  /*
    Initialize extrema.
  */
  for (i=0; i <= 255; i++)
    extrema[i]=0;
  for (i=0; i < number_nodes; i++)
  {
    /*
      Find this tau in zero crossings list (k is the matching level).
    */
    k=0;
    node=list[i];
    for (j=0; j <= (ssize_t) number_crossings; j++)
      if (zero_crossing[j].tau == node->tau)
        k=j;
    /*
      Find the value of the peak: a region whose right boundary is a -1
      crossing is treated as a peak (take the maximum); otherwise take the
      minimum (valley).
    */
    peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue :
      MagickFalse;
    index=node->left;
    value=zero_crossing[k].histogram[index];
    for (x=node->left; x <= node->right; x++)
    {
      if (peak != MagickFalse)
        {
          if (zero_crossing[k].histogram[x] > value)
            {
              value=zero_crossing[k].histogram[x];
              index=x;
            }
        }
      else
        if (zero_crossing[k].histogram[x] < value)
          {
            value=zero_crossing[k].histogram[x];
            index=x;
          }
    }
    /*
      Stamp the region with the (signed) extremum position; position 0 is
      aliased to 256 so an unset entry (0) stays distinguishable.
    */
    for (x=node->left; x <= node->right; x++)
    {
      if (index == 0)
        index=256;
      if (peak != MagickFalse)
        extrema[x]=(short) index;
      else
        extrema[x]=(short) (-index);
    }
  }
  /*
    Determine the average tau over the active nodes.
  */
  average_tau=0.0;
  for (i=0; i < number_nodes; i++)
    average_tau+=list[i]->tau;
  average_tau*=PerceptibleReciprocal((double) number_nodes);
  /*
    Relinquish resources.
  */
  FreeNodes(root);
  zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(average_tau);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S c a l e S p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleSpace() performs a scale-space filter on the 1D histogram.
%
% The format of the ScaleSpace method is:
%
% ScaleSpace(const ssize_t *histogram,const double tau,
% double *scale_histogram)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of doubles representing the number
% of pixels for each intensity of a particular color component.
%
*/
static void ScaleSpace(const ssize_t *histogram,const double tau,
  double *scale_histogram)
{
  double
    alpha,
    beta,
    *kernel,
    total;

  ssize_t
    delta,
    x;

  /*
    Convolve the 256-bin histogram with a Gaussian of scale tau.  Kernel
    entries beyond the point where the Gaussian drops under MagickEpsilon
    remain zero.
  */
  kernel=(double *) AcquireQuantumMemory(256,sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAllocateGammaMap");
  alpha=PerceptibleReciprocal(tau*sqrt(2.0*MagickPI));
  beta=(-1.0*PerceptibleReciprocal(2.0*tau*tau));
  for (x=0; x <= 255; x++)
    kernel[x]=0.0;
  for (x=0; x <= 255; x++)
  {
    kernel[x]=exp((double) beta*x*x);
    if (kernel[x] < MagickEpsilon)
      break;
  }
  /*
    Each output bin is the normalized, distance-weighted sum of all input
    bins (the kernel is symmetric, hence the absolute offset).
  */
  for (x=0; x <= 255; x++)
  {
    total=0.0;
    for (delta=0; delta <= 255; delta++)
      total+=(double) histogram[delta]*kernel[MagickAbsoluteValue(x-delta)];
    scale_histogram[x]=alpha*total;
  }
  kernel=(double *) RelinquishMagickMemory(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e g m e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SegmentImage() segments an image by analyzing the histograms of the color
% components and identifying units that are homogeneous with the fuzzy
% C-means technique.
%
% The format of the SegmentImage method is:
%
% MagickBooleanType SegmentImage(Image *image,
% const ColorspaceType colorspace,const MagickBooleanType verbose,
% const double cluster_threshold,const double smooth_threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o colorspace: Indicate the colorspace.
%
% o verbose: Set to MagickTrue to print detailed information about the
% identified classes.
%
% o cluster_threshold: This represents the minimum number of pixels
% contained in a hexahedra before it can be considered valid (expressed
% as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SegmentImage(Image *image,
  const ColorspaceType colorspace,const MagickBooleanType verbose,
  const double cluster_threshold,const double smooth_threshold,
  ExceptionInfo *exception)
{
  ColorspaceType
    previous_colorspace;

  double
    tau_threshold;

  MagickBooleanType
    status;

  ssize_t
    i;

  short
    *extrema[MaxDimension];

  ssize_t
    *histogram[MaxDimension];

  /*
    Allocate a 256-bin histogram and an extrema buffer per color channel.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /*
          Unwind the buffers acquired so far before bailing out.
        */
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename)
      }
  }
  /*
    Histogram the image in the requested colorspace, then locate each
    channel's extrema at the optimal tau.  A zero smoothing threshold
    falls back to 1.0.
  */
  previous_colorspace=image->colorspace;
  (void) TransformImageColorspace(image,colorspace,exception);
  InitializeHistogram(image,histogram,exception);
  tau_threshold=(smooth_threshold == 0.0) ? 1.0 : smooth_threshold;
  (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau,tau_threshold,
    extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau,tau_threshold,
    extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau,tau_threshold,
    extrema[Blue]);
  /*
    Classify using the fuzzy c-Means technique, then restore the caller's
    colorspace.
  */
  status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose,
    exception);
  (void) TransformImageColorspace(image,previous_colorspace,exception);
  /*
    Relinquish resources.
  */
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Z e r o C r o s s H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroCrossHistogram() finds the zero crossings in a histogram and marks
% directions as: 1 is negative to positive; 0 is zero crossing; and -1
% is positive to negative.
%
% The format of the ZeroCrossHistogram method is:
%
% ZeroCrossHistogram(double *second_derivative,
% const double smooth_threshold,short *crossings)
%
% A description of each parameter follows.
%
% o second_derivative: Specifies an array of doubles representing the
% second derivative of the histogram of a particular color component.
%
%    o crossings:  This array of integers is initialized with
%      -1, 0, or 1 representing the zero crossings of the second derivative
%      of the histogram of a particular color component.
%
*/
static void ZeroCrossHistogram(double *second_derivative,
  const double smooth_threshold,short *crossings)
{
  ssize_t
    i;

  ssize_t
    parity;

  /*
    Merge low numbers to zero to help prevent noise (second_derivative is
    clamped in place).
  */
  for (i=0; i <= 255; i++)
    if ((second_derivative[i] < smooth_threshold) &&
        (second_derivative[i] >= -smooth_threshold))
      second_derivative[i]=0.0;
  /*
    Mark zero crossings.  parity tracks the sign of the last non-zero
    sample (+1 positive, -1 negative), so each sign change is marked
    exactly once at the first sample of the new sign: -1 for a positive-
    to-negative crossing, 1 for negative-to-positive.  (The previous code
    assigned parity with the opposite signs, which marked repeated same-
    sign samples and missed real transitions entirely.)
  */
  parity=0;
  for (i=0; i <= 255; i++)
  {
    crossings[i]=0;
    if (second_derivative[i] < 0.0)
      {
        if (parity > 0)
          crossings[i]=(-1);
        parity=(-1);
      }
    else
      if (second_derivative[i] > 0.0)
        {
          if (parity < 0)
            crossings[i]=1;
          parity=1;
        }
  }
}
|
utils.c | #include "utils.h"
// tune number of threads and block size considering processor
// Tune the number of worker threads and the cache-blocking size for the
// current processor.  The result is cached in a hidden tuning file so the
// expensive benchmark runs only once after build; later invocations read
// the file back.
//   threads                   in/out: 0 means "auto-detect" (online CPUs)
//   block_size                out: block size selected for the active profile
//   vect_db_sequences_lengths in: per-sequence lengths of the vectorized DB
//   vect_db_sequences_blocks  out: recomputed block count per sequence
//   vect_db_sequences_count   in: number of vectorized DB sequences
void tuning (int * threads, int * block_size, unsigned short int * vect_db_sequences_lengths,
	unsigned short int * vect_db_sequences_blocks, unsigned long int vect_db_sequences_count){

	int open_gap=OPEN_GAP, extend_gap=EXTEND_GAP, b1, b2, qp_block_size=0, sp_block_size=0;
	double workTime, totalTime=0, bestTime=0;
	unsigned long int i;

	// pick the tuning-cache filename that matches the build's ISA flags
	char tuning_filename[30]=".swimm2_sse41_tuning";
#if AVX512F && KNL
	strcpy(tuning_filename,".swimm2_knl_avx512f_tuning");
#elif AVX512F
	strcpy(tuning_filename,".swimm2_avx512f_tuning");
#elif AVX2 && KNL
	strcpy(tuning_filename,".swimm2_knl_avx2_tuning");
#elif AVX2
	strcpy(tuning_filename,".swimm2_avx2_tuning");
#elif AVX512BW
	strcpy(tuning_filename,".swimm2_avx512bw_tuning");
#endif

	// tune number of threads (only if the user did not set it)
	if (*threads == 0)
		*threads = sysconf (_SC_NPROCESSORS_ONLN);

	// open tuning file
	FILE * tuning_file = fopen(tuning_filename,"r");

	if (tuning_file == NULL) {

		printf("\nAuto-tuning. This step may take some minutes but is executed only once after build... ");
		fflush(stdout);

		char * tun_vect_db_sequences, * tun_query_sequence;
		unsigned short int * tun_vect_db_sequence_lengths, * tun_vect_db_sequence_blocks, * tun_query_sequence_lengths;
		unsigned long int tun_vect_db_sequences_count, * tun_vect_db_sequence_disp;
		unsigned int * tun_query_sequence_disps;
		int * scores;

		// load synthetic query sequence
		load_tuning_query_sequence (&tun_query_sequence, &tun_query_sequence_lengths, &tun_query_sequence_disps);

		// load synthetic database sequence
		assemble_tuning_chunk_db (&tun_vect_db_sequences, &tun_vect_db_sequence_lengths, &tun_vect_db_sequence_blocks,
			&tun_vect_db_sequence_disp, &tun_vect_db_sequences_count) ;

		// alloc memory for scores buffer
		scores = _mm_malloc (TUNING_QUERY_COUNT*tun_vect_db_sequences_count*sizeof(int)*VECTOR_LENGTH, MEMALIGN);
		if (scores == NULL) {
			printf("Memory allocation failure.\n");
			exit(1);
		}

		// configure block size for data locality (Score Profile)
		for (b1=TUNING_MIN_BLOCK_SIZE; b1<=TUNING_MAX_BLOCK_SIZE ; b1+=TUNING_BLOCK_SIZE_STEP){

			// adapt block size: round down to a multiple of DB_SEQ_LEN_MULT
			b2 = (b1 / DB_SEQ_LEN_MULT) * DB_SEQ_LEN_MULT;

			// re-calculate number of blocks
			for (i=0; i< tun_vect_db_sequences_count; i++ )
				tun_vect_db_sequence_blocks[i] = ceil( (double) tun_vect_db_sequence_lengths[i] / (double) b2);

			// set accumulator
			totalTime=0;

			// repeat test
			for (i=0; i< TUNING_REPEAT_TIMES; i++ ) {

				workTime = dwalltime();

#if AVX512F
				// search using AVX512F instructions and Adaptive Profile technique
				search_avx512f_ap (tun_query_sequence, tun_query_sequence_lengths, TUNING_QUERY_COUNT, tun_query_sequence_disps, SCORE_PROFILE, query_length_threshold,
					tun_vect_db_sequences, tun_vect_db_sequence_lengths, tun_vect_db_sequence_blocks, tun_vect_db_sequences_count, tun_vect_db_sequence_disp, (__m512i*)intSubmat,
					open_gap, extend_gap, *threads, b2, (__m512i*)scores, &workTime);
#elif AVX512BW
				// search using AVX512BW instructions and Score Profile technique
				search_avx512bw_sp (tun_query_sequence, tun_query_sequence_lengths, TUNING_QUERY_COUNT, tun_query_sequence_disps, tun_vect_db_sequences,
					tun_vect_db_sequence_lengths, tun_vect_db_sequence_blocks, tun_vect_db_sequences_count, tun_vect_db_sequence_disp, charSubmat, open_gap, extend_gap,
					*threads, b2, scores, &workTime);
#elif SSE41
				// search using SSE4.1 instructions and Score Profile technique
				search_sse41_sp (tun_query_sequence, tun_query_sequence_lengths, TUNING_QUERY_COUNT, tun_query_sequence_disps, tun_vect_db_sequences,
					tun_vect_db_sequence_lengths, tun_vect_db_sequence_blocks, tun_vect_db_sequences_count, tun_vect_db_sequence_disp, charSubmat, open_gap, extend_gap,
					*threads, b2, scores, &workTime);
#else
				// database search using AVX2 instructions and Score Profile technique
				search_avx2_sp (tun_query_sequence, tun_query_sequence_lengths, TUNING_QUERY_COUNT, tun_query_sequence_disps, tun_vect_db_sequences,
					tun_vect_db_sequence_lengths, tun_vect_db_sequence_blocks, tun_vect_db_sequences_count, tun_vect_db_sequence_disp, charSubmat, open_gap, extend_gap,
					*threads, b2, scores, &workTime);
#endif

				totalTime += workTime;
			}

			// keep the block size with the smallest accumulated runtime
			if (bestTime == 0){
				bestTime = totalTime;
				sp_block_size = b2;
			} else {
				if (totalTime < bestTime) {
					bestTime = totalTime;
					sp_block_size = b2;
				}
			}
		}

#if AVX512BW || AVX512F
		bestTime=0;

		// configure block size for data locality (Query profile)
		for (b1=TUNING_MIN_BLOCK_SIZE; b1<=TUNING_MAX_BLOCK_SIZE ; b1+=TUNING_BLOCK_SIZE_STEP){

			// adapt block size
			b2 = (b1 / DB_SEQ_LEN_MULT) * DB_SEQ_LEN_MULT;

			// re-calculate number of blocks
			for (i=0; i< tun_vect_db_sequences_count; i++ )
				tun_vect_db_sequence_blocks[i] = ceil( (double) tun_vect_db_sequence_lengths[i] / (double) b2);

			// set accumulator
			totalTime=0;

			// repeat test
			for (i=0; i< TUNING_REPEAT_TIMES; i++ ) {

				workTime = dwalltime();

#if AVX512F
				// search using AVX512F instructions and Adaptive Profile technique
				search_avx512f_ap (tun_query_sequence, tun_query_sequence_lengths, TUNING_QUERY_COUNT, tun_query_sequence_disps, QUERY_PROFILE, query_length_threshold,
					tun_vect_db_sequences, tun_vect_db_sequence_lengths, tun_vect_db_sequence_blocks, tun_vect_db_sequences_count, tun_vect_db_sequence_disp, (__m512i*)intSubmat,
					open_gap, extend_gap, *threads, b2, (__m512i*)scores, &workTime);
#elif AVX512BW
				// search using AVX512BW instructions and Query Profile technique
				search_avx512bw_qp (tun_query_sequence, tun_query_sequence_lengths, TUNING_QUERY_COUNT, tun_query_sequence_disps, tun_vect_db_sequences,
					tun_vect_db_sequence_lengths, tun_vect_db_sequence_blocks, tun_vect_db_sequences_count, tun_vect_db_sequence_disp, charSubmat, open_gap, extend_gap,
					*threads, b2, scores, &workTime);
#endif

				totalTime += workTime;
			}

			if (bestTime == 0){
				bestTime = totalTime;
				qp_block_size = b2;
			} else {
				if (totalTime < bestTime) {
					bestTime = totalTime;
					qp_block_size = b2;
				}
			}
		}
#endif

		// persist the tuned block sizes; if the file cannot be created just
		// warn (auto-tuning will run again on the next execution instead of
		// crashing on a NULL FILE pointer)
		tuning_file = fopen(tuning_filename,"w");
		if (tuning_file != NULL) {
			fprintf(tuning_file,"%d %d",sp_block_size,qp_block_size);
			fclose(tuning_file);
		} else {
			printf("Warning: could not create tuning file '%s'.\n", tuning_filename);
		}
		printf("Done.\n");

		*block_size = (profile == QUERY_PROFILE ? qp_block_size : sp_block_size);

		// re-calculate number of blocks
		for (i=0; i< vect_db_sequences_count; i++ )
			vect_db_sequences_blocks[i] = ceil( (double) vect_db_sequences_lengths[i] / (double) (*block_size));

		_mm_free(tun_query_sequence);
		_mm_free(tun_query_sequence_lengths);
		_mm_free(tun_query_sequence_disps);
		_mm_free(tun_vect_db_sequences);
		_mm_free(tun_vect_db_sequence_lengths);
		_mm_free(tun_vect_db_sequence_blocks);
		_mm_free(tun_vect_db_sequence_disp);
		_mm_free(scores);

	} else {
		printf("\nAuto-tuning... Skipped.\n");

		// retrieve block size, close file; a corrupt or truncated tuning
		// file previously left the block size at 0 (division by zero below)
		if (fscanf(tuning_file,"%d %d",&sp_block_size,&qp_block_size) != 2) {
			printf("Warning: could not parse tuning file '%s'; using minimum block size. Delete the file to re-run auto-tuning.\n", tuning_filename);
			sp_block_size = (TUNING_MIN_BLOCK_SIZE / DB_SEQ_LEN_MULT) * DB_SEQ_LEN_MULT;
			qp_block_size = sp_block_size;
		}
		fclose(tuning_file);

		*block_size = (profile == SCORE_PROFILE ? sp_block_size : qp_block_size);

		// re-calculate number of blocks
		for (i=0; i< vect_db_sequences_count; i++ )
			vect_db_sequences_blocks[i] = ceil( (double) vect_db_sequences_lengths[i] / (double) (*block_size));
	}
}
// Merge two halves of scores[0..size) — each already sorted in descending
// order — into a single descending sequence, moving the matching title
// pointers along with the scores.  The first half is [0, size/2), the
// second [size/2, size).
void merge_scores(int * scores, char ** titles, unsigned long int size) {
	unsigned long int i1 = 0;
	unsigned long int i2 = size / 2;
	unsigned long int it = 0;

	// allocate memory for temporary buffers (previously unchecked: a failed
	// malloc led to a NULL dereference below)
	char ** tmp2 = (char **) malloc(size*sizeof(char *));
	int * tmp3 = (int *) malloc (size*sizeof(int));
	if (tmp2 == NULL || tmp3 == NULL) {
		printf("Memory allocation failure.\n");
		exit(1);
	}

	// standard two-way merge: take the larger head; ties take the second half
	while(i1 < size/2 && i2 < size) {
		if (scores[i1] > scores[i2]) {
			tmp2[it] = titles[i1];
			tmp3[it] = scores[i1];
			i1++;
		}
		else {
			tmp2[it] = titles[i2];
			tmp3[it] = scores[i2];
			i2 ++;
		}
		it ++;
	}
	// drain whichever half still has elements
	while (i1 < size/2) {
		tmp2[it] = titles[i1];
		tmp3[it] = scores[i1];
		i1++;
		it++;
	}
	while (i2 < size) {
		tmp2[it] = titles[i2];
		tmp3[it] = scores[i2];
		i2++;
		it++;
	}
	// copy the merged result back in place
	memcpy(titles, tmp2, size*sizeof(char *));
	memcpy(scores, tmp3, size*sizeof(int));
	free(tmp2);
	free(tmp3);
}
// Recursively sort scores[0..size) in descending order, keeping titles[]
// aligned with scores[].  Sizes 0 and 1 are already sorted; size 2 is a
// direct compare-and-swap; larger inputs split, recurse, and merge.
void mergesort_scores_serial(int * scores, char ** titles, unsigned long int size) {
	if (size < 2)
		return;

	if (size == 2) {
		// put the larger score first (ties also swap, matching merge order)
		if (scores[0] <= scores[1]) {
			int score_swap = scores[0];
			char * title_swap = titles[0];
			scores[0] = scores[1];
			scores[1] = score_swap;
			titles[0] = titles[1];
			titles[1] = title_swap;
		}
		return;
	}

	unsigned long int half = size / 2;
	mergesort_scores_serial(scores, titles, half);
	mergesort_scores_serial(scores + half, titles + half, size - half);
	merge_scores(scores, titles, size);
}
// Sort scores (descending) together with their titles, using up to
// `threads` OpenMP threads.  The thread budget is split recursively; each
// leaf sorts serially and the halves are merged on the way back up.
void sort_scores (int * scores, char ** titles, unsigned long int size, int threads) {
	// Treat non-positive thread counts as serial execution: previously a
	// caller passing threads <= 0 fell through both branches and the
	// buffers were silently left unsorted.
	if ( threads <= 1) {
		mergesort_scores_serial(scores, titles, size);
	}
	else {
#pragma omp parallel sections num_threads(threads)
		{
#pragma omp section
			sort_scores(scores, titles, size/2, threads/2);
#pragma omp section
			sort_scores(scores + size/2, titles + size/2, size-size/2, threads-threads/2);
		}
		merge_scores(scores, titles, size);
	} // threads > 1
}
// Wall time
// Wall-clock time in seconds (microsecond resolution via gettimeofday).
double dwalltime()
{
	struct timeval now;

	gettimeofday(&now, NULL);
	return now.tv_sec + now.tv_usec / 1000000.0;
}
addscaledmat.c | /*
Add a matrix plus a multiple of a second matrix and put the result in a
third matrix.
C=A+scale*B
*/
#include <stdlib.h>
#include <stdio.h>
#include "declarations.h"
/*
 * Compute C = A + scale*B block by block.
 *
 * A, B, C must share the same block structure.  DIAG blocks are stored as
 * vectors (1-based); MATRIX blocks are dense column-major (via ijtok) and
 * are processed with an OpenMP-parallel column loop.  PACKEDMATRIX blocks
 * are not supported and abort the program.
 *
 * Converted from a K&R-style definition to a prototyped definition (K&R
 * definitions are obsolescent and were removed in C23); the calling
 * convention and external symbol are unchanged.
 */
void addscaledmat(struct blockmatrix A, double scale, struct blockmatrix B,
                  struct blockmatrix C)
{
  int blk;
  int i,j;

  for (blk=1; blk<=A.nblocks; blk++)
    {
      switch (A.blocks[blk].blockcategory)
	{
	case DIAG:
	  /* diagonal block: one entry per row, stored in data.vec */
	  for (i=1; i<=A.blocks[blk].blocksize; i++)
	    C.blocks[blk].data.vec[i] = A.blocks[blk].data.vec[i] + scale*B.blocks[blk].data.vec[i];
	  break;
	case MATRIX:
	  /* dense block: columns are independent, so parallelize over j */
#pragma omp parallel for schedule(dynamic,64) default(none) private(i,j) shared(A,B,C,scale,blk)
	  for (j=1; j<=A.blocks[blk].blocksize; j++)
	    for (i=1; i<=A.blocks[blk].blocksize; i++)
	      C.blocks[blk].data.mat[ijtok(i,j,A.blocks[blk].blocksize)]=
		A.blocks[blk].data.mat[ijtok(i,j,A.blocks[blk].blocksize)]+
		scale*B.blocks[blk].data.mat[ijtok(i,j,A.blocks[blk].blocksize)];
	  break;
	case PACKEDMATRIX:
	default:
	  printf("addscaledmat illegal block type \n");
	  exit(12);
	};
    };
}
|
GB_unop__identity_fp32_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_fp32_bool
// op(A') function: GB_unop_tran__identity_fp32_bool
// C type: float
// A type: bool
// cast: float cij = (float) aij
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
float z = (float) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = (float) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator to every entry of A, typecasting bool to
// float: Cx [p] = (float) Ax [p].  Returns GrB_NO_VALUE when this kernel is
// compiled out (GB_DISABLE), in which case the caller must use the generic
// (non-specialized) apply path instead.
GrB_Info GB_unop_apply__identity_fp32_bool
(
    float *Cx,              // output values; Cx and Ax may be aliased
    const bool *Ax,         // input values of A
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap; NULL otherwise
    int64_t anz,            // number of entries in Ax (and Cx)
    int nthreads            // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is sparse, hypersparse, or full: every entry of Ax is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a single memcpy does the whole apply
        GB_memcpy (Cx, Ax, anz * sizeof (bool), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            bool aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions not present in the bitmap
            if (!Ab [p]) continue ;
            bool aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast bool to float, and apply the
// identity operator.  The actual transpose loop is the shared template
// GB_unop_transpose.c, specialized by the GB_* macros defined above.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB_unop_tran__identity_fp32_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,       // used by GB_unop_transpose.c
    const int64_t *GB_RESTRICT A_slice,     // used by GB_unop_transpose.c
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
ompt-signal.h | #if defined(WIN32) || defined(_WIN32)
#include <windows.h>
#define delay() Sleep(1);
#else
#include <unistd.h>
#define delay(t) usleep(t);
#endif
// These functions are used to provide a signal-wait mechanism to enforce expected scheduling for the test cases.
// Conditional variable (s) needs to be shared! Initialize to 0
#define OMPT_SIGNAL(s) ompt_signal(&s)
//inline
// Atomically increment the shared counter *s, releasing one unit for a
// matching ompt_wait().
void ompt_signal(int* s)
{
#pragma omp atomic
  *s += 1;
}
#define OMPT_WAIT(s,v) ompt_wait(&s,v)
// wait for s >= v
//inline
// Spin (with a small delay per iteration) until the shared counter *s,
// read atomically, reaches at least v.
void ompt_wait(int *s, int v)
{
  int observed;
  do {
    delay(10);
#pragma omp atomic read
    observed = (*s);
  } while (observed < v);
}
|
clauses-2.c | /* { dg-skip-if "PR 68733" { hppa*-*-hpux* && { ! lp64 } } } */
struct S { int r; int *s; int t[10]; };
void bar (int *);
/* Exercise the C front end's checks for conflicting or duplicate map and
   firstprivate clauses on '#pragma omp target'; each dg-error string is the
   diagnostic expected on that line.  Do not insert lines between pragmas:
   the final dg-error uses a relative line reference (.-1).  */
void
foo (int *p, int q, struct S t, int i, int j, int k, int l)
{
  #pragma omp target map (q), firstprivate (q) /* { dg-error "appears both in data and map clauses" } */
  bar (&q);
  #pragma omp target map (p[0]) firstprivate (p) /* { dg-error "appears more than once in data clauses" } */
  bar (p);
  #pragma omp target firstprivate (p), map (p[0]) /* { dg-error "appears more than once in data clauses" } */
  bar (p);
  #pragma omp target map (p[0]) map (p) /* { dg-error "appears both in data and map clauses" } */
  bar (p);
  #pragma omp target map (p) , map (p[0]) /* { dg-error "appears both in data and map clauses" } */
  bar (p);
  #pragma omp target map (q) map (q) /* { dg-error "appears more than once in map clauses" } */
  bar (&q);
  #pragma omp target map (p[0]) map (p[0]) /* { dg-error "appears more than once in data clauses" } */
  bar (p);
  #pragma omp target map (t) map (t.r) /* { dg-error "appears more than once in map clauses" } */
  bar (&t.r);
  #pragma omp target map (t.r) map (t) /* { dg-error "appears more than once in map clauses" } */
  bar (&t.r);
  #pragma omp target map (t.r) map (t.r) /* { dg-error "appears more than once in map clauses" } */
  bar (&t.r);
  #pragma omp target firstprivate (t), map (t.r) /* { dg-error "appears both in data and map clauses" } */
  bar (&t.r);
  #pragma omp target map (t.r) firstprivate (t) /* { dg-error "appears both in data and map clauses" } */
  bar (&t.r);
  #pragma omp target map (t.s[0]) map (t) /* { dg-error "appears more than once in map clauses" } */
  bar (t.s);
  #pragma omp target map (t) map(t.s[0]) /* { dg-error "appears more than once in map clauses" } */
  bar (t.s);
  #pragma omp target firstprivate (t) map (t.s[0]) /* { dg-error "appears both in data and map clauses" } */
  bar (t.s);
  #pragma omp target map (t.s[0]) firstprivate (t) /* { dg-error "appears both in data and map clauses" } */
  bar (t.s);
  #pragma omp target map (t.s[0]) map (t.s[2]) /* { dg-error "appears more than once in map clauses" } */
  bar (t.s);
  #pragma omp target map (t.t[0:2]) map (t.t[4:6]) /* { dg-error "appears more than once in map clauses" } */
  bar (t.t);
  #pragma omp target map (t.t[i:j]) map (t.t[k:l]) /* { dg-error "appears more than once in map clauses" } */
  bar (t.t);
  #pragma omp target map (t.s[0]) map (t.r)
  bar (t.s);
  #pragma omp target map (t.r) ,map (t.s[0])
  bar (t.s);
  #pragma omp target map (t.r) map (t) map (t.s[0]) firstprivate (t) /* { dg-error "appears both in data and map clauses" } */
  bar (t.s);
  #pragma omp target map (t) map (t.r) firstprivate (t) map (t.s[0]) /* { dg-error "appears both in data and map clauses" } */
  bar (t.s); /* { dg-error "appears more than once in map clauses" "" { target *-*-* } .-1 } */
}
|
adjmapbqm.h | // Copyright 2020 D-Wave Systems Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef DIMOD_ADJMAPBQM_H_
#define DIMOD_ADJMAPBQM_H_
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <iterator>
#include <map>
#include <utility>
#include <vector>

#include "dimod/utils.h"
namespace dimod {
/**
 * A binary quadratic model stored as a vector of (neighborhood, linear-bias)
 * pairs, where each neighborhood is a std::map from neighbor index to the
 * quadratic bias of that interaction.
 *
 * @tparam V integral type used to index variables
 * @tparam B numeric type used for biases
 */
template <class V, class B>
class AdjMapBQM {
 public:
    using bias_type = B;
    using variable_type = V;
    using size_type = std::size_t;

    using outvars_iterator = typename std::map<V, B>::iterator;
    using const_outvars_iterator = typename std::map<V, B>::const_iterator;

    /// adj[v].first maps each neighbor of v to its quadratic bias;
    /// adj[v].second is the linear bias of v.
    /// In the future we'd probably like to make this protected.
    std::vector<std::pair<std::map<V, B>, B>> adj;

    AdjMapBQM() {}

    /// Construct a BQM by copying the linear biases and neighborhoods of
    /// another BQM-like object.
    template <class BQM>
    explicit AdjMapBQM(const BQM &bqm) {
        adj.resize(bqm.num_variables());

        for (variable_type v = 0; v < bqm.num_variables(); ++v) {
            linear(v) = bqm.linear(v);

            auto span = bqm.neighborhood(v);
            adj[v].first.insert(span.first, span.second);
        }
    }

    /**
     * Construct a BQM from a dense array.
     *
     * @param dense An array containing the biases. Assumed to contain
     *     `num_variables`^2 elements. The upper and lower triangle are summed.
     * @param num_variables The number of variables.
     * @param ignore_diagonal If true, the diagonal (linear biases) is ignored.
     */
    template <class B2>
    AdjMapBQM(const B2 dense[], size_type num_variables,
              bool ignore_diagonal = false) {
        // we know how big our linear is going to be
        adj.resize(num_variables);

        bias_type qbias;

        if (!ignore_diagonal) {
            // the diagonal entries are the linear biases
            for (size_type v = 0; v < num_variables; ++v) {
                adj[v].second = dense[v * (num_variables + 1)];
            }
        }

        for (size_type u = 0; u < num_variables; ++u) {
            for (size_type v = u + 1; v < num_variables; ++v) {
                qbias = dense[u * num_variables + v] +
                        dense[v * num_variables + u];

                if (qbias != 0) {
                    // interactions are stored symmetrically; v (resp. u)
                    // arrives in increasing order so .end() is a valid hint
                    adj[u].first.emplace_hint(adj[u].first.end(), v, qbias);
                    adj[v].first.emplace_hint(adj[v].first.end(), u, qbias);
                }
            }
        }
    }

    /**
     * Construct a BQM from a dense array. This constructor is parallelized
     * and temporarily zeroes out the diagonal of the dense array but restores
     * it back.
     *
     * @param dense An array containing the biases. Assumed to contain
     *     `num_variables`^2 elements. The upper and lower triangle are summed.
     * @param num_variables The number of variables.
     * @param ignore_diagonal If true, the diagonal (linear biases) is ignored.
     */
    template <class B2>
    AdjMapBQM(B2 dense[], size_type num_variables,
              bool ignore_diagonal = false) {
        // we know how big our linear is going to be
        adj.resize(num_variables);

        // Backup copy of the diagonal of the dense matrix.
        std::vector<B2> dense_diagonal(num_variables);

        if (!ignore_diagonal) {
            #pragma omp parallel for
            for (size_type v = 0; v < num_variables; ++v) {
                adj[v].second = dense[v * (num_variables + 1)];
            }
        }

        #pragma omp parallel
        {
            // Zero out the diagonal to avoid expensive checks inside innermost
            // loop in the code for reading the matrix. The diagonal will be
            // restored so a backup copy is saved.
            #pragma omp for schedule(static)
            for (size_type v = 0; v < num_variables; ++v) {
                dense_diagonal[v] = dense[v * (num_variables + 1)];
                dense[v * (num_variables + 1)] = 0;
            }

            // Per-thread scratch: counters[n] is the number of neighbors of
            // row u_st + n currently buffered in temp_buffer.
            size_type counters[BLOCK_SIZE] = {0};
            size_type buffer_size = num_variables * BLOCK_SIZE *
                                    sizeof(std::pair<variable_type, bias_type>);
            std::pair<variable_type, bias_type> *temp_buffer =
                    (std::pair<variable_type, bias_type> *)malloc(buffer_size);

            if (temp_buffer == NULL) {
                // report on stderr and exit with a failure status
                // (previously exit(0), which signalled success)
                fprintf(stderr, "Memory allocation failure.\n");
                exit(EXIT_FAILURE);
            }

            // We process the matrix in blocks of size BLOCK_SIZE*BLOCK_SIZE to take
            // advantage of cache locality. Dynamic scheduling is used as we know some
            // blocks may be more sparse than others and processing them may finish earlier.
            #pragma omp for schedule(dynamic)
            for (size_type u_st = 0; u_st < num_variables; u_st += BLOCK_SIZE) {
                size_type u_end = std::min(u_st + BLOCK_SIZE, num_variables);
                for (size_type v_st = 0; v_st < num_variables;
                     v_st += BLOCK_SIZE) {
                    size_type v_end =
                            std::min(v_st + BLOCK_SIZE, num_variables);
                    for (size_type u = u_st, n = 0; u < u_end; u++, n++) {
                        size_type counter_u = counters[n];
                        size_type counter_u_old = counter_u;
                        for (size_type v = v_st; v < v_end; v++) {
                            bias_type qbias = dense[u * num_variables + v] +
                                              dense[v * num_variables + u];
                            if (qbias != 0) {
                                // Even though an intermediate buffer is not
                                // needed in case of this model of bqm, since we
                                // cannot preallocate a map using the number of
                                // elements in the buffer, inserting into the
                                // map directly here nullifies the benefits of
                                // cache blocking due to reallocation of the map
                                // causing cache pollution.
                                temp_buffer[n * num_variables + counter_u++] = {
                                        v, qbias};
                            }
                        }

                        if (counter_u != counter_u_old) {
                            counters[n] = counter_u;
                        }
                    }
                }

                // flush the buffered neighborhoods of this row block into
                // the adjacency maps and reset the counters
                for (size_type n = 0; n < BLOCK_SIZE; n++) {
                    if (counters[n]) {
                        std::copy(temp_buffer + n * num_variables,
                                  temp_buffer + n * num_variables + counters[n],
                                  std::inserter(adj[u_st + n].first,
                                                adj[u_st + n].first.begin()));
                        counters[n] = 0;
                    }
                }
            }

            free(temp_buffer);

            // Restore the diagonal of the original dense matrix
            #pragma omp for schedule(static)
            for (size_type v = 0; v < num_variables; ++v) {
                dense[v * (num_variables + 1)] = dense_diagonal[v];
            }
        }
    }

    /**
     * Construct a BQM from COO-formated iterators.
     *
     * A sparse BQM encoded in [COOrdinate] format is specified by three
     * arrays of (row, column, value).
     *
     * [COOrdinate]: https://w.wiki/n$L
     *
     * @param row_iterator Iterator pointing to the beginning of the row data.
     *     Must be a random access iterator.
     * @param col_iterator Iterator pointing to the beginning of the column
     *     data. Must be a random access iterator.
     * @param bias_iterator Iterator pointing to the beginning of the bias data.
     *     Must be a random access iterator.
     * @param length The number of (row, column, bias) entries.
     * @param ignore_diagonal If true, entries on the diagonal of the sparse
     *     matrix are ignored.
     */
    template <class ItRow, class ItCol, class ItBias>
    AdjMapBQM(ItRow row_iterator, ItCol col_iterator, ItBias bias_iterator,
              size_type length, bool ignore_diagonal = false) {
        // determine the number of variables so we can allocate adj
        if (length > 0) {
            size_type max_label = std::max(
                    *std::max_element(row_iterator, row_iterator + length),
                    *std::max_element(col_iterator, col_iterator + length));
            adj.resize(max_label + 1);
        }

        std::pair<outvars_iterator, bool> ret;
        for (size_type i = 0; i < length; ++i) {
            if (*row_iterator == *col_iterator) {
                // linear bias; duplicates accumulate
                if (!ignore_diagonal) {
                    linear(*row_iterator) += *bias_iterator;
                }
            } else {
                // quadratic bias
                // make sure that we're adding if it already exists
                ret = adj[*row_iterator].first.insert(
                        std::make_pair(*col_iterator, *bias_iterator));
                if (!ret.second) {
                    ret.first->second += *bias_iterator;
                }

                ret = adj[*col_iterator].first.insert(
                        std::make_pair(*row_iterator, *bias_iterator));
                if (!ret.second) {
                    ret.first->second += *bias_iterator;
                }
            }

            ++row_iterator;
            ++col_iterator;
            ++bias_iterator;
        }
    }

    /// Add one (disconnected) variable to the BQM and return its index.
    variable_type add_variable() {
        adj.resize(adj.size() + 1);
        return adj.size() - 1;
    }

    /// Get the degree of variable `v`.
    size_type degree(variable_type v) const { return adj[v].first.size(); }

    [[deprecated("Use AdjMapBQM::linear(v)")]] bias_type get_linear(
            variable_type v) const { return linear(v); }

    /// Get the quadratic bias of (u, v); the bool is false (and the bias 0)
    /// when no such interaction exists.
    std::pair<bias_type, bool> get_quadratic(variable_type u,
                                             variable_type v) const {
        assert(u >= 0 && u < adj.size());
        assert(v >= 0 && v < adj.size());
        assert(u != v);

        auto it = adj[u].first.find(v);

        if (it == adj[u].first.end() || it->first != v)
            return std::make_pair(0, false);

        return std::make_pair(it->second, true);
    }

    /// Reference to the linear bias of variable `v`.
    bias_type &linear(variable_type v) {
        assert(v >= 0 && v < adj.size());
        return adj[v].second;
    }

    /// Const reference to the linear bias of variable `v`.
    const bias_type &linear(variable_type v) const {
        assert(v >= 0 && v < adj.size());
        return adj[v].second;
    }

    /// Iterators over the full neighborhood of variable `u`.
    std::pair<outvars_iterator, outvars_iterator> neighborhood(
            variable_type u) {
        assert(u >= 0 && u < adj.size());
        return std::make_pair(adj[u].first.begin(), adj[u].first.end());
    }

    /// Const iterators over the full neighborhood of variable `u`.
    std::pair<const_outvars_iterator, const_outvars_iterator> neighborhood(
            variable_type u) const {
        assert(u >= 0 && u < adj.size());
        return std::make_pair(adj[u].first.cbegin(), adj[u].first.cend());
    }

    /**
     * The neighborhood of variable `v`.
     *
     * @param A variable `v`.
     * @param The neighborhood will start with the first out variable that
     *     does not compare less than `start`.
     *
     * @returns A pair of iterators pointing to the start and end of the
     *     neighborhood.
     */
    std::pair<const_outvars_iterator, const_outvars_iterator> neighborhood(
            variable_type v, variable_type start) const {
        return std::make_pair(adj[v].first.lower_bound(start),
                              adj[v].first.cend());
    }

    size_type num_variables() const { return adj.size(); }

    /// Number of interactions; each is stored twice, hence the halving.
    size_type num_interactions() const {
        size_type count = 0;
        for (auto it = adj.begin(); it != adj.end(); ++it)
            count += it->first.size();
        return count / 2;
    }

    /// Remove the last variable and all of its interactions; returns the new
    /// number of variables (which equals the removed variable's index).
    variable_type pop_variable() {
        assert(adj.size() > 0);

        variable_type v = adj.size() - 1;

        // remove v from all of its neighbor's neighborhoods
        for (auto it = adj[v].first.cbegin(); it != adj[v].first.cend(); ++it)
            adj[it->first].first.erase(v);

        adj.pop_back();

        return adj.size();
    }

    /// Remove the (u, v) interaction; returns whether it existed.
    bool remove_interaction(variable_type u, variable_type v) {
        assert(u >= 0 && u < adj.size());
        assert(v >= 0 && v < adj.size());

        if (adj[u].first.erase(v) > 0) {
            adj[v].first.erase(u);  // erase the symmetric entry
            return true;
        }

        return false;
    }

    [[deprecated("Use AdjMapBQM::linear(v)")]] void set_linear(variable_type v,
                                                               bias_type b) {
        assert(v >= 0 && v < adj.size());
        linear(v) = b;
    }

    /// Set the quadratic bias of (u, v), creating the interaction if needed.
    bool set_quadratic(variable_type u, variable_type v, bias_type b) {
        assert(u >= 0 && u < adj.size());
        assert(v >= 0 && v < adj.size());
        assert(u != v);

        adj[u].first[v] = b;
        adj[v].first[u] = b;

        // to be consistent with AdjArrayBQM, we return whether the value was
        // set
        return true;
    }
};
} // namespace dimod
#endif // DIMOD_ADJMAPBQM_H_
|
hw2b_time(static).c | #ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#define PNG_NO_SETJMP
#include <sched.h>
#include <assert.h>
#include <png.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include <mpi.h>
#include <pthread.h>
/* Write the iteration-count buffer as an 8-bit RGB PNG.
 *
 * buffer holds one iteration count per pixel, row-major with row 0 at the
 * BOTTOM of the image (hence the height-1-y flip below).  Pixels that
 * reached the iteration limit stay black; all others are colored from the
 * low bits of their count. */
void write_png(const char* filename, int iters, int width, int height, const int* buffer) {
    FILE* fp = fopen(filename, "wb");
    assert(fp);
    png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
    assert(png_ptr);
    png_infop info_ptr = png_create_info_struct(png_ptr);
    assert(info_ptr);
    png_init_io(png_ptr, fp);
    png_set_IHDR(png_ptr, info_ptr, width, height, 8, PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
                 PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT);
    /* no filtering + low compression level: favor encoding speed over size */
    png_set_filter(png_ptr, 0, PNG_NO_FILTERS);
    png_write_info(png_ptr, info_ptr);
    png_set_compression_level(png_ptr, 1);
    size_t row_size = 3 * width * sizeof(png_byte);
    png_bytep row = (png_bytep)malloc(row_size);
    for (int y = 0; y < height; ++y) {
        memset(row, 0, row_size);
        for (int x = 0; x < width; ++x) {
            /* flip vertically: buffer row 0 is the bottom scanline */
            int p = buffer[(height - 1 - y) * width + x];
            png_bytep color = row + x * 3;
            if (p != iters) {
                /* map the count's low bits to a red/gray banded palette */
                if (p & 16) {
                    color[0] = 240;
                    color[1] = color[2] = p % 16 * 16;
                } else {
                    color[0] = p % 16 * 16;
                }
            }
        }
        png_write_row(png_ptr, row);
    }
    free(row);
    png_write_end(png_ptr, NULL);
    png_destroy_write_struct(&png_ptr, &info_ptr);
    fclose(fp);
}
/* Render the Mandelbrot set with hybrid MPI + OpenMP parallelism.
 *
 * Rows are distributed cyclically across MPI ranks (j = rank, rank+size,
 * ...), each rank's rows are split across OpenMP threads, and the partial
 * images are combined on rank 0 with an element-wise MPI_SUM reduction.
 *
 * argv: filename iters left right lower upper width height */
int main(int argc, char** argv) {
    int rank, size;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* argument parsing */
    assert(argc == 9);
    const char* filename = argv[1];
    int iters = strtol(argv[2], 0, 10);
    double left = strtod(argv[3], 0);
    double right = strtod(argv[4], 0);
    double lower = strtod(argv[5], 0);
    double upper = strtod(argv[6], 0);
    int width = strtol(argv[7], 0, 10);
    int height = strtol(argv[8], 0, 10);

    /* allocate memory for image.  The image buffer MUST be zero-initialized:
     * MPI_Reduce below sums the FULL buffer from every rank, and each rank
     * only fills its own strided rows — with malloc, the garbage in the
     * untouched rows would corrupt the reduced result. */
    int* image = (int*)calloc((size_t)width * height, sizeof(int));
    int* result = (int*)malloc((size_t)width * height * sizeof(int));
    assert(image && result);

    double start = MPI_Wtime();
    /* mandelbrot set: rows of this rank are shared among OpenMP threads */
#pragma omp parallel for schedule(static)
    for (int j = rank; j < height; j += size) {
        double y0 = j * ((upper - lower) / height) + lower;
        for (int i = 0; i < width; ++i) {
            double x0 = i * ((right - left) / width) + left;

            int repeats = 0;
            double x = 0;
            double y = 0;
            double length_squared = 0;
            /* escape-time iteration: z <- z^2 + c until |z|^2 >= 4 */
            while (repeats < iters && length_squared < 4) {
                double temp = x * x - y * y + x0;
                y = 2 * x * y + y0;
                x = temp;
                length_squared = x * x + y * y;
                ++repeats;
            }
            image[j * width + i] = repeats;
        }
    }
    double end = MPI_Wtime();

    /* each pixel was computed by exactly one rank and is zero elsewhere,
     * so summing reconstructs the complete image on rank 0 */
    MPI_Reduce(image, result, width * height, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
    printf("%f\n", end - start);

    if (rank == 0) {
        /* draw */
        write_png(filename, iters, width, height, result);
    }
    /* cleanup on every rank (previously image leaked on ranks != 0 and
     * result was never freed) */
    free(image);
    free(result);
    MPI_Finalize();
}
|
nodal_residualbased_elimination_builder_and_solver_for_FSI.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi, Alessandro Franci
//
//
#if !defined(KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_FOR_FSI )
#define KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_FOR_FSI
/* System includes */
#include <set>
#ifdef _OPENMP
#include <omp.h>
#endif
/* External includes */
// #define USE_GOOGLE_HASH
#ifdef USE_GOOGLE_HASH
#include "sparsehash/dense_hash_set" //included in external libraries
#else
#include <unordered_set>
#endif
/* Project includes */
#include "utilities/timer.h"
#include "includes/define.h"
#include "includes/key_hash.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "includes/model_part.h"
#include "pfem_fluid_dynamics_application_variables.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class NodalResidualBasedEliminationBuilderAndSolverForFSI
* @ingroup KratosCore
* @brief Current class provides an implementation for standard builder and solving operations.
* @details The RHS is constituted by the unbalanced loads (residual)
* Degrees of freedom are reordered putting the restrained degrees of freedom at
* the end of the system ordered in reverse order with respect to the DofSet.
* Imposition of the dirichlet conditions is naturally dealt with as the residual already contains
* this information.
* Calculation of the reactions involves a cost very similiar to the calculation of the total residual
* @author Riccardo Rossi
*/
template<class TSparseSpace,
class TDenseSpace, //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class NodalResidualBasedEliminationBuilderAndSolverForFSI
: public BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(NodalResidualBasedEliminationBuilderAndSolverForFSI);
typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef Node<3> NodeType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
typedef typename BaseType::ElementsContainerType ElementsContainerType;
typedef Vector VectorType;
typedef GlobalPointersVector<Node<3> > NodeWeakPtrVectorType;
///@}
///@name Life Cycle
///@{
/**
 * @brief Constructor. Forwards the linear system solver to the base BuilderAndSolver.
 * @param pNewLinearSystemSolver The linear system solver to be used
 */
NodalResidualBasedEliminationBuilderAndSolverForFSI(
    typename TLinearSolver::Pointer pNewLinearSystemSolver)
    : BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pNewLinearSystemSolver)
{
}
/**
 * @brief Destructor.
 */
~NodalResidualBasedEliminationBuilderAndSolverForFSI() override = default;
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
 * @brief Computes the fluid material parameters at a node.
 * @details Density and dynamic viscosity are read from the nodal database. For yield-stress
 * materials (YIELD_SHEAR > 0) the apparent viscosity is regularized with the Papanastasiou
 * model; for vanishing equivalent strain rate the limiting value mu = m * tau_yield is used.
 * The volumetric coefficient is the time-step-scaled bulk modulus and, when positive, it is
 * additionally scaled by a reduction factor built from the nodal mass.
 * @param itNode Iterator to the node being processed
 * @param density Output: nodal density (DENSITY)
 * @param deviatoricCoeff Output: apparent dynamic viscosity
 * @param volumetricCoeff Output: time-scaled (and possibly reduced) bulk coefficient
 * @param timeInterval The current time step size
 * @param nodalVolume The volume associated to the node
 */
void SetMaterialPropertiesToFluid(
    ModelPart::NodeIterator itNode,
    double& density,
    double& deviatoricCoeff,
    double& volumetricCoeff,
    double timeInterval,
    double nodalVolume)
{
    density = itNode->FastGetSolutionStepValue(DENSITY);
    deviatoricCoeff = itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY);

    const double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
    if (yieldShear > 0) {
        const double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
        const double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
        const double exponent = -adaptiveExponent * equivalentStrainRate;
        if (equivalentStrainRate != 0) {
            // Papanastasiou-regularized apparent viscosity
            deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
        }
        if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0) {
            // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
            deviatoricCoeff = adaptiveExponent * yieldShear;
        }
    }

    volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS);
    if (volumetricCoeff > 0) {
        // NOTE(review): the original code recomputed volumetricCoeff here with the identical
        // expression used just above; the redundant recomputation was removed (no behavior change).
        const double bulkReduction = density * nodalVolume / (timeInterval * volumetricCoeff);
        volumetricCoeff *= bulkReduction; // net effect: density * nodalVolume / timeInterval
    }
}
/**
 * @brief Computes the solid material parameters at a node.
 * @details Reads SOLID_DENSITY, YOUNG_MODULUS and POISSON_RATIO from the nodal database and
 * derives the time-step-scaled deviatoric (shear-like) and volumetric coefficients used in
 * the nodal momentum equation assembly.
 * @param itNode Iterator to the node being processed
 * @param density Output: nodal density (SOLID_DENSITY)
 * @param deviatoricCoeff Output: time-scaled shear coefficient, dt * E / (2 (1 + nu))
 * @param volumetricCoeff Output: time-scaled first Lame parameter plus 2/3 of the deviatoric coefficient
 * @param timeInterval The current time step size
 * @param nodalVolume The volume associated to the node (unused here, kept for interface symmetry)
 */
void SetMaterialPropertiesToSolid(
    ModelPart::NodeIterator itNode,
    double& density,
    double& deviatoricCoeff,
    double& volumetricCoeff,
    double timeInterval,
    double nodalVolume)
{
    density = itNode->FastGetSolutionStepValue(SOLID_DENSITY);

    const double youngModulus = itNode->FastGetSolutionStepValue(YOUNG_MODULUS);
    const double poissonRatio = itNode->FastGetSolutionStepValue(POISSON_RATIO);

    // Shear contribution: mu * dt = dt * E / (2 (1 + nu))
    deviatoricCoeff = timeInterval * 0.5 * youngModulus / (1.0 + poissonRatio);

    // Volumetric contribution: lambda * dt + (2/3) * mu * dt
    const double lameLambdaDt =
        timeInterval * poissonRatio * youngModulus / ((1.0 + poissonRatio) * (1.0 - 2.0 * poissonRatio));
    volumetricCoeff = lameLambdaDt + 2.0 * deviatoricCoeff / 3.0;
}
/**
 * @brief Builds the nodal momentum-equation contributions of the SOLID nodes and assembles them into A and b.
 * @details For every SOLID node with more than one SFD neighbour and a positive nodal volume,
 * local LHS/RHS blocks are formed from dynamic, external (volume acceleration) and internal
 * (Cauchy stress) force terms using the nodally stored shape-function derivatives
 * (SOLID_NODAL_SFD_NEIGHBOURS), and then assembled into the global system.
 * Fix w.r.t. the previous version: the 3D stress container was declared as
 * array_1d<double,3> while components [3..5] were read (Voigt notation with 6 components in 3D),
 * which is an out-of-bounds access; it is now array_1d<double,6>.
 * @param pScheme The integration scheme considered (only checked for validity here)
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix
 * @param b The RHS vector
 */
void BuildSolidNodally(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& A,
    TSystemVectorType& b)
{
    KRATOS_TRY

    KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;

    // Contributions to the system
    LocalSystemMatrixType solidLHS_Contribution = LocalSystemMatrixType(0, 0);
    LocalSystemVectorType solidRHS_Contribution = LocalSystemVectorType(0);

    // Vector containing the localization in the system of the different terms
    Element::EquationIdVectorType solidEquationId;
    ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();

    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    const double timeInterval = CurrentProcessInfo[DELTA_TIME];
    const double FourThirds = 4.0 / 3.0;
    const double nTwoThirds = -2.0 / 3.0;
    double theta = 0.5; // time-integration weight applied to the material LHS terms

    array_1d<double, 3> Acc(3, 0.0);
    double dNdXi = 0;
    double dNdYi = 0;
    double dNdZi = 0;
    double dNdXj = 0;
    double dNdYj = 0;
    double dNdZj = 0;
    unsigned int firstRow = 0;
    unsigned int firstCol = 0;
    double density = 0;
    double deviatoricCoeff = 0;
    double volumetricCoeff = 0;

    ModelPart::NodeIterator NodesBegin;
    ModelPart::NodeIterator NodesEnd;
    OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);

    for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
    {
        if (itNode->Is(SOLID)) {
            NodeWeakPtrVectorType& neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
            Vector solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
            const unsigned int neighSize = solidNodalSFDneighboursId.size();
            const double nodalVolume = itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME);

            if (neighSize > 1 && nodalVolume > 0)
            {
                // Loop-invariant: nodal shape-function derivatives, hoisted out of the inner loops
                const Vector& rSolidSFDs = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
                const unsigned int localSize = rSolidSFDs.size();

                if (solidLHS_Contribution.size1() != localSize)
                    solidLHS_Contribution.resize(localSize, localSize, false); // false: do not preserve existing storage
                if (solidRHS_Contribution.size() != localSize)
                    solidRHS_Contribution.resize(localSize, false);
                if (solidEquationId.size() != localSize)
                    solidEquationId.resize(localSize, false);

                solidLHS_Contribution = ZeroMatrix(localSize, localSize);
                solidRHS_Contribution = ZeroVector(localSize);

                // Sets density (from SOLID_DENSITY) and the time-scaled material coefficients;
                // the previously duplicated re-fetch of SOLID_DENSITY after this call was removed.
                this->SetMaterialPropertiesToSolid(itNode, density, deviatoricCoeff, volumetricCoeff, timeInterval, nodalVolume);

                firstRow = 0;
                firstCol = 0;

                if (dimension == 2)
                {
                    //////////////////////////// LHS TERMS //////////////////////////////
                    solidLHS_Contribution(0, 0) += nodalVolume * density * 2.0 / timeInterval;
                    solidLHS_Contribution(1, 1) += nodalVolume * density * 2.0 / timeInterval;

                    //////////////////////////// RHS TERMS //////////////////////////////
                    //-------- DYNAMIC FORCES TERM -------//
                    Acc = 2.0 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION, 0);
                    solidRHS_Contribution[0] += -nodalVolume * density * Acc[0];
                    solidRHS_Contribution[1] += -nodalVolume * density * Acc[1];

                    //-------- EXTERNAL FORCES TERM -------//
                    array_1d<double, 3>& VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION);
                    solidRHS_Contribution[0] += nodalVolume * density * VolumeAcceleration[0];
                    solidRHS_Contribution[1] += nodalVolume * density * VolumeAcceleration[1];

                    //-------- INTERNAL FORCES TERM -------//
                    // 2D Voigt notation: [sigma_xx, sigma_yy, sigma_xy]
                    array_1d<double, 3> Sigma(3, 0.0);
                    Sigma = itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS);

                    const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X);
                    solidEquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId();
                    solidEquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId();

                    for (unsigned int i = 0; i < neighSize; i++)
                    {
                        dNdXi = rSolidSFDs[firstCol];
                        dNdYi = rSolidSFDs[firstCol + 1];

                        solidRHS_Contribution[firstCol] += -nodalVolume * (dNdXi * Sigma[0] + dNdYi * Sigma[2]);
                        solidRHS_Contribution[firstCol + 1] += -nodalVolume * (dNdYi * Sigma[1] + dNdXi * Sigma[2]);

                        for (unsigned int j = 0; j < neighSize; j++)
                        {
                            dNdXj = rSolidSFDs[firstRow];
                            dNdYj = rSolidSFDs[firstRow + 1];

                            solidLHS_Contribution(firstRow, firstCol) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdXi + dNdYj * dNdYi * deviatoricCoeff) * theta;
                            solidLHS_Contribution(firstRow, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * deviatoricCoeff) * theta;
                            solidLHS_Contribution(firstRow + 1, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * deviatoricCoeff) * theta;
                            solidLHS_Contribution(firstRow + 1, firstCol + 1) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdYi + dNdXj * dNdXi * deviatoricCoeff) * theta;

                            firstRow += 2;
                        }

                        firstRow = 0;
                        firstCol += 2;

                        // Map the next column block to the matching neighbour's equation ids.
                        // For interface nodes the SFD neighbour ordering may differ from the
                        // NEIGHBOUR_NODES ordering, so the neighbour is searched by Id.
                        unsigned int indexNode = i + 1;
                        if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true && indexNode < neighSize) {
                            unsigned int other_neigh_nodes_id = solidNodalSFDneighboursId[indexNode];
                            for (unsigned int k = 0; k < neighb_nodes.size(); k++)
                            {
                                unsigned int neigh_nodes_id = neighb_nodes[k].Id();
                                if (neigh_nodes_id == other_neigh_nodes_id) {
                                    solidEquationId[firstCol] = neighb_nodes[k].GetDof(VELOCITY_X, xDofPos).EquationId();
                                    solidEquationId[firstCol + 1] = neighb_nodes[k].GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
                                    break;
                                }
                            }
                        } else if (i < neighb_nodes.size())
                        {
                            solidEquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId();
                            solidEquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
                        }
                    }
                } else if (dimension == 3)
                {
                    //////////////////////////// LHS TERMS //////////////////////////////
                    solidLHS_Contribution(0, 0) += nodalVolume * density * 2.0 / timeInterval;
                    solidLHS_Contribution(1, 1) += nodalVolume * density * 2.0 / timeInterval;
                    solidLHS_Contribution(2, 2) += nodalVolume * density * 2.0 / timeInterval;

                    //////////////////////////// RHS TERMS //////////////////////////////
                    //-------- DYNAMIC FORCES TERM -------//
                    Acc = 2.0 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION, 0);
                    solidRHS_Contribution[0] += -nodalVolume * density * Acc[0];
                    solidRHS_Contribution[1] += -nodalVolume * density * Acc[1];
                    solidRHS_Contribution[2] += -nodalVolume * density * Acc[2];

                    //-------- EXTERNAL FORCES TERM -------//
                    array_1d<double, 3>& VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION);
                    solidRHS_Contribution[0] += nodalVolume * density * VolumeAcceleration[0];
                    solidRHS_Contribution[1] += nodalVolume * density * VolumeAcceleration[1];
                    solidRHS_Contribution[2] += nodalVolume * density * VolumeAcceleration[2];

                    //-------- INTERNAL FORCES TERM -------//
                    // 3D Voigt notation: [s_xx, s_yy, s_zz, s_xy, s_xz, s_yz].
                    // BUGFIX: was array_1d<double,3> while Sigma[3..5] are read below (out of bounds).
                    array_1d<double, 6> Sigma(6, 0.0);
                    Sigma = itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS);

                    const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X);
                    solidEquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId();
                    solidEquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
                    solidEquationId[2] = itNode->GetDof(VELOCITY_Z, xDofPos + 2).EquationId();

                    for (unsigned int i = 0; i < neighSize; i++)
                    {
                        dNdXi = rSolidSFDs[firstCol];
                        dNdYi = rSolidSFDs[firstCol + 1];
                        dNdZi = rSolidSFDs[firstCol + 2];

                        solidRHS_Contribution[firstCol] += -nodalVolume * (dNdXi * Sigma[0] + dNdYi * Sigma[3] + dNdZi * Sigma[4]);
                        solidRHS_Contribution[firstCol + 1] += -nodalVolume * (dNdYi * Sigma[1] + dNdXi * Sigma[3] + dNdZi * Sigma[5]);
                        solidRHS_Contribution[firstCol + 2] += -nodalVolume * (dNdZi * Sigma[2] + dNdXi * Sigma[4] + dNdYi * Sigma[5]);

                        for (unsigned int j = 0; j < neighSize; j++)
                        {
                            dNdXj = rSolidSFDs[firstRow];
                            dNdYj = rSolidSFDs[firstRow + 1];
                            dNdZj = rSolidSFDs[firstRow + 2];

                            solidLHS_Contribution(firstRow, firstCol) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdXi + (dNdYj * dNdYi + dNdZj * dNdZi) * deviatoricCoeff) * theta;
                            solidLHS_Contribution(firstRow, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * deviatoricCoeff) * theta;
                            solidLHS_Contribution(firstRow, firstCol + 2) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdZi + dNdZj * dNdXi * deviatoricCoeff) * theta;
                            solidLHS_Contribution(firstRow + 1, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * deviatoricCoeff) * theta;
                            solidLHS_Contribution(firstRow + 1, firstCol + 1) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdYi + (dNdXj * dNdXi + dNdZj * dNdZi) * deviatoricCoeff) * theta;
                            solidLHS_Contribution(firstRow + 1, firstCol + 2) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdZi + dNdZj * dNdYi * deviatoricCoeff) * theta;
                            solidLHS_Contribution(firstRow + 2, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdXi + dNdXj * dNdZi * deviatoricCoeff) * theta;
                            solidLHS_Contribution(firstRow + 2, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdYi + dNdYj * dNdZi * deviatoricCoeff) * theta;
                            solidLHS_Contribution(firstRow + 2, firstCol + 2) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdZi + (dNdXj * dNdXi + dNdYj * dNdYi) * deviatoricCoeff) * theta;

                            firstRow += 3;
                        }

                        firstRow = 0;
                        firstCol += 3;

                        // Same neighbour-to-column mapping as in the 2D branch, with a Z dof added.
                        unsigned int indexNode = i + 1;
                        if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true && indexNode < neighSize) {
                            unsigned int other_neigh_nodes_id = solidNodalSFDneighboursId[indexNode];
                            for (unsigned int k = 0; k < neighb_nodes.size(); k++)
                            {
                                unsigned int neigh_nodes_id = neighb_nodes[k].Id();
                                if (neigh_nodes_id == other_neigh_nodes_id) {
                                    solidEquationId[firstCol] = neighb_nodes[k].GetDof(VELOCITY_X, xDofPos).EquationId();
                                    solidEquationId[firstCol + 1] = neighb_nodes[k].GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
                                    solidEquationId[firstCol + 2] = neighb_nodes[k].GetDof(VELOCITY_Z, xDofPos + 2).EquationId();
                                    break;
                                }
                            }
                        } else if (i < neighb_nodes.size())
                        {
                            solidEquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId();
                            solidEquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
                            solidEquationId[firstCol + 2] = neighb_nodes[i].GetDof(VELOCITY_Z, xDofPos + 2).EquationId();
                        }
                    }
                }

#ifdef _OPENMP
                Assemble(A, b, solidLHS_Contribution, solidRHS_Contribution, solidEquationId, mlock_array);
#else
                Assemble(A, b, solidLHS_Contribution, solidRHS_Contribution, solidEquationId);
#endif
            }
        }
    }

    KRATOS_CATCH("")
}
/**
 * @brief Builds the nodal momentum-equation contributions of the FLUID (and interface) nodes and assembles them into A and b.
 * @details For every fluid (non-solid) node, or interface node, with more than one SFD
 * neighbour and a positive nodal volume, local LHS/RHS blocks are formed from dynamic,
 * external (volume acceleration) and internal (Cauchy stress) force terms using the nodally
 * stored shape-function derivatives (NODAL_SFD_NEIGHBOURS). For fluid/interface nodes the
 * normal stress components are rebuilt from the deviatoric stress plus a theta-blended
 * pressure. Fix w.r.t. the previous version: the 3D stress container was declared as
 * array_1d<double,3> while components [3..5] were read (Voigt notation with 6 components in
 * 3D), which is an out-of-bounds access; it is now array_1d<double,6>.
 * @param pScheme The integration scheme considered (only checked for validity here)
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix
 * @param b The RHS vector
 */
void BuildFluidNodally(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& A,
    TSystemVectorType& b)
{
    KRATOS_TRY

    KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;

    // Contributions to the system
    LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
    LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);

    // Vector containing the localization in the system of the different terms
    Element::EquationIdVectorType EquationId;
    ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();

    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    const double timeInterval = CurrentProcessInfo[DELTA_TIME];
    const double FourThirds = 4.0 / 3.0;
    const double nTwoThirds = -2.0 / 3.0;
    double theta = 0.5; // time-integration weight for the LHS material terms and the pressure blending

    array_1d<double, 3> Acc(3, 0.0);
    double pressure = 0;
    double dNdXi = 0;
    double dNdYi = 0;
    double dNdZi = 0;
    double dNdXj = 0;
    double dNdYj = 0;
    double dNdZj = 0;
    unsigned int firstRow = 0;
    unsigned int firstCol = 0;
    double density = 0;
    double deviatoricCoeff = 0;
    double volumetricCoeff = 0;

    ModelPart::NodeIterator NodesBegin;
    ModelPart::NodeIterator NodesEnd;
    OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);

    for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
    {
        if ((itNode->Is(FLUID) && itNode->IsNot(SOLID)) || itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true)
        {
            NodeWeakPtrVectorType& neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
            Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
            const unsigned int neighSize = nodalSFDneighboursId.size();
            const double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);

            if (neighSize > 1 && nodalVolume > 0)
            {
                // Loop-invariant: nodal shape-function derivatives, hoisted out of the inner loops
                const Vector& rSFDs = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
                const unsigned int localSize = rSFDs.size();

                if (LHS_Contribution.size1() != localSize)
                    LHS_Contribution.resize(localSize, localSize, false); // false: do not preserve existing storage
                if (RHS_Contribution.size() != localSize)
                    RHS_Contribution.resize(localSize, false);
                if (EquationId.size() != localSize)
                    EquationId.resize(localSize, false);

                LHS_Contribution = ZeroMatrix(localSize, localSize);
                RHS_Contribution = ZeroVector(localSize);

                this->SetMaterialPropertiesToFluid(itNode, density, deviatoricCoeff, volumetricCoeff, timeInterval, nodalVolume);

                firstRow = 0;
                firstCol = 0;

                if (dimension == 2)
                {
                    //////////////////////////// LHS TERMS //////////////////////////////
                    LHS_Contribution(0, 0) += nodalVolume * density * 2.0 / timeInterval;
                    LHS_Contribution(1, 1) += nodalVolume * density * 2.0 / timeInterval;

                    //////////////////////////// RHS TERMS //////////////////////////////
                    //-------- DYNAMIC FORCES TERM -------//
                    Acc = 2.0 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval -
                        itNode->FastGetSolutionStepValue(ACCELERATION, 0);
                    RHS_Contribution[0] += -nodalVolume * density * Acc[0];
                    RHS_Contribution[1] += -nodalVolume * density * Acc[1];

                    //-------- EXTERNAL FORCES TERM -------//
                    array_1d<double, 3>& VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION);
                    RHS_Contribution[0] += nodalVolume * density * VolumeAcceleration[0];
                    RHS_Contribution[1] += nodalVolume * density * VolumeAcceleration[1];

                    //-------- INTERNAL FORCES TERM -------//
                    // 2D Voigt notation: [sigma_xx, sigma_yy, sigma_xy]
                    array_1d<double, 3> Sigma(3, 0.0);
                    Sigma = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS);

                    if (itNode->IsNot(SOLID) || itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true)
                    {
                        // Normal components rebuilt from deviatoric stress + theta-blended pressure
                        pressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) * theta + itNode->FastGetSolutionStepValue(PRESSURE, 1) * (1 - theta);
                        Sigma[0] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0] + pressure;
                        Sigma[1] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1] + pressure;
                    }

                    const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X);
                    EquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId();
                    EquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId();

                    for (unsigned int i = 0; i < neighSize; i++)
                    {
                        dNdXi = rSFDs[firstCol];
                        dNdYi = rSFDs[firstCol + 1];

                        RHS_Contribution[firstCol] += -nodalVolume * (dNdXi * Sigma[0] + dNdYi * Sigma[2]);
                        RHS_Contribution[firstCol + 1] += -nodalVolume * (dNdYi * Sigma[1] + dNdXi * Sigma[2]);

                        for (unsigned int j = 0; j < neighSize; j++)
                        {
                            dNdXj = rSFDs[firstRow];
                            dNdYj = rSFDs[firstRow + 1];

                            LHS_Contribution(firstRow, firstCol) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdXi + dNdYj * dNdYi * deviatoricCoeff) * theta;
                            LHS_Contribution(firstRow, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * deviatoricCoeff) * theta;
                            LHS_Contribution(firstRow + 1, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * deviatoricCoeff) * theta;
                            LHS_Contribution(firstRow + 1, firstCol + 1) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdYi + dNdXj * dNdXi * deviatoricCoeff) * theta;

                            firstRow += 2;
                        }

                        firstRow = 0;
                        firstCol += 2;

                        // Map the next column block to the matching neighbour's equation ids.
                        // For interface nodes the SFD neighbour ordering may differ from the
                        // NEIGHBOUR_NODES ordering, so the neighbour is searched by Id.
                        unsigned int indexNode = i + 1;
                        if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true && indexNode < neighSize) {
                            unsigned int other_neigh_nodes_id = nodalSFDneighboursId[indexNode];
                            for (unsigned int k = 0; k < neighb_nodes.size(); k++)
                            {
                                unsigned int neigh_nodes_id = neighb_nodes[k].Id();
                                if (neigh_nodes_id == other_neigh_nodes_id) {
                                    EquationId[firstCol] = neighb_nodes[k].GetDof(VELOCITY_X, xDofPos).EquationId();
                                    EquationId[firstCol + 1] = neighb_nodes[k].GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
                                    break;
                                }
                            }
                        } else if (i < neighb_nodes.size())
                        {
                            EquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId();
                            EquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
                        }
                    }
                } else if (dimension == 3)
                {
                    //////////////////////////// LHS TERMS //////////////////////////////
                    LHS_Contribution(0, 0) += nodalVolume * density * 2.0 / timeInterval;
                    LHS_Contribution(1, 1) += nodalVolume * density * 2.0 / timeInterval;
                    LHS_Contribution(2, 2) += nodalVolume * density * 2.0 / timeInterval;

                    //////////////////////////// RHS TERMS //////////////////////////////
                    //-------- DYNAMIC FORCES TERM -------//
                    Acc = 2.0 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval -
                        itNode->FastGetSolutionStepValue(ACCELERATION, 0);
                    RHS_Contribution[0] += -nodalVolume * density * Acc[0];
                    RHS_Contribution[1] += -nodalVolume * density * Acc[1];
                    RHS_Contribution[2] += -nodalVolume * density * Acc[2];

                    //-------- EXTERNAL FORCES TERM -------//
                    array_1d<double, 3>& VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION);
                    RHS_Contribution[0] += nodalVolume * density * VolumeAcceleration[0];
                    RHS_Contribution[1] += nodalVolume * density * VolumeAcceleration[1];
                    RHS_Contribution[2] += nodalVolume * density * VolumeAcceleration[2];

                    //-------- INTERNAL FORCES TERM -------//
                    // 3D Voigt notation: [s_xx, s_yy, s_zz, s_xy, s_xz, s_yz].
                    // BUGFIX: was array_1d<double,3> while Sigma[3..5] are read below (out of bounds).
                    array_1d<double, 6> Sigma(6, 0.0);
                    Sigma = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS);

                    if (itNode->IsNot(SOLID) || itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true)
                    {
                        // Normal components rebuilt from deviatoric stress + theta-blended pressure
                        pressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) * theta + itNode->FastGetSolutionStepValue(PRESSURE, 1) * (1 - theta);
                        Sigma[0] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0] + pressure;
                        Sigma[1] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1] + pressure;
                        Sigma[2] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[2] + pressure;
                    }

                    const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X);
                    EquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId();
                    EquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
                    EquationId[2] = itNode->GetDof(VELOCITY_Z, xDofPos + 2).EquationId();

                    for (unsigned int i = 0; i < neighSize; i++)
                    {
                        dNdXi = rSFDs[firstCol];
                        dNdYi = rSFDs[firstCol + 1];
                        dNdZi = rSFDs[firstCol + 2];

                        RHS_Contribution[firstCol] += -nodalVolume * (dNdXi * Sigma[0] + dNdYi * Sigma[3] + dNdZi * Sigma[4]);
                        RHS_Contribution[firstCol + 1] += -nodalVolume * (dNdYi * Sigma[1] + dNdXi * Sigma[3] + dNdZi * Sigma[5]);
                        RHS_Contribution[firstCol + 2] += -nodalVolume * (dNdZi * Sigma[2] + dNdXi * Sigma[4] + dNdYi * Sigma[5]);

                        for (unsigned int j = 0; j < neighSize; j++)
                        {
                            dNdXj = rSFDs[firstRow];
                            dNdYj = rSFDs[firstRow + 1];
                            dNdZj = rSFDs[firstRow + 2];

                            LHS_Contribution(firstRow, firstCol) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdXi + (dNdYj * dNdYi + dNdZj * dNdZi) * deviatoricCoeff) * theta;
                            LHS_Contribution(firstRow, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * deviatoricCoeff) * theta;
                            LHS_Contribution(firstRow, firstCol + 2) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdZi + dNdZj * dNdXi * deviatoricCoeff) * theta;
                            LHS_Contribution(firstRow + 1, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * deviatoricCoeff) * theta;
                            LHS_Contribution(firstRow + 1, firstCol + 1) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdYi + (dNdXj * dNdXi + dNdZj * dNdZi) * deviatoricCoeff) * theta;
                            LHS_Contribution(firstRow + 1, firstCol + 2) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdZi + dNdZj * dNdYi * deviatoricCoeff) * theta;
                            LHS_Contribution(firstRow + 2, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdXi + dNdXj * dNdZi * deviatoricCoeff) * theta;
                            LHS_Contribution(firstRow + 2, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdYi + dNdYj * dNdZi * deviatoricCoeff) * theta;
                            LHS_Contribution(firstRow + 2, firstCol + 2) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdZi + (dNdXj * dNdXi + dNdYj * dNdYi) * deviatoricCoeff) * theta;

                            firstRow += 3;
                        }

                        firstRow = 0;
                        firstCol += 3;

                        // Same neighbour-to-column mapping as in the 2D branch, with a Z dof added.
                        unsigned int indexNode = i + 1;
                        if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true && indexNode < neighSize) {
                            unsigned int other_neigh_nodes_id = nodalSFDneighboursId[indexNode];
                            for (unsigned int k = 0; k < neighb_nodes.size(); k++)
                            {
                                unsigned int neigh_nodes_id = neighb_nodes[k].Id();
                                if (neigh_nodes_id == other_neigh_nodes_id) {
                                    EquationId[firstCol] = neighb_nodes[k].GetDof(VELOCITY_X, xDofPos).EquationId();
                                    EquationId[firstCol + 1] = neighb_nodes[k].GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
                                    EquationId[firstCol + 2] = neighb_nodes[k].GetDof(VELOCITY_Z, xDofPos + 2).EquationId();
                                    break;
                                }
                            }
                        } else if (i < neighb_nodes.size())
                        {
                            EquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId();
                            EquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
                            EquationId[firstCol + 2] = neighb_nodes[i].GetDof(VELOCITY_Z, xDofPos + 2).EquationId();
                        }
                    }
                }

#ifdef _OPENMP
                Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array);
#else
                Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
#endif
            }
        }
    }

    KRATOS_CATCH("")
}
/**
 * @brief This is a call to the linear system solver
 * @param A The LHS matrix
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 */
void SystemSolve(
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b
) override
{
    KRATOS_TRY

    // Solve only when the RHS is non-trivial; otherwise the update is identically zero.
    const double norm_b = (TSparseSpace::Size(b) != 0) ? TSparseSpace::TwoNorm(b) : 0.00;

    if (norm_b != 0.00)
        BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
    else
        TSparseSpace::SetToZero(Dx);

    // Prints information about the linear solver state at higher echo levels
    KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;

    KRATOS_CATCH("")
}
/**
 * @brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
 * @param A The LHS matrix
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 * @param rModelPart The model part of the problem to solve
 */
void SystemSolveWithPhysics(
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b,
    ModelPart& rModelPart
)
{
    KRATOS_TRY

    const double norm_b = (TSparseSpace::Size(b) != 0) ? TSparseSpace::TwoNorm(b) : 0.00;

    if (norm_b != 0.00)
    {
        // Provide physical data (DOF set, model part) when the solver requests it
        if (BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded())
            BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, rModelPart);

        BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
    }
    else
    {
        TSparseSpace::SetToZero(Dx);
        KRATOS_WARNING_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", rModelPart.GetCommunicator().MyPID() == 0) << "ATTENTION! setting the RHS to zero!" << std::endl;
    }

    // Prints information about the linear solver state at higher echo levels (rank 0 only)
    KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << *(BaseType::mpLinearSystemSolver) << std::endl;

    KRATOS_CATCH("")
}
/**
 * @brief Function to perform the building and solving phase at the same time.
 * @details It is ideally the fastest and safer function to use when it is possible to solve
 * just after building. Solid and fluid nodal contributions are assembled into the same
 * system before solving.
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 */
void BuildAndSolve(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b) override
{
    KRATOS_TRY

    Timer::Start("Build");
    // Assemble the solid contributions first, then the fluid ones, into the same system
    BuildSolidNodally(pScheme, rModelPart, A, b);
    BuildFluidNodally(pScheme, rModelPart, A, b);
    Timer::Stop("Build");

    // Does nothing... Dirichlet conditions are naturally dealt with in defining the residual
    ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b);

    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;

    const double solve_start_time = OpenMPUtils::GetCurrentTime();
    Timer::Start("Solve");
    SystemSolveWithPhysics(A, Dx, b, rModelPart);
    Timer::Stop("Solve");
    const double solve_stop_time = OpenMPUtils::GetCurrentTime();

    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() >=1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System solve time: " << solve_stop_time - solve_start_time << std::endl;
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;

    KRATOS_CATCH("")
}
/**
* @brief Builds the list of the DofSets involved in the problem by "asking" to each element
* and condition its Dofs.
* @details The list of dofs is stored inside the BuilderAndSolver as it is closely connected to the
* way the matrix and RHS are built
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void SetUpDofSet(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart
) override
{
KRATOS_TRY;
KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << "Setting up the dofs" << std::endl;
//Gets the array of elements from the modeler
ElementsArrayType& pElements = rModelPart.Elements();
const int nelements = static_cast<int>(pElements.size());
Element::DofsVectorType ElementalDofList;
ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
unsigned int nthreads = OpenMPUtils::GetNumThreads();
// typedef boost::fast_pool_allocator< NodeType::DofType::Pointer > allocator_type;
// typedef std::unordered_set < NodeType::DofType::Pointer,
// DofPointerHasher,
// DofPointerComparor,
// allocator_type > set_type;
#ifdef USE_GOOGLE_HASH
typedef google::dense_hash_set < NodeType::DofType::Pointer, DofPointerHasher> set_type;
#else
typedef std::unordered_set < NodeType::DofType::Pointer, DofPointerHasher> set_type;
#endif
//
// One dof set per thread so each thread can collect dofs without locking.
std::vector<set_type> dofs_aux_list(nthreads);
// std::vector<allocator_type> allocators(nthreads);
for (int i = 0; i < static_cast<int>(nthreads); i++)
{
#ifdef USE_GOOGLE_HASH
dofs_aux_list[i].set_empty_key(NodeType::DofType::Pointer());
#else
// dofs_aux_list[i] = set_type( allocators[i]);
dofs_aux_list[i].reserve(nelements);
#endif
}
// Each thread inserts the dofs of its elements into its own private set.
#pragma omp parallel for firstprivate(nelements, ElementalDofList)
for (int i = 0; i < static_cast<int>(nelements); i++)
{
typename ElementsArrayType::iterator it = pElements.begin() + i;
const unsigned int this_thread_id = OpenMPUtils::ThisThread();
// gets list of Dof involved on every element
pScheme->GetElementalDofList(*(it.base()), ElementalDofList, CurrentProcessInfo);
dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
}
// Same collection pass over the conditions.
ConditionsArrayType& pConditions = rModelPart.Conditions();
const int nconditions = static_cast<int>(pConditions.size());
#pragma omp parallel for firstprivate(nconditions, ElementalDofList)
for (int i = 0; i < nconditions; i++)
{
typename ConditionsArrayType::iterator it = pConditions.begin() + i;
const unsigned int this_thread_id = OpenMPUtils::ThisThread();
// gets list of Dof involved on every element
pScheme->GetConditionDofList(*(it.base()), ElementalDofList, CurrentProcessInfo);
dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
}
//here we do a reduction in a tree so to have everything on thread 0
// At each step, set [i + new_max] is merged into set [i]; the number of live
// sets halves until everything ends up in dofs_aux_list[0].
unsigned int old_max = nthreads;
unsigned int new_max = ceil(0.5*static_cast<double>(old_max));
while (new_max >= 1 && new_max != old_max)
{
// //just for debugging
// std::cout << "old_max" << old_max << " new_max:" << new_max << std::endl;
// for (int i = 0; i < new_max; i++)
// {
// if (i + new_max < old_max)
// {
// std::cout << i << " - " << i + new_max << std::endl;
// }
// }
// std::cout << "********************" << std::endl;
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(new_max); i++)
{
if (i + new_max < old_max)
{
dofs_aux_list[i].insert(dofs_aux_list[i + new_max].begin(), dofs_aux_list[i + new_max].end());
dofs_aux_list[i + new_max].clear();
}
}
old_max = new_max;
new_max = ceil(0.5*static_cast<double>(old_max));
}
// Move the merged, de-duplicated dof pointers into the sorted member dof set.
DofsArrayType Doftemp;
BaseType::mDofSet = DofsArrayType();
Doftemp.reserve(dofs_aux_list[0].size());
for (auto it = dofs_aux_list[0].begin(); it != dofs_aux_list[0].end(); it++)
{
Doftemp.push_back(it->get());
}
Doftemp.Sort();
BaseType::mDofSet = Doftemp;
// Throws an exception if there are no Degrees of freedom involved in the analysis
KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;
BaseType::mDofSetIsInitialized = true;
KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished setting up the dofs" << std::endl;
#ifdef _OPENMP
// Recreate one OpenMP lock per dof row; used later to guard row assembly.
if (mlock_array.size() != 0)
{
for (int i = 0; i < static_cast<int>(mlock_array.size()); i++)
omp_destroy_lock(&mlock_array[i]);
}
mlock_array.resize(BaseType::mDofSet.size());
for (int i = 0; i < static_cast<int>(mlock_array.size()); i++)
omp_init_lock(&mlock_array[i]);
#endif
// If reactions are to be calculated, we check if all the dofs have reactions defined
// This is to be done only in debug mode
#ifdef KRATOS_DEBUG
if(BaseType::GetCalculateReactionsFlag())
{
for(auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
{
KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " <<std::endl
<< "Node : "<<dof_iterator->Id()<< std::endl
<< "Dof : "<<(*dof_iterator)<<std::endl<<"Not possible to calculate reactions."<<std::endl;
}
}
#endif
KRATOS_CATCH("");
}
/**
 * @brief Organises the dofset in order to speed up the building phase
 * @details Free dofs receive ascending equation ids starting at zero; fixed dofs
 * receive descending ids starting from the total dof count. Consequently any
 * EquationId greater than or equal to mEquationSystemSize identifies a
 * restrained (fixed) degree of freedom.
 * @param rModelPart The model part of the problem to solve
 */
void SetUpSystem(
ModelPart& rModelPart
) override
{
    int next_free_id = 0;
    int next_fixed_id = BaseType::mDofSet.size();
    for (typename DofsArrayType::iterator i_dof = BaseType::mDofSet.begin(); i_dof != BaseType::mDofSet.end(); ++i_dof)
    {
        if (i_dof->IsFixed())
        {
            // Fixed dofs are numbered backwards from the end of the system.
            i_dof->SetEquationId(--next_fixed_id);
        }
        else
        {
            i_dof->SetEquationId(next_free_id++);
        }
    }
    // After the loop, next_fixed_id equals the number of free dofs.
    BaseType::mEquationSystemSize = next_fixed_id;
}
//**************************************************************************
//**************************************************************************
/**
 * @brief Allocates (if needed) and resizes the system matrix, the unknowns
 * vector and the RHS vector, rebuilding the sparse matrix graph when required.
 * @details Pointers that were never initialized are replaced by empty
 * containers; the matrix structure is (re)constructed whenever the matrix is
 * empty or reshaping has been requested. Changing the equation system size
 * mid-simulation is treated as an error.
 * @param pScheme The integration scheme considered
 * @param pA Pointer to the LHS matrix
 * @param pDx Pointer to the unknowns vector
 * @param pb Pointer to the RHS vector
 * @param rModelPart The model part of the problem to solve
 */
void ResizeAndInitializeVectors(
typename TSchemeType::Pointer pScheme,
TSystemMatrixPointerType& pA,
TSystemVectorPointerType& pDx,
TSystemVectorPointerType& pb,
ModelPart& rModelPart
) override
{
    KRATOS_TRY
    // boost::timer m_contruct_matrix;
    if (pA == nullptr) // if the pointer is not initialized, initialize it to an empty matrix
    {
        TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
        pA.swap(pNewA);
    }
    if (pDx == nullptr) // if the pointer is not initialized, initialize it to an empty vector
    {
        TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0));
        pDx.swap(pNewDx);
    }
    if (pb == nullptr) // if the pointer is not initialized, initialize it to an empty vector
    {
        TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0));
        pb.swap(pNewb);
    }
    if (BaseType::mpReactionsVector == nullptr) // if the pointer is not initialized, initialize it to an empty vector
    {
        TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0));
        BaseType::mpReactionsVector.swap(pNewReactionsVector);
    }
    TSystemMatrixType& A = *pA;
    TSystemVectorType& Dx = *pDx;
    TSystemVectorType& b = *pb;
    // Resizing the system vectors and matrix
    if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag()) // if the matrix is not initialized
    {
        A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
        ConstructMatrixStructureForFSI(pScheme, A, rModelPart);
    }
    else
    {
        if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize)
        {
            // A change of system size during the simulation is not supported.
            KRATOS_WATCH("it should not come here!!!!!!!! ... this is SLOW");
            KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permited."<<std::endl;
            A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true);
            ConstructMatrixStructureForFSI(pScheme, A, rModelPart);
        }
    }
    if (Dx.size() != BaseType::mEquationSystemSize)
        Dx.resize(BaseType::mEquationSystemSize, false);
    if (b.size() != BaseType::mEquationSystemSize)
        b.resize(BaseType::mEquationSystemSize, false);
    // If needed, resize the vector for the calculation of reactions
    if (BaseType::mCalculateReactionsFlag)
    {
        unsigned int ReactionsVectorSize = BaseType::mDofSet.size();
        if (BaseType::mpReactionsVector->size() != ReactionsVectorSize)
            BaseType::mpReactionsVector->resize(ReactionsVectorSize, false);
    }
    // std::cout << "MOMENTUM EQ: contruct_matrix : " << m_contruct_matrix.elapsed() << std::endl;
    KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/**
 * @brief Applies the dirichlet conditions. This operation may be very heavy or completely
 * inexpensive depending on the implementation chosen and on how the System Matrix is built.
 * @details For explanation of how it works for a particular implementation the user
 * should refer to the particular Builder And Solver chosen. In this elimination
 * builder and solver the body is intentionally empty: Dirichlet conditions are
 * handled naturally by the residual definition (fixed dofs are numbered past
 * mEquationSystemSize and never assembled), so nothing has to be done here.
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 */
void ApplyDirichletConditions(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b) override
{
}
/**
 * @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
 * @details Empties the dof set, clears the reactions vector (if allocated) and
 * asks the linear solver to release its internal data.
 */
void Clear() override
{
this->mDofSet = DofsArrayType();
if (this->mpReactionsVector != NULL)
TSparseSpace::Clear((this->mpReactionsVector));
// this->mReactionsVector = TSystemVectorType();
this->mpLinearSystemSolver->Clear();
KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 1) << "Clear Function called" << std::endl;
}
/**
 * @brief This function is designed to be called once to perform all the checks needed
 * on the input provided. Checks can be "expensive" as the function is designed
 * to catch user's errors.
 * @param rModelPart The model part of the problem to solve
 * @return 0 all ok
 */
int Check(ModelPart& rModelPart) override
{
KRATOS_TRY
// No specific checks are implemented for this builder and solver.
return 0;
KRATOS_CATCH("");
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
 * @brief Assembles a local LHS/RHS contribution into the global system.
 * @details Only rows whose equation id is below mEquationSystemSize (free dofs)
 * are assembled; each assembled row is protected by its per-row OpenMP lock so
 * several threads can assemble concurrently.
 * @param A Global system matrix
 * @param b Global RHS vector
 * @param LHS_Contribution Local (dense) matrix contribution
 * @param RHS_Contribution Local vector contribution
 * @param EquationId Global equation ids of the local dofs
 * @param lock_array One omp lock per global row (only with OpenMP)
 */
void Assemble(
TSystemMatrixType& A,
TSystemVectorType& b,
const LocalSystemMatrixType& LHS_Contribution,
const LocalSystemVectorType& RHS_Contribution,
const Element::EquationIdVectorType& EquationId
#ifdef _OPENMP
,std::vector< omp_lock_t >& lock_array
#endif
)
{
unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize)
{
// Lock the whole row while both b[i] and the row of A are updated.
#ifdef _OPENMP
omp_set_lock(&lock_array[i_global]);
#endif
b[i_global] += RHS_Contribution(i_local);
for (unsigned int j_local = 0; j_local < local_size; j_local++)
{
unsigned int j_global = EquationId[j_local];
if (j_global < BaseType::mEquationSystemSize)
{
A(i_global, j_global) += LHS_Contribution(i_local, j_local);
}
}
#ifdef _OPENMP
omp_unset_lock(&lock_array[i_global]);
#endif
}
//note that assembly on fixed rows is not performed here
}
}
//**************************************************************************
/**
 * @brief Builds the sparse (CSR) graph of the system matrix for the FSI problem.
 * @details For every node, the velocity dofs of the node and of its neighbours
 * (taken from the nodal SFD neighbour lists, with special ordering on interface
 * nodes) define the nonzero columns of the corresponding rows. Conditions add
 * their own couplings afterwards. Finally the per-row index sets are converted
 * into a ublas compressed_matrix filled with zeros.
 * @param pScheme The integration scheme considered
 * @param A The LHS matrix whose structure is created
 * @param rModelPart The model part of the problem to solve
 */
virtual void ConstructMatrixStructureForFSI(
typename TSchemeType::Pointer pScheme,
TSystemMatrixType& A,
ModelPart& rModelPart)
{
//filling with zero the matrix (creating the structure)
Timer::Start("MatrixStructure");
ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
// Getting the array of the conditions
const int nconditions = static_cast<int>(rModelPart.Conditions().size());
ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin();
const std::size_t equation_size = BaseType::mEquationSystemSize;
// One set of column indices per matrix row.
#ifdef USE_GOOGLE_HASH
std::vector<google::dense_hash_set<std::size_t> > indices(equation_size);
const std::size_t empty_key = 2 * equation_size + 10;
#else
std::vector<std::unordered_set<std::size_t> > indices(equation_size);
#endif
#pragma omp parallel for firstprivate(equation_size)
for (int iii = 0; iii < static_cast<int>(equation_size); iii++)
{
#ifdef USE_GOOGLE_HASH
indices[iii].set_empty_key(empty_key);
#else
indices[iii].reserve(40);
#endif
}
Element::EquationIdVectorType EquationId;
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
// --- Solid part: collect velocity dof ids of the node and its SFD neighbours.
if(itNode->Is(SOLID)){
const unsigned int localSize = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS).size();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
const unsigned int neighSize = nodalSFDneighboursId.size();
if (EquationId.size() != localSize)
EquationId.resize(localSize, false);
unsigned int firstCol=0;
const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X);
// The node's own velocity dofs occupy the first `dimension` slots.
EquationId[0]=itNode->GetDof(VELOCITY_X,xDofPos).EquationId();
EquationId[1]=itNode->GetDof(VELOCITY_Y,xDofPos+1).EquationId();
if(dimension==3)
EquationId[2]=itNode->GetDof(VELOCITY_Z,xDofPos+2).EquationId();
if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){
// Interface node: neighbours must be inserted following the stored
// SFD neighbour ordering, so match each ordered id to a neighbour node.
NodeWeakPtrVectorType& neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
for (unsigned int i = 0; i< neighb_nodes.size(); i++)
{
unsigned int indexNode=i+1;
if(indexNode<neighSize){
unsigned int other_neigh_nodes_id=nodalSFDneighboursId[indexNode];
firstCol+=dimension;
for (unsigned int k = 0; k< neighb_nodes.size(); k++)
{
unsigned int neigh_nodes_id=neighb_nodes[k].Id();
if(neigh_nodes_id==other_neigh_nodes_id){
EquationId[firstCol] =neighb_nodes[k].GetDof(VELOCITY_X,xDofPos).EquationId();
EquationId[firstCol+1] =neighb_nodes[k].GetDof(VELOCITY_Y,xDofPos+1).EquationId();
if(dimension==3){
EquationId[firstCol+2]=neighb_nodes[k].GetDof(VELOCITY_Z,xDofPos+2).EquationId();
}
break;
}
}
}
}
}else{
// Non-interface node: neighbours are taken in their stored order.
NodeWeakPtrVectorType& neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
for (unsigned int i = 0; i< neighb_nodes.size(); i++)
{
firstCol+=dimension;
EquationId[firstCol] =neighb_nodes[i].GetDof(VELOCITY_X,xDofPos).EquationId();
EquationId[firstCol+1] =neighb_nodes[i].GetDof(VELOCITY_Y,xDofPos+1).EquationId();
if(dimension==3){
EquationId[firstCol+2]=neighb_nodes[i].GetDof(VELOCITY_Z,xDofPos+2).EquationId();
}
}
}
}
// --- Fluid part (pure fluid nodes, and interface nodes again with the
// fluid neighbour list): same collection logic with the fluid variables.
if((itNode->Is(FLUID) && itNode->IsNot(SOLID)) || itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true)
{
const unsigned int localSize = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS).size();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
const unsigned int neighSize = nodalSFDneighboursId.size();
if (EquationId.size() != localSize)
EquationId.resize(localSize, false);
unsigned int firstCol=0;
const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X);
EquationId[0]=itNode->GetDof(VELOCITY_X,xDofPos).EquationId();
EquationId[1]=itNode->GetDof(VELOCITY_Y,xDofPos+1).EquationId();
if(dimension==3)
EquationId[2]=itNode->GetDof(VELOCITY_Z,xDofPos+2).EquationId();
if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){
NodeWeakPtrVectorType& neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
for (unsigned int i = 0; i< neighb_nodes.size(); i++)
{
unsigned int indexNode=i+1;
if(indexNode<neighSize){
unsigned int other_neigh_nodes_id=nodalSFDneighboursId[indexNode];
firstCol+=dimension;
for (unsigned int k = 0; k< neighb_nodes.size(); k++)
{
unsigned int neigh_nodes_id=neighb_nodes[k].Id();
if(neigh_nodes_id==other_neigh_nodes_id){
EquationId[firstCol] =neighb_nodes[k].GetDof(VELOCITY_X,xDofPos).EquationId();
EquationId[firstCol+1] =neighb_nodes[k].GetDof(VELOCITY_Y,xDofPos+1).EquationId();
if(dimension==3){
EquationId[firstCol+2]=neighb_nodes[k].GetDof(VELOCITY_Z,xDofPos+2).EquationId();
}
break;
}
}
}
}
}else{
NodeWeakPtrVectorType& neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
for (unsigned int i = 0; i< neighb_nodes.size(); i++)
{
firstCol+=dimension;
EquationId[firstCol] =neighb_nodes[i].GetDof(VELOCITY_X,xDofPos).EquationId();
EquationId[firstCol+1] =neighb_nodes[i].GetDof(VELOCITY_Y,xDofPos+1).EquationId();
if(dimension==3){
EquationId[firstCol+2]=neighb_nodes[i].GetDof(VELOCITY_Z,xDofPos+2).EquationId();
}
}
}
}
// Register the full coupling block: every pair of free equation ids in
// EquationId produces a nonzero entry. Rows are guarded by per-row locks.
for (std::size_t i = 0; i < EquationId.size(); i++)
{
if (EquationId[i] < BaseType::mEquationSystemSize)
{
#ifdef _OPENMP
omp_set_lock(&mlock_array[EquationId[i]]);
#endif
auto& row_indices = indices[EquationId[i]];
for (auto it = EquationId.begin(); it != EquationId.end(); it++)
{
if (*it < BaseType::mEquationSystemSize)
row_indices.insert(*it);
}
#ifdef _OPENMP
omp_unset_lock(&mlock_array[EquationId[i]]);
#endif
}
}
}
// Add the couplings introduced by the conditions.
Element::EquationIdVectorType ids(3, 0);
#pragma omp parallel for firstprivate(nconditions, ids)
for (int iii = 0; iii<nconditions; iii++)
{
typename ConditionsArrayType::iterator i_condition = cond_begin + iii;
pScheme->Condition_EquationId( *(i_condition.base()) , ids, CurrentProcessInfo);
for (std::size_t i = 0; i < ids.size(); i++)
{
if (ids[i] < BaseType::mEquationSystemSize)
{
#ifdef _OPENMP
omp_set_lock(&mlock_array[ids[i]]);
#endif
auto& row_indices = indices[ids[i]];
for (auto it = ids.begin(); it != ids.end(); it++)
{
if (*it < BaseType::mEquationSystemSize)
row_indices.insert(*it);
}
#ifdef _OPENMP
omp_unset_lock(&mlock_array[ids[i]]);
#endif
}
}
}
//count the row sizes
unsigned int nnz = 0;
for (unsigned int i = 0; i < indices.size(); i++)
nnz += indices[i].size();
A = boost::numeric::ublas::compressed_matrix<double>(indices.size(), indices.size(), nnz);
double* Avalues = A.value_data().begin();
std::size_t* Arow_indices = A.index1_data().begin();
std::size_t* Acol_indices = A.index2_data().begin();
//filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
// Row pointers are a prefix sum of the row sizes, hence inherently sequential.
Arow_indices[0] = 0;
for (int i = 0; i < static_cast<int>(A.size1()); i++)
Arow_indices[i + 1] = Arow_indices[i] + indices[i].size();
// Fill the (sorted) column indices of each row and zero-initialize the values.
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(A.size1()); i++)
{
const unsigned int row_begin = Arow_indices[i];
const unsigned int row_end = Arow_indices[i + 1];
unsigned int k = row_begin;
for (auto it = indices[i].begin(); it != indices[i].end(); it++)
{
Acol_indices[k] = *it;
Avalues[k] = 0.0;
k++;
}
std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]);
}
A.set_filled(indices.size() + 1, nnz);
Timer::Stop("MatrixStructure");
}
/**
 * @brief Adds a local LHS contribution into the global system matrix.
 * @details Rows and columns whose equation id is not below mEquationSystemSize
 * correspond to restrained dofs and are skipped.
 * @param A Global system matrix
 * @param LHS_Contribution Dense local matrix of one element/condition
 * @param EquationId Global equation ids of the local dofs
 */
void AssembleLHS(
TSystemMatrixType& A,
LocalSystemMatrixType& LHS_Contribution,
Element::EquationIdVectorType& EquationId
)
{
    const unsigned int n_local_dofs = LHS_Contribution.size1();
    for (unsigned int row = 0; row < n_local_dofs; ++row)
    {
        const unsigned int global_row = EquationId[row];
        if (global_row >= BaseType::mEquationSystemSize)
            continue; // restrained dof: its row is not assembled
        for (unsigned int col = 0; col < n_local_dofs; ++col)
        {
            const unsigned int global_col = EquationId[col];
            if (global_col < BaseType::mEquationSystemSize)
                A(global_row, global_col) += LHS_Contribution(row, col);
        }
    }
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
#ifdef _OPENMP
std::vector< omp_lock_t > mlock_array;
#endif
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
 * @brief Appends @p candidate to @p v only if it is not already contained.
 * @details Performs a linear membership scan, so the vector keeps its
 * insertion order and stays duplicate-free.
 * @param v Vector of indices to extend
 * @param candidate Index to insert if absent
 */
inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate)
{
    for (const std::size_t& existing : v)
    {
        if (existing == candidate)
            return; // already present: nothing to do
    }
    v.push_back(candidate);
}
/**
 * @brief Adds a local RHS contribution into the global RHS vector.
 * @details Free dofs (equation id below mEquationSystemSize) are always
 * assembled into b. When the reactions flag is active, fixed dofs are
 * additionally accumulated into the reactions vector, indexed relative to
 * mEquationSystemSize. Updates are done with omp atomic so the routine can be
 * called from parallel regions.
 * @param b Global RHS vector
 * @param RHS_Contribution Local vector contribution
 * @param EquationId Global equation ids of the local dofs
 */
void AssembleRHS(
TSystemVectorType& b,
const LocalSystemVectorType& RHS_Contribution,
const Element::EquationIdVectorType& EquationId
)
{
unsigned int local_size = RHS_Contribution.size();
if (BaseType::mCalculateReactionsFlag == false)
{
// No reactions requested: only free dofs contribute, fixed dofs are ignored.
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
const unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize) //free dof
{
// ASSEMBLING THE SYSTEM VECTOR
double& b_value = b[i_global];
const double& rhs_value = RHS_Contribution[i_local];
#pragma omp atomic
b_value += rhs_value;
}
}
}
else
{
TSystemVectorType& ReactionsVector = *BaseType::mpReactionsVector;
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
const unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize) //free dof
{
// ASSEMBLING THE SYSTEM VECTOR
double& b_value = b[i_global];
const double& rhs_value = RHS_Contribution[i_local];
#pragma omp atomic
b_value += rhs_value;
}
else //fixed dof
{
// Fixed dofs accumulate their residual into the reactions vector.
double& b_value = ReactionsVector[i_global - BaseType::mEquationSystemSize];
const double& rhs_value = RHS_Contribution[i_local];
#pragma omp atomic
b_value += rhs_value;
}
}
}
}
//**************************************************************************
/**
 * @brief Adds a local LHS contribution into the global matrix, assembling
 * complete rows for the free dofs.
 * @details Unlike AssembleLHS, columns are NOT filtered against
 * mEquationSystemSize: every column of a free row is written, so the matrix is
 * expected to also hold the columns of the fixed dofs.
 * @param A Global system matrix
 * @param LHS_Contribution Dense local matrix of one element/condition
 * @param EquationId Global equation ids of the local dofs
 */
void AssembleLHS_CompleteOnFreeRows(
TSystemMatrixType& A,
LocalSystemMatrixType& LHS_Contribution,
Element::EquationIdVectorType& EquationId
)
{
    unsigned int local_size = LHS_Contribution.size1();
    for (unsigned int i_local = 0; i_local < local_size; i_local++)
    {
        unsigned int i_global = EquationId[i_local];
        if (i_global < BaseType::mEquationSystemSize)
        {
            for (unsigned int j_local = 0; j_local < local_size; j_local++)
            {
                // Use unsigned int for consistency with AssembleLHS and to avoid
                // a signed/unsigned conversion of the equation id.
                unsigned int j_global = EquationId[j_local];
                A(i_global, j_global) += LHS_Contribution(i_local, j_local);
            }
        }
    }
}
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class NodalResidualBasedEliminationBuilderAndSolverForFSI */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_FOR_FSI defined */
|
openmp.c | #include <stdio.h>
#include <omp.h>
/* Launch the default-sized OpenMP thread team; each thread announces its id. */
int main(void) {
#pragma omp parallel
    {
        const int tid = omp_get_thread_num();
        printf("Hello, World... from thread = %d\n", tid);
    }
    return 0;
}
|
libomp-example.c | #include <stdio.h>
#include <omp.h>
// https://medium.com/swlh/openmp-on-ubuntu-1145355eeb2
// sudo apt install libomp-dev
// gcc flag: -fopenmp
// link with gomp library
// libgomp, the GNU Offloading and Multi Processing Runtime Library
// https://gcc.gnu.org/onlinedocs/libgomp/
// https://www.openmp.org/wp-content/uploads/OpenMPRef-5.0-0519-web.pdf
// https://www.openmp.org/resources/tutorials-articles/
// https://hpc-tutorials.llnl.gov/posix/
int main() {
    /* Request a team of 10 threads for the following parallel regions. */
    omp_set_num_threads(10);
#pragma omp parallel
    {
        const int tid = omp_get_thread_num();
        printf("thread %d\n", tid);
    }
    printf("***\n");
    /* Work-sharing loop: the 10 iterations are distributed over the team. */
#pragma omp parallel for
    for (int idx = 0; idx < 10; idx++) {
        printf("%i\n", idx);
    }
    printf("***\n");
    return 0;
}
|
update_ops_named_Z.c |
#include "constant.h"
#include "update_ops.h"
#include "utility.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
//void Z_gate_old_single(UINT target_qubit_index, CTYPE *state, ITYPE dim);
//void Z_gate_old_parallel(UINT target_qubit_index, CTYPE *state, ITYPE dim);
//void Z_gate_single(UINT target_qubit_index, CTYPE *state, ITYPE dim);
//void Z_gate_parallel(UINT target_qubit_index, CTYPE *state, ITYPE dim);
/* Applies the Pauli-Z gate on the given target qubit of a state vector of
 * dimension dim, dispatching at compile time (SIMD / OpenMP availability) and
 * at run time (problem size vs. threshold) to the best implementation. */
void Z_gate(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
//Z_gate_old_single(target_qubit_index, state, dim);
//Z_gate_old_parallel(target_qubit_index, state, dim);
//Z_gate_single(target_qubit_index, state, dim);
//Z_gate_single_simd(target_qubit_index, state, dim);
//Z_gate_single_unroll(target_qubit_index, state, dim);
//Z_gate_parallel(target_qubit_index, state, dim);
//return;
#ifdef _USE_SIMD
#ifdef _OPENMP
/* Below 2^13 amplitudes the threading overhead dominates: stay single-threaded. */
UINT threshold = 13;
if (dim < (((ITYPE)1) << threshold)) {
Z_gate_single_simd(target_qubit_index, state, dim);
}
else {
Z_gate_parallel_simd(target_qubit_index, state, dim);
}
#else
Z_gate_single_simd(target_qubit_index, state, dim);
#endif
#else
#ifdef _OPENMP
UINT threshold = 13;
if (dim < (((ITYPE)1) << threshold)) {
Z_gate_single_unroll(target_qubit_index, state, dim);
}
else {
Z_gate_parallel_unroll(target_qubit_index, state, dim);
}
#else
Z_gate_single_unroll(target_qubit_index, state, dim);
#endif
#endif
}
/* Single-threaded Pauli-Z: negate every amplitude whose target bit is 1.
 * The inner loop is unrolled by two (two consecutive amplitudes share the
 * same high/low mask decomposition when the target qubit is not bit 0). */
void Z_gate_single_unroll(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    const ITYPE half_dim = dim / 2;
    const ITYPE target_mask = (1ULL << target_qubit_index);
    const ITYPE lower_mask = target_mask - 1;
    const ITYPE upper_mask = ~lower_mask;
    ITYPE iter = 0;
    if (target_qubit_index == 0) {
        /* Target bit is the LSB: the affected amplitudes are simply the odd ones. */
        for (iter = 1; iter < dim; iter += 2) {
            state[iter] *= -1;
        }
    }
    else {
        for (iter = 0; iter < half_dim; iter += 2) {
            /* Insert a 1 at the target bit position to index the |...1...> amplitude. */
            ITYPE basis = (iter & lower_mask) + ((iter & upper_mask) << 1) + target_mask;
            state[basis] *= -1;
            state[basis + 1] *= -1;
        }
    }
}
#ifdef _OPENMP
/* OpenMP variant of Z_gate_single_unroll: identical indexing, with the
 * stride-2 loops distributed over the thread team. Iterations touch disjoint
 * amplitudes, so no synchronization is needed. */
void Z_gate_parallel_unroll(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    const ITYPE half_dim = dim / 2;
    const ITYPE target_mask = (1ULL << target_qubit_index);
    const ITYPE lower_mask = target_mask - 1;
    const ITYPE upper_mask = ~lower_mask;
    ITYPE iter = 0;
    if (target_qubit_index == 0) {
        /* Target bit is the LSB: negate the odd-indexed amplitudes. */
#pragma omp parallel for
        for (iter = 1; iter < dim; iter += 2) {
            state[iter] *= -1;
        }
    }
    else {
#pragma omp parallel for
        for (iter = 0; iter < half_dim; iter += 2) {
            ITYPE basis = (iter & lower_mask) + ((iter & upper_mask) << 1) + target_mask;
            state[basis] *= -1;
            state[basis + 1] *= -1;
        }
    }
}
#endif
#ifdef _USE_SIMD
/* AVX variant of the single-threaded Pauli-Z gate. Each unrolled pair of
 * amplitudes is reinterpreted as 4 doubles and negated with one 256-bit
 * multiply (assumes CTYPE is a two-double complex value -- consistent with the
 * scalar path negating state[basis] and state[basis+1]). */
void Z_gate_single_simd(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
const ITYPE loop_dim = dim / 2;
const ITYPE mask = (1ULL << target_qubit_index);
const ITYPE mask_low = mask - 1;
const ITYPE mask_high = ~mask_low;
ITYPE state_index = 0;
__m256d minus_one = _mm256_set_pd(-1,-1,-1,-1);
if (target_qubit_index == 0) {
/* Odd-indexed amplitudes are not contiguous, so fall back to scalar code. */
for (state_index = 1; state_index < dim; state_index += 2) {
state[state_index] *= -1;
}
}
else {
for (state_index = 0; state_index < loop_dim; state_index += 2) {
/* Insert a 1 at the target bit to address the |...1...> amplitude pair. */
ITYPE basis_index = (state_index&mask_low) + ((state_index&mask_high) << 1) + mask;
double* ptr0 = (double*)(state + basis_index);
__m256d data0 = _mm256_loadu_pd(ptr0);
data0 = _mm256_mul_pd(data0, minus_one);
_mm256_storeu_pd(ptr0, data0);
}
}
}
#ifdef _OPENMP
/* OpenMP + AVX variant of the Pauli-Z gate: same indexing and 256-bit negation
 * as Z_gate_single_simd, with the loops distributed over the thread team.
 * Iterations write disjoint amplitudes, so no synchronization is required. */
void Z_gate_parallel_simd(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
const ITYPE loop_dim = dim / 2;
const ITYPE mask = (1ULL << target_qubit_index);
const ITYPE mask_low = mask - 1;
const ITYPE mask_high = ~mask_low;
ITYPE state_index = 0;
__m256d minus_one = _mm256_set_pd(-1, -1, -1, -1);
if (target_qubit_index == 0) {
/* Odd-indexed amplitudes are not contiguous: scalar fallback. */
#pragma omp parallel for
for (state_index = 1; state_index < dim; state_index += 2) {
state[state_index] *= -1;
}
}
else {
#pragma omp parallel for
for (state_index = 0; state_index < loop_dim; state_index += 2) {
ITYPE basis_index = (state_index&mask_low) + ((state_index&mask_high) << 1) + mask;
double* ptr0 = (double*)(state + basis_index);
__m256d data0 = _mm256_loadu_pd(ptr0);
data0 = _mm256_mul_pd(data0, minus_one);
_mm256_storeu_pd(ptr0, data0);
}
}
}
#endif
#endif
/*
void Z_gate_old_single(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
const ITYPE loop_dim = dim / 2;
ITYPE state_index;
ITYPE mask = (1ULL << target_qubit_index);
for (state_index = 0; state_index < loop_dim; ++state_index) {
ITYPE temp_index = insert_zero_to_basis_index(state_index, mask, target_qubit_index) ^ mask;
state[temp_index] *= -1;
}
}
void Z_gate_old_parallel(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
const ITYPE loop_dim = dim / 2;
ITYPE state_index;
ITYPE mask = (1ULL << target_qubit_index);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (state_index = 0; state_index < loop_dim; ++state_index) {
ITYPE temp_index = insert_zero_to_basis_index(state_index, mask, target_qubit_index) ^ mask;
state[temp_index] *= -1;
}
}
void Z_gate_single(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
const ITYPE loop_dim = dim / 2;
const ITYPE mask = (1ULL << target_qubit_index);
const ITYPE mask_low = mask - 1;
const ITYPE mask_high = ~mask_low;
ITYPE state_index = 0;
for (state_index = 0; state_index < loop_dim; ++state_index) {
ITYPE basis_index = (state_index&mask_low) + ((state_index&mask_high) << 1) + mask;
state[basis_index] *= -1;
}
}
#ifdef _OPENMP
void Z_gate_parallel(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
const ITYPE loop_dim = dim / 2;
const ITYPE mask = (1ULL << target_qubit_index);
const ITYPE mask_low = mask - 1;
const ITYPE mask_high = ~mask_low;
ITYPE state_index = 0;
#pragma omp parallel for
for (state_index = 0; state_index < loop_dim; ++state_index) {
ITYPE basis_index = (state_index&mask_low) + ((state_index&mask_high) << 1) + mask;
state[basis_index] *= -1;
}
}
#endif
*/ |
convolution_5x5.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if defined(__ARM_NEON)
// conv5x5s1_neon: 5x5 convolution, stride 1, fp32, NEON-accelerated.
// Accumulates every input channel into each output channel; the output is
// pre-filled with the bias, so each channel pass does out += conv(img, k).
// The main loop produces TWO output rows per iteration (outptr/outptr2),
// reusing the four input rows the two windows share; a single-row loop
// handles an odd trailing output row.
static void conv5x5s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    // int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    // kernel layout: outch x inch x 25 contiguous floats; bias may be null.
    const float* kernel = _kernel;
    const float* bias = _bias;
    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);
        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);
        for (int q=0; q<inch; q++)
        {
            // outptr2 is the second output row processed in the same pass.
            float* outptr = out;
            float* outptr2 = outptr + outw;
            const float* img0 = bottom_blob.channel(q);
            const float* kernel0 = kernel + p*inch*25 + q*25;
            // r0..r4 feed output row i; r1..r5 feed output row i+1.
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;
            const float* r3 = img0 + w*3;
            const float* r4 = img0 + w*4;
            const float* r5 = img0 + w*5;
            // k0..k4 = the five 5-tap kernel rows (scalar tail path).
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 5;
            const float* k2 = kernel0 + 10;
            const float* k3 = kernel0 + 15;
            const float* k4 = kernel0 + 20;
#if defined(__ARM_NEON)
            // The 25 kernel taps loaded as six overlapping q-registers:
            // _kN..N+3 holds taps N..N+3; tap 24 is broadcast to all lanes.
            float32x4_t _k0123 = vld1q_f32(kernel0);
            float32x4_t _k4567 = vld1q_f32(kernel0+4);
            float32x4_t _k891011 = vld1q_f32(kernel0+8);
            float32x4_t _k12131415 = vld1q_f32(kernel0+12);
            float32x4_t _k16171819 = vld1q_f32(kernel0+16);
            float32x4_t _k20212223 = vld1q_f32(kernel0+20);
            float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]);
#endif // __ARM_NEON
            int i = 0;
            // Two output rows per iteration.
            for (; i+1 < outh; i+=2)
            {
#if defined(__ARM_NEON)
                // nn = number of 4-wide vector steps, remain = scalar tail.
                int nn = outw >> 2;
                int remain = outw - (nn << 2);
#else
                int remain = outw;
#endif // __ARM_NEON
#if defined(__ARM_NEON)
#if __aarch64__
                // AArch64 path: intrinsics only. Each step computes 4
                // outputs for both rows. _rXY = row X shifted left by Y
                // lanes (the 5 horizontal taps), built with vext from two
                // adjacent loads.
                for (; nn>0; nn--)
                {
                    float32x4_t _sum = vld1q_f32(outptr);
                    float32x4_t _sum2 = vld1q_f32(outptr2);
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r04 = vld1q_f32(r0 + 4);
                    float32x4_t _r01 = vextq_f32(_r00, _r04, 1);
                    float32x4_t _r02 = vextq_f32(_r00, _r04, 2);
                    float32x4_t _r03 = vextq_f32(_r00, _r04, 3);
                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r14 = vld1q_f32(r1 + 4);
                    float32x4_t _r11 = vextq_f32(_r10, _r14, 1);
                    float32x4_t _r12 = vextq_f32(_r10, _r14, 2);
                    float32x4_t _r13 = vextq_f32(_r10, _r14, 3);
                    float32x4_t _r20 = vld1q_f32(r2);
                    float32x4_t _r24 = vld1q_f32(r2 + 4);
                    float32x4_t _r21 = vextq_f32(_r20, _r24, 1);
                    float32x4_t _r22 = vextq_f32(_r20, _r24, 2);
                    float32x4_t _r23 = vextq_f32(_r20, _r24, 3);
                    float32x4_t _r30 = vld1q_f32(r3);
                    float32x4_t _r34 = vld1q_f32(r3 + 4);
                    float32x4_t _r31 = vextq_f32(_r30, _r34, 1);
                    float32x4_t _r32 = vextq_f32(_r30, _r34, 2);
                    float32x4_t _r33 = vextq_f32(_r30, _r34, 3);
                    float32x4_t _r40 = vld1q_f32(r4);
                    float32x4_t _r44 = vld1q_f32(r4 + 4);
                    float32x4_t _r41 = vextq_f32(_r40, _r44, 1);
                    float32x4_t _r42 = vextq_f32(_r40, _r44, 2);
                    float32x4_t _r43 = vextq_f32(_r40, _r44, 3);
                    float32x4_t _r50 = vld1q_f32(r5);
                    float32x4_t _r54 = vld1q_f32(r5 + 4);
                    float32x4_t _r51 = vextq_f32(_r50, _r54, 1);
                    float32x4_t _r52 = vextq_f32(_r50, _r54, 2);
                    float32x4_t _r53 = vextq_f32(_r50, _r54, 3);
                    // Row i: 25 fused multiply-adds, taps 0..24 against r0..r4.
                    _sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r10, _k4567, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r11, _k4567, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r12, _k4567, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r13, _k891011, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r14, _k891011, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r20, _k891011, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r21, _k891011, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r22, _k12131415, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r23, _k12131415, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r24, _k12131415, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r30, _k12131415, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r31, _k16171819, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r32, _k16171819, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r33, _k16171819, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r34, _k16171819, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r40, _k20212223, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r41, _k20212223, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r42, _k20212223, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r43, _k20212223, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r44, _k24242424, 0);
                    // Row i+1: same taps applied one input row lower (r1..r5).
                    _sum2 = vfmaq_laneq_f32(_sum2, _r10, _k0123, 0);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r11, _k0123, 1);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r12, _k0123, 2);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r13, _k0123, 3);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r14, _k4567, 0);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r20, _k4567, 1);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r21, _k4567, 2);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r22, _k4567, 3);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r23, _k891011, 0);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r24, _k891011, 1);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r30, _k891011, 2);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r31, _k891011, 3);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r32, _k12131415, 0);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r33, _k12131415, 1);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r34, _k12131415, 2);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r40, _k12131415, 3);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r41, _k16171819, 0);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r42, _k16171819, 1);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r43, _k16171819, 2);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r44, _k16171819, 3);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r50, _k20212223, 0);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r51, _k20212223, 1);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r52, _k20212223, 2);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r53, _k20212223, 3);
                    _sum2 = vfmaq_laneq_f32(_sum2, _r54, _k24242424, 0);
                    vst1q_f32(outptr, _sum);
                    vst1q_f32(outptr2, _sum2);
                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
                    r4 += 4;
                    r5 += 4;
                    outptr += 4;
                    outptr2 += 4;
                }
#else
                // ARMv7 path: hand-scheduled inline assembly computing the
                // same two-row, 4-wide accumulation. q7/q8 hold the two
                // output vectors; q13/q14 are partial-sum accumulators
                // folded in at the end of each iteration. %18..%24 are the
                // kernel q-registers (see the operand list below).
                if (nn > 0)
                {
                    asm volatile(
                        // "veor q13, q13 \n"
                        // "veor q14, q14 \n"
                        "pld [%1, #128] \n"
                        "vld1.f32 {d14-d15}, [%1] \n"// q7 = out
                        "0: \n"
                        // q11 = rx1 / rx3
                        // q12 = rx2
                        // q13 q14 = intermediate sum register
                        "pld [%2, #128] \n"
                        "vld1.f32 {d16-d17}, [%2] \n"// q8 = out2
                        "pld [%4, #256] \n"
                        // r1
                        "vld1.f32 {d18-d21}, [%4] \n"// q9 q10 = r10 r14
                        "add %4, #16 \n"
                        "vext.32 q11, q9, q10, #1 \n"// r11
                        "vmul.f32 q13, q9, %e19[1] \n"
                        "vmla.f32 q8, q9, %e18[0] \n"
                        "vext.32 q12, q9, q10, #2 \n"// r12
                        "vmla.f32 q7, q11, %f19[0] \n"
                        "vmul.f32 q14, q11, %e18[1] \n"
                        "vext.32 q11, q9, q10, #3 \n"// r13
                        "vmla.f32 q13, q12, %f19[1] \n"
                        "vmla.f32 q8, q12, %f18[0] \n"
                        "vmla.f32 q7, q11, %e20[0] \n"
                        "vmla.f32 q14, q11, %f18[1] \n"
                        "pld [%5, #256] \n"
                        "vmla.f32 q13, q10, %e20[1] \n"
                        "vmla.f32 q8, q10, %e19[0] \n"
                        // r2
                        "vld1.f32 {d18-d21}, [%5] \n"// q9 q10 = r20 r24
                        "add %5, #16 \n"
                        "vext.32 q11, q9, q10, #1 \n"// r21
                        "vmla.f32 q7, q9, %f20[0] \n"
                        "vmla.f32 q14, q9, %e19[1] \n"
                        "vext.32 q12, q9, q10, #2 \n"// r22
                        "vmla.f32 q13, q11, %f20[1] \n"
                        "vmla.f32 q8, q11, %f19[0] \n"
                        "vext.32 q11, q9, q10, #3 \n"// r23
                        "vmla.f32 q7, q12, %e21[0] \n"
                        "vmla.f32 q14, q12, %f19[1] \n"
                        "vmla.f32 q13, q11, %e21[1] \n"
                        "vmla.f32 q8, q11, %e20[0] \n"
                        "pld [%6, #256] \n"
                        "vmla.f32 q7, q10, %f21[0] \n"
                        "vmla.f32 q14, q10, %e20[1] \n"
                        // r3
                        "vld1.f32 {d18-d21}, [%6] \n"// q9 q10 = r30 r34
                        "add %6, #16 \n"
                        "vext.32 q11, q9, q10, #1 \n"// r31
                        "vmla.f32 q13, q9, %f21[1] \n"
                        "vmla.f32 q8, q9, %f20[0] \n"
                        "vext.32 q12, q9, q10, #2 \n"// r32
                        "vmla.f32 q7, q11, %e22[0] \n"
                        "vmla.f32 q14, q11, %f20[1] \n"
                        "vext.32 q11, q9, q10, #3 \n"// r33
                        "vmla.f32 q13, q12, %e22[1] \n"
                        "vmla.f32 q8, q12, %e21[0] \n"
                        "vmla.f32 q7, q11, %f22[0] \n"
                        "vmla.f32 q14, q11, %e21[1] \n"
                        "pld [%7, #256] \n"
                        "vmla.f32 q13, q10, %f22[1] \n"
                        "vmla.f32 q8, q10, %f21[0] \n"
                        // r4
                        "vld1.f32 {d18-d21}, [%7] \n"// q9 q10 = r40 r44
                        "add %7, #16 \n"
                        "vext.32 q11, q9, q10, #1 \n"// r41
                        "vmla.f32 q7, q9, %e23[0] \n"
                        "vmla.f32 q14, q9, %f21[1] \n"
                        "vext.32 q12, q9, q10, #2 \n"// r42
                        "vmla.f32 q13, q11, %e23[1] \n"
                        "vmla.f32 q8, q11, %e22[0] \n"
                        "vext.32 q11, q9, q10, #3 \n"// r43
                        "vmla.f32 q7, q12, %f23[0] \n"
                        "vmla.f32 q14, q12, %e22[1] \n"
                        "vmla.f32 q13, q11, %f23[1] \n"
                        "vmla.f32 q8, q11, %f22[0] \n"
                        "pld [%3, #256] \n"
                        "vmla.f32 q7, q10, %e24[0] \n"
                        "vmla.f32 q14, q10, %f22[1] \n"
                        // r0 and r5
                        "vld1.f32 {d18-d21}, [%3] \n"// q9 q10 = r00 r04
                        "add %3, #16 \n"
                        "vext.32 q11, q9, q10, #1 \n"// r01
                        "vmla.f32 q13, q11, %e18[1] \n"
                        "vext.32 q12, q9, q10, #2 \n"// r02
                        "vmla.f32 q7, q12, %f18[0] \n"
                        "vext.32 q11, q9, q10, #3 \n"// r03
                        "pld [%8, #256] \n"
                        "vmla.f32 q13, q11, %f18[1] \n"
                        // r5
                        "vld1.f32 {d22-d25}, [%8] \n"// q11 q12 = r50 r54
                        "add %8, #16 \n"
                        "vmla.f32 q8, q11, %e23[0] \n"
                        "vmla.f32 q14, q12, %e24[0] \n"
                        "vmla.f32 q7, q9, %e18[0] \n"
                        "vmla.f32 q13, q10, %e19[0] \n"
                        "vext.32 q9, q11, q12, #1 \n"// r51
                        "vext.32 q10, q11, q12, #2 \n"// r52
                        "vmla.f32 q14, q9, %e23[1] \n"
                        "vext.32 q9, q11, q12, #3 \n"// r53
                        "vmla.f32 q8, q10, %f23[0] \n"
                        "vmla.f32 q14, q9, %f23[1] \n"
                        "vadd.f32 q7, q7, q13 \n"
                        // "veor q13, q13 \n"
                        "vst1.f32 {d14-d15}, [%1]! \n"
                        "vadd.f32 q8, q8, q14 \n"
                        "pld [%1, #128] \n"
                        "vld1.f32 {d14-d15}, [%1] \n"// q7 = out
                        // "veor q14, q14 \n"
                        "vst1.f32 {d16-d17}, [%2]! \n"
                        "subs %0, #1 \n"
                        "bne 0b \n"
                        : "=r"(nn), // %0
                        "=r"(outptr), // %1
                        "=r"(outptr2), // %2
                        "=r"(r0), // %3
                        "=r"(r1), // %4
                        "=r"(r2), // %5
                        "=r"(r3), // %6
                        "=r"(r4), // %7
                        "=r"(r5) // %8
                        : "0"(nn),
                        "1"(outptr),
                        "2"(outptr2),
                        "3"(r0),
                        "4"(r1),
                        "5"(r2),
                        "6"(r3),
                        "7"(r4),
                        "8"(r5),
                        "w"(_k0123), // %18
                        "w"(_k4567), // %19
                        "w"(_k891011), // %20
                        "w"(_k12131415), // %21
                        "w"(_k16171819), // %22
                        "w"(_k20212223), // %23
                        "w"(_k24242424) // %24
                        : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                    );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // Scalar (or short-vector) tail for the last outw%4 columns,
                // still producing both output rows per step.
                for (; remain>0; remain--)
                {
                    float sum = 0;
                    float sum2 = 0;
#if defined(__ARM_NEON)
                    // Vectorize the first 4 taps of each kernel row; the
                    // fifth tap of each row is handled via _k_t4/_r_t4 and
                    // the scalar r4[4]*k4[4] / r5[4]*k4[4] products.
                    float32x4_t _r1 = vld1q_f32(r1);
                    float32x4_t _k1 = vld1q_f32(k1);
                    float32x4_t _sum = vmulq_f32(_r1, _k1);
                    float32x4_t _sum2 = vmulq_f32(_r1, _k0123);
                    float32x4_t _r2 = vld1q_f32(r2);
                    float32x4_t _k2 = vld1q_f32(k2);
                    _sum = vmlaq_f32(_sum, _r2, _k2);
                    _sum2 = vmlaq_f32(_sum2, _r2, _k1);
                    float32x4_t _r3 = vld1q_f32(r3);
                    float32x4_t _k3 = vld1q_f32(k3);
                    _sum = vmlaq_f32(_sum, _r3, _k3);
                    _sum2 = vmlaq_f32(_sum2, _r3, _k2);
                    float32x4_t _r4 = vld1q_f32(r4);
                    _sum = vmlaq_f32(_sum, _r4, _k20212223);
                    _sum2 = vmlaq_f32(_sum2, _r4, _k3);
                    float32x4_t _r0 = vld1q_f32(r0);
                    _sum = vmlaq_f32(_sum, _r0, _k0123);
                    float32x4_t _r5 = vld1q_f32(r5);
                    _sum2 = vmlaq_f32(_sum2, _r5, _k20212223);
                    // _k_t4/_r_t4 start uninitialized; all four lanes are
                    // assigned below before use.
                    float32x4_t _k_t4;
                    _k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0);
                    _k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1);
                    _k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2);
                    _k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3);
                    float32x4_t _r_t4;
                    _r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0);
                    _r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1);
                    _r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2);
                    _r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3);
                    _sum = vmlaq_f32(_sum, _r_t4, _k_t4);
                    // Note: assignment (not +=) — this seeds sum with the
                    // only term not covered by the vector accumulators.
                    sum = r4[4] * k4[4];
                    // Shift _r_t4 so lanes become r1[4..3] pattern for row 2,
                    // then patch in r4[4].
                    _r_t4 = vextq_f32(_r_t4, _r_t4, 1);
                    _r_t4 = vsetq_lane_f32(r4[4], _r_t4, 3);
                    _sum2 = vmlaq_f32(_sum2, _r_t4, _k_t4);
                    sum2 = r5[4] * k4[4];
                    // Horizontal reduction of both accumulators at once.
                    float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                    float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2));
                    float32x2_t _ss_ss2 = vpadd_f32(_ss, _ss2);
                    sum += vget_lane_f32(_ss_ss2, 0);
                    sum2 += vget_lane_f32(_ss_ss2, 1);
#else
                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r0[3] * k0[3];
                    sum += r0[4] * k0[4];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r1[3] * k1[3];
                    sum += r1[4] * k1[4];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    sum += r2[3] * k2[3];
                    sum += r2[4] * k2[4];
                    sum += r3[0] * k3[0];
                    sum += r3[1] * k3[1];
                    sum += r3[2] * k3[2];
                    sum += r3[3] * k3[3];
                    sum += r3[4] * k3[4];
                    sum += r4[0] * k4[0];
                    sum += r4[1] * k4[1];
                    sum += r4[2] * k4[2];
                    sum += r4[3] * k4[3];
                    sum += r4[4] * k4[4];
                    sum2 += r1[0] * k0[0];
                    sum2 += r1[1] * k0[1];
                    sum2 += r1[2] * k0[2];
                    sum2 += r1[3] * k0[3];
                    sum2 += r1[4] * k0[4];
                    sum2 += r2[0] * k1[0];
                    sum2 += r2[1] * k1[1];
                    sum2 += r2[2] * k1[2];
                    sum2 += r2[3] * k1[3];
                    sum2 += r2[4] * k1[4];
                    sum2 += r3[0] * k2[0];
                    sum2 += r3[1] * k2[1];
                    sum2 += r3[2] * k2[2];
                    sum2 += r3[3] * k2[3];
                    sum2 += r3[4] * k2[4];
                    sum2 += r4[0] * k3[0];
                    sum2 += r4[1] * k3[1];
                    sum2 += r4[2] * k3[2];
                    sum2 += r4[3] * k3[3];
                    sum2 += r4[4] * k3[4];
                    sum2 += r5[0] * k4[0];
                    sum2 += r5[1] * k4[1];
                    sum2 += r5[2] * k4[2];
                    sum2 += r5[3] * k4[3];
                    sum2 += r5[4] * k4[4];
#endif // __ARM_NEON
                    *outptr += sum;
                    *outptr2 += sum2;
                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    r4++;
                    r5++;
                    outptr++;
                    outptr2++;
                }
                // Advance input rows: +4 skips the right border (kernel
                // width - 1), +w skips the extra row consumed because two
                // output rows were produced this iteration.
                r0 += 4 + w;
                r1 += 4 + w;
                r2 += 4 + w;
                r3 += 4 + w;
                r4 += 4 + w;
                r5 += 4 + w;
                // outptr already sits at the end of row i; skip over row i+1.
                outptr += outw;
                outptr2 += outw;
            }
            // Single remaining output row (odd outh).
            for (; i < outh; i++)
            {
#if defined(__ARM_NEON)
                int nn = outw >> 2;
                int remain = outw - (nn << 2);
#else
                int remain = outw;
#endif // __ARM_NEON
#if defined(__ARM_NEON)
#if __aarch64__
                // Same 25-tap accumulation as above, one output row only.
                for (; nn>0; nn--)
                {
                    float32x4_t _sum = vld1q_f32(outptr);
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r04 = vld1q_f32(r0 + 4);
                    float32x4_t _r01 = vextq_f32(_r00, _r04, 1);
                    float32x4_t _r02 = vextq_f32(_r00, _r04, 2);
                    float32x4_t _r03 = vextq_f32(_r00, _r04, 3);
                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r14 = vld1q_f32(r1 + 4);
                    float32x4_t _r11 = vextq_f32(_r10, _r14, 1);
                    float32x4_t _r12 = vextq_f32(_r10, _r14, 2);
                    float32x4_t _r13 = vextq_f32(_r10, _r14, 3);
                    float32x4_t _r20 = vld1q_f32(r2);
                    float32x4_t _r24 = vld1q_f32(r2 + 4);
                    float32x4_t _r21 = vextq_f32(_r20, _r24, 1);
                    float32x4_t _r22 = vextq_f32(_r20, _r24, 2);
                    float32x4_t _r23 = vextq_f32(_r20, _r24, 3);
                    float32x4_t _r30 = vld1q_f32(r3);
                    float32x4_t _r34 = vld1q_f32(r3 + 4);
                    float32x4_t _r31 = vextq_f32(_r30, _r34, 1);
                    float32x4_t _r32 = vextq_f32(_r30, _r34, 2);
                    float32x4_t _r33 = vextq_f32(_r30, _r34, 3);
                    float32x4_t _r40 = vld1q_f32(r4);
                    float32x4_t _r44 = vld1q_f32(r4 + 4);
                    float32x4_t _r41 = vextq_f32(_r40, _r44, 1);
                    float32x4_t _r42 = vextq_f32(_r40, _r44, 2);
                    float32x4_t _r43 = vextq_f32(_r40, _r44, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r10, _k4567, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r11, _k4567, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r12, _k4567, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r13, _k891011, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r14, _k891011, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r20, _k891011, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r21, _k891011, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r22, _k12131415, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r23, _k12131415, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r24, _k12131415, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r30, _k12131415, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r31, _k16171819, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r32, _k16171819, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r33, _k16171819, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r34, _k16171819, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r40, _k20212223, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r41, _k20212223, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r42, _k20212223, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r43, _k20212223, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r44, _k24242424, 0);
                    vst1q_f32(outptr, _sum);
                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
                    r4 += 4;
                    outptr += 4;
                }
#else
                // ARMv7 single-row asm: four partial sums (q7,q13,q14,q15)
                // reduced at the end of the loop body. %14..%20 are the
                // kernel q-registers.
                if (nn > 0)
                {
                    asm volatile(
                        // "veor q15, q15 \n"// _sum3 = 0;
                        "pld [%1, #128] \n"
                        "pld [%2, #256] \n"
                        "vld1.f32 {d16-d19}, [%2] \n"// _r00 = vld1q_f32(r0+j);
                        "add %2, #16 \n"
                        "0: \n"
                        "vld1.f32 {d14-d15}, [%1] \n"// _sum = vld1q_f32(outptr+j);
                        // "veor q13, q13 \n"// _sum2 = 0;
                        // "veor q14, q14 \n"// _sum3 = 0;
                        "vext.32 q10, q8, q9, #1 \n"// _r01
                        "vext.32 q11, q8, q9, #2 \n"// _r02
                        "vext.32 q12, q8, q9, #3 \n"// _r03
                        "vmla.f32 q7, q8, %e14[0] \n"
                        "vmul.f32 q13, q10, %e14[1] \n"
                        "pld [%3, #256] \n"
                        "vmul.f32 q14, q11, %f14[0] \n"
                        "vmul.f32 q15, q12, %f14[1] \n"
                        "vmla.f32 q7, q9, %e15[0] \n"
                        "vld1.f32 {d16-d19}, [%3] \n"
                        "add %3, #16 \n"
                        "vext.32 q10, q8, q9, #1 \n"
                        "vext.32 q11, q8, q9, #2 \n"
                        "vext.32 q12, q8, q9, #3 \n"
                        "vmla.f32 q7, q8, %e15[1] \n"
                        "vmla.f32 q13, q10, %f15[0] \n"
                        "pld [%4, #256] \n"
                        "vmla.f32 q14, q11, %f15[1] \n"
                        "vmla.f32 q15, q12, %e16[0] \n"
                        "vmla.f32 q7, q9, %e16[1] \n"
                        "vld1.f32 {d16-d19}, [%4] \n"
                        "add %4, #16 \n"
                        "vext.32 q10, q8, q9, #1 \n"
                        "vext.32 q11, q8, q9, #2 \n"
                        "vext.32 q12, q8, q9, #3 \n"
                        "vmla.f32 q7, q8, %f16[0] \n"
                        "vmla.f32 q13, q10, %f16[1] \n"
                        "pld [%5, #256] \n"
                        "vmla.f32 q14, q11, %e17[0] \n"
                        "vmla.f32 q15, q12, %e17[1] \n"
                        "vmla.f32 q7, q9, %f17[0] \n"
                        "vld1.f32 {d16-d19}, [%5] \n"
                        "add %5, #16 \n"
                        "vext.32 q10, q8, q9, #1 \n"
                        "vext.32 q11, q8, q9, #2 \n"
                        "vext.32 q12, q8, q9, #3 \n"
                        "vmla.f32 q7, q8, %f17[1] \n"
                        "vmla.f32 q13, q10, %e18[0] \n"
                        "pld [%6, #256] \n"
                        "vmla.f32 q14, q11, %e18[1] \n"
                        "vmla.f32 q15, q12, %f18[0] \n"
                        "vmla.f32 q7, q9, %f18[1] \n"
                        "vld1.f32 {d16-d19}, [%6] \n"
                        "add %6, #16 \n"
                        "vext.32 q10, q8, q9, #1 \n"
                        "vext.32 q11, q8, q9, #2 \n"
                        "vext.32 q12, q8, q9, #3 \n"
                        "vmla.f32 q7, q8, %e19[0] \n"
                        "vmla.f32 q13, q10, %e19[1] \n"
                        "vmla.f32 q14, q11, %f19[0] \n"
                        "vmla.f32 q15, q12, %f19[1] \n"
                        "vmla.f32 q7, q9, %e20[0] \n"
                        "vadd.f32 q14, q14, q15 \n"
                        "vadd.f32 q7, q7, q13 \n"
                        // "veor q15, q15 \n"// _sum3 = 0;
                        "pld [%2, #256] \n"
                        "vadd.f32 q7, q7, q14 \n"
                        "vld1.f32 {d16-d19}, [%2] \n"// _r00 = vld1q_f32(r0+j);
                        "add %2, #16 \n"
                        "vst1.f32 {d14-d15}, [%1]! \n"
                        "pld [%1, #128] \n"
                        "subs %0, #1 \n"
                        "bne 0b \n"
                        "sub %2, #16 \n"
                        : "=r"(nn), // %0
                        "=r"(outptr), // %1
                        "=r"(r0), // %2
                        "=r"(r1), // %3
                        "=r"(r2), // %4
                        "=r"(r3), // %5
                        "=r"(r4) // %6
                        : "0"(nn),
                        "1"(outptr),
                        "2"(r0),
                        "3"(r1),
                        "4"(r2),
                        "5"(r3),
                        "6"(r4),
                        "w"(_k0123), // %14
                        "w"(_k4567), // %15
                        "w"(_k891011), // %16
                        "w"(_k12131415), // %17
                        "w"(_k16171819), // %18
                        "w"(_k20212223), // %19
                        "w"(_k24242424) // %20
                        : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                    );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // Scalar tail for the single-row loop.
                for (; remain>0; remain--)
                {
                    float sum = 0;
#if defined(__ARM_NEON)
                    float32x4_t _r0 = vld1q_f32(r0);
                    float32x4_t _sum = vmulq_f32(_r0, _k0123);
                    float32x4_t _r1 = vld1q_f32(r1);
                    _sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1));
                    float32x4_t _r2 = vld1q_f32(r2);
                    _sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2));
                    float32x4_t _r3 = vld1q_f32(r3);
                    _sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3));
                    float32x4_t _r4 = vld1q_f32(r4);
                    _sum = vmlaq_f32(_sum, _r4, _k20212223);
                    // Fifth column of rows 0..3 handled as one lane vector;
                    // all lanes are set before use.
                    float32x4_t _k_t4;
                    _k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0);
                    _k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1);
                    _k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2);
                    _k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3);
                    float32x4_t _r_t4;
                    _r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0);
                    _r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1);
                    _r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2);
                    _r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3);
                    _sum = vmlaq_f32(_sum, _r_t4, _k_t4);
                    // Assignment (not +=): seeds sum with the last tap.
                    sum = r4[4] * k4[4];
                    float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                    _ss = vpadd_f32(_ss, _ss);
                    sum += vget_lane_f32(_ss, 0);
#else
                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r0[3] * k0[3];
                    sum += r0[4] * k0[4];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r1[3] * k1[3];
                    sum += r1[4] * k1[4];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    sum += r2[3] * k2[3];
                    sum += r2[4] * k2[4];
                    sum += r3[0] * k3[0];
                    sum += r3[1] * k3[1];
                    sum += r3[2] * k3[2];
                    sum += r3[3] * k3[3];
                    sum += r3[4] * k3[4];
                    sum += r4[0] * k4[0];
                    sum += r4[1] * k4[1];
                    sum += r4[2] * k4[2];
                    sum += r4[3] * k4[3];
                    sum += r4[4] * k4[4];
#endif
                    *outptr += sum;
                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    r4++;
                    outptr++;
                }
                // Skip the right border (kernel width - 1) to the next row.
                r0 += 4;
                r1 += 4;
                r2 += 4;
                r3 += 4;
                r4 += 4;
            }
        }
    }
}
// conv5x5s2_neon: 5x5 convolution, stride 2, fp32, NEON-accelerated.
// Output is pre-filled with the bias, then each input channel is
// accumulated. The vector path uses vld2 de-interleaving loads to split
// each input row into even/odd columns, from which the five stride-2
// horizontal taps are assembled with vext.
static void conv5x5s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    //int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    // After a row, pointers have advanced 2*outw; tailstep moves them to
    // the start of the next input row pair: (w - 2*outw) remaining in this
    // row, plus w to skip the row consumed by the vertical stride of 2.
    const int tailstep = w - 2*outw + w;
    const float* kernel = _kernel;
    const float* bias = _bias;
    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);
        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);
        for (int q=0; q<inch; q++)
        {
            float* outptr = out;
            const float* img0 = bottom_blob.channel(q);
            const float* kernel0 = kernel + p*inch*25 + q*25;
            // Five consecutive input rows feeding one output row.
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;
            const float* r3 = img0 + w*3;
            const float* r4 = img0 + w*4;
            // k0..k4 = the five 5-tap kernel rows (scalar tail path).
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 5;
            const float* k2 = kernel0 + 10;
            const float* k3 = kernel0 + 15;
            const float* k4 = kernel0 + 20;
#if defined(__ARM_NEON)
            // 25 kernel taps as six q-registers; tap 24 broadcast.
            float32x4_t _k0123 = vld1q_f32(kernel0);
            float32x4_t _k4567 = vld1q_f32(kernel0+4);
            float32x4_t _k891011 = vld1q_f32(kernel0+8);
            float32x4_t _k12131415 = vld1q_f32(kernel0+12);
            float32x4_t _k16171819 = vld1q_f32(kernel0+16);
            float32x4_t _k20212223 = vld1q_f32(kernel0+20);
            float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]);
#endif // __ARM_NEON
            for (int i = 0; i < outh; i++)
            {
#if defined(__ARM_NEON)
                int nn = outw >> 2;
                int remain = outw - (nn << 2);
#else
                int remain = outw;
#endif // __ARM_NEON
#if defined(__ARM_NEON)
#if __aarch64__
                // Each step computes 4 outputs, consuming 8 input columns
                // per row (stride 2). vld2 splits even/odd columns; vext
                // against the next even/odd pair forms columns 2,3,4.
                for (; nn>0; nn--)
                {
                    float32x4_t _sum = vld1q_f32(outptr);
                    float32x4x2_t _r00_02461357 = vld2q_f32(r0);
                    float32x4x2_t _r00nx2 = vld2q_f32(r0 + 8);
                    float32x4_t _r0_8101214 = _r00nx2.val[0];// 8 10 12 14
                    float32x4_t _r0_9111315 = _r00nx2.val[1];// 9 11 13 15
                    float32x4_t _r00 = _r00_02461357.val[0];// 0 2 4 6
                    float32x4_t _r01 = _r00_02461357.val[1];// 1 3 5 7
                    float32x4_t _r02 = vextq_f32(_r00, _r0_8101214, 1);// 2 4 6 8
                    float32x4_t _r03 = vextq_f32(_r01, _r0_9111315, 1);// 3 5 7 9
                    float32x4_t _r04 = vextq_f32(_r00, _r0_8101214, 2);// 4 6 8 10
                    float32x4x2_t _r10_02461357 = vld2q_f32(r1);
                    float32x4x2_t _r10nx2 = vld2q_f32(r1 + 8);
                    float32x4_t _r1_8101214 = _r10nx2.val[0];
                    float32x4_t _r1_9111315 = _r10nx2.val[1];
                    float32x4_t _r10 = _r10_02461357.val[0];
                    float32x4_t _r11 = _r10_02461357.val[1];
                    float32x4_t _r12 = vextq_f32(_r10, _r1_8101214, 1);
                    float32x4_t _r13 = vextq_f32(_r11, _r1_9111315, 1);
                    float32x4_t _r14 = vextq_f32(_r10, _r1_8101214, 2);
                    float32x4x2_t _r20_02461357 = vld2q_f32(r2);
                    float32x4x2_t _r20nx2 = vld2q_f32(r2 + 8);
                    float32x4_t _r2_8101214 = _r20nx2.val[0];
                    float32x4_t _r2_9111315 = _r20nx2.val[1];
                    float32x4_t _r20 = _r20_02461357.val[0];
                    float32x4_t _r21 = _r20_02461357.val[1];
                    float32x4_t _r22 = vextq_f32(_r20, _r2_8101214, 1);
                    float32x4_t _r23 = vextq_f32(_r21, _r2_9111315, 1);
                    float32x4_t _r24 = vextq_f32(_r20, _r2_8101214, 2);
                    float32x4x2_t _r30_02461357 = vld2q_f32(r3);
                    float32x4x2_t _r30nx2 = vld2q_f32(r3 + 8);
                    float32x4_t _r3_8101214 = _r30nx2.val[0];
                    float32x4_t _r3_9111315 = _r30nx2.val[1];
                    float32x4_t _r30 = _r30_02461357.val[0];
                    float32x4_t _r31 = _r30_02461357.val[1];
                    float32x4_t _r32 = vextq_f32(_r30, _r3_8101214, 1);
                    float32x4_t _r33 = vextq_f32(_r31, _r3_9111315, 1);
                    float32x4_t _r34 = vextq_f32(_r30, _r3_8101214, 2);
                    float32x4x2_t _r40_02461357 = vld2q_f32(r4);
                    float32x4x2_t _r40nx2 = vld2q_f32(r4 + 8);
                    float32x4_t _r4_8101214 = _r40nx2.val[0];
                    float32x4_t _r4_9111315 = _r40nx2.val[1];
                    float32x4_t _r40 = _r40_02461357.val[0];
                    float32x4_t _r41 = _r40_02461357.val[1];
                    float32x4_t _r42 = vextq_f32(_r40, _r4_8101214, 1);
                    float32x4_t _r43 = vextq_f32(_r41, _r4_9111315, 1);
                    float32x4_t _r44 = vextq_f32(_r40, _r4_8101214, 2);
                    // 25 fused multiply-adds, taps 0..24.
                    _sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r10, _k4567, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r11, _k4567, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r12, _k4567, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r13, _k891011, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r14, _k891011, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r20, _k891011, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r21, _k891011, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r22, _k12131415, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r23, _k12131415, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r24, _k12131415, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r30, _k12131415, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r31, _k16171819, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r32, _k16171819, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r33, _k16171819, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r34, _k16171819, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r40, _k20212223, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r41, _k20212223, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r42, _k20212223, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r43, _k20212223, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r44, _k24242424, 0);
                    vst1q_f32(outptr, _sum);
                    // Stride 2: 8 input columns consumed per 4 outputs.
                    r0 += 8;
                    r1 += 8;
                    r2 += 8;
                    r3 += 8;
                    r4 += 8;
                    outptr += 4;
                }
#else
                // ARMv7 asm equivalent: vld2 de-interleaves each row into
                // even (q8) / odd (q9) columns; q7,q13,q14,q15 are partial
                // sums reduced before the store. %14..%20 hold the kernel.
                if (nn > 0)
                {
                    asm volatile(
                        // "veor q15, q15 \n"// _sump3 = 0;
                        // "veor q13, q13 \n"// _sump2 = 0;
                        // "veor q14, q14 \n"// _sump3 = 0;
                        "pld [%2, #256] \n"
                        "vld2.f32 {d16-d19}, [%2]! \n"// q8 = 0 2 4 6 q9 = 1 3 5 7
                        "pld [%2, #256] \n"
                        "vld2.f32 {d20-d23}, [%2] \n"// q10 = 8 10 12 14 q11 = 9 11 13 15
                        "pld [%1, #128] \n"
                        "0: \n"
                        "vld1.f32 {d14-d15}, [%1] \n"// q7 = outptr
                        "vext.32 q12, q8, q10, #1 \n"// q12 = 2 4 6 8
                        "vext.32 q11, q9, q11, #1 \n"// q11 = 3 5 7 9
                        "vext.32 q10, q8, q10, #2 \n"// q10 = 4 6 8 10
                        "vmla.f32 q7, q8, %e14[0] \n"
                        "vmul.f32 q13, q9, %e14[1] \n"
                        "pld [%3, #256] \n"
                        "vmul.f32 q14, q12, %f14[0] \n"
                        "vmul.f32 q15, q11, %f14[1] \n"
                        "vmla.f32 q7, q10, %e15[0] \n"
                        "vld2.f32 {d16-d19}, [%3]! \n"
                        "pld [%3, #256] \n"
                        "vld2.f32 {d20-d23}, [%3] \n"
                        "vext.32 q12, q8, q10, #1 \n"
                        "vext.32 q11, q9, q11, #1 \n"
                        "vext.32 q10, q8, q10, #2 \n"
                        "vmla.f32 q7, q8, %e15[1] \n"
                        "vmla.f32 q13, q9, %f15[0] \n"
                        "pld [%4, #256] \n"
                        "vmla.f32 q14, q12, %f15[1] \n"
                        "vmla.f32 q15, q11, %e16[0] \n"
                        "vmla.f32 q7, q10, %e16[1] \n"
                        "vld2.f32 {d16-d19}, [%4]! \n"
                        "pld [%4, #256] \n"
                        "vld2.f32 {d20-d23}, [%4] \n"
                        "vext.32 q12, q8, q10, #1 \n"
                        "vext.32 q11, q9, q11, #1 \n"
                        "vext.32 q10, q8, q10, #2 \n"
                        "vmla.f32 q7, q8, %f16[0] \n"
                        "vmla.f32 q13, q9, %f16[1] \n"
                        "pld [%5, #256] \n"
                        "vmla.f32 q14, q12, %e17[0] \n"
                        "vmla.f32 q15, q11, %e17[1] \n"
                        "vmla.f32 q7, q10, %f17[0] \n"
                        "vld2.f32 {d16-d19}, [%5]! \n"
                        "pld [%5, #256] \n"
                        "vld2.f32 {d20-d23}, [%5] \n"
                        "vext.32 q12, q8, q10, #1 \n"
                        "vext.32 q11, q9, q11, #1 \n"
                        "vext.32 q10, q8, q10, #2 \n"
                        "vmla.f32 q7, q8, %f17[1] \n"
                        "vmla.f32 q13, q9, %e18[0] \n"
                        "pld [%6, #256] \n"
                        "vmla.f32 q14, q12, %e18[1] \n"
                        "vmla.f32 q15, q11, %f18[0] \n"
                        "vmla.f32 q7, q10, %f18[1] \n"
                        "vld2.f32 {d16-d19}, [%6]! \n"
                        "pld [%6, #256] \n"
                        "vld2.f32 {d20-d23}, [%6] \n"
                        "vext.32 q12, q8, q10, #1 \n"
                        "vext.32 q11, q9, q11, #1 \n"
                        "vext.32 q10, q8, q10, #2 \n"
                        "vmla.f32 q7, q8, %e19[0] \n"
                        "vmla.f32 q13, q9, %e19[1] \n"
                        "vmla.f32 q14, q12, %f19[0] \n"
                        "vmla.f32 q15, q11, %f19[1] \n"
                        "vmla.f32 q7, q10, %e20[0] \n"
                        "pld [%2, #256] \n"
                        "vld2.f32 {d16-d19}, [%2]! \n"// q8 = 0 2 4 6 q9 = 1 3 5 7
                        "vadd.f32 q14, q14, q15 \n"
                        "vadd.f32 q7, q7, q13 \n"
                        // "veor q15, q15 \n"// _sump3 = 0;
                        // "veor q13, q13 \n"// _sump2 = 0;
                        "pld [%2, #256] \n"
                        "vadd.f32 q7, q7, q14 \n"
                        "vld2.f32 {d20-d23}, [%2] \n"// q10 = 8 10 12 14 q11 = 9 11 13 15
                        // "veor q14, q14 \n"// _sump3 = 0;
                        "vst1.f32 {d14-d15}, [%1]! \n"
                        "pld [%1, #128] \n"
                        "subs %0, #1 \n"
                        "bne 0b \n"
                        "sub %2, #32 \n"
                        : "=r"(nn), // %0
                        "=r"(outptr), // %1
                        "=r"(r0), // %2
                        "=r"(r1), // %3
                        "=r"(r2), // %4
                        "=r"(r3), // %5
                        "=r"(r4) // %6
                        : "0"(nn),
                        "1"(outptr),
                        "2"(r0),
                        "3"(r1),
                        "4"(r2),
                        "5"(r3),
                        "6"(r4),
                        "w"(_k0123), // %14
                        "w"(_k4567), // %15
                        "w"(_k891011), // %16
                        "w"(_k12131415), // %17
                        "w"(_k16171819), // %18
                        "w"(_k20212223), // %19
                        "w"(_k24242424) // %20
                        : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                    );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // Scalar tail: one output per step, window advances by 2.
                for (; remain>0; remain--)
                {
                    float sum = 0;
#if defined(__ARM_NEON)
                    // First 4 taps of each row vectorized; fifth taps
                    // accumulated into the scalar sum directly.
                    float32x4_t _r0 = vld1q_f32(r0);
                    float32x4_t _sum = vmulq_f32(_r0, _k0123);
                    float32x4_t _r1 = vld1q_f32(r1);
                    _sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1));
                    float32x4_t _r2 = vld1q_f32(r2);
                    _sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2));
                    float32x4_t _r3 = vld1q_f32(r3);
                    _sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3));
                    float32x4_t _r4 = vld1q_f32(r4);
                    _sum = vmlaq_f32(_sum, _r4, _k20212223);
                    sum += r0[4] * k0[4];
                    sum += r1[4] * k1[4];
                    sum += r2[4] * k2[4];
                    sum += r3[4] * k3[4];
                    sum += r4[4] * k4[4];
                    // Horizontal reduction of the vector accumulator.
                    float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                    _ss = vpadd_f32(_ss, _ss);
                    sum += vget_lane_f32(_ss, 0);
#else
                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r0[3] * k0[3];
                    sum += r0[4] * k0[4];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r1[3] * k1[3];
                    sum += r1[4] * k1[4];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    sum += r2[3] * k2[3];
                    sum += r2[4] * k2[4];
                    sum += r3[0] * k3[0];
                    sum += r3[1] * k3[1];
                    sum += r3[2] * k3[2];
                    sum += r3[3] * k3[3];
                    sum += r3[4] * k3[4];
                    sum += r4[0] * k4[0];
                    sum += r4[1] * k4[1];
                    sum += r4[2] * k4[2];
                    sum += r4[3] * k4[3];
                    sum += r4[4] * k4[4];
#endif
                    *outptr += sum;
                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    r3 += 2;
                    r4 += 2;
                    outptr++;
                }
                // Hop to the start of the next pair of input rows.
                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
                r3 += tailstep;
                r4 += tailstep;
            }
        }
    }
}
#endif // __ARM_NEON |
GB_unaryop__identity_uint8_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint8_fp32
// op(A') function: GB_tran__identity_uint8_fp32
// C type: uint8_t
// A type: float
// cast: uint8_t cij ; GB_CAST_UNSIGNED(cij,aij,8)
// unaryop: cij = aij
// type of the input array A
#define GB_ATYPE \
float

// type of the output array C
#define GB_CTYPE \
uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]

// the pC-th entry of the output array
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
z = x ;

// casting
#define GB_CASTING(z, x) \
uint8_t z ; GB_CAST_UNSIGNED(z,x,8) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// GB_unop__identity_uint8_fp32: Cx = op (cast (Ax)), entry by entry.
// Applies the identity operator with a float -> uint8_t cast to all anz
// entries of Ax, writing the results into Cx, using up to nthreads threads.
GrB_Info GB_unop__identity_uint8_fp32
(
    uint8_t *restrict Cx,
    const float *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out; caller falls back
    // to the generic implementation
    return (GrB_NO_VALUE) ;
    #else
    // each iteration is independent, so a static schedule suffices
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < anz ; k++)
    {
        // Cx [k] = (uint8_t) Ax [k], via GB_CAST_UNSIGNED
        GB_CAST_OP (k, k) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// GB_tran__identity_uint8_fp32: C = op (cast (A')).
// Transposes A, typecasts float -> uint8_t, and applies the identity
// operator. The loop body is supplied by the shared template
// GB_unaryop_transpose.c, specialized here via the GB_* macros above.
GrB_Info GB_tran__identity_uint8_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out
    return (GrB_NO_VALUE) ;
    #else
    // phase 2 of the transpose template: fill in the output pattern/values
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
psd.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "coders/coders-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
Define declarations.
*/
#define MaxPSDChannels 56
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
/*
Enumerated declarations.
*/
/*
  Per-channel compression methods, as stored in a PSD channel's 2-byte
  compression field.
*/
typedef enum
{
  Raw = 0,                   /* uncompressed scanlines */
  RLE = 1,                   /* PackBits run-length encoding */
  ZipWithoutPrediction = 2,  /* zlib deflate */
  ZipWithPrediction = 3      /* zlib deflate of delta-predicted samples */
} PSDCompressionType;
/*
  Photoshop color modes, as stored in the file header's mode field.
*/
typedef enum
{
  BitmapMode = 0,        /* 1-bit black and white */
  GrayscaleMode = 1,
  IndexedMode = 2,       /* colormapped */
  RGBMode = 3,
  CMYKMode = 4,
  MultichannelMode = 7,
  DuotoneMode = 8,
  LabMode = 9
} PSDImageType;
/*
Typedef declarations.
*/
/*
  One channel record from a PSD layer: the pixel channel it maps to, the
  byte size of its compressed data, and whether the decoder supports it.
*/
typedef struct _ChannelInfo
{
  MagickBooleanType
    supported;  /* MagickFalse: channel data is skipped, not decoded */

  PixelChannel
    channel;    /* destination pixel channel in the layer image */

  size_t
    size;       /* byte length of the channel data in the file */
} ChannelInfo;
/*
  A layer's mask: its image data, placement, default (background) value
  for pixels outside the mask rectangle, and the PSD mask flags byte.
*/
typedef struct _MaskInfo
{
  Image
    *image;      /* grayscale mask image, NULL when the layer has none */

  RectangleInfo
    page;        /* mask geometry relative to the canvas */

  unsigned char
    background,  /* mask value outside the mask rectangle (0 or 255) */
    flags;       /* PSD mask flags byte */
} MaskInfo;
/*
  Everything read from one PSD layer record.
*/
typedef struct _LayerInfo
{
  ChannelInfo
    channel_info[MaxPSDChannels];  /* one entry per channel in the layer */

  char
    blendkey[4];  /* 4-byte Photoshop blend-mode key, e.g. "norm" */

  Image
    *image;       /* decoded layer pixels */

  MaskInfo
    mask;         /* optional layer mask */

  Quantum
    opacity;      /* layer-wide opacity */

  RectangleInfo
    page;         /* layer placement and size on the canvas */

  size_t
    offset_x,
    offset_y;

  unsigned char
    clipping,
    flags,
    name[257],    /* Pascal-string layer name, NUL terminated */
    visible;

  unsigned short
    channels;     /* number of channels in this layer */

  StringInfo
    *info;        /* additional layer information blocks */
} LayerInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  /*
    A PSD/PSB file begins with the 4-byte signature "8BPS".
  */
  if ((length >= 4) && (LocaleNCompare((const char *) magick,"8BPS",4) == 0))
    return(MagickTrue);
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
%      Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
  /*
    Map a MagickCore composite operator to the 4-byte Photoshop blend-mode
    key.  The key is stored byte-reversed when the image endianness is LSB;
    any operator without a PSD equivalent (including OverCompositeOp) maps
    to "norm".
  */
  typedef struct
  {
    CompositeOperator
      compose;

    const char
      *lsb,
      *msb;
  } PSDBlendMode;

  static const PSDBlendMode
    blend_modes[] =
    {
      { ColorBurnCompositeOp, "vidi", "idiv" },
      { ColorDodgeCompositeOp, " vid", "div " },
      { ColorizeCompositeOp, "rloc", "colr" },
      { DarkenCompositeOp, "krad", "dark" },
      { DifferenceCompositeOp, "ffid", "diff" },
      { DissolveCompositeOp, "ssid", "diss" },
      { ExclusionCompositeOp, "dums", "smud" },
      { HardLightCompositeOp, "tiLh", "hLit" },
      { HardMixCompositeOp, "xiMh", "hMix" },
      { HueCompositeOp, " euh", "hue " },
      { LightenCompositeOp, "etil", "lite" },
      { LinearBurnCompositeOp, "nrbl", "lbrn" },
      { LinearDodgeCompositeOp, "gddl", "lddg" },
      { LinearLightCompositeOp, "tiLl", "lLit" },
      { LuminizeCompositeOp, " mul", "lum " },
      { MultiplyCompositeOp, " lum", "mul " },
      { OverlayCompositeOp, "revo", "over" },
      { PinLightCompositeOp, "tiLp", "pLit" },
      { SaturateCompositeOp, " tas", "sat " },
      { ScreenCompositeOp, "nrcs", "scrn" },
      { SoftLightCompositeOp, "tiLs", "sLit" },
      { VividLightCompositeOp, "tiLv", "vLit" }
    };

  size_t
    i;

  for (i=0; i < (sizeof(blend_modes)/sizeof(blend_modes[0])); i++)
    if (blend_modes[i].compose == image->compose)
      return(image->endian == LSBEndian ? blend_modes[i].lsb :
        blend_modes[i].msb);
  return(image->endian == LSBEndian ? "mron" : "norm");
}
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
  Image *image,ExceptionInfo* exception)
{
  const char
    *option;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Only sRGB images with a blended alpha channel are corrected; the user
    can also opt out with -define psd:alpha-unblend=off.
  */
  if ((image->alpha_trait != BlendPixelTrait) ||
      (image->colorspace != sRGBColorspace))
    return(MagickTrue);
  option=GetImageOption(image_info,"psd:alpha-unblend");
  if (IsStringFalse(option) != MagickFalse)
    return(MagickTrue);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        gamma;

      ssize_t
        i;

      /*
        gamma is the normalized alpha; fully transparent and fully opaque
        pixels need no correction.
      */
      gamma=QuantumScale*GetPixelAlpha(image, q);
      if (gamma != 0.0 && gamma != 1.0)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            /*
              Invert the compose-over-white blend c'=c*a+(1-a)*white:
              c=(c'-(1-a)*QuantumRange)/a.
            */
            if (channel != AlphaPixelChannel)
              q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma);
          }
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
static inline CompressionType ConvertPSDCompression(
  PSDCompressionType compression)
{
  /*
    Translate a PSD compression tag into the equivalent MagickCore
    compression type; both zip variants map to ZipCompression and Raw
    (or anything unknown) maps to NoCompression.
  */
  if (compression == RLE)
    return RLECompression;
  if ((compression == ZipWithPrediction) ||
      (compression == ZipWithoutPrediction))
    return ZipCompression;
  return NoCompression;
}
/*
  Fold a layer's global opacity into its image's alpha channel; with
  `revert' set, divide the opacity back out again.
*/
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
  MagickBooleanType revert,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying layer opacity %.20g", (double) opacity);
  if (opacity == OpaqueAlpha)
    return(MagickTrue);
  /* Ensure there is an alpha channel to scale. */
  if (image->alpha_trait != BlendPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* alpha*=opacity, or alpha/=opacity when reverting. */
      if (revert == MagickFalse)
        SetPixelAlpha(image,ClampToQuantum(QuantumScale*
          GetPixelAlpha(image,q)*opacity),q);
      else if (opacity > 0)
        SetPixelAlpha(image,ClampToQuantum((double) QuantumRange*
          GetPixelAlpha(image,q)/(MagickRealType) opacity),q);
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
/*
  Multiply the image's alpha channel by a layer mask.  The mask is first
  composited onto a full-size canvas filled with `background' so pixels
  outside the mask rectangle receive the mask background value; with
  `revert' set the multiplication is undone instead.
*/
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
  Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
  Image
    *complete_mask;

  MagickBooleanType
    status;

  PixelInfo
    color;

  ssize_t
    y;

  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying opacity mask");
  complete_mask=CloneImage(image,0,0,MagickTrue,exception);
  if (complete_mask == (Image *) NULL)
    return(MagickFalse);
  complete_mask->alpha_trait=BlendPixelTrait;
  GetPixelInfo(complete_mask,&color);
  color.red=(MagickRealType) background;
  (void) SetImageColor(complete_mask,&color,exception);
  /* Place the mask at its page offset relative to the layer image. */
  status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue,
    mask->page.x-image->page.x,mask->page.y-image->page.y,exception);
  if (status == MagickFalse)
    {
      complete_mask=DestroyImage(complete_mask);
      return(status);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    Quantum
      *p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
    if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        alpha,
        intensity;

      alpha=(MagickRealType) GetPixelAlpha(image,q);
      intensity=GetPixelIntensity(complete_mask,p);
      /* alpha*=mask intensity, or alpha/=intensity when reverting. */
      if (revert == MagickFalse)
        SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q);
      else if (intensity > 0)
        SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q);
      q+=GetPixelChannels(image);
      p+=GetPixelChannels(complete_mask);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  complete_mask=DestroyImage(complete_mask);
  return(status);
}
/*
  Preserve a layer's opacity mask for round-tripping: register the mask
  image under a short random key and record that key in the layer image's
  "psd:opacity-mask" artifact.  The key is two random characters followed
  by the mask background value and a terminating NUL.
*/
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
  ExceptionInfo *exception)
{
  char
    *key;

  RandomInfo
    *random_info;

  StringInfo
    *key_info;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " preserving opacity mask");
  random_info=AcquireRandomInfo();
  key_info=GetRandomKey(random_info,2+1);
  key=(char *) GetStringInfoDatum(key_info);
  /*
    The key buffer was requested as 2+1 bytes; write the background value
    and terminator inside that length (the previous code wrote key[8] and
    key[9], past the requested key, leaving bytes 2..7 unspecified).
  */
  key[2]=(char) layer_info->mask.background;
  key[3]='\0';
  layer_info->mask.image->page.x+=layer_info->page.x;
  layer_info->mask.image->page.y+=layer_info->page.y;
  (void) SetImageRegistry(ImageRegistryType,(const char *) key,
    layer_info->mask.image,exception);
  (void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
    (const char *) key);
  key_info=DestroyStringInfo(key_info);
  random_info=DestroyRandomInfo(random_info);
}
/*
  Decode PackBits (RLE) compressed scanline data.  `compact_pixels' holds
  number_compact_pixels encoded bytes; decoded samples are written to
  `pixels' (room for number_pixels bytes).  Sub-byte depths (1, 2 and 4
  bit) are expanded to one byte per sample as they are decoded.  Returns
  the number of bytes written, which may be short when either buffer is
  exhausted.
*/
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
  const unsigned char *compact_pixels,const ssize_t depth,
  const size_t number_pixels,unsigned char *pixels)
{
/* Return the bytes written so far when the input runs out. */
#define CheckNumberCompactPixels \
  if (packets == 0) \
    return(i); \
  packets--

/* Return when the output buffer cannot hold `count' more samples. */
#define CheckNumberPixels(count) \
  if (((ssize_t) i + count) > (ssize_t) number_pixels) \
    return(i); \
  i+=count

  int
    pixel;

  ssize_t
    i,
    j;

  size_t
    length;

  ssize_t
    packets;

  packets=(ssize_t) number_compact_pixels;
  for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); )
  {
    packets--;
    length=(size_t) (*compact_pixels++);
    if (length == 128)
      continue;  /* 128 is a PackBits no-op */
    if (length > 128)
      {
        /*
          Run packet: repeat the next byte 257-length times.
        */
        length=256-length+1;
        CheckNumberCompactPixels;
        pixel=(*compact_pixels++);
        for (j=0; j < (ssize_t) length; j++)
        {
          switch (depth)
          {
            case 1:
            {
              /* Expand 8 packed 1-bit samples; a set bit means black. */
              CheckNumberPixels(8);
              *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 0) & 0x01 ? 0U : 255U;
              break;
            }
            case 2:
            {
              /* Expand 4 packed 2-bit samples. */
              CheckNumberPixels(4);
              *pixels++=(unsigned char) ((pixel >> 6) & 0x03);
              *pixels++=(unsigned char) ((pixel >> 4) & 0x03);
              *pixels++=(unsigned char) ((pixel >> 2) & 0x03);
              *pixels++=(unsigned char) ((pixel & 0x03) & 0x03);
              break;
            }
            case 4:
            {
              /* Expand 2 packed 4-bit samples. */
              CheckNumberPixels(2);
              *pixels++=(unsigned char) ((pixel >> 4) & 0xff);
              *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff);
              break;
            }
            default:
            {
              /* 8 bits or more: one byte per sample, copied verbatim. */
              CheckNumberPixels(1);
              *pixels++=(unsigned char) pixel;
              break;
            }
          }
        }
        continue;
      }
    /*
      Literal packet: copy the next length+1 bytes.
    */
    length++;
    for (j=0; j < (ssize_t) length; j++)
    {
      CheckNumberCompactPixels;
      switch (depth)
      {
        case 1:
        {
          CheckNumberPixels(8);
          *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U;
          break;
        }
        case 2:
        {
          CheckNumberPixels(4);
          *pixels++=(*compact_pixels >> 6) & 0x03;
          *pixels++=(*compact_pixels >> 4) & 0x03;
          *pixels++=(*compact_pixels >> 2) & 0x03;
          *pixels++=(*compact_pixels & 0x03) & 0x03;
          break;
        }
        case 4:
        {
          CheckNumberPixels(2);
          *pixels++=(*compact_pixels >> 4) & 0xff;
          *pixels++=(*compact_pixels & 0x0f) & 0xff;
          break;
        }
        default:
        {
          CheckNumberPixels(1);
          *pixels++=(*compact_pixels);
          break;
        }
      }
      compact_pixels++;
    }
  }
  return(i);
}
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  /*
    Release each layer's image, mask image and additional-info blob, then
    free the layer array itself.  Always returns NULL.
  */
  ssize_t
    layer;

  for (layer=0; layer < number_layers; layer++)
  {
    LayerInfo
      *info;

    info=layer_info+layer;
    if (info->image != (Image *) NULL)
      info->image=DestroyImage(info->image);
    if (info->mask.image != (Image *) NULL)
      info->mask.image=DestroyImage(info->mask.image);
    if (info->info != (StringInfo *) NULL)
      info->info=DestroyStringInfo(info->info);
  }
  return (LayerInfo *) RelinquishMagickMemory(layer_info);
}
static inline size_t GetPSDPacketSize(const Image *image)
{
  /*
    Bytes per channel sample: colormapped images with more than 256
    colors need 16-bit indexes; otherwise the size follows the depth
    (1-8 bits: 1 byte, 9-16 bits: 2 bytes, deeper: 4 bytes).
  */
  if ((image->storage_class == PseudoClass) && (image->colors > 256))
    return(2);
  if (image->depth > 16)
    return(4);
  return(image->depth > 8 ? 2 : 1);
}
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  /*
    Length fields are 32-bit in PSD (version 1) and 64-bit in PSB.
  */
  if (psd_info->version != 1)
    return((MagickSizeType) ReadBlobLongLong(image));
  return((MagickSizeType) ReadBlobLong(image));
}
static inline size_t GetPSDRowSize(Image *image)
{
  /*
    Bytes per scanline; 1-bit images pack eight pixels per byte.
  */
  size_t
    packet_size;

  packet_size=GetPSDPacketSize(image);
  if (image->depth == 1)
    return(((image->columns+7)/8)*packet_size);
  return(image->columns*packet_size);
}
static const char *ModeToString(PSDImageType type)
{
  /*
    Human-readable name of a PSD color mode, used for logging.
  */
  static const struct
  {
    PSDImageType
      type;

    const char
      *name;
  } modes[] =
  {
    { BitmapMode, "Bitmap" },
    { GrayscaleMode, "Grayscale" },
    { IndexedMode, "Indexed" },
    { RGBMode, "RGB" },
    { CMYKMode, "CMYK" },
    { MultichannelMode, "Multichannel" },
    { DuotoneMode, "Duotone" },
    { LabMode, "L*A*B" }
  };

  size_t
    i;

  for (i=0; i < (sizeof(modes)/sizeof(modes[0])); i++)
    if (modes[i].type == type)
      return(modes[i].name);
  return("unknown");
}
static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception)
{
  /*
    Negate every color channel while leaving alpha untouched; PSD stores
    CMYK samples inverted relative to MagickCore's representation.
  */
  ChannelType
    saved_mask;

  MagickBooleanType
    status;

  saved_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~
    AlphaChannel));
  status=NegateImage(image,MagickFalse,exception);
  (void) SetImageChannelMask(image,saved_mask);
  return(status);
}
/*
  Wrap the 8BIM image-resource section in an "8bim" profile and act on the
  few resources the decoder needs (resolution, merged-image flag).  Each
  resource block is: "8BIM" signature, 2-byte id, Pascal name padded to an
  even length, 4-byte data length, then the data (padded to even length).
  Returns NULL when the section is too small to hold a block.
*/
static StringInfo *ParseImageResourceBlocks(PSDInfo *psd_info,Image *image,
  const unsigned char *blocks,size_t length)
{
  const unsigned char
    *p;

  ssize_t
    offset;

  StringInfo
    *profile;

  unsigned char
    name_length;

  unsigned int
    count;

  unsigned short
    id,
    short_sans;  /* scratch for fields that are read but ignored */

  if (length < 16)
    return((StringInfo *) NULL);
  profile=BlobToStringInfo((const unsigned char *) NULL,length);
  SetStringInfoDatum(profile,blocks);
  SetStringInfoName(profile,"8bim");
  /* -7: a block header needs at least signature+id+name length byte. */
  for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
  {
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushCharPixel(p,&name_length);
    /* The Pascal name (length byte + text) is padded to an even size. */
    if ((name_length % 2) == 0)
      name_length++;
    p+=name_length;
    if (p > (blocks+length-4))
      break;
    p=PushLongPixel(MSBEndian,p,&count);
    offset=(ssize_t) count;
    if (((p+offset) < blocks) || ((p+offset) > (blocks+length)))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        unsigned short
          resolution;

        /*
          Resolution info.
        */
        if (offset < 16)
          break;
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.x=(double) resolution;
        (void) FormatImageProperty(image,"tiff:XResolution","%*g",
          GetMagickPrecision(),image->resolution.x);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.y=(double) resolution;
        (void) FormatImageProperty(image,"tiff:YResolution","%*g",
          GetMagickPrecision(),image->resolution.y);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        image->units=PixelsPerInchResolution;
        break;
      }
      case 0x0421:
      {
        /*
          Version info: byte 4 indicates whether a merged (composite)
          image is present at the end of the file.
        */
        if ((offset > 4) && (*(p+4) == 0))
          psd_info->has_merged_image=MagickFalse;
        p+=offset;
        break;
      }
      default:
      {
        p+=offset;
        break;
      }
    }
    /* Resource data is padded to an even length. */
    if ((offset & 0x01) != 0)
      p++;
  }
  return(profile);
}
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
if (mode == (const char *) NULL)
return(OverCompositeOp);
if (LocaleNCompare(mode,"norm",4) == 0)
return(OverCompositeOp);
if (LocaleNCompare(mode,"mul ",4) == 0)
return(MultiplyCompositeOp);
if (LocaleNCompare(mode,"diss",4) == 0)
return(DissolveCompositeOp);
if (LocaleNCompare(mode,"diff",4) == 0)
return(DifferenceCompositeOp);
if (LocaleNCompare(mode,"dark",4) == 0)
return(DarkenCompositeOp);
if (LocaleNCompare(mode,"lite",4) == 0)
return(LightenCompositeOp);
if (LocaleNCompare(mode,"hue ",4) == 0)
return(HueCompositeOp);
if (LocaleNCompare(mode,"sat ",4) == 0)
return(SaturateCompositeOp);
if (LocaleNCompare(mode,"colr",4) == 0)
return(ColorizeCompositeOp);
if (LocaleNCompare(mode,"lum ",4) == 0)
return(LuminizeCompositeOp);
if (LocaleNCompare(mode,"scrn",4) == 0)
return(ScreenCompositeOp);
if (LocaleNCompare(mode,"over",4) == 0)
return(OverlayCompositeOp);
if (LocaleNCompare(mode,"hLit",4) == 0)
return(HardLightCompositeOp);
if (LocaleNCompare(mode,"sLit",4) == 0)
return(SoftLightCompositeOp);
if (LocaleNCompare(mode,"smud",4) == 0)
return(ExclusionCompositeOp);
if (LocaleNCompare(mode,"div ",4) == 0)
return(ColorDodgeCompositeOp);
if (LocaleNCompare(mode,"idiv",4) == 0)
return(ColorBurnCompositeOp);
if (LocaleNCompare(mode,"lbrn",4) == 0)
return(LinearBurnCompositeOp);
if (LocaleNCompare(mode,"lddg",4) == 0)
return(LinearDodgeCompositeOp);
if (LocaleNCompare(mode,"lLit",4) == 0)
return(LinearLightCompositeOp);
if (LocaleNCompare(mode,"vLit",4) == 0)
return(VividLightCompositeOp);
if (LocaleNCompare(mode,"pLit",4) == 0)
return(PinLightCompositeOp);
if (LocaleNCompare(mode,"hMix",4) == 0)
return(HardMixCompositeOp);
return(OverCompositeOp);
}
static inline ssize_t ReadPSDString(Image *image,char *p,const size_t length)
{
  /*
    Read a fixed-length string from the blob; when the image endianness is
    not MSB the bytes are stored reversed, so reverse them back in place.
    Returns the number of bytes actually read.
  */
  ssize_t
    count;

  count=ReadBlob(image,length,(unsigned char *) p);
  if ((count == (ssize_t) length) && (image->endian != MSBEndian))
    {
      char
        *head,
        *tail;

      head=p;
      tail=p+length-1;
      while (head < tail)
      {
        char
          swap;

        swap=(*head);
        *head++=(*tail);
        *tail--=swap;
      }
    }
  return(count);
}
/*
  Store one decoded sample into the pixel at q.  For colormapped images
  the gray channel carries the colormap index and the alpha channel is
  patched into the colormap entry itself; otherwise the sample goes
  straight into the requested pixel channel.
*/
static inline void SetPSDPixel(Image *image,const PixelChannel channel,
  const size_t packet_size,const Quantum pixel,Quantum *q,
  ExceptionInfo *exception)
{
  if (image->storage_class == PseudoClass)
    {
      PixelInfo
        *color;

      ssize_t
        index;

      if (channel == GrayPixelChannel)
        {
          index=(ssize_t) pixel;
          /* 8-bit samples arrive scaled to Quantum; scale back to 0-255. */
          if (packet_size == 1)
            index=(ssize_t) ScaleQuantumToChar((Quantum) index);
          index=ConstrainColormapIndex(image,index,exception);
          SetPixelIndex(image,(Quantum) index,q);
        }
      else
        {
          /* Not the index channel: reuse the index already stored at q. */
          index=(ssize_t) GetPixelIndex(image,q);
          index=ConstrainColormapIndex(image,index,exception);
        }
      color=image->colormap+index;
      if (channel == AlphaPixelChannel)
        color->alpha=(MagickRealType) pixel;
      SetPixelViaPixelInfo(image,color,q);
    }
  else
    SetPixelChannel(image,channel,pixel,q);
}
/*
  Transfer one decoded scanline (`pixels') into row `row' of the image,
  converting 1-, 2- or 4-byte samples to Quantum and expanding the packed
  bytes of 1-bit images to one pixel per bit.
*/
static MagickBooleanType ReadPSDChannelPixels(Image *image,const ssize_t row,
  const PixelChannel channel,const unsigned char *pixels,
  ExceptionInfo *exception)
{
  Quantum
    pixel;

  const unsigned char
    *p;

  Quantum
    *q;

  ssize_t
    x;

  size_t
    packet_size;

  p=pixels;
  q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
  if (q == (Quantum *) NULL)
    return MagickFalse;
  packet_size=GetPSDPacketSize(image);
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (packet_size == 1)
      pixel=ScaleCharToQuantum(*p++);
    else
      if (packet_size == 2)
        {
          unsigned short
            nibble;

          p=PushShortPixel(MSBEndian,p,&nibble);
          pixel=ScaleShortToQuantum(nibble);
        }
      else
        {
          /* 4-byte samples are big-endian floats, nominally in [0,1]. */
          MagickFloatType
            nibble;

          p=PushFloatPixel(MSBEndian,p,&nibble);
          pixel=ClampToQuantum(((MagickRealType) QuantumRange)*nibble);
        }
    if (image->depth > 1)
      {
        SetPSDPixel(image,channel,packet_size,pixel,q,exception);
        q+=GetPixelChannels(image);
      }
    else
      {
        /*
          1-bit image: `pixel' holds up to 8 packed samples (a set bit is
          black).  Expand them here and advance x past the extra pixels.
        */
        ssize_t
          bit,
          number_bits;

        number_bits=(ssize_t) image->columns-x;
        if (number_bits > 8)
          number_bits=8;
        for (bit = 0; bit < (ssize_t) number_bits; bit++)
        {
          SetPSDPixel(image,channel,packet_size,
            (((unsigned char)((ssize_t) pixel)) & (0x01 << (7-bit))) != 0 ? 0 :
            QuantumRange,q,exception);
          q+=GetPixelChannels(image);
          x++;
        }
        /* Compensate for the loop's own x++ unless the row is complete. */
        if (x != (ssize_t) image->columns)
          x--;
        continue;
      }
  }
  return(SyncAuthenticPixels(image,exception));
}
/*
  Read an uncompressed channel: one row_size-byte scanline per image row,
  transferred into the image as it is read.  Returns MagickFalse on a
  short read or pixel-transfer failure.
*/
static MagickBooleanType ReadPSDChannelRaw(Image *image,const PixelChannel channel,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(pixels,0,row_size*sizeof(*pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;  /* stays false if the read below comes up short */
    count=ReadBlob(image,row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,y,channel,pixels,exception);
    if (status == MagickFalse)
      break;
  }
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  /*
    Read the per-scanline byte counts that precede RLE-compressed channel
    data: 16-bit counts for PSD (version 1), 32-bit for PSB.  Returns NULL
    on allocation failure.
  */
  MagickOffsetType
    *sizes;

  ssize_t
    row;

  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if (sizes == (MagickOffsetType *) NULL)
    return(sizes);
  for (row=0; row < (ssize_t) size; row++)
  {
    if (psd_info->version == 1)
      sizes[row]=(MagickOffsetType) ReadBlobShort(image);
    else
      sizes[row]=(MagickOffsetType) ReadBlobLong(image);
  }
  return sizes;
}
/*
  Read an RLE (PackBits) compressed channel.  `sizes' holds the compressed
  byte count of each scanline (read earlier by ReadPSDRLESizes); each row
  is read, decoded into row_size bytes, and transferred into the image.
*/
static MagickBooleanType ReadPSDChannelRLE(Image *image,
  const PixelChannel channel,MagickOffsetType *sizes,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    length,
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* Size the decode buffer to the largest compressed scanline. */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  /* Reject absurd scanline sizes before allocating the decode buffer. */
  if (length > (row_size+2048)) /* arbitrary number */
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;  /* stays false if the read below comes up short */
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    /*
      For 1-bit images a bogus depth forces DecodePSDPixels into its
      byte-copy default case, so the packed row bytes are not expanded
      here; ReadPSDChannelPixels does the bit expansion.
    */
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,y,channel,pixels,exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
/*
  Undo Photoshop's 8-bit delta prediction in place: within each row every
  byte becomes the sum of itself and its left neighbour.  `count' is the
  total number of decoded bytes and `row_size' the bytes per scanline.
*/
static void Unpredict8Bit(const Image *image,unsigned char *pixels,
  const size_t count,const size_t row_size)
{
  unsigned char
    *p;

  size_t
    length,
    remaining;

  p=pixels;
  remaining=count;
  while (remaining > 0)
  {
    length=image->columns;
    while (--length)
    {
      *(p+1)+=*p;  /* add the predictor (previous byte) back in */
      p++;
    }
    p++;  /* step past the last byte of the row */
    remaining-=row_size;  /* NOTE(review): assumes count is an exact
      multiple of row_size; otherwise this size_t subtraction wraps --
      confirm callers guarantee it */
  }
}
/*
  Undo delta prediction on big-endian 16-bit samples in place: each sample
  becomes the sum of itself and its left neighbour.  The high byte takes
  the carry out of the low-byte addition ((p[1]+p[3]) >> 8).
*/
static void Unpredict16Bit(const Image *image,unsigned char *pixels,
  const size_t count,const size_t row_size)
{
  unsigned char
    *p;

  size_t
    length,
    remaining;

  p=pixels;
  remaining=count;
  while (remaining > 0)
  {
    length=image->columns;
    while (--length)
    {
      /* p[0..1] is the previous sample, p[2..3] the one being undone. */
      p[2]+=p[0]+((p[1]+p[3]) >> 8);
      p[3]+=p[1];
      p+=2;
    }
    p+=2;
    remaining-=row_size;  /* NOTE(review): assumes count is an exact
      multiple of row_size -- confirm callers guarantee it */
  }
}
/*
  Undo delta prediction on 32-bit channel data.  Each row is first
  integrated byte-wise (cumulative sum across the whole row), then the
  row's four byte planes are interleaved into big-endian 32-bit samples
  written to `output_pixels'.
*/
static void Unpredict32Bit(const Image *image,unsigned char *pixels,
  unsigned char *output_pixels,const size_t row_size)
{
  unsigned char
    *p,
    *q;

  ssize_t
    y;

  size_t
    offset1,
    offset2,
    offset3,
    remaining;

  unsigned char
    *start;

  /* Offsets of the 2nd, 3rd and 4th byte planes within a row. */
  offset1=image->columns;
  offset2=2*offset1;
  offset3=3*offset1;
  p=pixels;
  q=output_pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    /* Pass 1: undo the delta predictor across the row's bytes. */
    start=p;
    remaining=row_size;
    while (--remaining)
    {
      *(p+1)+=*p;
      p++;
    }
    /* Pass 2: gather one byte from each plane for every sample. */
    p=start;
    remaining=image->columns;
    while (remaining--)
    {
      *(q++)=*p;
      *(q++)=*(p+offset1);
      *(q++)=*(p+offset2);
      *(q++)=*(p+offset3);
      p++;
    }
    p=start+row_size;
  }
}
/*
  Read a zip-compressed channel: inflate `compact_size' bytes from the
  blob into rows*row_size bytes of sample data, undo the delta predictor
  when the channel uses ZipWithPrediction, then transfer the scanlines
  into the image.  Returns MagickFalse on inflate failure; throws on
  short reads and allocation failures.
*/
static MagickBooleanType ReadPSDChannelZip(Image *image,
  const PixelChannel channel,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  unsigned char
    *p;

  size_t
    count,
    packet_size,
    row_size;

  ssize_t
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  z_stream
    stream;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  if ((MagickSizeType) compact_size > GetBlobSize(image))
    ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
      image->filename);
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
    }
  memset(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;

      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        if (ret == Z_STREAM_END)
          break;
      }
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      if (packet_size == 1)
        Unpredict8Bit(image,pixels,count,row_size);
      else if (packet_size == 2)
        Unpredict16Bit(image,pixels,count,row_size);
      else if (packet_size == 4)
        {
          unsigned char
            *output_pixels;

          output_pixels=(unsigned char *) AcquireQuantumMemory(count,
            sizeof(*output_pixels));
          /*
            Check the buffer just allocated (the previous code tested
            `pixels' here, so an output_pixels allocation failure went
            undetected and was dereferenced below).
          */
          if (output_pixels == (unsigned char *) NULL)
            {
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
              pixels=(unsigned char *) RelinquishMagickMemory(pixels);
              ThrowBinaryException(ResourceLimitError,
                "MemoryAllocationFailed",image->filename);
            }
          /* 32-bit prediction needs a separate de-interleave buffer. */
          Unpredict32Bit(image,pixels,output_pixels,row_size);
          pixels=(unsigned char *) RelinquishMagickMemory(pixels);
          pixels=output_pixels;
        }
    }
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,y,channel,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif
/*
  Read one channel of a layer.  Unsupported channels are skipped by
  seeking past their data.  A read-mask channel is decoded into a separate
  grayscale mask image (stored in layer_info->mask.image) unless the mask
  is disabled or not a plain user mask.  After decoding, the blob position
  is moved to the end of the channel data regardless of how much the
  decoder consumed.
*/
static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel_index,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  Image
    *channel_image,
    *mask;

  MagickOffsetType
    end_offset,
    offset;

  MagickBooleanType
    status;

  PixelChannel
    channel;

  /* -2: the 2-byte compression field was already consumed by the caller. */
  end_offset=(MagickOffsetType) layer_info->channel_info[channel_index].size-2;
  if (layer_info->channel_info[channel_index].supported == MagickFalse)
    {
      (void) SeekBlob(image,end_offset,SEEK_CUR);
      return(MagickTrue);
    }
  channel_image=image;
  channel=layer_info->channel_info[channel_index].channel;
  mask=(Image *) NULL;
  if (channel == ReadMaskPixelChannel)
    {
      const char
        *option;

      /*
        Ignore mask that is not a user supplied layer mask, if the mask is
        disabled or if the flags have unsupported values.
      */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
          (IsStringTrue(option) == MagickFalse)) ||
          (layer_info->mask.page.width < 1) ||
          (layer_info->mask.page.height < 1))
        {
          (void) SeekBlob(image,end_offset,SEEK_CUR);
          return(MagickTrue);
        }
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      if (mask != (Image *) NULL)
        {
          /* Decode the mask data into this grayscale image instead. */
          (void) ResetImagePixels(mask,exception);
          (void) SetImageType(mask,GrayscaleType,exception);
          channel_image=mask;
          channel=GrayPixelChannel;
        }
    }
  offset=TellBlob(image);
  status=MagickFalse;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,channel,exception);
      break;
    case RLE:
    {
      MagickOffsetType
        *sizes;

      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      status=ReadPSDChannelRLE(channel_image,channel,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
    }
      break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,channel,compression,
        (const size_t) end_offset,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  /* Always land exactly at the end of this channel's data. */
  (void) SeekBlob(image,offset+end_offset,SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        (void) DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  if (mask != (Image *) NULL)
    {
      if (layer_info->mask.image != (Image *) NULL)
        layer_info->mask.image=DestroyImage(layer_info->mask.image);
      layer_info->mask.image=mask;
    }
  return(status);
}
static MagickBooleanType GetPixelChannelFromPsdIndex(const PSDInfo *psd_info,
  ssize_t index,PixelChannel *channel)
{
  /*
    Map a PSD channel index to a pixel channel for the current color mode.
    The first channel past the color channels is alpha, later ones become
    meta channels, and negative indices pass straight through (-1 alpha,
    -2 layer mask).  Returns MagickFalse for an out-of-range index.
  */
  ssize_t
    color_channels;

  *channel=RedPixelChannel;
  switch (psd_info->mode)
  {
    case BitmapMode:
    case IndexedMode:
    case GrayscaleMode:
      color_channels=1;
      break;
    case LabMode:
    case MultichannelMode:
    case RGBMode:
      color_channels=3;
      break;
    case CMYKMode:
      color_channels=4;
      break;
    default:
      color_channels=0;  /* other modes: leave the index untouched */
      break;
  }
  if (color_channels != 0)
    {
      if (index == color_channels)
        index=(-1);
      else
        if (index > color_channels)
          index=StartMetaPixelChannel+index-color_channels-1;
    }
  if ((index < -2) || (index >= MaxPixelChannels))
    return(MagickFalse);
  if (index == -1)
    *channel=AlphaPixelChannel;
  else
    if (index == -2)
      *channel=ReadMaskPixelChannel;
    else
      *channel=(PixelChannel) index;
  return(MagickTrue);
}
/*
  Register any channels beyond the mode's color channels (and the alpha
  channel, when the image has one) as meta channels on the image.
*/
static void SetPsdMetaChannels(Image *image,const PSDInfo *psd_info,
  const unsigned short channels,ExceptionInfo *exception)
{
  ssize_t
    meta_channels;

  meta_channels=(ssize_t) channels-psd_info->min_channels;
  if (image->alpha_trait == BlendPixelTrait)
    meta_channels--;
  if (meta_channels <= 0)
    return;
  (void) SetPixelMetaChannels(image,(size_t) meta_channels,exception);
}
/*
  Read the pixel data of one layer: for each channel, a 2-byte compression
  marker followed by the channel data.  Afterwards applies layer opacity,
  negates CMYK data, and composites/preserves the layer mask.  Layer
  position and opacity are recorded as image artifacts for downstream use.
*/
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  char
    message[MagickPathExtent];

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    j;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image,exception);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  /* Hidden layers are kept in the list but do not composite. */
  if (layer_info->visible == MagickFalse)
    layer_info->image->compose=NoCompositeOp;
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name,
    exception);
  SetPsdMetaChannels(layer_info->image,psd_info,layer_info->channels,exception);
  status=MagickTrue;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for channel %.20g",(double) j);
    /* Each channel carries its own compression marker. */
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    layer_info->image->compression=ConvertPSDCompression(compression);
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
      (size_t) j,compression,exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateCMYK(layer_info->image,exception);
  if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
    {
      const char
        *option;

      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /* Do not composite the mask when it is disabled */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      /* Optionally keep the raw mask around as a separate image. */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}
/*
  Validate the channel set of a layer: each channel's declared size must be
  smaller than the blob, and every color channel required by the document's
  color mode must be present.  An extra alpha channel is allowed when the
  layer declares at least one channel more than the minimum.
*/
static MagickBooleanType CheckPSDChannels(const Image *image,
  const PSDInfo *psd_info,LayerInfo *layer_info)
{
  int
    required;

  size_t
    blob_size;

  ssize_t
    i;

  if (layer_info->channels < psd_info->min_channels)
    return(MagickFalse);
  /* Bits still set in `required` are color channels not yet seen. */
  required=RedChannel;
  if (psd_info->min_channels >= 3)
    required|=(GreenChannel | BlueChannel);
  if (psd_info->min_channels >= 4)
    required|=BlackChannel;
  blob_size=(size_t) GetBlobSize(image);
  for (i=0; i < (ssize_t) layer_info->channels; i++)
  {
    PixelChannel
      channel;

    if (layer_info->channel_info[i].size >= blob_size)
      return(MagickFalse);
    if (layer_info->channel_info[i].supported == MagickFalse)
      continue;
    channel=layer_info->channel_info[i].channel;
    /* Indexed documents must start with the index channel. */
    if ((i == 0) && (psd_info->mode == IndexedMode) &&
        (channel != RedPixelChannel))
      return(MagickFalse);
    switch (channel)
    {
      case AlphaPixelChannel:
        required|=AlphaChannel;
        break;
      case RedPixelChannel:
        required&=~RedChannel;
        break;
      case GreenPixelChannel:
        required&=~GreenChannel;
        break;
      case BluePixelChannel:
        required&=~BlueChannel;
        break;
      case BlackPixelChannel:
        required&=~BlackChannel;
        break;
      default:
        break;
    }
  }
  if (required == 0)
    return(MagickTrue);
  if ((required == AlphaChannel) &&
      (layer_info->channels >= psd_info->min_channels + 1))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  Drop layer entries without an image, then link the surviving layer images
  into the image list directly after the base image.  The layer_info array
  itself is always released before returning.
*/
static void AttachPSDLayers(Image *image,LayerInfo *layer_info,
  ssize_t number_layers)
{
  ssize_t
    i,
    kept;

  /* Compact in place: keep only entries that own an image. */
  kept=0;
  for (i=0; i < number_layers; i++)
    if (layer_info[i].image != (Image *) NULL)
      layer_info[kept++]=layer_info[i];
  number_layers=kept;
  if (number_layers == 0)
    {
      layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
      return;
    }
  /* Wire up previous/next pointers and propagate the page geometry. */
  for (i=0; i < number_layers; i++)
  {
    if (i > 0)
      layer_info[i].image->previous=layer_info[i-1].image;
    if (i < (number_layers-1))
      layer_info[i].image->next=layer_info[i+1].image;
    layer_info[i].image->page=layer_info[i].page;
  }
  image->next=layer_info[0].image;
  layer_info[0].image->previous=image;
  layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}
/*
  Decide whether the layer at `index` falls outside the user's requested
  scene range.  Nothing is skipped unless a merged image exists (so at
  least one image is always returned) and scenes were actually requested.
*/
static inline MagickBooleanType PSDSkipImage(const PSDInfo *psd_info,
  const ImageInfo *image_info,const size_t index)
{
  if ((psd_info->has_merged_image == MagickFalse) ||
      (image_info->number_scenes == 0))
    return(MagickFalse);
  /* Keep indices inside [scene, scene+number_scenes-1]. */
  if ((index >= image_info->scene) &&
      (index <= image_info->scene+image_info->number_scenes-1))
    return(MagickFalse);
  return(MagickTrue);
}
/*
  The number of layers cannot be used to determine if the merged image
  contains an alpha channel, so assume one is present whenever the file
  declares more channels than its color mode requires.
*/
static void CheckMergedImageAlpha(const PSDInfo *psd_info,Image *image)
{
  size_t
    color_channels;

  switch (psd_info->mode)
  {
    case GrayscaleMode:
      color_channels=1;
      break;
    case RGBMode:
      color_channels=3;
      break;
    case CMYKMode:
      color_channels=4;
      break;
    default:
      return;
  }
  if (psd_info->channels > color_channels)
    image->alpha_trait=BlendPixelTrait;
}
static void ParseAdditionalInfo(LayerInfo *layer_info)
{
char
key[5];
size_t
remaining_length;
unsigned char
*p;
unsigned int
size;
p=GetStringInfoDatum(layer_info->info);
remaining_length=GetStringInfoLength(layer_info->info);
while (remaining_length >= 12)
{
/* skip over signature */
p+=4;
key[0]=(char) (*p++);
key[1]=(char) (*p++);
key[2]=(char) (*p++);
key[3]=(char) (*p++);
key[4]='\0';
size=(unsigned int) (*p++) << 24;
size|=(unsigned int) (*p++) << 16;
size|=(unsigned int) (*p++) << 8;
size|=(unsigned int) (*p++);
size=size & 0xffffffff;
remaining_length-=12;
if ((size_t) size > remaining_length)
break;
if (LocaleNCompare(key,"luni",sizeof(key)) == 0)
{
unsigned char
*name;
unsigned int
length;
length=(unsigned int) (*p++) << 24;
length|=(unsigned int) (*p++) << 16;
length|=(unsigned int) (*p++) << 8;
length|=(unsigned int) (*p++);
if (length * 2 > size - 4)
break;
if (sizeof(layer_info->name) <= length)
break;
name=layer_info->name;
while (length > 0)
{
/* Only ASCII strings are supported */
if (*p++ != '\0')
break;
*name++=*p++;
length--;
}
if (length == 0)
*name='\0';
break;
}
else
p+=size;
remaining_length-=(size_t) size;
}
}
/*
  Determine the size of the layer-info section.  When the plain size field
  is zero the layers may instead be wrapped in "8BIM" keyed blocks: a
  Mt16/Mt32/Mtrn block marks merged-image transparency (implying alpha) and
  a following Lr16/Lr32 block carries the actual layer-info size.  Returns
  0 when no usable layer info is found; the blob cursor is advanced past
  everything consumed here.
*/
static MagickSizeType GetLayerInfoSize(const PSDInfo *psd_info,Image *image)
{
  char
    type[4];

  MagickSizeType
    size;

  ssize_t
    count;

  size=GetPSDSize(psd_info,image);
  if (size != 0)
    return(size);
  /* NOTE(review): 4 bytes are discarded here before the 8BIM signature --
     presumably a length/padding field; confirm against the PSD spec. */
  (void) ReadBlobLong(image);
  count=ReadPSDString(image,type,4);
  if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
    return(0);
  count=ReadPSDString(image,type,4);
  if ((count == 4) && ((LocaleNCompare(type,"Mt16",4) == 0) ||
      (LocaleNCompare(type,"Mt32",4) == 0) ||
      (LocaleNCompare(type,"Mtrn",4) == 0)))
    {
      /* Merged-transparency marker: a non-zero size here is unexpected. */
      size=GetPSDSize(psd_info,image);
      if (size != 0)
        return(0);
      image->alpha_trait=BlendPixelTrait;
      count=ReadPSDString(image,type,4);
      if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
        return(0);
      count=ReadPSDString(image,type,4);
    }
  if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) ||
      (LocaleNCompare(type,"Lr32",4) == 0)))
    size=GetPSDSize(psd_info,image);
  return(size);
}
/*
  Parse the layer-info section: the layer count, then per layer the record
  (geometry, channels, blend key, opacity, flags, mask, blending ranges,
  name, additional info), then the per-layer channel pixel data.  On
  success the decoded layers are attached to `image` as an image list; with
  `skip_layers` set only the alpha implication of a negative layer count is
  recorded.  All reads are sequential on the blob, so statement order is
  load-bearing throughout.
*/
static MagickBooleanType ReadPSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  char
    type[4];

  LayerInfo
    *layer_info;

  MagickSizeType
    size;

  MagickBooleanType
    status;

  ssize_t
    count,
    index,
    i,
    j,
    number_layers;

  size=GetLayerInfoSize(psd_info,image);
  if (size == 0)
    {
      /* No layer section: only decide whether the merged image has alpha. */
      CheckMergedImageAlpha(psd_info,image);
      return(MagickTrue);
    }
  layer_info=(LayerInfo *) NULL;
  number_layers=(ssize_t) ReadBlobSignedShort(image);
  if (number_layers < 0)
    {
      /*
        The first alpha channel in the merged result contains the
        transparency data for the merged result.
      */
      number_layers=MagickAbsoluteValue(number_layers);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " negative layer count corrected for");
      image->alpha_trait=BlendPixelTrait;
    }
  /*
    We only need to know if the image has an alpha channel
  */
  if (skip_layers != MagickFalse)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " image contains %.20g layers",(double) number_layers);
  if (number_layers == 0)
    ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
      image->filename);
  layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
    sizeof(*layer_info));
  if (layer_info == (LayerInfo *) NULL)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " allocation of LayerInfo failed");
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info));
  /*
    Pass 1: read every layer record (metadata only, no pixel data).
  */
  for (i=0; i < number_layers; i++)
  {
    ssize_t
      top,
      left,
      bottom,
      right;

    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading layer #%.20g",(double) i+1);
    top=(ssize_t) ReadBlobSignedLong(image);
    left=(ssize_t) ReadBlobSignedLong(image);
    bottom=(ssize_t) ReadBlobSignedLong(image);
    right=(ssize_t) ReadBlobSignedLong(image);
    if ((right < left) || (bottom < top))
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    layer_info[i].page.y=top;
    layer_info[i].page.x=left;
    layer_info[i].page.width=(size_t) (right-left);
    layer_info[i].page.height=(size_t) (bottom-top);
    layer_info[i].channels=ReadBlobShort(image);
    if (layer_info[i].channels > MaxPSDChannels)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
          image->filename);
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
        (double) layer_info[i].page.x,(double) layer_info[i].page.y,
        (double) layer_info[i].page.height,(double)
        layer_info[i].page.width,(double) layer_info[i].channels);
    /* Per channel: 2-byte PSD channel id plus the channel data size. */
    for (j=0; j < (ssize_t) layer_info[i].channels; j++)
    {
      layer_info[i].channel_info[j].supported=GetPixelChannelFromPsdIndex(
        psd_info,(ssize_t) ReadBlobSignedShort(image),
        &layer_info[i].channel_info[j].channel);
      layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
        image);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " channel[%.20g]: type=%.20g, size=%.20g",(double) j,
          (double) layer_info[i].channel_info[j].channel,
          (double) layer_info[i].channel_info[j].size);
    }
    if (CheckPSDChannels(image,psd_info,&layer_info[i]) == MagickFalse)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadPSDString(image,type,4);
    if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer type was %.4s instead of 8BIM", type);
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadPSDString(image,layer_info[i].blendkey,4);
    if (count != 4)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
      ReadBlobByte(image));
    layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
    layer_info[i].flags=(unsigned char) ReadBlobByte(image);
    /* Flag bit 0x02 set means the layer is hidden. */
    layer_info[i].visible=!(layer_info[i].flags & 0x02);
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
        layer_info[i].blendkey,(double) layer_info[i].opacity,
        layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
        layer_info[i].visible ? "true" : "false");
    (void) ReadBlobByte(image); /* filler */
    size=ReadBlobLong(image);
    if (size != 0)
      {
        MagickSizeType
          combined_length,
          length;

        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer contains additional info");
        length=ReadBlobLong(image);
        /* combined_length tracks bytes consumed of the `size`-byte extra
           section so the remainder can be handed to ParseAdditionalInfo. */
        combined_length=length+4;
        if (length != 0)
          {
            /*
              Layer mask info.
            */
            layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.height=(size_t)
              (ReadBlobSignedLong(image)-layer_info[i].mask.page.y);
            layer_info[i].mask.page.width=(size_t) (
              ReadBlobSignedLong(image)-layer_info[i].mask.page.x);
            layer_info[i].mask.background=(unsigned char) ReadBlobByte(
              image);
            layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
            /* Without flag 0x01 the mask position is relative to the
               layer, so rebase it here. */
            if (!(layer_info[i].mask.flags & 0x01))
              {
                layer_info[i].mask.page.y=layer_info[i].mask.page.y-
                  layer_info[i].page.y;
                layer_info[i].mask.page.x=layer_info[i].mask.page.x-
                  layer_info[i].page.x;
              }
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
                (double) layer_info[i].mask.page.x,(double)
                layer_info[i].mask.page.y,(double)
                layer_info[i].mask.page.width,(double)
                layer_info[i].mask.page.height,(double) ((MagickOffsetType)
                length)-18);
            /*
              Skip over the rest of the layer mask information.
              NOTE(review): length < 18 would wrap (length-18) to a huge
              discard; DiscardBlobBytes then fails and the corrupt-file
              path below is taken -- confirm that is the intended guard.
            */
            if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        length=ReadBlobLong(image);
        combined_length+=length+4;
        if (length != 0)
          {
            /*
              Layer blending ranges info.
            */
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer blending ranges: length=%.20g",(double)
                ((MagickOffsetType) length));
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /*
          Layer name.
        */
        length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
        combined_length+=length+1;
        if (length > 0)
          (void) ReadBlob(image,(size_t) length++,layer_info[i].name);
        layer_info[i].name[length]='\0';
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer name: %s",layer_info[i].name);
        /* Name (including its length byte) is padded to a 4-byte multiple. */
        if ((length % 4) != 0)
          {
            length=4-(length % 4);
            combined_length+=length;
            /* Skip over the padding of the layer name */
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /* Whatever is left of the extra section is additional info. */
        length=(MagickSizeType) size-combined_length;
        if (length > 0)
          {
            unsigned char
              *info;

            if (length > GetBlobSize(image))
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "InsufficientImageDataInFile",image->filename);
              }
            layer_info[i].info=AcquireStringInfo((const size_t) length);
            info=GetStringInfoDatum(layer_info[i].info);
            (void) ReadBlob(image,(const size_t) length,info);
            ParseAdditionalInfo(&layer_info[i]);
          }
      }
  }
  /*
    Pass 2: allocate an image per non-empty layer.
  */
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer data is empty");
        if (layer_info[i].info != (StringInfo *) NULL)
          layer_info[i].info=DestroyStringInfo(layer_info[i].info);
        continue;
      }
    /*
      Allocate layered image.
    */
    layer_info[i].image=CloneImage(image,layer_info[i].page.width,
      layer_info[i].page.height,MagickFalse,exception);
    if (layer_info[i].image == (Image *) NULL)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " allocation of image for layer %.20g failed",(double) i);
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      }
    for (j=0; j < (ssize_t) layer_info[i].channels; j++)
    {
      if (layer_info[i].channel_info[j].channel == AlphaPixelChannel)
        {
          layer_info[i].image->alpha_trait=BlendPixelTrait;
          break;
        }
    }
    if (layer_info[i].info != (StringInfo *) NULL)
      {
        (void) SetImageProfile(layer_info[i].image,"psd:additional-info",
          layer_info[i].info,exception);
        layer_info[i].info=DestroyStringInfo(layer_info[i].info);
      }
  }
  if (image_info->ping != MagickFalse)
    {
      AttachPSDLayers(image,layer_info,number_layers);
      return(MagickTrue);
    }
  /*
    Pass 3: read (or skip) the pixel data of each layer.
  */
  status=MagickTrue;
  index=0;
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].image == (Image *) NULL) ||
        (PSDSkipImage(psd_info, image_info,++index) != MagickFalse))
      {
        /* Layer is empty or outside the scene range: skip its channels. */
        for (j=0; j < (ssize_t) layer_info[i].channels; j++)
        {
          if (DiscardBlobBytes(image,(MagickSizeType)
              layer_info[i].channel_info[j].size) == MagickFalse)
            {
              layer_info=DestroyLayerInfo(layer_info,number_layers);
              ThrowBinaryException(CorruptImageError,
                "UnexpectedEndOfFile",image->filename);
            }
        }
        continue;
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for layer %.20g",(double) i);
    status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
      exception);
    if (status == MagickFalse)
      break;
    status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
      (MagickSizeType) number_layers);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    AttachPSDLayers(image,layer_info,number_layers);
  else
    layer_info=DestroyLayerInfo(layer_info,number_layers);
  return(status);
}
/*
  Public entry point for reading PSD layers.  A coder-policy denial is not
  treated as an error: the layers are simply not read and MagickTrue is
  returned.
*/
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  if (IsRightsAuthorized(CoderPolicyDomain,ReadPolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse,
    exception));
}
/*
  Read the precombined (merged/composite) image that follows the layer
  section.  Only Raw and RLE compression are handled here; anything else
  raises a warning and returns MagickFalse so the caller can fall back to
  flattening the layers.
*/
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  MagickOffsetType
    *sizes;

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    i;

  /* The merged image is scene 0; skip it when other scenes were asked for. */
  if ((image_info->number_scenes != 0) && (image_info->scene != 0))
    return(MagickTrue);
  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if (compression != Raw && compression != RLE)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      /* RLE data is preceded by one scanline-size entry per row per
         channel. */
      sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  SetPsdMetaChannels(image,psd_info,psd_info->channels,exception);
  status=MagickTrue;
  /* Channels are stored planar: all data of channel 0, then channel 1, ... */
  for (i=0; i < (ssize_t) psd_info->channels; i++)
  {
    PixelChannel
      channel;

    status=GetPixelChannelFromPsdIndex(psd_info,i,&channel);
    if (status == MagickFalse)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          CorruptImageError,"MaximumChannelsExceeded","'%.20g'",(double) i);
        break;
      }
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,channel,sizes+(i*image->rows),exception);
    else
      status=ReadPSDChannelRaw(image,channel,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
        psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateCMYK(image,exception);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  /* RelinquishMagickMemory tolerates NULL, so no compression check needed. */
  sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
  return(status);
}
/*
  Read a PSD/PSB file in order: file header, colormap, image-resource
  blocks, layer-and-mask section, and the precombined (merged) image.  When
  no merged image can be read, one is synthesized by flattening the layers.
  Returns the first image in the resulting list, or NULL on failure.
*/
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    skip_layers;

  MagickOffsetType
    offset;

  MagickSizeType
    length;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  ssize_t
    i;

  size_t
    image_list_length;

  ssize_t
    count;

  StringInfo
    *profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Read image header.
  */
  image->endian=MSBEndian;
  count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
  psd_info.version=ReadBlobMSBShort(image);
  /* Version 1 is PSD, version 2 is PSB (large document). */
  if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
      ((psd_info.version != 1) && (psd_info.version != 2)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  (void) ReadBlob(image,6,psd_info.reserved);
  psd_info.channels=ReadBlobMSBShort(image);
  if (psd_info.channels < 1)
    ThrowReaderException(CorruptImageError,"MissingImageChannel");
  if (psd_info.channels > MaxPSDChannels)
    ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
  psd_info.rows=ReadBlobMSBLong(image);
  psd_info.columns=ReadBlobMSBLong(image);
  /* PSD (version 1) documents are limited to 30000x30000 pixels. */
  if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
      (psd_info.columns > 30000)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.depth=ReadBlobMSBShort(image);
  if ((psd_info.depth != 1) && (psd_info.depth != 8) &&
      (psd_info.depth != 16) && (psd_info.depth != 32))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.mode=ReadBlobMSBShort(image);
  if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
      (double) psd_info.columns,(double) psd_info.rows,(double)
      psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
      psd_info.mode));
  if (EOFBlob(image) != MagickFalse)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Initialize image.
  */
  image->depth=psd_info.depth;
  image->columns=psd_info.columns;
  image->rows=psd_info.rows;
  status=SetImageExtent(image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  /* min_channels = color channels the mode requires (before alpha/meta). */
  psd_info.min_channels=3;
  switch (psd_info.mode)
  {
    case LabMode:
    {
      (void) SetImageColorspace(image,LabColorspace,exception);
      break;
    }
    case CMYKMode:
    {
      psd_info.min_channels=4;
      (void) SetImageColorspace(image,CMYKColorspace,exception);
      break;
    }
    case BitmapMode:
    case GrayscaleMode:
    case DuotoneMode:
    {
      if (psd_info.depth != 32)
        {
          status=AcquireImageColormap(image,MagickMin((size_t)
            (psd_info.depth < 16 ? 256 : 65536), MaxColormapSize),exception);
          if (status == MagickFalse)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " Image colormap allocated");
        }
      psd_info.min_channels=1;
      (void) SetImageColorspace(image,GRAYColorspace,exception);
      break;
    }
    case IndexedMode:
    {
      psd_info.min_channels=1;
      break;
    }
    case MultichannelMode:
    {
      if ((psd_info.channels > 0) && (psd_info.channels < 3))
        {
          psd_info.min_channels=psd_info.channels;
          (void) SetImageColorspace(image,GRAYColorspace,exception);
        }
      break;
    }
  }
  if (psd_info.channels < psd_info.min_channels)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Read PSD raster colormap only present for indexed and duotone images.
  */
  length=ReadBlobMSBLong(image);
  if ((psd_info.mode == IndexedMode) && (length < 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (length != 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading colormap");
      if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32))
        {
          /*
            Duotone image data; the format of this data is undocumented.
            32 bits per pixel; the colormap is ignored.
          */
          (void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR);
        }
      else
        {
          size_t
            number_colors;

          /*
            Read PSD raster colormap.
          */
          number_colors=(size_t) length/3;
          if (number_colors > 65536)
            ThrowReaderException(CorruptImageError,"ImproperImageHeader");
          if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          /* Colormap is planar: all reds, then all greens, then blues. */
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          image->alpha_trait=UndefinedPixelTrait;
        }
    }
  if ((image->depth == 1) && (image->storage_class != PseudoClass))
    ThrowReaderException(CorruptImageError, "ImproperImageHeader");
  psd_info.has_merged_image=MagickTrue;
  profile=(StringInfo *) NULL;
  length=ReadBlobMSBLong(image);
  if (length != 0)
    {
      unsigned char
        *blocks;

      /*
        Image resources block.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading image resource blocks - %.20g bytes",(double)
          ((MagickOffsetType) length));
      if (length > GetBlobSize(image))
        ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
      blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
        sizeof(*blocks));
      if (blocks == (unsigned char *) NULL)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
      count=ReadBlob(image,(size_t) length,blocks);
      if ((count != (ssize_t) length) || (length < 4) ||
          (LocaleNCompare((char *) blocks,"8BIM",4) != 0))
        {
          blocks=(unsigned char *) RelinquishMagickMemory(blocks);
          ThrowReaderException(CorruptImageError,"ImproperImageHeader");
        }
      /* May also set psd_info.has_merged_image from the resource data. */
      profile=ParseImageResourceBlocks(&psd_info,image,blocks,(size_t) length);
      blocks=(unsigned char *) RelinquishMagickMemory(blocks);
    }
  /*
    Layer and mask block.
  */
  length=GetPSDSize(&psd_info,image);
  if (length == 8)
    {
      /* NOTE(review): both reads assign `length`; the first value is
         deliberately discarded and only the second is kept -- confirm
         against the PSD specification. */
      length=ReadBlobMSBLong(image);
      length=ReadBlobMSBLong(image);
    }
  /* Remember where the layer section starts so it can be re-read later. */
  offset=TellBlob(image);
  skip_layers=MagickFalse;
  if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
      (psd_info.has_merged_image != MagickFalse))
    {
      /* Only the composite was requested; layers need not be decoded. */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " read composite only");
      skip_layers=MagickTrue;
    }
  if (length == 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " image has no layers");
    }
  else
    {
      if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
          exception) != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      /*
        Skip the rest of the layer and mask information.
      */
      (void) SeekBlob(image,offset+length,SEEK_SET);
    }
  /*
    If we are only "pinging" the image, then we're done - so return.
  */
  if (EOFBlob(image) != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
    }
  if (image_info->ping != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      (void) CloseBlob(image);
      return(GetFirstImageInList(image));
    }
  /*
    Read the precombined layer, present for PSD < 4 compatibility.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " reading the precombined layer");
  image_list_length=GetImageListLength(image);
  if ((psd_info.has_merged_image != MagickFalse) || (image_list_length == 1))
    psd_info.has_merged_image=(MagickBooleanType) ReadPSDMergedImage(
      image_info,image,&psd_info,exception);
  if ((psd_info.has_merged_image == MagickFalse) && (image_list_length == 1) &&
      (length != 0))
    {
      /*
        No usable merged image: rewind and read the layer section after all
        so at least the layers are returned.
      */
      (void) SeekBlob(image,offset,SEEK_SET);
      status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
        exception);
      if (status != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      image_list_length=GetImageListLength(image);
    }
  if (psd_info.has_merged_image == MagickFalse)
    {
      Image
        *merged;

      if (image_list_length == 1)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
        }
      /*
        Synthesize the merged image by flattening the layers onto a
        transparent background.
      */
      image->background_color.alpha=(MagickRealType) TransparentAlpha;
      image->background_color.alpha_trait=BlendPixelTrait;
      (void) SetImageBackgroundColor(image,exception);
      merged=MergeImageLayers(image,FlattenLayer,exception);
      if (merged == (Image *) NULL)
        {
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      ReplaceImageInList(&image,merged);
    }
  if (profile != (StringInfo *) NULL)
    {
      const char
        *option;

      Image
        *next;

      MagickBooleanType
        replicate_profile;

      /*
        Attach the resource-block profile to the first kept image, or to
        every kept image when psd:replicate-profile is set.
      */
      option=GetImageOption(image_info,"psd:replicate-profile");
      replicate_profile=IsStringTrue(option);
      i=0;
      next=image;
      while (next != (Image *) NULL)
      {
        if (PSDSkipImage(&psd_info,image_info,i++) == MagickFalse)
          {
            (void) SetImageProfile(next,GetStringInfoName(profile),profile,
              exception);
            if (replicate_profile == MagickFalse)
              break;
          }
        next=next->next;
      }
      profile=DestroyStringInfo(profile);
    }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
/*
  Register the PSB (large document) and PSD formats.  Both share the same
  reader, writer, and format test, and both require seekable streams for
  decoding and encoding.
*/
ModuleExport size_t RegisterPSDImage(void)
{
  static const char
    *names[2] = { "PSB", "PSD" },
    *descriptions[2] =
    {
      "Adobe Large Document Format",
      "Adobe Photoshop bitmap"
    };

  MagickInfo
    *entry;

  ssize_t
    i;

  for (i=0; i < 2; i++)
  {
    entry=AcquireMagickInfo("PSD",names[i],descriptions[i]);
    entry->decoder=(DecodeImageHandler *) ReadPSDImage;
    entry->encoder=(EncodeImageHandler *) WritePSDImage;
    entry->magick=(IsImageFormatHandler *) IsPSD;
    entry->flags|=CoderDecoderSeekableStreamFlag;
    entry->flags|=CoderEncoderSeekableStreamFlag;
    (void) RegisterMagickInfo(entry);
  }
  return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
(void) UnregisterMagickInfo("PSB");
(void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  /*
    PSD (version 1) stores RLE row offsets as 16-bit values; PSB widens
    them to 32-bit.
  */
  if (psd_info->version != 1)
    return(WriteBlobMSBLong(image,(unsigned int) offset));
  return(WriteBlobMSBShort(image,(unsigned short) offset));
}
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    restore_offset;

  ssize_t
    count;

  /*
    Seek back to a previously reserved slot, patch in the size, then
    restore the original stream position.
  */
  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=(psd_info->version == 1) ?
    WriteBlobMSBShort(image,(unsigned short) size) :
    WriteBlobMSBLong(image,(unsigned int) size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  /*
    Section lengths are 32-bit in PSD (version 1) and 64-bit in PSB.
  */
  if (psd_info->version != 1)
    return(WriteBlobLongLong(image,size));
  return(WriteBlobLong(image,(unsigned int) size));
}
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    restore_offset;

  ssize_t
    count;

  /*
    Patch a previously reserved version-dependent size field without
    disturbing the current stream position.
  */
  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=SetPSDSize(psd_info,image,size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
/*
  Compress one row of `length` bytes from `pixels` into `compact_pixels`
  using PackBits run-length encoding and return the number of encoded
  bytes written (including the trailing 0x80 end-of-data marker).
  `compact_pixels` must be large enough for the worst case (see
  AcquireCompactPixels).  NOTE(review): `exception` is accepted but never
  used in this function.
*/
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels,
  ExceptionInfo *exception)
{
  int
    count;

  ssize_t
    i,
    j;

  unsigned char
    *q;

  unsigned char
    *packbits;  /* staging buffer for one literal run: 1 header + up to 127 bytes */

  /*
    Compress pixels with Packbits encoding.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  /* i counts down the bytes left to encode. */
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        /* Single trailing byte: literal run of length 1 (header 0). */
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        /* Two trailing bytes: always cheapest as a 2-byte literal run. */
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /* Three identical bytes: replicate run (header 256-n+1 = -2). */
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        /* Otherwise a 3-byte literal run. */
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            /* Extend the run, but PackBits caps a run at 127 bytes. */
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)
                break;
            }
            i-=count;
            *q++=(unsigned char) ((256-count)+1);  /* two's-complement header */
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run.
        */
        count=0;
        /* Collect bytes until a 3-byte repeat starts (worth a packed run). */
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        *packbits=(unsigned char) (count-1);  /* literal header: n-1 */
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128;  /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const CompressionType compression,
  const ssize_t channels)
{
  /*
    Emit the two-byte compression marker; for RLE additionally reserve one
    zeroed row-length slot per channel row (patched later via
    WritePSDOffset).  Returns the number of bytes written.
  */
  if (compression == RLECompression)
    {
      size_t
        length;

      ssize_t
        i,
        y;

      length=(size_t) WriteBlobShort(image,RLE);
      for (i=0; i < channels; i++)
        for (y=0; y < (ssize_t) next_image->rows; y++)
          length+=SetPSDOffset(psd_info,image,0);
      return(length);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    return((size_t) WriteBlobShort(image,ZipWithoutPrediction));
#endif
  return((size_t) WriteBlobShort(image,Raw));
}
/*
  Write a single channel (`quantum_type`) of `next_image` to the output
  blob `image`, encoded with `compression` (Raw, RLE, or Zip when zlib is
  available).  When `separate` is set, each channel carries its own
  compression marker and `size_offset` is recomputed locally.  Returns the
  number of payload bytes written, or 0 on failure.
*/
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  const CompressionType compression,ExceptionInfo *exception)
{
  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  const Quantum
    *p;

  ssize_t
    i;

  size_t
    count,
    length;

  ssize_t
    y;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE
  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  count=0;
  if (separate != MagickFalse)
    {
      /* +2 skips the compression marker we are about to write. */
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,1);
    }
  /* PSD supports 8- or 16-bit channels only. */
  if (next_image->depth > 8)
    next_image->depth=16;
  monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
    MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      compressed_pixels=(unsigned char *) AcquireQuantumMemory(
        MagickMinBufferExtent,sizeof(*compressed_pixels));
      if (compressed_pixels == (unsigned char *) NULL)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
      memset(&stream,0,sizeof(stream));
      stream.data_type=Z_BINARY;
      /* -quality 1..9 maps directly onto the zlib compression level. */
      level=Z_DEFAULT_COMPRESSION;
      if ((image_info->quality > 0 && image_info->quality < 10))
        level=(int) image_info->quality;
      if (deflateInit(&stream,level) != Z_OK)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          compressed_pixels=(unsigned char *) RelinquishMagickMemory(
            compressed_pixels);
          return(0);
        }
    }
#endif
  for (y=0; y < (ssize_t) next_image->rows; y++)
  {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,exception);
    /* Bitmap (1-bit) PSD stores ink, not intensity: invert the samples. */
    if (monochrome != MagickFalse)
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);
    if (compression == RLECompression)
      {
        length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
          exception);
        count+=WriteBlob(image,length,compact_pixels);
        /* Patch this row's reserved length slot, then advance to the next. */
        size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
      }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else if (compression == ZipCompression)
      {
        stream.avail_in=(uInt) length;
        stream.next_in=(Bytef *) pixels;
        /* Finish the zlib stream on the last row. */
        if (y == (ssize_t) next_image->rows-1)
          flush=Z_FINISH;
        do {
          stream.avail_out=(uInt) MagickMinBufferExtent;
          stream.next_out=(Bytef *) compressed_pixels;
          if (deflate(&stream,flush) == Z_STREAM_ERROR)
            break;
          length=(size_t) MagickMinBufferExtent-stream.avail_out;
          if (length > 0)
            count+=WriteBlob(image,length,compressed_pixels);
        } while (stream.avail_out == 0);
      }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}
static unsigned char *AcquireCompactPixels(const Image *image,
  ExceptionInfo *exception)
{
  size_t
    packet_size;

  unsigned char
    *compact_pixels;

  /*
    Allocate a worst-case PackBits output buffer for one row; 16-bit
    samples double the packet size.  Returns NULL (with an exception
    raised) on allocation failure.
  */
  packet_size=(image->depth > 8UL) ? 2UL : 1UL;
  compact_pixels=(unsigned char *) AcquireQuantumMemory((9*image->columns)+1,
    packet_size*sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    (void) ThrowMagickException(exception,GetMagickModule(),ResourceLimitError,
      "MemoryAllocationFailed","`%s'",image->filename);
  return(compact_pixels);
}
/*
  Write every channel of `next_image` (indexed, gray, RGB/CMYK, alpha, and
  an optional opacity mask) to the blob `image`.  When `separate` is set
  (layer data) each channel records its own length at `size_offset`;
  otherwise (merged composite) one shared compression header is written up
  front and `rows_offset` tracks the per-channel RLE row-length table.
  Returns the total bytes written, or 0 on failure.
*/
static size_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  ExceptionInfo *exception)
{
  CompressionType
    compression;

  Image
    *mask;

  MagickOffsetType
    rows_offset;

  size_t
    channels,
    count,
    length,
    offset_length;

  unsigned char
    *compact_pixels;

  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  /* -compress on the command line overrides the image's own setting. */
  compression=next_image->compression;
  if (image_info->compression != UndefinedCompression)
    compression=image_info->compression;
  if (compression == RLECompression)
    {
      compact_pixels=AcquireCompactPixels(next_image,exception);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      /* Composite data: count channels and write one shared header. */
      if ((next_image->storage_class != PseudoClass) ||
          (IsImageGray(next_image) != MagickFalse))
        {
          if (IsImageGray(next_image) == MagickFalse)
            channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 :
              3);
          if (next_image->alpha_trait != UndefinedPixelTrait)
            channels++;
        }
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,
        (ssize_t) channels);
      /* Size of one channel's RLE row-length table (2 or 4 bytes/row). */
      offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
    }
  size_offset+=2;
  if ((next_image->storage_class == PseudoClass) &&
      (IsImageGray(next_image) == MagickFalse))
    {
      /* Palette image: a single channel of colormap indexes. */
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        IndexQuantum,compact_pixels,rows_offset,separate,compression,
        exception);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  else
    {
      if (IsImageGray(next_image) != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GrayQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
      else
        {
          /* PSD stores CMYK inverted; NegateCMYK is undone near the end. */
          if (next_image->colorspace == CMYKColorspace)
            (void) NegateCMYK(next_image,exception);
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            RedQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GreenQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            BlueQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          if (next_image->colorspace == CMYKColorspace)
            {
              length=WritePSDChannel(psd_info,image_info,image,next_image,
                BlackQuantum,compact_pixels,rows_offset,separate,compression,
                exception);
              if (separate != MagickFalse)
                size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
              else
                rows_offset+=offset_length;
              count+=length;
            }
        }
      if (next_image->alpha_trait != UndefinedPixelTrait)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            AlphaQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
    }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  /* Undo the earlier in-place CMYK negation. */
  if (next_image->colorspace == CMYKColorspace)
    (void) NegateCMYK(next_image,exception);
  if (separate != MagickFalse)
    {
      const char
        *property;

      /* An opacity mask registered by the reader is written as an extra
         channel (see WritePSDLayersInternal, channel id -2). */
      property=GetImageArtifact(next_image,"psd:opacity-mask");
      if (property != (const char *) NULL)
        {
          mask=(Image *) GetImageRegistry(ImageRegistryType,property,
            exception);
          if (mask != (Image *) NULL)
            {
              if (compression == RLECompression)
                {
                  compact_pixels=AcquireCompactPixels(mask,exception);
                  if (compact_pixels == (unsigned char *) NULL)
                    return(0);
                }
              length=WritePSDChannel(psd_info,image_info,image,mask,
                RedQuantum,compact_pixels,rows_offset,MagickTrue,compression,
                exception);
              (void) WritePSDSize(psd_info,image,length,size_offset);
              count+=length;
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
            }
        }
    }
  return(count);
}
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  size_t
    count,
    length;

  ssize_t
    i;

  /*
    A Pascal string is a one-byte length (clamped to 255) followed by the
    characters; the total, length byte included, is padded with NUL bytes
    to a multiple of `padding`.  Returns the number of bytes written.
  */
  count=0;
  length=strlen(value);
  if (length > 255UL)
    length=255UL;
  if (length == 0)
    count+=WriteBlobByte(image,0);
  else
    {
      count+=WriteBlobByte(image,(unsigned char) length);
      count+=WriteBlob(image,length,(const unsigned char *) value);
    }
  length++;  /* account for the length byte itself */
  if ((length % padding) != 0)
    for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
      count+=WriteBlobByte(image,0);
  return(count);
}
/*
  Write the 8BIM resolution resource (id 0x03ED): horizontal and vertical
  resolution as 16.16 fixed-point values in pixels/inch, each followed by
  its display-unit shorts.  Fix: the rounding term 0.5 was previously added
  twice (once when computing x/y_resolution and again inside the cast),
  biasing the stored fixed-point value upward by a full unit; rounding is
  now applied exactly once, at the write.
*/
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  if (image->units == PixelsPerCentimeterResolution)
    {
      /* Convert pixels/cm to pixels/inch before fixed-point scaling. */
      x_resolution=2.54*65536.0*image->resolution.x;
      y_resolution=2.54*65536.0*image->resolution.y;
      units=2;
    }
  else
    {
      x_resolution=65536.0*image->resolution.x;
      y_resolution=65536.0*image->resolution.y;
      units=1;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);
  (void) WriteBlobMSBLong(image,16); /* resource size */
  (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units); /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units); /* height unit */
}
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  size_t
    count;

  /*
    Write a layer channel record: the channel id followed by a zero length
    placeholder that is patched once the channel data has been written.
  */
  count=(size_t) WriteBlobShort(image,(unsigned short) channel);
  count+=(size_t) SetPSDSize(psd_info,image,0);
  return(count);
}
/*
  Remove the ICC profile resource (id 0x040F) from an 8BIM profile copy in
  place, so the writer's own ICC resource is not duplicated.  The profile
  is shrunk via memmove + SetStringInfoLength; malformed blocks stop the
  scan early.
*/
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  /* 16 bytes is the smallest span worth scanning for a header. */
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    unsigned char
      *q;

    q=(unsigned char *) p;  /* remember the start of this resource block */
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    /* Header: 4-byte signature, 2-byte id, 2-byte name stub, 4-byte size. */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if (id == 0x0000040f)
      {
        ssize_t
          quantum;

        /* Whole block size: even-padded payload (PSDQuantum) + 12 header. */
        quantum=PSDQuantum(count)+12;
        if ((quantum >= 12) && (quantum < (ssize_t) length))
          {
            if ((q+quantum < (datum+length-16)))
              (void) memmove(q,q+quantum,length-quantum-(q-datum));
            SetStringInfoLength(bim_profile,length-quantum);
          }
        break;
      }
    p+=count;
    /* Payloads are padded to even length. */
    if ((count & 0x01) != 0)
      p++;
  }
}
/*
  Remove the resolution resource (id 0x03ED) from an 8BIM profile copy in
  place; the writer emits its own resolution block (see
  WriteResolutionResourceBlock), so a stale one must not survive.
*/
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
  const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    unsigned char
      *q;

    ssize_t
      cnt;

    q=(unsigned char *) p;  /* start of this resource block */
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      return;
    /* Header: 4-byte signature, 2-byte id, 2-byte name stub, 4-byte size. */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    cnt=PSDQuantum(count);  /* payload rounded up to even length */
    if (cnt < 0)
      return;
    if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
        ((ssize_t) length-(cnt+12)-(q-datum)) > 0)
      {
        /* Close the gap over the 12-byte header plus padded payload. */
        (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
        SetStringInfoLength(bim_profile,length-(cnt+12));
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;  /* skip the odd-length pad byte */
  }
}
/*
  Filter the "psd:additional-info" profile according to the
  "psd:additional-info" image option: "all" keeps everything, anything but
  "selective" drops the profile entirely, and "selective" keeps only the
  whitelisted layer-resource keys below, compacting the profile in place.
  Returns the (possibly shrunk) profile, or NULL when nothing survives or
  the data is malformed.
*/
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
#define PSDKeySize 5
#define PSDAllowedLength 36

  char
    key[PSDKeySize];

  /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
  const char
    allowed[PSDAllowedLength][PSDKeySize] = {
      "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
      "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
      "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
      "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
    },
    *option;

  const StringInfo
    *info;

  MagickBooleanType
    found;

  size_t
    i;

  size_t
    remaining_length,
    length;

  StringInfo
    *profile;

  unsigned char
    *p;

  unsigned int
    size;

  info=GetImageProfile(image,"psd:additional-info");
  if (info == (const StringInfo *) NULL)
    return((const StringInfo *) NULL);
  option=GetImageOption(image_info,"psd:additional-info");
  if (LocaleCompare(option,"all") == 0)
    return(info);
  if (LocaleCompare(option,"selective") != 0)
    {
      /* Any other value (including unset): discard the profile. */
      profile=RemoveImageProfile(image,"psd:additional-info");
      return(DestroyStringInfo(profile));
    }
  length=GetStringInfoLength(info);
  p=GetStringInfoDatum(info);
  remaining_length=length;
  length=0;  /* re-used below as the number of bytes kept */
  /* Each entry: 4-byte signature, 4-byte key, 4-byte big-endian size. */
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    /* Truncated entry: treat the whole profile as unusable. */
    if ((size_t) size > remaining_length)
      return((const StringInfo *) NULL);
    found=MagickFalse;
    for (i=0; i < PSDAllowedLength; i++)
    {
      if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
        continue;
      found=MagickTrue;
      break;
    }
    remaining_length-=(size_t) size;
    if (found == MagickFalse)
      {
        /* Not whitelisted: compact the rest of the buffer over this entry. */
        if (remaining_length > 0)
          p=(unsigned char *) memmove(p-12,p+size,remaining_length);
        continue;
      }
    length+=(size_t) size+12;
    p+=size;
  }
  profile=RemoveImageProfile(image,"psd:additional-info");
  if (length == 0)
    return(DestroyStringInfo(profile));
  /* Shrink to the kept prefix and re-attach under the same profile name. */
  SetStringInfoLength(profile,(const size_t) length);
  (void) SetImageProfile(image,"psd:additional-info",info,exception);
  return(profile);
}
/*
  Write the PSD layer records and layer pixel data for every image in the
  list after `image` (or `image` itself when it is alone).  Two passes:
  first the per-layer header records (bounds, channel table, blend mode,
  opacity, mask, name, additional info), then the channel pixel data via
  WritePSDChannels.  On return *layers_size (when non-NULL) holds the
  unpadded section size; the size field reserved at entry is patched with
  the even-padded size.
*/
static MagickBooleanType WritePSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size,
  ExceptionInfo *exception)
{
  char
    layer_name[MagickPathExtent];

  const char
    *property;

  const StringInfo
    *info;

  Image
    *base_image,
    *next_image;

  MagickBooleanType
    status;

  MagickOffsetType
    *layer_size_offsets,
    size_offset;

  ssize_t
    i;

  size_t
    layer_count,
    layer_index,
    length,
    name_length,
    rounded_size,
    size;

  status=MagickTrue;
  base_image=GetNextImageInList(image);
  if (base_image == (Image *) NULL)
    base_image=image;
  size=0;
  /* Reserve the layer-section size field; patched at the end. */
  size_offset=TellBlob(image);
  (void) SetPSDSize(psd_info,image,0);
  layer_count=0;
  for (next_image=base_image; next_image != NULL; )
  {
    layer_count++;
    next_image=GetNextImageInList(next_image);
  }
  /* NOTE(review): a negative layer count appears to flag that the first
     alpha channel holds the merged result's transparency — confirm against
     the PSD specification. */
  if (image->alpha_trait != UndefinedPixelTrait)
    size+=WriteBlobShort(image,-(unsigned short) layer_count);
  else
    size+=WriteBlobShort(image,(unsigned short) layer_count);
  layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
    (size_t) layer_count,sizeof(MagickOffsetType));
  if (layer_size_offsets == (MagickOffsetType *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  layer_index=0;
  /* Pass 1: layer records. */
  for (next_image=base_image; next_image != NULL; )
  {
    Image
      *mask;

    unsigned char
      default_color;

    unsigned short
      channels,
      total_channels;

    mask=(Image *) NULL;
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    default_color=0;
    if (property != (const char *) NULL)
      {
        mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception);
        default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0);
      }
    /* Layer bounds: top, left, bottom, right. */
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.y);
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.x);
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+
      next_image->rows));
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+
      next_image->columns));
    channels=1;
    if ((next_image->storage_class != PseudoClass) &&
        (IsImageGray(next_image) == MagickFalse))
      channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 :
        3);
    total_channels=channels;
    if (next_image->alpha_trait != UndefinedPixelTrait)
      total_channels++;
    if (mask != (Image *) NULL)
      total_channels++;
    size+=WriteBlobShort(image,total_channels);
    /* Remember where this layer's channel-size table starts so pass 2 can
       patch the real lengths in. */
    layer_size_offsets[layer_index++]=TellBlob(image);
    for (i=0; i < (ssize_t) channels; i++)
      size+=WriteChannelSize(psd_info,image,(signed short) i);
    if (next_image->alpha_trait != UndefinedPixelTrait)
      size+=WriteChannelSize(psd_info,image,-1);  /* alpha channel id */
    if (mask != (Image *) NULL)
      size+=WriteChannelSize(psd_info,image,-2);  /* user mask channel id */
    size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM");
    size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image));
    property=GetImageArtifact(next_image,"psd:layer.opacity");
    if (property != (const char *) NULL)
      {
        Quantum
          opacity;

        opacity=(Quantum) StringToInteger(property);
        size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
        (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception);
      }
    else
      size+=WriteBlobByte(image,255);
    size+=WriteBlobByte(image,0);  /* clipping */
    size+=WriteBlobByte(image,(const unsigned char)
      (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. */
    size+=WriteBlobByte(image,0);  /* filler */
    info=GetAdditionalInformation(image_info,next_image,exception);
    property=(const char *) GetImageProperty(next_image,"label",exception);
    if (property == (const char *) NULL)
      {
        /* Unnamed layers get a synthetic "L<n>" label. */
        (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g",
          (double) layer_index);
        property=layer_name;
      }
    /* Extra-data length: padded Pascal name + mask block + two fixed longs. */
    name_length=strlen(property)+1;
    if ((name_length % 4) != 0)
      name_length+=(4-(name_length % 4));
    if (info != (const StringInfo *) NULL)
      name_length+=GetStringInfoLength(info);
    name_length+=8;
    if (mask != (Image *) NULL)
      name_length+=20;
    size+=WriteBlobLong(image,(unsigned int) name_length);
    if (mask == (Image *) NULL)
      size+=WriteBlobLong(image,0);
    else
      {
        if (mask->compose != NoCompositeOp)
          (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
            default_color),MagickTrue,exception);
        /* Mask bounds are stored in canvas coordinates. */
        mask->page.y+=image->page.y;
        mask->page.x+=image->page.x;
        size+=WriteBlobLong(image,20);
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.y);
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.x);
        size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+
          mask->page.y));
        size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+
          mask->page.x));
        size+=WriteBlobByte(image,default_color);
        size+=WriteBlobByte(image,(const unsigned char)
          (mask->compose == NoCompositeOp ? 2 : 0));
        size+=WriteBlobMSBShort(image,0);
      }
    size+=WriteBlobLong(image,0);  /* layer blending ranges length */
    size+=WritePascalString(image,property,4);
    if (info != (const StringInfo *) NULL)
      size+=WriteBlob(image,GetStringInfoLength(info),
        GetStringInfoDatum(info));
    next_image=GetNextImageInList(next_image);
  }
  /*
    Now the image data!
  */
  next_image=base_image;
  layer_index=0;
  while (next_image != NULL)
  {
    length=WritePSDChannels(psd_info,image_info,image,next_image,
      layer_size_offsets[layer_index++],MagickTrue,exception);
    if (length == 0)
      {
        status=MagickFalse;
        break;
      }
    size+=length;
    next_image=GetNextImageInList(next_image);
  }
  /*
    Write the total size
  */
  if (layers_size != (size_t*) NULL)
    *layers_size=size;
  /* Pad the section to an even number of bytes. */
  if ((size/2) != ((size+1)/2))
    rounded_size=size+1;
  else
    rounded_size=size;
  (void) WritePSDSize(psd_info,image,rounded_size,size_offset);
  layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
    layer_size_offsets);
  /*
    Remove the opacity mask from the registry
  */
  next_image=base_image;
  while (next_image != (Image *) NULL)
  {
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    if (property != (const char *) NULL)
      (void) DeleteImageRegistry(property);
    next_image=GetNextImageInList(next_image);
  }
  return(status);
}
ModuleExport MagickBooleanType WritePSDLayers(Image * image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  /*
    Public entry point: honor the coder security policy, then delegate.
    When the policy forbids writing PSD layers we silently report success.
  */
  if (IsRightsAuthorized(CoderPolicyDomain,WritePolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return(WritePSDLayersInternal(image,image_info,psd_info,(size_t *) NULL,
    exception));
}
/*
  Write `image` (and any following images as layers) in Adobe Photoshop
  format: file header, color-mode data (palette), image resources (8BIM,
  resolution, ICC), layer section, and the merged composite channels.
  Returns MagickTrue on success.
*/
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  const StringInfo
    *icc_profile;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  ssize_t
    i;

  size_t
    length,
    num_channels;

  StringInfo
    *bim_profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  /* Version 2 (PSB) is forced for explicit PSB output or oversize images. */
  psd_info.version=1;
  if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
      (image->columns > 30000) || (image->rows > 30000))
    psd_info.version=2;
  (void) WriteBlob(image,4,(const unsigned char *) "8BPS");
  (void) WriteBlobMSBShort(image,psd_info.version); /* version */
  for (i=1; i <= 6; i++)
    (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */
  /* Channel count depends on gray/palette/RGB/CMYK and the alpha channel. */
  if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
      (SetImageGray(image,exception) != MagickFalse))
    num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
  else
    if ((image_info->type != TrueColorType) &&
        (image_info->type != TrueColorAlphaType) &&
        (image->storage_class == PseudoClass))
      num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
    else
      {
        if (image->storage_class == PseudoClass)
          (void) SetImageStorageClass(image,DirectClass,exception);
        if (image->colorspace != CMYKColorspace)
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL);
        else
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL);
      }
  (void) WriteBlobMSBShort(image,(unsigned short) num_channels);
  (void) WriteBlobMSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobMSBLong(image,(unsigned int) image->columns);
  if (IsImageGray(image) != MagickFalse)
    {
      MagickBooleanType
        monochrome;

      /*
        Write depth & mode.
      */
      monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
        MagickTrue : MagickFalse;
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
    }
  else
    {
      (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
        PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
      if (((image_info->colorspace != UndefinedColorspace) ||
           (image->colorspace != CMYKColorspace)) &&
          (image_info->colorspace != CMYKColorspace))
        {
          /* Anything that is not explicitly CMYK is written as sRGB. */
          (void) TransformImageColorspace(image,sRGBColorspace,exception);
          (void) WriteBlobMSBShort(image,(unsigned short)
            (image->storage_class == PseudoClass ? IndexedMode : RGBMode));
        }
      else
        {
          if (image->colorspace != CMYKColorspace)
            (void) TransformImageColorspace(image,CMYKColorspace,exception);
          (void) WriteBlobMSBShort(image,CMYKMode);
        }
    }
  if ((IsImageGray(image) != MagickFalse) ||
      (image->storage_class == DirectClass) || (image->colors > 256))
    (void) WriteBlobMSBLong(image,0);  /* no color-mode data */
  else
    {
      /*
        Write PSD raster colormap.
      */
      /* Fixed 768-byte palette: 256 reds, 256 greens, 256 blues. */
      (void) WriteBlobMSBLong(image,768);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].red)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].green)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].blue)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
    }
  /*
    Image resource block.
  */
  length=28; /* size of the 0x03ED resolution resource written below */
  bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
  icc_profile=GetImageProfile(image,"icc");
  if (bim_profile != (StringInfo *) NULL)
    {
      /* Work on a clone: strip resources the writer re-emits itself. */
      bim_profile=CloneStringInfo(bim_profile);
      if (icc_profile != (StringInfo *) NULL)
        RemoveICCProfileFromResourceBlock(bim_profile);
      RemoveResolutionFromResourceBlock(bim_profile);
      length+=PSDQuantum(GetStringInfoLength(bim_profile));
    }
  if (icc_profile != (const StringInfo *) NULL)
    length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
  (void) WriteBlobMSBLong(image,(unsigned int) length);
  WriteResolutionResourceBlock(image);
  if (bim_profile != (StringInfo *) NULL)
    {
      (void) WriteBlob(image,GetStringInfoLength(bim_profile),
        GetStringInfoDatum(bim_profile));
      bim_profile=DestroyStringInfo(bim_profile);
    }
  if (icc_profile != (StringInfo *) NULL)
    {
      /* ICC profile resource (id 0x040F), padded to even length. */
      (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
      (void) WriteBlobMSBShort(image,0x0000040F);
      (void) WriteBlobMSBShort(image,0);
      (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
        icc_profile));
      (void) WriteBlob(image,GetStringInfoLength(icc_profile),
        GetStringInfoDatum(icc_profile));
      if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile)))
        (void) WriteBlobByte(image,0);
    }
  if (status != MagickFalse)
    {
      const char
        *option;

      CompressionType
        compression;

      MagickOffsetType
        size_offset;

      size_t
        size;

      /* Reserve the layer+mask section size; patched after the layers. */
      size_offset=TellBlob(image);
      (void) SetPSDSize(&psd_info,image,0);
      option=GetImageOption(image_info,"psd:write-layers");
      if (IsStringFalse(option) != MagickTrue)
        {
          status=WritePSDLayersInternal(image,image_info,&psd_info,&size,
            exception);
          (void) WritePSDSize(&psd_info,image,size+
            (psd_info.version == 1 ? 8 : 12),size_offset);
          (void) WriteBlobMSBLong(image,0); /* user mask data */
        }
      /*
        Write composite image.
      */
      compression=image->compression;
      if (image_info->compression != UndefinedCompression)
        image->compression=image_info->compression;
      /* The merged composite does not support Zip; fall back to RLE. */
      if (image->compression == ZipCompression)
        image->compression=RLECompression;
      if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse,
          exception) == 0)
        status=MagickFalse;
      image->compression=compression;
    }
  (void) CloseBlob(image);
  return(status);
}
|
DataArray.h | // Copyright (c) 2013 Vasili Baranau
// Distributed under the MIT software license
// See the accompanying file License.txt or http://opensource.org/licenses/MIT
#ifndef ImageProcessing_Model_Headers_DataArray_h
#define ImageProcessing_Model_Headers_DataArray_h
#include "Image.h"
#include "Core/Headers/StlUtilities.h"
#include "Core/Headers/Path.h"
#include "Core/Headers/MemoryUtility.h"
#include "ImageProcessing/Model/Headers/Config.h"
#include "ImageProcessing/Model/Headers/Constants.h"
#include "ImageProcessing/Services/Headers/Serializer.h"
#include "ImageProcessing/Model/Headers/IDataArray.h"
namespace Model
{
template<class TData>
class DataArray : public virtual IDataArray
{
public:
TData defaultValue;
private:
// Const variables
const Config* config;
const std::vector<ActiveArea>* activeAreas;
std::string workingPath;
std::vector<std::string> initialImageFilePaths;
std::vector<std::string> imageFilePaths;
TData*** dataArray;
// Dynamic variables
ActiveArea currentActiveArea;
bool imageChanged;
bool activeAreaLoaded;
public:
DataArray()
{
    // Start detached: no 3D buffer is owned yet and unset pixels read as zero.
    dataArray = NULL;
    defaultValue = 0;
}
// Binds this array to the caller-owned config and active-area list, resets any
// previous state, discovers the image files under workingPath, and allocates
// the pixel buffer.  Fix: "config = &currentConfig;" and
// "activeAreas = &currentActiveAreas;" had been corrupted by an HTML-entity
// round-trip ("&curren" rendered as the currency sign), which does not compile.
OVERRIDE void Initialize(const Config& currentConfig, std::string currentWorkingPath, const std::vector<ActiveArea>& currentActiveAreas)
{
    // Store addresses of caller-owned objects; they must outlive this array.
    config = &currentConfig;
    activeAreas = &currentActiveAreas;
    workingPath = currentWorkingPath;
    Clear();
    FillImagePaths();
    imageChanged = false;
    activeAreaLoaded = false;
    AllocateMemory();
}
// Returns a copy of the directory this array reads/writes its images in.
std::string GetWorkingPath() const
{
    return workingPath;
}
// Releases the pixel buffer (and any other state) via Clear().
virtual ~DataArray()
{
    Clear();
}
// Size of one stored sample; determined entirely by the template argument.
OVERRIDE int GetBytesPerPixel() const
{
    return static_cast<int>(sizeof(TData));
}
// Convenience overload: unpack the coordinate triple and delegate to the
// scalar version.
inline TData GetPixel(const Core::DiscreteSpatialVector& position) const
{
    const int x = position[Core::Axis::X];
    const int y = position[Core::Axis::Y];
    const int z = position[Core::Axis::Z];
    return GetPixel(x, y, z);
}
inline TData GetPixel(int x, int y, int z) const
{
    // Translate global coordinates into offsets local to the currently
    // loaded area (margins included).
    int localX = x - currentActiveArea.boxWithMargins.leftCorner[Core::Axis::X];
    int localY = y - currentActiveArea.boxWithMargins.leftCorner[Core::Axis::Y];
    int localZ = z - currentActiveArea.boxWithMargins.leftCorner[Core::Axis::Z];

    // z is the major (slowest-varying) dimension. It's no mistake: 2D images
    // are row-major (as used in C++), and stacking them makes z the major axis.
    return dataArray[localZ][localX][localY];
}
// inline TData& GetPixelReference(const Core::DiscreteSpatialVector& position) const
// {
// return GetPixelReference(position[Core::Axis::X], position[Core::Axis::Y], position[Core::Axis::Z]);
// }
//
// inline TData& GetPixelReference(int x, int y, int z) const
// {
// int localXIndex = x - currentActiveArea.boxWithMargins.leftCorner[Core::Axis::X];
// int localYIndex = y - currentActiveArea.boxWithMargins.leftCorner[Core::Axis::Y];
// int localZIndex = z - currentActiveArea.boxWithMargins.leftCorner[Core::Axis::Z];
//
// // The first dimension is z. It's no mistake. 2D images have row-major order (as used in C++)
// // but adding them together makes z axis the major one.
// return dataArray[localZIndex][localXIndex][localYIndex];
// }
inline void SetPixel(const Core::DiscreteSpatialVector& position, TData value)
{
SetPixel(position[Core::Axis::X], position[Core::Axis::Y], position[Core::Axis::Z], value);
}
inline void SetPixel(int x, int y, int z, TData value)
{
imageChanged = true;
int localXIndex = x - currentActiveArea.boxWithMargins.leftCorner[Core::Axis::X];
int localYIndex = y - currentActiveArea.boxWithMargins.leftCorner[Core::Axis::Y];
int localZIndex = z - currentActiveArea.boxWithMargins.leftCorner[Core::Axis::Z];
dataArray[localZIndex][localXIndex][localYIndex] = value;
}
// Saves the current active area to disk, loads the new active area from disk. If there are no images for the new active area, initializes the missing values with zeros
OVERRIDE void ChangeActiveArea(int activeAreaIndex)
{
Core::Path::EnsureDirectory(workingPath);
const Model::ActiveArea& newActiveArea = activeAreas->at(activeAreaIndex);
// If the current active area covers the entire image, return
if (activeAreaLoaded && currentActiveArea.boxWithMargins.boxSize == config->imageSize)
{
return;
}
// If the new active area is the same as the old one
if (activeAreaLoaded && newActiveArea.boxWithMargins == currentActiveArea.boxWithMargins)
{
return;
}
Core::Path::EnsureDirectory(workingPath);
// Save the current active area to the disk, if necessary
if (activeAreaLoaded && imageChanged)
{
WriteCurrentActiveAreaSafe();
}
imageChanged = false;
ReindexMemory(currentActiveArea, newActiveArea);
currentActiveArea = newActiveArea;
ReadCurrentActiveAreaSafe();
activeAreaLoaded = true;
}
// Writes current active area to disk, if necessary
OVERRIDE void WriteCurrentActiveArea() const
{
if (activeAreaLoaded && imageChanged)
{
Core::Path::EnsureDirectory(workingPath);
WriteCurrentActiveAreaSafe();
}
}
OVERRIDE void Clear()
{
if (dataArray != NULL)
{
Core::MemoryUtility::Free3DArray(dataArray);
dataArray = NULL;
}
}
private:
void ReadCurrentActiveAreaSafe()
{
// Load the next active area from disk
int startImageIndex = currentActiveArea.boxWithMargins.leftCorner[Core::Axis::Z];
int endImageIndex = currentActiveArea.boxWithMargins.exclusiveRightCorner[Core::Axis::Z];
printf("Reading images %d - %d from folder %s...\n", startImageIndex + 1, endImageIndex, workingPath.c_str());
#pragma omp parallel for schedule(static)
for (int i = startImageIndex; i < endImageIndex; ++i)
{
if ((i - startImageIndex + 1) % 10 == 0)
{
printf("Reading image %d / %d...\n", i - startImageIndex + 1, endImageIndex - startImageIndex);
}
std::string imagePath = imageFilePaths[i];
if (!Core::Path::Exists(imagePath))
{
// Copy image from the initial folder path (to preserve all the metadata)
Core::Path::CopyFile(initialImageFilePaths[i], imagePath);
// Prepare the image and fill in the entire image with default values
ResizeAndResetImage(imagePath, defaultValue);
// Fill in the image in memory with default values
// NOTE: I may proceed to the function CopySubImage below as well, but it's a little slower
int localZIndex = i - currentActiveArea.boxWithMargins.leftCorner[Core::Axis::Z];
int elementsCount = currentActiveArea.boxWithMargins.boxSize[Core::Axis::X] * currentActiveArea.boxWithMargins.boxSize[Core::Axis::Y];
TData* valuesArray = dataArray[localZIndex][0]; // NOTE: I know that Allocate3DArray allocates the data in last dimension sequentially
std::fill(valuesArray, valuesArray + elementsCount, defaultValue);
}
else
{
Rectangle imageRectangle(currentActiveArea.boxWithMargins);
TData** currentImageActiveArea = GetCurrentImageActiveArea(i);
LoadRectangle(imagePath, imageRectangle, currentImageActiveArea);
}
}
}
void WriteCurrentActiveAreaSafe() const
{
int startImageIndex = currentActiveArea.boxWithMargins.leftCorner[Core::Axis::Z];
int endImageIndex = currentActiveArea.boxWithMargins.exclusiveRightCorner[Core::Axis::Z];
printf("Writing images %d - %d to folder %s...\n", startImageIndex + 1, endImageIndex, workingPath.c_str());
#pragma omp parallel for schedule(static)
for (int i = startImageIndex; i < endImageIndex; ++i)
{
if ((i - startImageIndex + 1) % 10 == 0)
{
printf("Writing image %d / %d...\n", i - startImageIndex + 1, endImageIndex - startImageIndex);
}
std::string imagePath = imageFilePaths[i];
if (!Core::Path::Exists(imagePath))
{
// Copy an initial image from the initial images folder
Core::Path::CopyFile(initialImageFilePaths[i], imageFilePaths[i]);
}
Rectangle imageRectangle(currentActiveArea.boxWithMargins);
TData** currentImageActiveArea = GetCurrentImageActiveArea(i);
SaveRectangle(imagePath, imageRectangle, currentImageActiveArea);
}
}
void AllocateMemory()
{
// The first dimension is z. It's no mistake. 2D images have row-major order (as used in C++)
// but adding them together makes z axis the major one.
size_t maxZSize = 0;
size_t maxZXSize = 0;
size_t maxActiveAreaSize = 0;
// Find max active area size, allocate memory
for (size_t i = 0; i < activeAreas->size(); ++i)
{
const Core::DiscreteSpatialVector& boxSize = activeAreas->at(i).boxWithMargins.boxSize;
size_t currentAreaSize = Core::VectorUtilities::GetProductGeneric<Core::DiscreteSpatialVector, size_t>(boxSize);
if (currentAreaSize > maxActiveAreaSize)
{
maxActiveAreaSize = currentAreaSize;
}
size_t currentZXSize = static_cast<size_t>(boxSize[Core::Axis::Z]) * boxSize[Core::Axis::X];
if (currentZXSize > maxZXSize)
{
maxZXSize = currentZXSize;
}
if (static_cast<size_t>(boxSize[Core::Axis::Z]) > maxZSize)
{
maxZSize = boxSize[Core::Axis::Z];
}
}
dataArray = new TData** [maxZSize];
dataArray[0] = new TData* [maxZXSize];
dataArray[0][0] = new TData[maxActiveAreaSize];
}
void ResizeAndResetImage(std::string imagePath, TData defaultValue) const
{
Image<TData> currentImage;
currentImage.Load(imagePath);
currentImage.EnsureBitsPerPixel();
currentImage.ResizeIfNecessary(config->imageSize[Core::Axis::X], config->imageSize[Core::Axis::Y]);
currentImage.Fill(defaultValue);
currentImage.Save(imagePath);
}
void LoadRectangle(std::string imagePath, const Model::Rectangle& rectangle, TData** values) const
{
Image<TData> currentImage;
currentImage.Load(imagePath);
currentImage.CheckBitsPerPixel();
currentImage.CheckSize(config->imageSize[Core::Axis::X], config->imageSize[Core::Axis::Y]);
currentImage.ReadRectangle(rectangle, values);
}
void SaveRectangle(std::string imagePath, const Model::Rectangle& rectangle, TData** values) const
{
Image<TData> currentImage;
currentImage.Load(imagePath);
currentImage.EnsureBitsPerPixel();
currentImage.ResizeIfNecessary(config->imageSize[Core::Axis::X], config->imageSize[Core::Axis::Y]);
currentImage.WriteRectangle(rectangle, values);
currentImage.Save(imagePath);
}
TData** GetCurrentImageActiveArea(int zIndex) const
{
int localZIndex = zIndex - currentActiveArea.boxWithMargins.leftCorner[Core::Axis::Z];
TData** currentImageActiveArea = dataArray[localZIndex];
return currentImageActiveArea;
}
void ReindexMemory(const ActiveArea& previousActiveArea, const ActiveArea& currentActiveArea)
{
bool existingImageSizeChanges = activeAreaLoaded && (previousActiveArea.boxWithMargins.boxSize != currentActiveArea.boxWithMargins.boxSize);
if (!activeAreaLoaded || existingImageSizeChanges)
{
const Core::DiscreteSpatialVector& boxSize = currentActiveArea.boxWithMargins.boxSize;
// The first dimension is z. It's no mistake. 2D images have row-major order (as used in C++)
// but adding them together makes z axis the major one.
Core::MemoryUtility::ReindexMemory<TData>(boxSize[2], boxSize[0], boxSize[1], dataArray);
}
}
void FillImagePaths()
{
// read final images
Services::Serializer::FillImagePaths(workingPath, &imageFilePaths);
// if their count is OK
size_t expectedImageCount = static_cast<size_t>(config->imageSize[Core::Axis::Z]);
if (imageFilePaths.size() == expectedImageCount)
{
// copy them to the initialImageFilePaths
initialImageFilePaths.resize(expectedImageCount);
Core::StlUtilities::Copy(imageFilePaths, &initialImageFilePaths);
}
// if it is zero
else if (imageFilePaths.size() == 0)
{
// read initial images
std::string initialImagesFolder = Core::Path::Append(config->baseFolder, INITIAL_IMAGES_FOLDER_NAME);
Services::Serializer::FillImagePaths(initialImagesFolder, &initialImageFilePaths);
// if their count is not OK, throw exception
if (initialImageFilePaths.size() != expectedImageCount)
{
// throw exception
throw Core::InvalidOperationException("Working folder contains no images and initial folder contains wrong number of images");
}
// copy them to the image path, change the folder
ChangeFolder(initialImagesFolder, workingPath, initialImageFilePaths, &imageFilePaths);
}
// if it is not zero
else
{
// throw exception
throw Core::InvalidOperationException("Number of images is incorrect: it is neither zero nor the expected number of images");
}
}
void ChangeFolder(std::string sourceFolder, std::string targetFolder, const std::vector<std::string>& sourcePaths, std::vector<std::string>* targetPaths) const
{
std::vector<std::string>& targetPathsRef = *targetPaths;
targetPathsRef.resize(sourcePaths.size());
for (size_t i = 0; i < targetPathsRef.size(); ++i)
{
std::string sourcePath = sourcePaths[i];
std::string targetName = Core::Path::GetFileName(sourcePath);
targetPathsRef[i] = Core::Path::Append(targetFolder, targetName);
}
}
DISALLOW_COPY_AND_ASSIGN(DataArray);
};
}
#endif /* ImageProcessing_Model_Headers_DataArray_h */
|
GB_unop__bnot_int8_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__bnot_int8_int8)
// op(A') function: GB (_unop_tran__bnot_int8_int8)
// C type: int8_t
// A type: int8_t
// cast: int8_t cij = aij
// unaryop: cij = ~(aij)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ~(x) ;
// casting
#define GB_CAST(z, aij) \
int8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = aij ; \
Cx [pC] = ~(z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BNOT || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__bnot_int8_int8)
(
    int8_t *Cx,             // Cx and Ax may be aliased
    const int8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    if (Ab == NULL)
    {
        // full/sparse case: every one of the anz entries is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            Cx [k] = (int8_t) (~(Ax [k])) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (Ab [k])
            {
                Cx [k] = (int8_t) (~(Ax [k])) ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__bnot_int8_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The shared transpose template expands here and uses the GB_CAST_OP
    // macro defined above to apply cij = ~(aij) while transposing A into C.
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
concom.c | /**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */
/**********************************************************************************************/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <sys/time.h>
#include "app-desc.h"
#include "bots.h"
// bots_arg_size == Number of nodes
// bots_arg_size_1 == Maximum number of neighbors per node
// bots_arg_size_2 == Number of links in the entire graph
node *nodes;
int *visited, *components;
// Checks to see if two nodes can be linked
// Returns 1 when an edge N1-N2 may be added: the nodes are distinct, both
// are below the per-node neighbor limit, and the edge does not already exist.
int linkable(int N1, int N2) {
   int k;
   if (N1 == N2) return 0;
   if (nodes[N1].n >= bots_arg_size_1 || nodes[N2].n >= bots_arg_size_1) return 0;
   for (k = 0; k < nodes[N1].n; k++) {
      if (nodes[N1].neighbor[k] == N2) return 0;
   }
   return 1;
}
// Allocates and creates a graph with random links between nodes
// also allocates visited and components vectors
// Allocates the graph, visited and components arrays, then attempts to wire
// up bots_arg_size_2 random links (pairs that fail linkable() are dropped).
void initialize() {
   int link, first, second, slot1, slot2;
   double r;

   nodes = (node *) malloc(bots_arg_size * sizeof(node));
   visited = (int *) malloc(bots_arg_size * sizeof(int));
   components = (int *) malloc(bots_arg_size * sizeof(int));

   /* every node starts with an empty neighbor list */
   for (link = 0; link < bots_arg_size; link++) {
      nodes[link].n = 0;
      nodes[link].neighbor = (int *) malloc(bots_arg_size_1 * sizeof(int));
   }

   /* draw both endpoints for each prospective link, then connect if legal */
   for (link = 0; link < bots_arg_size_2; link++) {
      r = rand() / (double) RAND_MAX;
      first = (int) ((bots_arg_size-1) * r);
      r = rand() / (double) RAND_MAX;
      second = (int) ((bots_arg_size-1) * r);
      if (linkable(first, second)) {
         slot1 = nodes[first].n;
         slot2 = nodes[second].n;
         nodes[first].neighbor[slot1] = second;
         nodes[second].neighbor[slot2] = first;
         nodes[first].n += 1;
         nodes[second].n += 1;
      }
   }
}
// Writes the number of CCs
// Prints the component count for graph n (plus per-component sizes when
// running in verbose mode).
void write_outputs(int n, int cc) {
   int c;
   printf("Graph %d, Number of components %d\n", n, cc);
   if (bots_verbose_mode) {
      for (c = 0; c < cc; c++) {
         printf("Component %d Size: %d\n", c, components[c]);
      }
   }
}
// Marks a node and all its neighbors as part of the CC
void CC_par (int i, int cc)
{
int j, n;
/* if node has not been visited */
if (visited[i] == 0) {
/* add node to current component */
if (bots_verbose_mode) printf("Adding node %d to component %d\n", i, cc);
#pragma omp critical
{
visited[i] = 1;
components[cc]++;
}
/* add each neighbor's subtree to the current component */
for (j = 0; j < nodes[i].n; j++)
{
n = nodes[i].neighbor[j];
#pragma omp task untied firstprivate (i,cc)
{CC_par(n, cc);}
}
#pragma omp taskwait
}
}
// Sequential depth-first marking of node i and its whole subtree into
// component cc.
void CC_seq (int i, int cc)
{
   int idx;

   if (visited[i] != 0) return;   /* already assigned to some component */

   if (bots_verbose_mode) printf("Adding node %d to component %d\n", i, cc);
   visited[i] = 1;
   components[cc]++;

   /* recurse into every neighbor's subtree */
   for (idx = 0; idx < nodes[i].n; idx++)
      CC_seq(nodes[i].neighbor[idx], cc);
}
// Resets the visited flags and component sizes before a traversal.
void cc_init()
{
   int k;
   for (k = 0; k < bots_arg_size; k++) {
      visited[k] = 0;
      components[k] = 0;
   }
}
/* Parallel connected-components driver: every still-unvisited node seeds a
 * new component.  A single outer untied task scans the node list; each seed
 * spawns a CC_par task and the taskwait ensures the whole component is
 * marked before *cc advances (otherwise later seeds would reuse the index). */
void cc_par(int *cc)
{
   int i;
   *cc = 0;
   /* for all nodes ... unvisited nodes start a new component */
   #pragma omp parallel
   #pragma omp single
   #pragma omp task untied
   for (i = 0; i < bots_arg_size; i++)
   {
      if (visited[i] == 0)
      {
         #pragma omp task untied firstprivate (i,cc)
         {CC_par(i, *cc);}
         /* wait until the component rooted at i is fully explored */
         #pragma omp taskwait
         (*cc)++;
      }
   }
}
// Sequential connected-components driver: every unvisited node seeds a new
// component and is expanded depth-first.
// FIX: this sequential version previously invoked CC_par, the variant built
// around OpenMP task/critical pragmas meant for the parallel run; it now
// uses the sequential traversal CC_seq as intended.
void cc_seq(int *cc)
{
   int i;
   (*cc) = 0;
   /* for all nodes ... unvisited nodes start a new component */
   for (i = 0; i < bots_arg_size; i++)
   {
      if (visited[i] == 0)
      {
         CC_seq(i, *cc);
         (*cc)++;
      }
   }
}
// Verifies that the sequential and parallel runs found the same number of
// connected components.
int cc_check(int ccs, int ccp)
{
   if (bots_verbose_mode) fprintf(stdout, "Sequential = %d CC, Parallel =%d CC\n", ccs, ccp);
   return (ccs == ccp) ? BOTS_RESULT_SUCCESSFUL : BOTS_RESULT_UNSUCCESSFUL;
}
|
Parallelizer.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H
namespace Eigen {
namespace internal {
/** \internal */
/** \internal
  * Stores or retrieves the user-requested Eigen thread cap.
  * SetAction writes *v into the persistent cap; GetAction reads it back,
  * falling back to OpenMP's maximum when the cap was never set. */
inline void manage_multi_threading(Action action, int* v)
{
  // -1 means "no explicit cap has been set yet".
  static EIGEN_UNUSED int m_maxThreads = -1;

  switch(action)
  {
    case SetAction:
      eigen_internal_assert(v!=0);
      m_maxThreads = *v;
      break;
    case GetAction:
      eigen_internal_assert(v!=0);
      #ifdef EIGEN_HAS_OPENMP
      *v = (m_maxThreads>0) ? m_maxThreads : omp_get_max_threads();
      #else
      *v = 1;
      #endif
      break;
    default:
      eigen_internal_assert(false);
  }
}
}
/** Must be call first when calling Eigen from multiple threads */
inline void initParallel()
{
  // Touch the lazily-initialized static state (thread cap and cache sizes)
  // once, so later concurrent calls only read already-constructed statics.
  int nbt;
  internal::manage_multi_threading(GetAction, &nbt);
  std::ptrdiff_t l1, l2;
  internal::manage_caching_sizes(GetAction, &l1, &l2);
}
/** \returns the max number of threads reserved for Eigen
  * \sa setNbThreads */
inline int nbThreads()
{
  int count;
  internal::manage_multi_threading(GetAction, &count);
  return count;
}
/** Sets the max number of threads reserved for Eigen
  * (a value <= 0 restores the OpenMP default, see manage_multi_threading)
  * \sa nbThreads */
inline void setNbThreads(int v)
{
  internal::manage_multi_threading(SetAction, &v);
}
namespace internal {
// Per-thread bookkeeping for the parallel GEMM kernel.
template<typename Index> struct GemmParallelInfo
{
  GemmParallelInfo() : sync(-1), users(0), rhs_start(0), rhs_length(0) {}

  // NOTE(review): `volatile` is used here as an inter-thread signaling flag
  // between OpenMP threads; it provides no atomicity or ordering guarantees.
  int volatile sync;
  int volatile users;

  // Slice of the rhs this thread packs: [rhs_start, rhs_start+rhs_length).
  Index rhs_start;
  Index rhs_length;
};
// Runs the product functor either sequentially or split across OpenMP
// threads. `func(r0,nr,c0,nc[,info])` computes the given block of the
// destination; `transpose` indicates a row-major destination, handled by
// swapping the roles of rows and columns.
template<bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpose)
{
  // TODO when EIGEN_USE_BLAS is defined,
  // we should still enable OMP for other scalar types
#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)
  // FIXME the transpose variable is only needed to properly split
  // the matrix product when multithreading is enabled. This is a temporary
  // fix to support row-major destination matrices. This whole
  // parallelizer mechanism has to be redisigned anyway.
  EIGEN_UNUSED_VARIABLE(transpose);
  func(0,rows, 0,cols);
#else

  // Dynamically check whether we should enable or disable OpenMP.
  // The conditions are:
  //  - the max number of threads we can create is greater than 1
  //  - we are not already in a parallel code
  //  - the sizes are large enough

  // 1- are we already in a parallel session?
  // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
  if((!Condition) || (omp_get_num_threads()>1))
    return func(0,rows, 0,cols);

  Index size = transpose ? cols : rows;

  // 2- compute the maximal number of threads from the size of the product:
  // FIXME this has to be fine tuned
  Index max_threads = std::max<Index>(1,size / 32);

  // 3 - compute the number of threads we are going to use
  Index threads = std::min<Index>(nbThreads(), max_threads);

  if(threads==1)
    return func(0,rows, 0,cols);

  Eigen::initParallel();
  func.initParallelSession();

  if(transpose)
    std::swap(rows,cols);

  // Block sizes are rounded down to multiples of 4 (cols) / 8 (rows);
  // the last thread absorbs the remainder below.
  Index blockCols = (cols / threads) & ~Index(0x3);
  Index blockRows = (rows / threads) & ~Index(0x7);

  GemmParallelInfo<Index>* info = new GemmParallelInfo<Index>[threads];

  #pragma omp parallel for schedule(static,1) num_threads(threads)
  for(Index i=0; i<threads; ++i)
  {
    Index r0 = i*blockRows;
    Index actualBlockRows = (i+1==threads) ? rows-r0 : blockRows;

    Index c0 = i*blockCols;
    Index actualBlockCols = (i+1==threads) ? cols-c0 : blockCols;

    info[i].rhs_start = c0;
    info[i].rhs_length = actualBlockCols;

    if(transpose)
      func(0, cols, r0, actualBlockRows, info);
    else
      func(r0, actualBlockRows, 0,cols, info);
  }

  delete[] info;
#endif
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_PARALLELIZER_H
|
openmp_wrapper.h | #ifndef LIGHTGBM_OPENMP_WRAPPER_H_
#define LIGHTGBM_OPENMP_WRAPPER_H_
#ifdef _OPENMP
#include <omp.h>
#include <exception>
#include <stdexcept>
#include <mutex>
#include <vector>
#include <memory>
#include "log.h"
// Captures the first exception thrown inside an OpenMP worker and rethrows
// it on the owning thread (from ReThrow, or from the destructor at scope
// exit). Used via the OMP_*_EX macros below.
class ThreadExceptionHelper {
 public:
  ThreadExceptionHelper() {
    ex_ptr_ = nullptr;
  }
  // Rethrowing from a destructor is deliberate here: it propagates the
  // captured worker exception once the parallel region has ended.
  ~ThreadExceptionHelper() {
    ReThrow();
  }
  void ReThrow() {
    if (ex_ptr_ != nullptr) {
      std::rethrow_exception(ex_ptr_);
    }
  }
  void CaptureException() {
    // only catch first exception.
    // NOTE(review): this first check reads ex_ptr_ without holding lock_
    // while another thread may be writing it — formally a data race; the
    // locked re-check below is what actually guarantees "first wins".
    if (ex_ptr_ != nullptr) { return; }
    std::unique_lock<std::mutex> guard(lock_);
    if (ex_ptr_ != nullptr) { return; }
    ex_ptr_ = std::current_exception();
  }

 private:
  std::exception_ptr ex_ptr_;  // first captured exception, or null
  std::mutex lock_;            // guards the write of ex_ptr_
};
#define OMP_INIT_EX() ThreadExceptionHelper omp_except_helper
#define OMP_LOOP_EX_BEGIN() try {
#define OMP_LOOP_EX_END() } \
catch(std::exception& ex) { Log::Warning(ex.what()); omp_except_helper.CaptureException(); } \
catch(...) { omp_except_helper.CaptureException(); }
#define OMP_THROW_EX() omp_except_helper.ReThrow()
#else
#ifdef _MSC_VER
#pragma warning( disable : 4068 ) // disable unknown pragma warning
#endif
#ifdef __cplusplus
extern "C" {
#endif
/** Fall here if no OPENMP support, so just
simulate a single thread running.
All #pragma omp should be ignored by the compiler **/
// No-op: thread count cannot be changed in a single-threaded build.
inline void omp_set_num_threads(int) {}
// No-op: nested parallelism is meaningless without OpenMP.
inline void omp_set_nested(int) {}
// A single-threaded "team" always has exactly one thread...
inline int omp_get_num_threads() {return 1;}
// ...whose index is always 0.
inline int omp_get_thread_num() {return 0;}
#ifdef __cplusplus
}; // extern "C"
#endif
#define OMP_INIT_EX()
#define OMP_LOOP_EX_BEGIN()
#define OMP_LOOP_EX_END()
#define OMP_THROW_EX()
#endif
#endif /* LIGHTGBM_OPENMP_WRAPPER_H_ */
|
sum_firstStrategy.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/*
 * Reads `quantity` floats from stdin and sums them with a static block
 * distribution: each thread adds a contiguous chunk; threads whose id is
 * below the remainder take one extra element so the load stays balanced.
 *
 * Fixes:
 *  - sumtot was updated by every thread with no synchronization (data
 *    race); the accumulation is now an atomic update.
 *  - threads and rest were written concurrently by all threads while
 *    shared; they are now private (every thread recomputes the same value).
 *  - scanf results and the allocation are checked.
 */
int main()
{
    /*
    -quantity: quantity of numbers
    -threads: thread team working
    -localQuantity: amount of numbers each thread must add up
    -rest: Calculated to check if it is exactly divisible by the number of threads
    -step: It allows each core to know which elements it needs to deal with
    -*container: container of numbers
    */
    int i, quantity, threads, localQuantity, rest, id, step;
    float sumtot, sum, *container;

    sumtot = 0;
    printf("How many numbers do you want to sum?\n");
    if (scanf("%d", &quantity) != 1 || quantity <= 0)
    {
        fprintf(stderr, "Invalid quantity\n");
        return 1;
    }

    /* Dynamic allocation based on how many numbers the user has chosen */
    container = (float *)calloc(quantity, sizeof(float));
    if (container == NULL)
    {
        fprintf(stderr, "Out of memory\n");
        return 1;
    }

    printf("Please enter a number to sum:\n");
    for (i = 0; i < quantity; i++)
    {
        if (scanf("%f", &container[i]) != 1)
        {
            fprintf(stderr, "Invalid number\n");
            free(container);
            return 1;
        }
    }

    #pragma omp parallel private(sum, localQuantity, i, id, step, threads, rest) shared(sumtot, quantity, container)
    {
        threads = omp_get_num_threads();
        localQuantity = quantity / threads;
        id = omp_get_thread_num();
        rest = quantity % threads;

        /* If the amount of numbers to be added is not exactly divisible by
           the number of threads, the low-id threads each take one extra
           element; `step` shifts the later chunks past those extras. */
        if (id < rest) {
            localQuantity++;
            step = 0;
        }
        else {
            step = rest;
        }

        sum = 0;
        for (i = 0; i < localQuantity; i++)
        {
            sum = sum + container[i + localQuantity * id + step];
        }

        /* FIX: the unsynchronized "sumtot = sumtot + sum" was a data race */
        #pragma omp atomic
        sumtot += sum;
    }

    printf("Total sum: %f\n", sumtot);
    free(container);
    return 0;
}
|
mandelbrot.c | #include <stdlib.h>
#include <stdio.h>
#include <inttypes.h>
#include <stdbool.h>
#include <immintrin.h>
#include <assert.h>
#ifdef __APPLE__
#include <SDL2/SDL.h>
#else
#include "SDL.h"
#endif
#define WIDTH 800
#define ITERATIONS 350 // TODO: should depend on s
static double const ITERATIONS_TO_COLOR = 255.0 / (double) ITERATIONS;
static __m256d _4d;
static __m256i _7i;
static __m256i _16i;
static __m256i _ITERi;
// Computes the Mandelbrot escape iteration for 4 points at once (AVX2) and
// writes 4 packed colors to out[0..3]. a0s/b0s hold the real/imaginary
// parts of the 4 starting points.
static void color256(__m256d const a0s, __m256d const b0s, uint32_t * const out) {
    __m256d as = a0s, bs = b0s;
    // cs holds the escape iteration per lane; it stays at ITERATIONS until
    // the lane's |z|^2 first exceeds 4.
    __m256i cs = _ITERi;
    __m256d asqr = _mm256_mul_pd(as, as);
    __m256d bsqr = _mm256_mul_pd(bs, bs);
    for (uint64_t i = 0; i < ITERATIONS; i++) {
        // lanes whose |z|^2 > 4 on this iteration
        __m256i const mask4 = _mm256_castpd_si256
            (_mm256_cmp_pd(_mm256_add_pd(asqr, bsqr), _4d, _CMP_GT_OQ));
        // lanes that have not escaped yet (still at the sentinel value)
        __m256i const maskITER = _mm256_cmpeq_epi64(cs, _ITERi);
        // record i only for lanes escaping right now
        __m256i const mask = _mm256_and_si256(maskITER, mask4);
        __m256i const newcs = _mm256_set1_epi64x(i);
        cs = _mm256_blendv_epi8(cs, newcs, mask);
        // stop early once every lane has escaped
        if (_mm256_testz_si256(maskITER, maskITER))
            break;
        // z = z^2 + c, with the squares reused from the escape test
        bs = _mm256_mul_pd(as, bs);
        bs = _mm256_add_pd(_mm256_add_pd(bs, bs), b0s);
        as = _mm256_add_pd(_mm256_sub_pd(asqr, bsqr), a0s);
        asqr = _mm256_mul_pd(as, as);
        bsqr = _mm256_mul_pd(bs, bs);
    }
    // scale iteration counts to 0..255 (done in double, reinterpreted back)
    cs = _mm256_castpd_si256(_mm256_mul_pd(_mm256_castsi256_pd(cs), _mm256_set1_pd(ITERATIONS_TO_COLOR)));
    // spread the scalar into several byte positions to form an RGB-ish color
    cs = _mm256_add_epi64(cs, _mm256_add_epi64(_mm256_sllv_epi64(cs, _7i), _mm256_sllv_epi64(cs, _16i)));
    // truncation from 64-bit lanes to 32-bit pixels is intentional
    out[0] = _mm256_extract_epi64(cs, 0);
    out[1] = _mm256_extract_epi64(cs, 1);
    out[2] = _mm256_extract_epi64(cs, 2);
    out[3] = _mm256_extract_epi64(cs, 3);
}
/*
 * Renders a continuously-zooming Mandelbrot set into an SDL window.
 * Each frame the whole WIDTH x WIDTH frame is recomputed (4 pixels per
 * AVX2 call, rows parallelized with OpenMP), then the zoom factor shrinks.
 *
 * Fixes:
 *  - x0 carried a stray float suffix (0.10684f) while y0 was a double
 *    literal; both are now double-precision constants.
 *  - the pixel buffer allocation is checked, matching the assert style
 *    used for the SDL objects.
 */
int main() {
    // Pre-broadcast SIMD constants consumed by color256.
    _4d = _mm256_set1_pd(4);
    // NOTE(review): named _7i but loaded with 6 — the color is built from
    // shifted copies of the iteration count, so either value "works"
    // visually, but the name and value should agree; confirm intent.
    _7i = _mm256_set1_epi64x(6);
    _16i = _mm256_set1_epi64x(16);
    _ITERi = _mm256_set1_epi64x(ITERATIONS);

    assert(SDL_Init(SDL_INIT_VIDEO) == 0);
    SDL_Window * win = SDL_CreateWindow("Mandelbrot", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, WIDTH, WIDTH, SDL_WINDOW_SHOWN);
    assert(win != NULL);
    SDL_Renderer * ren = SDL_CreateRenderer(win, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
    assert(ren != NULL);
    SDL_Texture * texture = SDL_CreateTexture(ren, SDL_PIXELFORMAT_ARGB8888, SDL_TEXTUREACCESS_STATIC, WIDTH, WIDTH);
    assert(texture != NULL);

    uint32_t * const __attribute__ ((aligned (16))) pixels = malloc(WIDTH * WIDTH * sizeof(*pixels));
    assert(pixels != NULL);
    memset(pixels, 255, WIDTH * WIDTH * sizeof(*pixels));

    double s = 10;                    // current zoom window edge length
    // Zoom target (FIX: x0 was 0.10684f, silently widened from float).
    double const x0 = 0.10684, y0 = -0.63675, gsub = WIDTH-1;
    SDL_Event e;
    bool quit = false;
    while (!quit) {
        double x0s2 = x0 - s/2, y0s2 = y0 - s/2, sgsub = s / gsub;
        // Rows are independent: each j column of the window gets its own
        // real coordinate, and pixels are computed 4 at a time.
        #pragma omp parallel for
        for (int j = 0; j < WIDTH; j++) {
            __m256d as = _mm256_set1_pd(x0s2 + sgsub * j);
            for (int k = 0; k < WIDTH; k += 4) {
                __m256d bs = _mm256_set_pd
                    (y0s2 + sgsub * (k + 3),
                     y0s2 + sgsub * (k + 2),
                     y0s2 + sgsub * (k + 1),
                     y0s2 + sgsub * (k + 0));
                color256(as, bs, pixels + j*WIDTH + k);
            }
        }
        s *= 0.975;                   // zoom in a little every frame

        SDL_UpdateTexture(texture, NULL, pixels, WIDTH * sizeof(*pixels));
        while (SDL_PollEvent(&e)){
            switch (e.type) {
            case SDL_QUIT:
                quit = true;
                break;
            }
        }
        SDL_RenderCopy(ren, texture, NULL, NULL);
        SDL_RenderPresent(ren);
    }

    free(pixels);
    SDL_DestroyTexture(texture);
    SDL_DestroyRenderer(ren);
    SDL_DestroyWindow(win);
    SDL_Quit();
    return 0;
}
|
setround_omp.c | #include <omp.h>
#include <fenv.h>
#include "mex.h"
#pragma STDC FENV_ACCESS ON
/*
 * MEX entry point: sets the FPU rounding mode on every OpenMP thread.
 * Input: one scalar in {-1, 0, 1, 2} mapping to
 *   -1 -> FE_DOWNWARD, 0 -> FE_TONEAREST, 1 -> FE_UPWARD, 2 -> FE_TOWARDZERO.
 * FIX: the argument count and range are now validated before indexing
 * rnd[], which previously allowed an out-of-bounds read on bad input;
 * the deprecated `register` qualifier was dropped.
 */
void
mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    int rnd[] = {FE_DOWNWARD,FE_TONEAREST,FE_UPWARD,FE_TOWARDZERO};
    int mode;

    if (nrhs < 1)
        mexErrMsgTxt("setround: missing rounding mode argument (-1, 0, 1 or 2).");

    /*mode = *(mxGetPr(prhs[0]));*/
    mode = (int) mxGetScalar(prhs[0]);
    mode = mode + 1;   /* shift -1..2 onto the 0..3 table index */
    if (mode < 0 || mode > 3)
        mexErrMsgTxt("setround: rounding mode must be -1, 0, 1 or 2.");

    /* every OpenMP thread owns its own FP environment, so the mode must be
       set inside a parallel region to reach all of them */
    #pragma omp parallel shared(mode,rnd)
    {
        fesetround(rnd[mode]);
    }
}
/* the same like as fesetround, but in assembler
__asm__ ("fnstcw %0" : "=m" (cw));
cw &= 0x3ff;
cw |= mode;
__asm__ ("fldcw %0" : : "m" (cw));
__asm__ ("stmxcsr %0" : "=m" (mxcsr));
mxcsr &= ~ 0x6000;
mxcsr |= mode << 3;
__asm__ ("ldmxcsr %0" : : "m" (mxcsr));
*/
|
CPSfield_utils.h | #ifndef CPS_FIELD_UTILS_H
#define CPS_FIELD_UTILS_H
CPS_START_NAMESPACE
// Element-wise comparison of two 5D fermion fields: for every site, flavor
// and spin-color/re-im component the fractional difference |2(a-b)/(a+b)|
// must stay within tol. The fail flag is reduced over all nodes; on any
// failure every rank prints its verdict and the program exits.
inline void compareFermion(const CPSfermion5D<ComplexD> &A, const CPSfermion5D<ComplexD> &B, const std::string &descr = "Ferms", const double tol = 1e-9){
  double fail = 0.;
  for(int i=0;i<GJP.VolNodeSites()*GJP.SnodeSites();i++){
    // reconstruct the 5D lattice coordinate from the linear site index
    int x[5]; int rem = i;
    for(int ii=0;ii<5;ii++){ x[ii] = rem % GJP.NodeSites(ii); rem /= GJP.NodeSites(ii); }

    for(int f=0;f<GJP.Gparity()+1;f++){
      for(int sc=0;sc<24;sc++){
        double vbfm = *((double*)A.site_ptr(i,f) + sc);
        double vgrid = *((double*)B.site_ptr(i,f) + sc);

        double diff_rat = fabs( 2.0 * ( vbfm - vgrid )/( vbfm + vgrid ) );
        double rat_grid_bfm = vbfm/vgrid;
        // guard the 0/0 cases: exact zeros (or one exact zero against a
        // value below 1e-50) are treated as matching
        if(vbfm == 0.0 && vgrid == 0.0){ diff_rat = 0.;	 rat_grid_bfm = 1.; }
        if( (vbfm == 0.0 && fabs(vgrid) < 1e-50) || (vgrid == 0.0 && fabs(vbfm) < 1e-50) ){ diff_rat = 0.;	 rat_grid_bfm = 1.; }

        if(diff_rat > tol){
          printf("Fail: (%d,%d,%d,%d,%d; %d; %d) A %g B %g rat_A_B %g fracdiff %g\n",x[0],x[1],x[2],x[3],x[4],f,sc,vbfm,vgrid,rat_grid_bfm,diff_rat);
          fail = 1.0;
        }//else printf("Pass: (%d,%d,%d,%d,%d; %d; %d) A %g B %g rat_A_B %g fracdiff %g\n",x[0],x[1],x[2],x[3],x[4],f,sc,vbfm,vgrid,rat_grid_bfm,diff_rat);
      }
    }
  }
  // global reduction so every rank takes the same branch below
  glb_max(&fail);

  if(fail!=0.0){
    if(!UniqueID()){ printf("Failed %s check\n", descr.c_str()); fflush(stdout); }
    exit(-1);
  }else{
    if(!UniqueID()){ printf("Passed %s check\n", descr.c_str()); fflush(stdout); }
  }
}
// Generic comparison of two CPSfields with complex double/float site types:
// every flavored site, site element and re/im part must agree to within a
// fractional tolerance tol. With print_all set, passing entries are printed
// too and failures do not abort. The fail flag is reduced over all nodes.
template<typename FieldType, typename my_enable_if<_equal<typename ComplexClassify<typename FieldType::FieldSiteType>::type, complex_double_or_float_mark>::value,int>::type = 0>
inline void compareField(const FieldType &A, const FieldType &B, const std::string &descr = "Field", const double tol = 1e-9, bool print_all = false){
  typedef typename FieldType::FieldSiteType::value_type value_type;

  double fail = 0.;
  for(int xf=0;xf<A.nfsites();xf++){
    // decompose the linear flavored-site index into coordinate + flavor
    int f; int x[FieldType::FieldDimensionPolicy::EuclideanDimension];
    A.fsiteUnmap(xf, x,f);
    for(int i=0;i<FieldType::FieldSiteSize;i++){
      value_type const* av = (value_type const*)(A.fsite_ptr(xf)+i);
      value_type const* bv = (value_type const*)(B.fsite_ptr(xf)+i);
      for(int reim=0;reim<2;reim++){
        // fractional difference |2(a-b)/(a+b)|, with 0/0 treated as equal
        value_type diff_rat = (av[reim] == 0.0 && bv[reim] == 0.0) ? 0.0 : fabs( 2.*(av[reim]-bv[reim])/(av[reim]+bv[reim]) );
        if(diff_rat > tol || print_all){
          if(!print_all) std::cout << "Fail: (";
          else std::cout << "Pass: (";
          for(int xx=0;xx<FieldType::FieldDimensionPolicy::EuclideanDimension-1;xx++)
            std::cout << x[xx] << ", ";
          std::cout << x[FieldType::FieldDimensionPolicy::EuclideanDimension-1];
          std::cout << ") f=" << f << " reim " << reim << " A " << av[reim] << " B " << bv[reim] << " fracdiff " << diff_rat << std::endl;
          // in print_all mode nothing is treated as a failure
          if(!print_all) fail = 1.;
        }
      }
    }
  }
  // global reduction so every rank takes the same branch below
  glb_max(&fail);

  if(fail!=0.0){
    if(!UniqueID()){ printf("Failed %s check\n", descr.c_str()); fflush(stdout); }
    exit(-1);
  }else{
    if(!UniqueID()){ printf("Passed %s check\n", descr.c_str()); fflush(stdout); }
  }
}
#ifdef USE_BFM
// Exports one checkerboard (parity cb) of a BFM fermion into a full CPS 5D
// fermion, zero-filling the opposite parity. When singleprec_evec is set,
// `from` is interpreted as single precision and widened to double first.
inline void exportBFMcb(CPSfermion5D<ComplexD> &into, Fermion_t from, bfm_evo<double> &dwf, int cb, bool singleprec_evec = false){
  Fermion_t zero_a = dwf.allocFermion();
#pragma omp parallel
  {
    // set_zero is called from inside a parallel region — presumably bfm_evo
    // methods expect to run threaded; confirm against bfm's conventions.
    dwf.set_zero(zero_a);
  }
  Fermion_t etmp = dwf.allocFermion();
  Fermion_t tmp[2];
  // the parity we are NOT importing is filled with zeros
  tmp[!cb] = zero_a;

  if(singleprec_evec){
    // widen float -> double, element by element
    const int len = 24 * dwf.node_cbvol * (1 + dwf.gparity) * dwf.cbLs;
#pragma omp parallel for
    for(int j = 0; j < len; j++) {
      ((double*)etmp)[j] = ((float*)(from))[j];
    }
    tmp[cb] = etmp;
  }else tmp[cb] = from;

  dwf.cps_impexFermion(into.ptr(),tmp,0);

  dwf.freeFermion(zero_a);
  dwf.freeFermion(etmp);
}
#endif
#ifdef USE_GRID
//Export a single-checkerboard Grid fermion field into a full CPS 5D fermion field.
//The opposite-parity sites of the intermediate full-grid field are left at zero.
template<typename GridPolicies>
inline void exportGridcb(CPSfermion5D<ComplexD> &into, typename GridPolicies::GridFermionField &from, typename GridPolicies::FgridFclass &latg){
  Grid::GridCartesian *full_grid = latg.getFGrid();
  typename GridPolicies::GridFermionField full_field(full_grid);
  full_field = Grid::zero;            //start from an all-zero full-grid field
  setCheckerboard(full_field, from);  //fill in the parity carried by 'from'
  latg.ImportFermion((Vector*)into.ptr(), full_field);
}
#endif
#ifdef USE_QMP
//Cyclic permutation of *4D* CPSfield with std::complex type and FourDpolicy dimension policy
//Conventions are direction of *data flow*: For shift n in direction +1 f'(x) = f(x-\hat i) so data is sent in the +x direction.
#define CONDITION _equal<typename ComplexClassify<mf_Complex>::type, complex_double_or_float_mark>::value && (_equal<DimensionPolicy,FourDpolicy>::value || _equal<DimensionPolicy,SpatialPolicy>::value)
//QMP-parallel cyclic permutation of a scalar-complex CPSfield by n sites along 'dir'.
//pm=+1: data flows in the +dir direction, i.e. f'(x) = f(x - n*dir-hat); pm=-1 the
//reverse. The boundary face is exchanged with the neighbouring node in a single QMP
//message while the remaining sites are shifted locally.
template< typename mf_Complex, int SiteSize, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
void cyclicPermute(CPSfield<mf_Complex,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy> &to, const CPSfield<mf_Complex,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy> &from,
const int dir, const int pm, const int n,
typename my_enable_if<CONDITION , const int>::type dummy = 0){
enum {Dimension = DimensionPolicy::EuclideanDimension};
assert(dir < Dimension);
assert(n < GJP.NodeSites(dir)); //shift must be smaller than the local lattice extent
assert(pm == 1 || pm == -1);
//Aliased input/output requires buffering the source in a temporary copy
if(&to == &from){
if(n==0) return;
CPSfield<mf_Complex,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy> tmpfrom(from);
return cyclicPermute(to,tmpfrom,dir,pm,n);
}
if(n == 0){
to = from;
return;
}
QMP_barrier();
//Prepare face to send. If we send in the + direction we need to collect the slice starting {L-n ... L-1} (inclusive), and if we send in the - dir we collect the slice {0... n-1}
int bsites = n; //sites on boundary
int bsizes[Dimension]; bsizes[dir] = n;
int boff[Dimension]; boff[dir] = (pm == 1 ? GJP.NodeSites(dir)-n : 0);
for(int i=0;i<Dimension;i++)
if(i != dir){
bsizes[i] = GJP.NodeSites(i);
bsites *= bsizes[i];
boff[i] = 0;
}
int flav_off = from.flav_offset();
int nf = from.nflavors();
int bufsz = bsites * SiteSize * nf;
int halfbufsz = bufsz/2; //offset of the second-flavor block within the buffer (nf==2 only)
QMP_mem_t *recv_mem = QMP_allocate_memory(bufsz * sizeof(mf_Complex));
mf_Complex *recv_buf = (mf_Complex *)QMP_get_memory_pointer(recv_mem);
QMP_mem_t *send_mem = QMP_allocate_memory(bufsz * sizeof(mf_Complex));
mf_Complex *send_buf = (mf_Complex *)QMP_get_memory_pointer(send_mem);
//Pack the boundary face into the send buffer (flavor 0 first, flavor 1 in the second half)
#pragma omp parallel for
for(int i=0;i<bsites;i++){
int rem = i;
int coor[Dimension];
for(int d=0;d<Dimension;d++){ coor[d] = rem % bsizes[d] + boff[d]; rem/=bsizes[d]; }
mf_Complex const* site_ptr = from.site_ptr(coor);
mf_Complex* bp = send_buf + i*SiteSize;
memcpy(bp,site_ptr,SiteSize*sizeof(mf_Complex));
if(nf == 2){
site_ptr += flav_off;
bp += halfbufsz;
memcpy(bp,site_ptr,SiteSize*sizeof(mf_Complex));
}
}
QMP_barrier();
//Copy remaining sites from on-node data with shift
int rsizes[Dimension]; rsizes[dir] = GJP.NodeSites(dir) - n;
int rsites = GJP.NodeSites(dir) - n;
//if we sent in the + direction we need to shift the remaining L-n sites {0...L-n-1} forwards by n to make way for a new slice at the left side
//if we sent in the - direction we need to shift the remaining L-n sites {n ... L-1} backwards by n to make way for a new slice at the right side
int roff[Dimension]; roff[dir] = (pm == 1 ? 0 : n);
for(int i=0;i<Dimension;i++)
if(i != dir){
rsizes[i] = GJP.NodeSites(i);
rsites *= rsizes[i];
roff[i] = 0;
}
#pragma omp parallel for
for(int i=0;i<rsites;i++){
int rem = i;
int from_coor[Dimension];
for(int d=0;d<Dimension;d++){ from_coor[d] = rem % rsizes[d] + roff[d]; rem/=rsizes[d]; }
int to_coor[Dimension]; memcpy(to_coor,from_coor,Dimension*sizeof(int));
to_coor[dir] = (pm == +1 ? from_coor[dir] + n : from_coor[dir] - n);
mf_Complex const* from_ptr = from.site_ptr(from_coor);
mf_Complex * to_ptr = to.site_ptr(to_coor);
memcpy(to_ptr,from_ptr,SiteSize*sizeof(mf_Complex));
if(nf == 2){
from_ptr += flav_off;
to_ptr += flav_off;
memcpy(to_ptr,from_ptr,SiteSize*sizeof(mf_Complex));
}
}
//Send/receive
QMP_msgmem_t send_msg = QMP_declare_msgmem(send_buf,bufsz * sizeof(mf_Complex));
QMP_msgmem_t recv_msg = QMP_declare_msgmem(recv_buf,bufsz * sizeof(mf_Complex));
QMP_msghandle_t send = QMP_declare_send_relative(send_msg, dir, pm, 0);
QMP_msghandle_t recv = QMP_declare_receive_relative(recv_msg, dir, -pm, 0);
QMP_start(recv);
QMP_start(send);
QMP_status_t send_status = QMP_wait(send);
if (send_status != QMP_SUCCESS)
QMP_error("Send failed in cyclicPermute: %s\n", QMP_error_string(send_status));
QMP_status_t rcv_status = QMP_wait(recv);
if (rcv_status != QMP_SUCCESS)
//(the "PassDataT" tag in this message appears inherited from a related routine)
QMP_error("Receive failed in PassDataT: %s\n", QMP_error_string(rcv_status));
//Copy received face into position. For + shift the origin we copy into is the left-face {0..n-1}, for a - shift its the right-face {L-n .. L-1}
boff[dir] = (pm == 1 ? 0 : GJP.NodeSites(dir)-n);
#pragma omp parallel for
for(int i=0;i<bsites;i++){
int rem = i;
int coor[Dimension];
for(int d=0;d<Dimension;d++){ coor[d] = rem % bsizes[d] + boff[d]; rem/=bsizes[d]; }
mf_Complex * site_ptr = to.site_ptr(coor);
mf_Complex const* bp = recv_buf + i*SiteSize;
memcpy(site_ptr,bp,SiteSize*sizeof(mf_Complex));
if(nf == 2){
site_ptr += flav_off;
bp += halfbufsz;
memcpy(site_ptr,bp,SiteSize*sizeof(mf_Complex));
}
}
//Release all QMP handles and communication buffers
QMP_free_msghandle(send);
QMP_free_msghandle(recv);
QMP_free_msgmem(send_msg);
QMP_free_msgmem(recv_msg);
QMP_free_memory(send_mem);
QMP_free_memory(recv_mem);
QMP_barrier();
}
#undef CONDITION
# ifdef USE_GRID
#define CONDITION _equal<typename ComplexClassify<mf_Complex>::type, grid_vector_complex_mark>::value && (_equal<DimensionPolicy,FourDSIMDPolicy>::value || _equal<DimensionPolicy,ThreeDSIMDPolicy>::value)
//Version with SIMD vectorized data
//QMP-parallel cyclic permutation for SIMD-vectorized (Grid) fields. Same data-flow
//conventions as the scalar version, but sites are packed into SIMD lanes: face data is
//unpacked lane-by-lane into a scalar send buffer, and on the receive side destination
//vectors are reassembled one lane at a time (threaded over destination vectors so no
//two threads write the same SIMD register).
template< typename mf_Complex, int SiteSize, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
void cyclicPermute(CPSfield<mf_Complex,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy> &to, const CPSfield<mf_Complex,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy> &from,
const int dir, const int pm, const int n,
typename my_enable_if<CONDITION, const int>::type dummy = 0){
enum {Dimension = DimensionPolicy::EuclideanDimension};
assert(dir < Dimension);
assert(n < GJP.NodeSites(dir)); //shift must be smaller than the local extent
assert(pm == 1 || pm == -1);
//Aliased input/output requires a temporary copy of the source
if(&to == &from){
if(n==0) return;
CPSfield<mf_Complex,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy> tmpfrom(from);
return cyclicPermute(to,tmpfrom,dir,pm,n);
}
if(n == 0){
to = from;
return;
}
const int nsimd = mf_Complex::Nsimd();
//Use notation c (combined index), o (outer index) i (inner index)
int bcsites = n; //sites on boundary
int bcsizes[Dimension]; bcsizes[dir] = n;
int bcoff[Dimension]; bcoff[dir] = (pm == 1 ? GJP.NodeSites(dir)-n : 0);
int bcoff_postcomms[Dimension]; bcoff_postcomms[dir] = (pm == 1 ? 0 : GJP.NodeSites(dir)-n);
for(int i=0;i<Dimension;i++)
if(i != dir){
bcsizes[i] = GJP.NodeSites(i);
bcsites *= bcsizes[i];
bcoff[i] = 0;
bcoff_postcomms[i] = 0;
}
//Build table of points on face (both outer and inner index)
int nf = from.nflavors();
int flav_off = from.flav_offset();
typedef typename Grid::GridTypeMapper<mf_Complex>::scalar_type scalarType;
int bufsz = bcsites * SiteSize * nf; //buffers hold scalar (unpacked) data
QMP_mem_t *recv_mem = QMP_allocate_memory(bufsz * sizeof(scalarType));
scalarType *recv_buf = (scalarType *)QMP_get_memory_pointer(recv_mem);
QMP_mem_t *send_mem = QMP_allocate_memory(bufsz * sizeof(scalarType));
scalarType *send_buf = (scalarType *)QMP_get_memory_pointer(send_mem);
int osites = from.nsites();
std::vector<int> to_oi_buf_map(nf * osites * nsimd); //map from outer and inner index of destination site to offset within buffer, used *after* comms.
//map i + nsimd*(o + osites*f) as index
//Pack the face: unpack each source SIMD vector and copy out the single lane carrying
//the face site; simultaneously record where that site will land in the recv buffer
#pragma omp parallel for
for(int c=0;c<bcsites;c++){
int rem = c;
int coor[Dimension];
for(int d=0;d<Dimension;d++){ coor[d] = rem % bcsizes[d]; rem/=bcsizes[d]; }
int coor_dest[Dimension];
for(int d=0;d<Dimension;d++){
coor_dest[d] = coor[d] + bcoff_postcomms[d];
coor[d] += bcoff[d];
}
int i = from.SIMDmap(coor);
int o = from.siteMap(coor);
int i_dest = from.SIMDmap(coor_dest);
int o_dest = from.siteMap(coor_dest);
Grid::Vector<scalarType> ounpacked(nsimd);
for(int f=0;f<nf;f++){
mf_Complex const *osite_ptr = from.site_ptr(o,f);
int send_buf_off = (c + bcsites*f)*SiteSize;
scalarType* bp = send_buf + send_buf_off;
to_oi_buf_map[ i_dest + nsimd*(o_dest+osites*f) ] = send_buf_off;
for(int s=0;s<SiteSize;s++){
vstore(*(osite_ptr++), ounpacked.data());
*(bp++) = ounpacked[i];
}
}
}
//Send/receive
QMP_msgmem_t send_msg = QMP_declare_msgmem(send_buf,bufsz * sizeof(scalarType));
QMP_msgmem_t recv_msg = QMP_declare_msgmem(recv_buf,bufsz * sizeof(scalarType));
QMP_msghandle_t send = QMP_declare_send_relative(send_msg, dir, pm, 0);
QMP_msghandle_t recv = QMP_declare_receive_relative(recv_msg, dir, -pm, 0);
QMP_start(recv);
QMP_start(send);
QMP_status_t send_status = QMP_wait(send);
if (send_status != QMP_SUCCESS)
QMP_error("Send failed in cyclicPermute: %s\n", QMP_error_string(send_status));
QMP_status_t rcv_status = QMP_wait(recv);
if (rcv_status != QMP_SUCCESS)
QMP_error("Receive failed in PassDataT: %s\n", QMP_error_string(rcv_status));
//Copy remaining sites from on-node data with shift and pull in data from buffer simultaneously
//if we sent in the + direction we need to shift the remaining L-n sites {0...L-n-1} forwards by n to make way for a new slice at the left side
//if we sent in the - direction we need to shift the remaining L-n sites {n ... L-1} backwards by n to make way for a new slice at the right side
//Problem is we don't want two threads writing to the same AVX register at the same time. Therefore we thread the loop over the destination SIMD vectors and work back
std::vector< std::vector<int> > lane_offsets(nsimd, std::vector<int>(Dimension) ); //coordinate offset of each SIMD lane within an outer site
for(int i=0;i<nsimd;i++) from.SIMDunmap(i, lane_offsets[i].data() );
#pragma omp parallel for
for(int oto = 0;oto < osites; oto++){
int oto_base_coor[Dimension]; to.siteUnmap(oto,oto_base_coor);
//For each destination lane compute the source site index and lane
int from_lane[nsimd];
int from_osite_idx[nsimd]; //also use for recv_buf offsets for sites pulled over boundary
for(int lane = 0; lane < nsimd; lane++){
int offrom_coor[Dimension];
for(int d=0;d<Dimension;d++) offrom_coor[d] = oto_base_coor[d] + lane_offsets[lane][d];
offrom_coor[dir] += (pm == 1 ? -n : n);
if(offrom_coor[dir] < 0 || offrom_coor[dir] >= GJP.NodeSites(dir)){
from_lane[lane] = -1; //indicates data is in recv_buf
from_osite_idx[lane] = to_oi_buf_map[ lane + nsimd*oto ]; //here is for flavor 0 - remember to offset for second flav
}else{
from_lane[lane] = from.SIMDmap(offrom_coor);
from_osite_idx[lane] = from.siteMap(offrom_coor);
}
}
//Now loop over flavor and element within the site as well as SIMD lanes of the destination vector and gather what we need to poke - then poke it
Grid::Vector<scalarType> towrite(nsimd);
Grid::Vector<scalarType> unpack(nsimd);
for(int f=0;f<nf;f++){
for(int s=0;s<SiteSize;s++){
for(int tolane=0;tolane<nsimd;tolane++){
if(from_lane[tolane] != -1){
mf_Complex const* from_osite_ptr = from.site_ptr(from_osite_idx[tolane], f) + s;
vstore(*from_osite_ptr,unpack.data());
towrite[tolane] = unpack[ from_lane[tolane] ];
}else{
//data is in buffer
towrite[tolane] = recv_buf[ from_osite_idx[tolane] + s + f*bcsites*SiteSize ];
}
}
mf_Complex* to_osite_ptr = to.site_ptr(oto,f) + s;
vset(*to_osite_ptr, towrite.data());
}
}
}
//Release all QMP handles and communication buffers
QMP_free_msghandle(send);
QMP_free_msghandle(recv);
QMP_free_msgmem(send_msg);
QMP_free_msgmem(recv_msg);
QMP_free_memory(send_mem);
QMP_free_memory(recv_mem);
QMP_barrier();
}
#undef CONDITION
# endif //ifdef USE_GRID
#else //ifdef USE_QMP
#define CONDITION _equal<typename ComplexClassify<mf_Complex>::type, complex_double_or_float_mark>::value && (_equal<DimensionPolicy,FourDpolicy>::value || _equal<DimensionPolicy,SpatialPolicy>::value)
//Single-node fallback for the cyclic permutation of a scalar-complex field: each site
//is simply relocated by +pm*n (mod the local extent) along 'dir'. Aborts if more than
//one node is in use, since the parallel implementation requires QMP.
template< typename mf_Complex, int SiteSize, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
void cyclicPermute(CPSfield<mf_Complex,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy> &to, const CPSfield<mf_Complex,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy> &from,
                   const int dir, const int pm, const int n,
                   typename my_enable_if<CONDITION , const int>::type dummy = 0){
  enum {Dimension = DimensionPolicy::EuclideanDimension};
  assert(dir < Dimension);
  assert(n < GJP.NodeSites(dir));
  assert(pm == 1 || pm == -1);

  //Aliased input/output: permute out of a temporary copy instead
  if(&to == &from){
    if(n == 0) return;
    CPSfield<mf_Complex,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy> src_copy(from);
    return cyclicPermute(to, src_copy, dir, pm, n);
  }
  if(n == 0){
    to = from;
    return;
  }

  const int nodes = GJP.Xnodes()*GJP.Ynodes()*GJP.Znodes()*GJP.Tnodes()*GJP.Snodes();
  if(nodes != 1) ERR.General("","cyclicPermute","Parallel implementation requires QMP\n");

#pragma omp parallel for
  for(int fs = 0; fs < from.nfsites(); fs++){
    int flav;
    int coord[Dimension];
    from.fsiteUnmap(fs, coord, flav);
    //The +5L term keeps the modulo argument non-negative for any |pm*n| < L
    coord[dir] = (coord[dir] + pm * n + 5*GJP.NodeSites(dir)) % GJP.NodeSites(dir);
    memcpy(to.site_ptr(coord, flav), from.fsite_ptr(fs), SiteSize*sizeof(mf_Complex));
  }
}
#undef CONDITION
# ifdef USE_GRID
#define CONDITION _equal<typename ComplexClassify<mf_Complex>::type, grid_vector_complex_mark>::value && (_equal<DimensionPolicy,FourDSIMDPolicy>::value || _equal<DimensionPolicy,ThreeDSIMDPolicy>::value)
//Version with SIMD vectorized data
//Single-node (non-QMP) cyclic permutation for SIMD-vectorized fields. For every output
//outer site and SIMD lane, the source coordinate is obtained by shifting by -pm*n
//(mod the local extent) and the scalar value is read from the corresponding lane of the
//source vector; the destination vector is then written in a single vset.
template< typename mf_Complex, int SiteSize, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
void cyclicPermute(CPSfield<mf_Complex,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy> &to, const CPSfield<mf_Complex,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy> &from,
const int dir, const int pm, const int n,
typename my_enable_if<CONDITION, const int>::type dummy = 0){
enum {Dimension = DimensionPolicy::EuclideanDimension};
assert(dir < Dimension);
assert(n < GJP.NodeSites(dir));
assert(pm == 1 || pm == -1);
//Aliased input/output requires a temporary copy of the source
if(&to == &from){
if(n==0) return;
CPSfield<mf_Complex,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy> tmpfrom(from);
return cyclicPermute(to,tmpfrom,dir,pm,n);
}
if(n == 0){
to = from;
return;
}
const int nodes = GJP.Xnodes()*GJP.Ynodes()*GJP.Znodes()*GJP.Tnodes()*GJP.Snodes();
if(nodes != 1) ERR.General("","cyclicPermute","Parallel implementation requires QMP\n");
const int nsimd = mf_Complex::Nsimd();
typedef typename mf_Complex::scalar_type scalar_type;
//Per-thread aligned scratch buffer for assembling one vector's worth of scalars
const int nthr = omp_get_max_threads();
scalar_type* tmp_store_thr[nthr]; for(int i=0;i<nthr;i++) tmp_store_thr[i] = (scalar_type*)memalign(128,nsimd*sizeof(scalar_type));
#pragma omp parallel for
for(int ofto=0;ofto<to.nfsites();ofto++){ //loop over outer site index
const int me = omp_get_thread_num();
int f; int oxto[Dimension];
to.fsiteUnmap(ofto,oxto,f);
mf_Complex* to_base_ptr = to.fsite_ptr(ofto);
scalar_type* tmp_store = tmp_store_thr[me];
//indexed by destination lane
mf_Complex const* from_base_ptrs[nsimd];
int from_lane_idx[nsimd];
for(int tolane = 0; tolane < nsimd; tolane++){
int ixto_off[Dimension];
to.SIMDunmap(tolane,ixto_off); //get offset of inner site on tolane
int xfrom[Dimension]; for(int d=0;d<Dimension;d++) xfrom[d] = oxto[d] + ixto_off[d]; //full coord corresponding to tolane + outer site
//+5L keeps the modulo argument non-negative
xfrom[dir] = (xfrom[dir] - pm * n + 5*GJP.NodeSites(dir) ) % GJP.NodeSites(dir);
from_base_ptrs[tolane] = from.site_ptr(xfrom,f);
from_lane_idx[tolane] = from.SIMDmap(xfrom);
}
for(int s=0;s<SiteSize;s++){
//Gather one scalar per destination lane, then write the assembled vector at once
for(int tolane = 0; tolane < nsimd; tolane++)
tmp_store[tolane] = *( (scalar_type*)(from_base_ptrs[tolane] + s) + from_lane_idx[tolane] ); //cast SIMD type to scalar type pointer
vset(*(to_base_ptr + s), tmp_store);
}
}
for(int i=0;i<nthr;i++) free(tmp_store_thr[i]); //release per-thread scratch
}
#undef CONDITION
# endif //ifdef USE_GRID
#endif //ifdef USE_QMP
//Sign convention used by shiftPeriodicField: strictly positive offsets map to +1,
//everything else (including zero) maps to -1.
inline int getShiftSign(const int of){
  if(of > 0) return +1;
  return -1;
}
//Invoke multiple independent permutes to offset field by vector 'shift' assuming field is periodic
//Shift a periodic field by the lattice vector 'shift' (components ordered x,y,z,t,...).
//Implemented as one cyclic permutation per non-zero component: zero shift is a plain
//copy, a single non-zero component permutes directly from source to destination, and
//multiple components ping-pong between two temporaries with the final permutation
//writing straight into 'to'.
template<typename FieldType>
void shiftPeriodicField(FieldType &to, const FieldType &from, const std::vector<int> &shift){
  const int ndim = shift.size();
  int nontrivial = 0;
  for(int d=0;d<ndim;d++) if(shift[d] != 0) ++nontrivial;

  if(nontrivial == 0){
    if(&to != &from) to = from; //skip self-copy
    return;
  }
  if(nontrivial == 1){
    for(int d=0;d<ndim;d++){
      if(shift[d] != 0){
        cyclicPermute(to, from, d, getShiftSign(shift[d]), abs(shift[d]));
        return;
      }
    }
  }
  //Multiple directions: alternate between two work buffers
  FieldType bufA = from;
  FieldType bufB = from;
  FieldType *src = &bufA;
  FieldType *dst = &bufB;
  int done = 0;
  for(int d=0;d<ndim;d++){
    if(shift[d] == 0) continue;
    //The last permutation targets 'to' directly; earlier ones go into the spare buffer
    FieldType &out = (done < nontrivial-1 ? *dst : to);
    cyclicPermute(out, *src, d, getShiftSign(shift[d]), abs(shift[d]));
    if(++done == nontrivial) return;
    std::swap(src, dst);
  }
}
template<typename CPSfieldType>
void fft(CPSfieldType &into, const CPSfieldType &from, const bool* do_dirs, const bool inverse_transform = false,
typename my_enable_if<_equal<typename ComplexClassify<typename CPSfieldType::FieldSiteType>::type, complex_double_or_float_mark>::value, const int>::type = 0
){
typedef typename LocalToGlobalInOneDirMap<typename CPSfieldType::FieldDimensionPolicy>::type DimPolGlobalInOneDir;
typedef CPSfieldGlobalInOneDir<typename CPSfieldType::FieldSiteType, CPSfieldType::FieldSiteSize, DimPolGlobalInOneDir, typename CPSfieldType::FieldFlavorPolicy, typename CPSfieldType::FieldAllocPolicy> CPSfieldTypeGlobalInOneDir;
int dcount = 0;
for(int mu=0;mu<CPSfieldType::FieldDimensionPolicy::EuclideanDimension;mu++)
if(do_dirs[mu]){
CPSfieldTypeGlobalInOneDir tmp_dbl(mu);
tmp_dbl.gather( dcount==0 ? from : into );
tmp_dbl.fft(inverse_transform);
tmp_dbl.scatter(into);
dcount ++;
}
}
#ifdef USE_GRID
template<typename CPSfieldType>
void fft(CPSfieldType &into, const CPSfieldType &from, const bool* do_dirs, const bool inverse_transform = false,
typename my_enable_if<_equal<typename ComplexClassify<typename CPSfieldType::FieldSiteType>::type, grid_vector_complex_mark>::value, const int>::type = 0
){
typedef typename Grid::GridTypeMapper<typename CPSfieldType::FieldSiteType>::scalar_type ScalarType;
typedef typename CPSfieldType::FieldDimensionPolicy::EquivalentScalarPolicy ScalarDimPol;
typedef CPSfield<ScalarType, CPSfieldType::FieldSiteSize, ScalarDimPol, typename CPSfieldType::FieldFlavorPolicy, StandardAllocPolicy> ScalarFieldType;
NullObject null_obj;
ScalarFieldType tmp_in(null_obj);
ScalarFieldType tmp_out(null_obj);
tmp_in.importField(from);
fft(tmp_out, tmp_in, do_dirs, inverse_transform);
tmp_out.exportField(into);
}
#endif
//In-place FFT along the directions flagged in do_dirs.
//Generalized (backward-compatibly) to expose the inverse transform, matching the
//two-field overloads above; existing callers are unaffected by the defaulted parameter.
template<typename CPSfieldType>
void fft(CPSfieldType &fftme, const bool* do_dirs, const bool inverse_transform = false){
  fft(fftme, fftme, do_dirs, inverse_transform);
}
//Optimized FFT of 'from' into 'into' along the directions flagged in do_dirs, for
//scalar-complex fields. Each flagged direction is transformed by fft_opt_mu, which
//distributes the 1D FFTs over the nodes in that direction via MPI; without MPI this
//falls back to the generic gather/scatter fft().
template<typename CPSfieldType>
void fft_opt(CPSfieldType &into, const CPSfieldType &from, const bool* do_dirs, const bool inverse_transform = false,
typename my_enable_if<_equal<typename ComplexClassify<typename CPSfieldType::FieldSiteType>::type, complex_double_or_float_mark>::value, const int>::type = 0
){
#ifndef USE_MPI
fft(into,from,do_dirs,inverse_transform);
#else
enum { Dimension = CPSfieldType::FieldDimensionPolicy::EuclideanDimension };
int ndirs_fft = 0; for(int i=0;i<Dimension;i++) if(do_dirs[i]) ++ndirs_fft;
if(! ndirs_fft ) return;
//Need info on the MPI node mapping
assert(GJP.Snodes() == 1);
std::vector<int> node_map;
getMPIrankMap(node_map);
CPSfieldType tmp(from.getDimPolParams());
//we want the last fft to end up in 'into'. Intermediate FFTs cycle between into and tmp as temp storage. Thus for odd ndirs_fft, the first fft should output to 'into', for even it should output to 'tmp'
CPSfieldType *tmp1, *tmp2;
if(ndirs_fft % 2 == 1){
tmp1 = &into; tmp2 = &tmp;
}else{
tmp1 = &tmp; tmp2 = &into;
}
CPSfieldType* src = tmp2;
CPSfieldType* out = tmp1;
int fft_count = 0;
for(int mu=0; mu<Dimension; mu++){
if(do_dirs[mu]){
CPSfieldType const *msrc = fft_count == 0 ? &from : src; //first pass reads the source field directly
fft_opt_mu(*out, *msrc, mu, node_map, inverse_transform);
++fft_count;
std::swap(src,out); //ping-pong the intermediate storage
}
}
#endif
}
#ifdef USE_MPI
//Perform the FFT along direction mu only. The sites orthogonal to mu are divided over
//the nodes in the mu direction (via thread_work); each node gathers complete mu-lines
//for its share of orthogonal sites with MPI, performs the 1D FFTs with FFTW (threaded,
//using cached plans), and the results are scattered back. For the inverse transform the
//output is normalized by the global extent Lmu.
template<typename CPSfieldType>
void fft_opt_mu(CPSfieldType &into, const CPSfieldType &from, const int mu, const std::vector<int> &node_map, const bool inverse_transform,
typename my_enable_if<_equal<typename ComplexClassify<typename CPSfieldType::FieldSiteType>::type, complex_double_or_float_mark>::value, const int>::type = 0
){
enum {SiteSize = CPSfieldType::FieldSiteSize, Dimension = CPSfieldType::FieldDimensionPolicy::EuclideanDimension };
typedef typename CPSfieldType::FieldSiteType ComplexType;
typedef typename ComplexType::value_type FloatType;
typedef typename FFTWwrapper<FloatType>::complexType FFTComplex;
const int nf = from.nflavors();
const int foff = from.flav_offset();
const int nthread = omp_get_max_threads();
//Eg for fft in X-direction, divide up Y,Z,T work over nodes in X-direction doing linear FFTs.
const int munodesites = GJP.NodeSites(mu);
const int munodes = GJP.Nodes(mu);
const int mutotalsites = munodesites*munodes; //global extent in mu
const int munodecoor = GJP.NodeCoor(mu);
const int n_orthdirs = Dimension - 1;
FloatType Lmu(mutotalsites); //inverse-transform normalization
int orthdirs[n_orthdirs]; //map of orthogonal directions to mu
int total_work_munodes = 1; //sites orthogonal to FFT direction
int o=0;
for(int i=0;i< Dimension;i++)
if(i!=mu){
total_work_munodes *= GJP.NodeSites(i);
orthdirs[o++] = i;
}
//Divvy up work over othogonal directions
int munodes_work[munodes];
int munodes_off[munodes];
for(int i=0;i<munodes;i++)
thread_work(munodes_work[i],munodes_off[i], total_work_munodes, i, munodes); //use for node work instead :)
//Get MPI ranks of nodes in mu direction
int my_node_coor[4];
for(int i=0;i<4;i++) my_node_coor[i] = GJP.NodeCoor(i);
int munodes_mpiranks[munodes];
for(int i=0;i<munodes;i++){
int munode_coor[4]; memcpy(munode_coor,my_node_coor,4*sizeof(int));
munode_coor[mu] = i;
const int munode_lex = node_lex( munode_coor, 4 );
munodes_mpiranks[i] = node_map[munode_lex];
}
//Gather send data: one buffer per destination node containing its share of orthogonal
//sites, every flavor, and the full local mu extent
ComplexType* send_bufs[munodes];
int send_buf_sizes[munodes];
for(int i=0;i<munodes;i++){
send_buf_sizes[i] = munodes_work[i] * munodesites * nf * SiteSize;
send_bufs[i] = (ComplexType*)malloc( send_buf_sizes[i] * sizeof(ComplexType) );
for(int w = 0; w < munodes_work[i]; w++){ //index of orthogonal site within workload for i'th node in mu direction
const int orthsite = munodes_off[i] + w;
int coor_base[Dimension] = {0};
//Unmap orthsite into a base coordinate
int rem = orthsite;
for(int a=0;a<n_orthdirs;a++){
const int dir_a = orthdirs[a];
coor_base[dir_a] = rem % GJP.NodeSites(dir_a); rem /= GJP.NodeSites(dir_a);
}
for(int f=0;f<nf;f++){
for(int xmu=0;xmu<munodesites;xmu++){
ComplexType* to = send_bufs[i] + SiteSize * (w + munodes_work[i]*( f + nf*xmu ) ); //with musite changing slowest
coor_base[mu] = xmu;
ComplexType const* frm = from.site_ptr(coor_base,f);
memcpy(to,frm,SiteSize*sizeof(ComplexType));
}
}
}
}
MPI_Request send_req[munodes];
MPI_Request recv_req[munodes];
MPI_Status status[munodes];
//Prepare recv buf
const int bufsz = munodes_work[munodecoor] * mutotalsites * nf * SiteSize; //complete line in mu for each orthogonal coordinate
ComplexType* recv_buf = (ComplexType*)malloc(bufsz * sizeof(ComplexType) );
//Setup send/receive
for(int i=0;i<munodes;i++){ //works fine to send to all nodes, even if this involves a send to self.
int sret = MPI_Isend(send_bufs[i], send_buf_sizes[i]*sizeof(ComplexType), MPI_CHAR, munodes_mpiranks[i], 0, MPI_COMM_WORLD, &send_req[i]);
assert(sret == MPI_SUCCESS);
//NOTE(review): the posted receive count uses send_buf_sizes[i] (proportional to
//munodes_work[i]) but the message arriving from node i is sized by OUR workload
//munodes_work[munodecoor]; thread_work can assign workloads differing by one unit,
//in which case the count undercounts the incoming message (MPI truncation) — confirm.
int rret = MPI_Irecv(recv_buf + i*munodes_work[munodecoor]*nf*SiteSize*munodesites, send_buf_sizes[i]*sizeof(ComplexType), MPI_CHAR, munodes_mpiranks[i], MPI_ANY_TAG, MPI_COMM_WORLD, &recv_req[i]);
assert(rret == MPI_SUCCESS);
}
int wret = MPI_Waitall(munodes,recv_req,status);
assert(wret == MPI_SUCCESS);
//Do FFT
const int howmany = munodes_work[munodecoor] * nf * SiteSize;
const int howmany_per_thread_base = howmany / nthread;
//Divide work orthogonal to mu, 'howmany', over threads. Note, this may not divide howmany equally. The difference is made up by adding 1 unit of work to threads in ascending order until total work matches. Thus we need 2 plans: 1 for the base amount and one for the base+1
//if(!UniqueID()) printf("FFT work per site %d, divided over %d threads with %d work each. Remaining work %d allocated to ascending threads\n", howmany, nthread, howmany_per_thread_base, howmany - howmany_per_thread_base*nthread);
int fft_phase = inverse_transform ? FFTW_BACKWARD : FFTW_FORWARD;
//Plans are cached across calls (per direction) and rebuilt only when the work
//decomposition or transform direction changes
static FFTplanContainer<FloatType> plan_f_base[Dimension]; //destructors deallocate plans
static FFTplanContainer<FloatType> plan_f_base_p1[Dimension];
static int plan_howmany[Dimension];
static bool plan_init = false;
static int plan_fft_phase;
if(!plan_init || plan_howmany[mu] != howmany || fft_phase != plan_fft_phase){
if(!plan_init) for(int i=0;i<Dimension;i++) plan_howmany[i] = -1;
typename FFTWwrapper<FloatType>::complexType *tmp_f; //I don't think it actually does anything with this
plan_fft_phase = fft_phase;
const int fft_work_per_musite = howmany_per_thread_base;
const int musite_stride = howmany; //stride between musites
plan_f_base[mu].setPlan(1, &mutotalsites, fft_work_per_musite,
tmp_f, NULL, musite_stride, 1,
tmp_f, NULL, musite_stride, 1,
plan_fft_phase, FFTW_ESTIMATE);
plan_f_base_p1[mu].setPlan(1, &mutotalsites, fft_work_per_musite+1,
tmp_f, NULL, musite_stride, 1,
tmp_f, NULL, musite_stride, 1,
plan_fft_phase, FFTW_ESTIMATE);
plan_init = true; //other mu's will still init later
}
FFTComplex*fftw_mem = (FFTComplex*)recv_buf; //transform in place in the receive buffer
#pragma omp parallel
{
assert(nthread == omp_get_num_threads()); //plans will be messed up if not true
const int me = omp_get_thread_num();
int thr_work, thr_off;
thread_work(thr_work, thr_off, howmany, me, nthread);
const FFTplanContainer<FloatType>* thr_plan_ptr;
if(thr_work == howmany_per_thread_base) thr_plan_ptr = &plan_f_base[mu];
else if(thr_work == howmany_per_thread_base + 1) thr_plan_ptr = &plan_f_base_p1[mu];
else assert(0); //catch if logic for thr_work changes
FFTWwrapper<FloatType>::execute_dft(thr_plan_ptr->getPlan(), fftw_mem + thr_off, fftw_mem + thr_off);
}
wret = MPI_Waitall(munodes,send_req,status);
assert(wret == MPI_SUCCESS);
//Send back out. Reuse the old send buffers as receive buffers and vice versa
for(int i=0;i<munodes;i++){ //works fine to send to all nodes, even if this involves a send to self
int sret = MPI_Isend(recv_buf + i*munodes_work[munodecoor]*nf*SiteSize*munodesites, send_buf_sizes[i]*sizeof(ComplexType), MPI_CHAR, munodes_mpiranks[i], 0, MPI_COMM_WORLD, &send_req[i]);
assert(sret == MPI_SUCCESS);
int rret = MPI_Irecv(send_bufs[i], send_buf_sizes[i]*sizeof(ComplexType), MPI_CHAR, munodes_mpiranks[i], MPI_ANY_TAG, MPI_COMM_WORLD, &recv_req[i]);
assert(rret == MPI_SUCCESS);
}
wret = MPI_Waitall(munodes,recv_req,status);
assert(wret == MPI_SUCCESS);
//Poke into output
for(int i=0;i<munodes;i++){
#pragma omp parallel for
for(int w = 0; w < munodes_work[i]; w++){ //index of orthogonal site within workload for i'th node in mu direction
const int orthsite = munodes_off[i] + w;
int coor_base[Dimension] = {0};
//Unmap orthsite into a base coordinate
int rem = orthsite;
for(int a=0;a<n_orthdirs;a++){
int dir_a = orthdirs[a];
coor_base[dir_a] = rem % GJP.NodeSites(dir_a); rem /= GJP.NodeSites(dir_a);
}
for(int f=0;f<nf;f++){
for(int xmu=0;xmu<munodesites;xmu++){
coor_base[mu] = xmu;
ComplexType* to = into.site_ptr(coor_base,f);
ComplexType const* frm = send_bufs[i] + SiteSize * (w + munodes_work[i]*( f + nf*xmu ) );
//Inverse transform carries the 1/Lmu normalization
if(!inverse_transform) memcpy(to,frm,SiteSize*sizeof(ComplexType));
else for(int s=0;s<SiteSize;s++) to[s] = frm[s]/Lmu;
}
}
}
}
wret = MPI_Waitall(munodes,send_req,status);
assert(wret == MPI_SUCCESS);
free(recv_buf);
for(int i=0;i<munodes;i++) free(send_bufs[i]);
}
#endif
#ifdef USE_GRID
//fft_opt overload for SIMD-vectorized (Grid) fields: unpack into an equivalent scalar
//field, transform, and repack.
//NOTE(review): the preprocessor branch looks suspicious — with USE_MPI defined this
//calls the generic fft() on the vectorized field, while without MPI it unpacks and
//calls fft_opt() (which itself falls back to fft() when MPI is absent), so the
//optimized MPI path is never taken for vectorized fields. Confirm whether the #ifdef
//branches were meant to be the other way around.
template<typename CPSfieldType>
void fft_opt(CPSfieldType &into, const CPSfieldType &from, const bool* do_dirs, const bool inverse_transform = false,
typename my_enable_if<_equal<typename ComplexClassify<typename CPSfieldType::FieldSiteType>::type, grid_vector_complex_mark>::value, const int>::type = 0
){ //we can avoid the copies below but with some effort - do at some point
# ifdef USE_MPI
fft(into,from,do_dirs,inverse_transform);
# else
typedef typename Grid::GridTypeMapper<typename CPSfieldType::FieldSiteType>::scalar_type ScalarType;
typedef typename CPSfieldType::FieldDimensionPolicy::EquivalentScalarPolicy ScalarDimPol;
typedef CPSfield<ScalarType, CPSfieldType::FieldSiteSize, ScalarDimPol, typename CPSfieldType::FieldFlavorPolicy, StandardAllocPolicy> ScalarFieldType;
NullObject null_obj;
ScalarFieldType tmp_in(null_obj);
ScalarFieldType tmp_out(null_obj);
tmp_in.importField(from);
fft_opt(tmp_out, tmp_in, do_dirs, inverse_transform);
tmp_out.exportField(into);
# endif
}
#endif
CPS_END_NAMESPACE
#endif
/* ===== detector.c ===== */
#include "darknet.h"
#include "detection_gold_w.h"
#include "cuda.h"
#define PRINT_INTERVAL 10
//Map from the contiguous class index (0-79) used by the network to the
//non-contiguous COCO category ids used in annotation/results JSON (note the gaps:
//ids such as 12, 26, 29-30 are absent from the COCO labelling).
static int coco_ids[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38,
39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
58, 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 84, 85, 86, 87, 88, 89, 90 };
void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
int ngpus, int clear) {
list *options = read_data_cfg(datacfg);
char *train_images = option_find_str(options, "train", "data/train.list");
char *backup_directory = option_find_str(options, "backup", "/backup/");
srand(time(0));
char *base = basecfg(cfgfile);
printf("%s\n", base);
real_t avg_loss = -1;
network **nets = calloc(ngpus, sizeof(network));
srand(time(0));
int seed = rand();
int i;
for (i = 0; i < ngpus; ++i) {
srand(seed);
#ifdef GPU
cuda_set_device(gpus[i]);
#endif
nets[i] = load_network(cfgfile, weightfile, clear);
nets[i]->learning_rate *= ngpus;
}
srand(time(0));
network *net = nets[0];
int imgs = net->batch * net->subdivisions * ngpus;
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate,
net->momentum, net->decay);
data train, buffer;
layer l = net->layers[net->n - 1];
int classes = l.classes;
real_t jitter = l.jitter;
list *plist = get_paths(train_images);
//int N = plist->size;
char **paths = (char **) list_to_array(plist);
load_args args = get_base_args(net);
args.coords = l.coords;
args.paths = paths;
args.n = imgs;
args.m = plist->size;
args.classes = classes;
args.jitter = jitter;
args.num_boxes = l.max_boxes;
args.d = &buffer;
args.type = DETECTION_DATA;
//args.type = INSTANCE_DATA;
args.threads = 64;
pthread_t load_thread = load_data(args);
double time;
int count = 0;
//while(i*imgs < N*120){
while (get_current_batch(net) < net->max_batches) {
if (l.random && count++ % 10 == 0) {
printf("Resizing\n");
int dim = (rand() % 10 + 10) * 32;
if (get_current_batch(net) + 200 > net->max_batches)
dim = 608;
//int dim = (rand() % 4 + 16) * 32;
printf("%d\n", dim);
args.w = dim;
args.h = dim;
pthread_join(load_thread, 0);
train = buffer;
free_data(train);
load_thread = load_data(args);
#pragma omp parallel for
for (i = 0; i < ngpus; ++i) {
resize_network(nets[i], dim, dim);
}
net = nets[0];
}
time = what_time_is_it_now();
pthread_join(load_thread, 0);
train = buffer;
load_thread = load_data(args);
/*
int k;
for(k = 0; k < l.max_boxes; ++k){
box b = real_t_to_box(train.y.vals[10] + 1 + k*5);
if(!b.x) break;
printf("loaded: %f %f %f %f\n", b.x, b.y, b.w, b.h);
}
*/
/*
int zz;
for(zz = 0; zz < train.X.cols; ++zz){
image im = real_t_to_image(net->w, net->h, 3, train.X.vals[zz]);
int k;
for(k = 0; k < l.max_boxes; ++k){
box b = real_t_to_box(train.y.vals[zz] + k*5, 1);
printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
draw_bbox(im, b, 1, 1,0,0);
}
show_image(im, "truth11");
cvWaitKey(0);
save_image(im, "truth11");
}
*/
printf("Loaded: %lf seconds\n", what_time_is_it_now() - time);
time = what_time_is_it_now();
real_t loss = 0;
#ifdef GPU
if (ngpus == 1) {
loss = train_network(net, train);
} else {
loss = train_networks(nets, ngpus, train, 4);
}
#else
loss = train_network(net, train);
#endif
if (avg_loss < 0)
avg_loss = loss;
avg_loss = avg_loss * .9 + loss * .1;
i = get_current_batch(net);
printf("%ld: %f, %f avg, %f rate, %lf seconds, %d images\n",
get_current_batch(net), loss, avg_loss, get_current_rate(net),
what_time_is_it_now() - time, i * imgs);
if (i % 100 == 0) {
#ifdef GPU
if (ngpus != 1)
sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s.backup", backup_directory, base);
save_weights(net, buff);
}
if (i % 10000 == 0 || (i < 1000 && i % 100 == 0)) {
#ifdef GPU
if (ngpus != 1)
sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
save_weights(net, buff);
}
free_data(train);
}
#ifdef GPU
if (ngpus != 1)
sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s_final.weights", backup_directory, base);
save_weights(net, buff);
}
/*
 * Extract the numeric COCO image id from a file path.
 *
 * COCO file names look like ".../COCO_val2014_000000000164.jpg"; the id is
 * the integer after the last '_' (preferred) or, failing that, after the
 * last '/'.  BUG FIX: when the path contains neither separator the original
 * computed atoi(NULL + 1) — undefined behavior; fall back to parsing the
 * whole name instead.
 */
static int get_coco_image_id(char *filename) {
    char *p = strrchr(filename, '/');
    char *c = strrchr(filename, '_');
    if (c)
        p = c;
    if (!p)
        return atoi(filename);
    return atoi(p + 1);
}
/*
 * Append one COCO-format JSON result entry per (box, class) pair to fp.
 * Boxes arrive in center/width/height form; they are converted to the COCO
 * [x, y, w, h] corner form and clipped to the image rectangle before output.
 */
static void print_cocos(FILE *fp, char *image_path, detection *dets,
        int num_boxes, int classes, int w, int h) {
    const int image_id = get_coco_image_id(image_path);
    int box_idx;
    for (box_idx = 0; box_idx < num_boxes; ++box_idx) {
        box b = dets[box_idx].bbox;
        real_t left = b.x - b.w / 2.;
        real_t right = b.x + b.w / 2.;
        real_t top = b.y - b.h / 2.;
        real_t bottom = b.y + b.h / 2.;
        /* clip to the image bounds */
        left = left < 0 ? 0 : left;
        top = top < 0 ? 0 : top;
        right = right > w ? w : right;
        bottom = bottom > h ? h : bottom;
        int cls;
        for (cls = 0; cls < classes; ++cls) {
            if (!dets[box_idx].prob[cls])
                continue;
            fprintf(fp,
                    "{\"image_id\":%d, \"category_id\":%d, \"bbox\":[%f, %f, %f, %f], \"score\":%f},\n",
                    image_id, coco_ids[cls], left, top, right - left,
                    bottom - top, dets[box_idx].prob[cls]);
        }
    }
}
/*
 * Write detections in the PASCAL VOC "comp4" evaluation format: one output
 * stream per class, each line "<image id> <score> <xmin> <ymin> <xmax> <ymax>".
 * Coordinates are 1-based and clipped to the image rectangle.
 */
void print_detector_detections(FILE **fps, char *id, detection *dets, int total,
        int classes, int w, int h) {
    int d, c;
    for (d = 0; d < total; ++d) {
        box b = dets[d].bbox;
        /* convert center/size form to 1-based corner coordinates */
        real_t xmin = b.x - b.w / 2. + 1;
        real_t xmax = b.x + b.w / 2. + 1;
        real_t ymin = b.y - b.h / 2. + 1;
        real_t ymax = b.y + b.h / 2. + 1;
        if (xmin < 1) xmin = 1;
        if (ymin < 1) ymin = 1;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;
        for (c = 0; c < classes; ++c) {
            real_t score = dets[d].prob[c];
            if (score)
                fprintf(fps[c], "%s %f %f %f %f %f\n", id, score,
                        xmin, ymin, xmax, ymax);
        }
    }
}
/*
 * Write detections in the ImageNet VID evaluation format:
 * "<image id> <1-based class> <score> <xmin> <ymin> <xmax> <ymax>".
 * Boxes are converted from center form to corners and clipped to [0, w/h].
 *
 * Cleanup: the original declared "int class = j;" — a pointless alias that
 * also makes the file impossible to compile as C++ ("class" is a keyword).
 */
void print_imagenet_detections(FILE *fp, int id, detection *dets, int total,
        int classes, int w, int h) {
    int i, j;
    for (i = 0; i < total; ++i) {
        real_t xmin = dets[i].bbox.x - dets[i].bbox.w / 2.;
        real_t xmax = dets[i].bbox.x + dets[i].bbox.w / 2.;
        real_t ymin = dets[i].bbox.y - dets[i].bbox.h / 2.;
        real_t ymax = dets[i].bbox.y + dets[i].bbox.h / 2.;
        if (xmin < 0)
            xmin = 0;
        if (ymin < 0)
            ymin = 0;
        if (xmax > w)
            xmax = w;
        if (ymax > h)
            ymax = h;
        for (j = 0; j < classes; ++j) {
            if (dets[i].prob[j])
                fprintf(fp, "%d %d %f %f %f %f %f\n", id, j + 1,
                        dets[i].prob[j], xmin, ymin, xmax, ymax);
        }
    }
}
/*
 * Run the data-cfg "valid" image list through the network with
 * horizontal-flip test-time augmentation (batch of 2: original + mirrored
 * copy) and dump detections in the format selected by the "eval" key:
 * "coco" (one JSON file), "imagenet" (one text file), or VOC-style
 * per-class files by default.
 */
void validate_detector_flip(char *datacfg, char *cfgfile, char *weightfile,
        char *outfile) {
    int j;
    list *options = read_data_cfg(datacfg);
    char *valid_images = option_find_str(options, "valid", "data/train.list");
    char *name_list = option_find_str(options, "names", "data/names.list");
    char *prefix = option_find_str(options, "results", "results");
    char **names = get_labels(name_list);
    char *mapf = option_find_str(options, "map", 0);
    int *map = 0;
    if (mapf)
        map = read_map(mapf);
    network *net = load_network(cfgfile, weightfile, 0);
    /* Batch size 2: slot 0 holds the image, slot 1 its horizontal flip. */
    set_batch_network(net, 2);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n",
            net->learning_rate, net->momentum, net->decay);
    srand(time(0));
    list *plist = get_paths(valid_images);
    char **paths = (char **) list_to_array(plist);
    layer l = net->layers[net->n - 1];
    int classes = l.classes;
    char buff[1024];
    char *type = option_find_str(options, "eval", "voc");
    FILE *fp = 0;
    FILE **fps = 0;
    int coco = 0;
    int imagenet = 0;
    if (0 == strcmp(type, "coco")) {
        /* Single JSON array; entries are appended ",\n"-terminated and the
         * trailing comma is rewound before the closing bracket below. */
        if (!outfile)
            outfile = "coco_results";
        snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
        fp = fopen(buff, "w");
        fprintf(fp, "[\n");
        coco = 1;
    } else if (0 == strcmp(type, "imagenet")) {
        if (!outfile)
            outfile = "imagenet-detection";
        snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
        fp = fopen(buff, "w");
        imagenet = 1;
        classes = 200; /* ImageNet VID evaluation uses a fixed 200 classes */
    } else {
        /* VOC style: one result file per class name. */
        if (!outfile)
            outfile = "comp4_det_test_";
        fps = calloc(classes, sizeof(FILE *));
        for (j = 0; j < classes; ++j) {
            snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
            fps[j] = fopen(buff, "w");
        }
    }
    int m = plist->size;
    int i = 0;
    int t;
    real_t thresh = .005; /* very low: keep almost everything for PR curves */
    real_t nms = .45;
    int nthreads = 4;
    image *val = calloc(nthreads, sizeof(image));
    image *val_resized = calloc(nthreads, sizeof(image));
    image *buf = calloc(nthreads, sizeof(image));
    image *buf_resized = calloc(nthreads, sizeof(image));
    pthread_t *thr = calloc(nthreads, sizeof(pthread_t));
    /* One network input holding both the image and its flip (c * 2). */
    image input = make_image(net->w, net->h, net->c * 2);
    load_args args = { 0 };
    args.w = net->w;
    args.h = net->h;
    //args.type = IMAGE_DATA;
    args.type = LETTERBOX_DATA;
    /* Prime the loader pipeline with the first nthreads images. */
    for (t = 0; t < nthreads; ++t) {
        args.path = paths[i + t];
        args.im = &buf[t];
        args.resized = &buf_resized[t];
        thr[t] = load_data_in_thread(args);
    }
    double start = what_time_is_it_now();
    /* Pipelined loop: join the previous batch, start loading the next,
     * then run inference on the images just joined. */
    for (i = nthreads; i < m + nthreads; i += nthreads) {
        fprintf(stderr, "%d\n", i);
        for (t = 0; t < nthreads && i + t - nthreads < m; ++t) {
            pthread_join(thr[t], 0);
            val[t] = buf[t];
            val_resized[t] = buf_resized[t];
        }
        for (t = 0; t < nthreads && i + t < m; ++t) {
            args.path = paths[i + t];
            args.im = &buf[t];
            args.resized = &buf_resized[t];
            thr[t] = load_data_in_thread(args);
        }
        for (t = 0; t < nthreads && i + t - nthreads < m; ++t) {
            char *path = paths[i + t - nthreads];
            char *id = basecfg(path);
            /* Copy the image into the first half of the batched input,
             * flip it in place, copy the flipped pixels into the second. */
            copy_cpu(net->w * net->h * net->c, val_resized[t].data, 1,
                    input.data, 1);
            flip_image(val_resized[t]);
            copy_cpu(net->w * net->h * net->c, val_resized[t].data, 1,
                    input.data + net->w * net->h * net->c, 1);
            network_predict(net, input.data);
            int w = val[t].w;
            int h = val[t].h;
            int num = 0;
            detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0,
                    &num);
            if (nms)
                do_nms_sort(dets, num, classes, nms);
            if (coco) {
                print_cocos(fp, path, dets, num, classes, w, h);
            } else if (imagenet) {
                print_imagenet_detections(fp, i + t - nthreads + 1, dets, num,
                        classes, w, h);
            } else {
                print_detector_detections(fps, id, dets, num, classes, w, h);
            }
            free_detections(dets, num);
            free(id);
            free_image(val[t]);
            free_image(val_resized[t]);
        }
    }
    for (j = 0; j < classes; ++j) {
        if (fps)
            fclose(fps[j]);
    }
    if (coco) {
        /* Back up over the final ",\n" so the JSON array closes cleanly. */
        fseek(fp, -2, SEEK_CUR);
        fprintf(fp, "\n]\n");
        fclose(fp);
    }
    fprintf(stderr, "Total Detection Time: %f Seconds\n",
            what_time_is_it_now() - start);
}
/*
 * Run every image in the data-cfg "valid" list through the network (batch 1,
 * no augmentation) and dump raw detections in the format selected by the
 * "eval" key: "coco" (one JSON file), "imagenet" (one text file), or
 * VOC-style per-class files by default.
 */
void validate_detector(char *datacfg, char *cfgfile, char *weightfile,
        char *outfile) {
    int j;
    list *options = read_data_cfg(datacfg);
    char *valid_images = option_find_str(options, "valid", "data/train.list");
    char *name_list = option_find_str(options, "names", "data/names.list");
    char *prefix = option_find_str(options, "results", "results");
    char **names = get_labels(name_list);
    char *mapf = option_find_str(options, "map", 0);
    int *map = 0;
    if (mapf)
        map = read_map(mapf);
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n",
            net->learning_rate, net->momentum, net->decay);
    srand(time(0));
    list *plist = get_paths(valid_images);
    char **paths = (char **) list_to_array(plist);
    layer l = net->layers[net->n - 1];
    int classes = l.classes;
    char buff[1024];
    char *type = option_find_str(options, "eval", "voc");
    FILE *fp = 0;
    FILE **fps = 0;
    int coco = 0;
    int imagenet = 0;
    if (0 == strcmp(type, "coco")) {
        /* One JSON array; the trailing ",\n" is rewound before closing. */
        if (!outfile)
            outfile = "coco_results";
        snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
        fp = fopen(buff, "w");
        fprintf(fp, "[\n");
        coco = 1;
    } else if (0 == strcmp(type, "imagenet")) {
        if (!outfile)
            outfile = "imagenet-detection";
        snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
        fp = fopen(buff, "w");
        imagenet = 1;
        classes = 200; /* fixed class count for the ImageNet VID eval */
    } else {
        /* VOC style: one output file per class. */
        if (!outfile)
            outfile = "comp4_det_test_";
        fps = calloc(classes, sizeof(FILE *));
        for (j = 0; j < classes; ++j) {
            snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
            fps[j] = fopen(buff, "w");
        }
    }
    int m = plist->size;
    int i = 0;
    int t;
    real_t thresh = .005; /* keep low-confidence boxes for PR evaluation */
    real_t nms = .45;
    int nthreads = 4;
    image *val = calloc(nthreads, sizeof(image));
    image *val_resized = calloc(nthreads, sizeof(image));
    image *buf = calloc(nthreads, sizeof(image));
    image *buf_resized = calloc(nthreads, sizeof(image));
    pthread_t *thr = calloc(nthreads, sizeof(pthread_t));
    load_args args = { 0 };
    args.w = net->w;
    args.h = net->h;
    //args.type = IMAGE_DATA;
    args.type = LETTERBOX_DATA;
    /* Prime the loader pipeline with the first nthreads images. */
    for (t = 0; t < nthreads; ++t) {
        args.path = paths[i + t];
        args.im = &buf[t];
        args.resized = &buf_resized[t];
        thr[t] = load_data_in_thread(args);
    }
    double start = what_time_is_it_now();
    /* Pipelined loop: join the previous batch, start loading the next,
     * then run inference on the images just joined. */
    for (i = nthreads; i < m + nthreads; i += nthreads) {
        fprintf(stderr, "%d\n", i);
        for (t = 0; t < nthreads && i + t - nthreads < m; ++t) {
            pthread_join(thr[t], 0);
            val[t] = buf[t];
            val_resized[t] = buf_resized[t];
        }
        for (t = 0; t < nthreads && i + t < m; ++t) {
            args.path = paths[i + t];
            args.im = &buf[t];
            args.resized = &buf_resized[t];
            thr[t] = load_data_in_thread(args);
        }
        for (t = 0; t < nthreads && i + t - nthreads < m; ++t) {
            char *path = paths[i + t - nthreads];
            char *id = basecfg(path);
            real_t *X = val_resized[t].data;
            network_predict(net, X);
            int w = val[t].w;
            int h = val[t].h;
            int nboxes = 0;
            detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0,
                    &nboxes);
            if (nms)
                do_nms_sort(dets, nboxes, classes, nms);
            if (coco) {
                print_cocos(fp, path, dets, nboxes, classes, w, h);
            } else if (imagenet) {
                print_imagenet_detections(fp, i + t - nthreads + 1, dets,
                        nboxes, classes, w, h);
            } else {
                print_detector_detections(fps, id, dets, nboxes, classes, w, h);
            }
            free_detections(dets, nboxes);
            free(id);
            free_image(val[t]);
            free_image(val_resized[t]);
        }
    }
    for (j = 0; j < classes; ++j) {
        if (fps)
            fclose(fps[j]);
    }
    if (coco) {
        /* Rewind over the last ",\n" so the JSON array closes cleanly. */
        fseek(fp, -2, SEEK_CUR);
        fprintf(fp, "\n]\n");
        fclose(fp);
    }
    fprintf(stderr, "Total Detection Time: %f Seconds\n",
            what_time_is_it_now() - start);
}
/*
 * Quick recall/IoU sanity check on data/coco_val_5k.list: for every
 * ground-truth box, find the best-overlapping predicted box and print
 * running proposal count, average IoU and recall.
 */
void validate_detector_recall(char *cfgfile, char *weightfile) {
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n",
            net->learning_rate, net->momentum, net->decay);
    srand(time(0));
    list *plist = get_paths("data/coco_val_5k.list");
    char **paths = (char **) list_to_array(plist);
    int j, k;
    int m = plist->size;
    int i = 0;
    real_t thresh = .001;
    real_t iou_thresh = .5;
    real_t nms = .4;
    int total = 0;     /* ground-truth boxes seen so far */
    int correct = 0;   /* ground-truth boxes matched with IoU > iou_thresh */
    int proposals = 0; /* predicted boxes above thresh */
    real_t avg_iou = 0;
    for (i = 0; i < m; ++i) {
        char *path = paths[i];
        image orig = load_image_color(path, 0, 0);
        image sized = resize_image(orig, net->w, net->h);
        char *id = basecfg(path);
        network_predict(net, sized.data);
        int nboxes = 0;
        detection *dets = get_network_boxes(net, sized.w, sized.h, thresh, .5,
                0, 1, &nboxes);
        if (nms)
            do_nms_obj(dets, nboxes, 1, nms);
        /* Derive the label file path from the image path. */
        char labelpath[4096];
        find_replace(path, "images", "labels", labelpath);
        find_replace(labelpath, "JPEGImages", "labels", labelpath);
        find_replace(labelpath, ".jpg", ".txt", labelpath);
        find_replace(labelpath, ".JPEG", ".txt", labelpath);
        int num_labels = 0;
        box_label *truth = read_boxes(labelpath, &num_labels);
        for (k = 0; k < nboxes; ++k) {
            if (dets[k].objectness > thresh) {
                ++proposals;
            }
        }
        for (j = 0; j < num_labels; ++j) {
            ++total;
            box t = { truth[j].x, truth[j].y, truth[j].w, truth[j].h };
            real_t best_iou = 0;
            /* BUG FIX: the original iterated k < l.w*l.h*l.n, reading past
             * the end of dets[] whenever get_network_boxes returned fewer
             * detections than one per anchor cell; iterate over the nboxes
             * detections actually returned instead. */
            for (k = 0; k < nboxes; ++k) {
                real_t iou = box_iou(dets[k].bbox, t);
                if (dets[k].objectness > thresh && iou > best_iou) {
                    best_iou = iou;
                }
            }
            avg_iou += best_iou;
            if (best_iou > iou_thresh) {
                ++correct;
            }
        }
        /* Guard the two divisions by total against images with no labels
         * yet (the original printed NaN/inf until the first label). */
        if (total) {
            fprintf(stderr,
                    "%5d %5d %5d\tRPs/Img: %.2f\tIOU: %.2f%%\tRecall:%.2f%%\n",
                    i, correct, total, (real_t) proposals / (i + 1),
                    avg_iou * 100 / total, 100. * correct / total);
        }
        /* BUG FIX: the original leaked dets and truth on every image. */
        free_detections(dets, nboxes);
        free(truth);
        free(id);
        free_image(orig);
        free_image(sized);
    }
}
/*
 * Single-image detection driver: load an image (from `filename`, or prompt
 * on stdin in a loop when filename is NULL), run the network, draw boxes
 * above `thresh`, and save (and optionally display) the annotated image.
 *
 * datacfg      data cfg providing the class-name list
 * cfgfile      network description file
 * weightfile   trained weights (may be NULL)
 * filename     image path; NULL means interactive stdin loop until EOF
 * thresh       detection confidence threshold
 * hier_thresh  hierarchical (YOLO9000-style) threshold
 * outfile      output image name; NULL means "predictions"
 * fullscreen   accepted for interface compatibility; not used here
 */
void test_detector(char *datacfg, char *cfgfile, char *weightfile,
        char *filename, real_t thresh, real_t hier_thresh, char *outfile,
        int fullscreen) {
    list *options = read_data_cfg(datacfg);
    char *name_list = option_find_str(options, "names", "data/names.list");
    char **names = get_labels(name_list);
    image **alphabet = load_alphabet();
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    srand(2222222);
    double time;
    char buff[256];
    char *input = buff;
    real_t nms = .45;
    while (1) {
        if (filename) {
            /* BUG FIX: strncpy(dst, src, 256) leaves dst unterminated when
             * strlen(src) >= 256; copy at most 255 bytes and terminate. */
            strncpy(input, filename, 255);
            input[255] = '\0';
        } else {
            printf("Enter Image Path: ");
            fflush(stdout);
            input = fgets(input, 256, stdin);
            if (!input)
                return; /* EOF on stdin ends the interactive loop */
            strtok(input, "\n"); /* strip the trailing newline */
        }
        image im = load_image_color(input, 0, 0);
        /* Letterboxing keeps the aspect ratio, padding to net->w x net->h. */
        image sized = letterbox_image(im, net->w, net->h);
        //image sized = resize_image(im, net->w, net->h);
        //image sized2 = resize_max(im, net->w);
        //image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h);
        //resize_network(net, sized.w, sized.h);
        layer l = net->layers[net->n - 1];
        real_t *X = sized.data;
        time = what_time_is_it_now();
        network_predict(net, X);
        printf("%s: Predicted in %f seconds.\n", input,
                what_time_is_it_now() - time);
        int nboxes = 0;
        detection *dets = get_network_boxes(net, im.w, im.h, thresh,
                hier_thresh, 0, 1, &nboxes);
        //printf("%d\n", nboxes);
        //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
        if (nms)
            do_nms_sort(dets, nboxes, l.classes, nms);
        draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes);
        free_detections(dets, nboxes);
        if (outfile) {
            save_image(im, outfile);
        } else {
            save_image(im, "predictions");
#ifdef OPENCV
            make_window("predictions", 512, 512, 0);
            show_image(im, "predictions", 0);
#endif
        }
        free_image(im);
        free_image(sized);
        if (filename)
            break; /* single-shot mode: one image, then done */
    }
}
/*
 * Load every image listed in img_names and build a letterboxed copy resized
 * to the network input (net_w x net_h).  Both output arrays must have room
 * for plist_size entries.
 */
void load_all_images(image* imgs, image* sized_images, char** img_names,
        int plist_size, int net_w, int net_h) {
    int idx = 0;
    while (idx < plist_size) {
        imgs[idx] = load_image_color(img_names[idx], 0, 0);
        sized_images[idx] = letterbox_image(imgs[idx], net_w, net_h);
        ++idx;
    }
}
/*
 * Release every image loaded for the smx_red redundant copies: each copy
 * owns list_size original and list_size letterboxed images, plus the two
 * per-copy arrays and the two outer arrays themselves.
 */
void free_all_images(image **imgs, image** sized_images, int list_size,
        int smx_red) {
    int copy, idx;
    for (copy = 0; copy < smx_red; ++copy) {
        for (idx = 0; idx < list_size; ++idx) {
            free_image(imgs[copy][idx]);
            free_image(sized_images[copy][idx]);
        }
        free(imgs[copy]);
        free(sized_images[copy]);
    }
    free(imgs);
    free(sized_images);
}
#ifdef GPU
/*
 * Allocate one CUDA stream slot per redundant execution unit.  Stream
 * creation itself is currently disabled, so every slot is the NULL
 * (default) stream; the array still gives each replica its own slot.
 */
cudaStream_t* init_multi_streams(int smx_size) {
    cudaStream_t* stream_array = malloc(sizeof(cudaStream_t) * smx_size);
    int idx;
    for (idx = 0; idx < smx_size; ++idx) {
        /* cudaStreamCreate is intentionally not called (see original
         * commented-out code); replicas share the default stream. */
        stream_array[idx] = NULL;
    }
    return stream_array;
}
/*
 * Counterpart of init_multi_streams.  Since no streams were actually
 * created, the matching cudaStreamDestroy calls stay disabled and only the
 * slot array itself is released.
 */
void del_multi_streams(cudaStream_t* stream_array, int smx_size) {
    (void) smx_size; /* kept in the signature for symmetry with init */
    free(stream_array);
}
#endif
/*
 * Radiation-test harness: run the same detection workload on
 * `smx_redundancy` replicated networks (each replica has its own network
 * instance and its own copy of every input image) and route the outputs
 * through the detection_gold machinery, which either records golden results
 * or compares against them to count silent-data-corruption errors.
 *
 * `filename` here is a text file listing image paths (read via get_labels),
 * not a single image.  argc/argv are forwarded to the gold library so it
 * can parse its own flags.
 */
void test_detector_radiation(char *datacfg, char *cfgfile, char *weightfile,
        char *filename, real_t thresh, real_t hier_thresh, char *outfile,
        int fullscreen, int argc, char** argv) {
    /**
     * DetectionGold declaration
     */
    detection_gold_t *gold = create_detection_gold(argc, argv, thresh,
            hier_thresh, filename, cfgfile, datacfg, "detector", weightfile);
    int smx_redundancy = get_smx_redundancy(gold);
#ifdef GPU
    cudaStream_t *stream_array = init_multi_streams(smx_redundancy);
    //--------------------------
#endif
    network** net_array = malloc(sizeof(network*) * smx_redundancy);
    printf(
            "CFG FILE: %s\nDATA CFG: %s\nWeightfile: %s\nImage data path file: %s\nThresh: %f\n",
            cfgfile, datacfg, weightfile, filename, thresh);
    //load images
    image** image_array = malloc(sizeof(image*) * smx_redundancy);
    image** sized_array = malloc(sizeof(image*) * smx_redundancy);
    char **img_names = get_labels(filename);
    int max_it = get_iterations(gold);
    int plist_size = get_img_num(gold);
    int inet;
    /* Build one network instance and one private image set per replica. */
    for (inet = 0; inet < smx_redundancy; inet++) {
        network *net = load_network(cfgfile, weightfile, 0);
        set_batch_network(net, 1);
        //Set tensor cores on the net
        net->smx_redundancy = smx_redundancy;
#ifdef GPU
        net->use_tensor_cores = get_use_tensor_cores(gold);
        net->st = stream_array[inet];
#endif
        net_array[inet] = net;
        //load images
        printf("Loading images for network %d\n", inet);
        image_array[inet] = (image*) malloc(sizeof(image) * plist_size);
        sized_array[inet] = (image*) malloc(sizeof(image) * plist_size);
        load_all_images(image_array[inet], sized_array[inet], img_names,
                plist_size, net_array[inet]->w, net_array[inet]->h);
    }
    srand(2222222);
    double time;
    real_t nms = .45;
    int iteration, img;
    //
    //	image* images = (image*) malloc(sizeof(image) * plist_size);
    //	image* sized_images = (image*) malloc(sizeof(image) * plist_size);
    //	load_all_images(images, sized_images, img_names, plist_size,
    //			net_array[0]->w, net_array[0]->h);
    /* Per-replica scratch: input pointers, detections and box counts. */
    real_t** X_arr = malloc(sizeof(real_t*) * smx_redundancy);
    detection** dets_array = malloc(sizeof(detection*) * smx_redundancy);
    int* nboxes_array = malloc(sizeof(int) * smx_redundancy);
    //start the process
    for (iteration = 0; iteration < max_it; iteration++) {
        //		int last_errors = 0;
        for (img = 0; img < plist_size; img++) {
            layer l = net_array[0]->layers[net_array[0]->n - 1];
            //			real_t *X = sized.data;
            /* Replica 0's original image supplies w/h for box rescaling. */
            image im = image_array[0][img];
            /* Gather each replica's letterboxed input for this image. */
            for (inet = 0; inet < smx_redundancy; inet++) {
                //				image sized = sized_array[inet][img];
                X_arr[inet] = sized_array[inet][img].data;
            }
            time = what_time_is_it_now();
            //Run one iteration
            start_iteration_wrapper(gold);
            //			network_predict(net, X);
            /* Run all replicas on their copies of the same input. */
            network_predict_smx_red(net_array, X_arr);
            end_iteration_wrapper(gold);
            //			int nboxes = 0;
            //			printf("aui antes do dets\n");
            for (inet = 0; inet < smx_redundancy; inet++) {
                dets_array[inet] = get_network_boxes(net_array[inet], im.w,
                        im.h, thresh, hier_thresh, 0, 1, &nboxes_array[inet]);
                if (nms)
                    do_nms_sort(dets_array[inet], nboxes_array[inet], l.classes,
                            nms);
            }
            //			printf("aui antes do run\n");
            //Save or compare
            /* run() either stores golden output or diffs replicas against
             * it, returning the number of mismatches found. */
            double start = what_time_is_it_now();
            int curr_err = run(gold, dets_array, nboxes_array, img, l.classes,
                    im.w, im.h);
            double end = what_time_is_it_now();
            /*
             if (last_errors && curr_err) {
             printf(
             "IT IS LESS PROBLABLE THAT DARKNET GIVE US TWO ERRORS SEQUENTIALY, ABORTING\n");
             exit(-1);
             }*/
            for (inet = 0; inet < smx_redundancy; inet++) {
                //				if ((iteration * img) % PRINT_INTERVAL == 0) {
                printf(
                        "Iteration %d img %d, %d objects predicted in %f seconds. %d errors, coparisson took %lfs\n",
                        iteration, img, nboxes_array[inet],
                        what_time_is_it_now() - time, curr_err, end - start);
                //				}
                free_detections(dets_array[inet], nboxes_array[inet]);
            }
            //			last_errors = curr_err;
        }
    }
    /* Teardown: scratch arrays, networks, streams, gold state, images. */
    free(dets_array);
    free(nboxes_array);
    for (inet = 0; inet < smx_redundancy; inet++) {
        free_network(net_array[inet]);
    }
    free(net_array);
#ifdef GPU
    del_multi_streams(stream_array, smx_redundancy);
#endif
    destroy_detection_gold(gold);
    free_all_images(image_array, sized_array, plist_size, smx_redundancy);
    free(X_arr);
}
/*
 * Dispatch the "detector" sub-commands (test / test_radiation / train /
 * valid / valid2 / recall / demo) after parsing the shared command-line
 * flags.
 *
 * argv layout: argv[2] = mode, argv[3] = data cfg, argv[4] = network cfg,
 * argv[5] = weights (optional), argv[6] = input file (optional).
 */
void run_detector(int argc, char **argv) {
    char *prefix = find_char_arg(argc, argv, "-prefix", 0);
    real_t thresh = find_real_t_arg(argc, argv, "-thresh", .5);
    real_t hier_thresh = find_real_t_arg(argc, argv, "-hier", .5);
    int cam_index = find_int_arg(argc, argv, "-c", 0);
    int frame_skip = find_int_arg(argc, argv, "-s", 0);
    int avg = find_int_arg(argc, argv, "-avg", 3);
    /* BUG FIX: mode, data cfg AND network cfg (argv[2..4]) are all read
     * unconditionally below, so fewer than 5 arguments used to pass a NULL
     * cfg path into load_network().  The usage text now also lists [data]. */
    if (argc < 5) {
        fprintf(stderr,
                "usage: %s %s [train/test/valid] [data] [cfg] [weights (optional)]\n",
                argv[0], argv[1]);
        return;
    }
    char *gpu_list = find_char_arg(argc, argv, "-gpus", 0);
    char *outfile = find_char_arg(argc, argv, "-out", 0);
    int *gpus = 0;
    int gpu = 0;
    int ngpus = 0;
    if (gpu_list) {
        /* "-gpus 0,1,2" -> gpus = {0,1,2}, ngpus = comma count + 1 */
        printf("%s\n", gpu_list);
        int len = strlen(gpu_list);
        ngpus = 1;
        int i;
        for (i = 0; i < len; ++i) {
            if (gpu_list[i] == ',')
                ++ngpus;
        }
        gpus = calloc(ngpus, sizeof(int));
        for (i = 0; i < ngpus; ++i) {
            gpus[i] = atoi(gpu_list);
            /* BUG FIX: after the last id strchr returns NULL and the
             * original computed NULL + 1 (undefined behavior). */
            char *comma = strchr(gpu_list, ',');
            if (!comma)
                break;
            gpu_list = comma + 1;
        }
    } else {
        gpu = gpu_index;
        gpus = &gpu;
        ngpus = 1;
    }
    int clear = find_arg(argc, argv, "-clear");
    int fullscreen = find_arg(argc, argv, "-fullscreen");
    int width = find_int_arg(argc, argv, "-w", 0);
    int height = find_int_arg(argc, argv, "-h", 0);
    int fps = find_int_arg(argc, argv, "-fps", 0);
    //int class = find_int_arg(argc, argv, "-class", 0);
    char *datacfg = argv[3];
    char *cfg = argv[4];
    char *weights = (argc > 5) ? argv[5] : 0;
    char *filename = (argc > 6) ? argv[6] : 0;
    if (0 == strcmp(argv[2], "test"))
        test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh,
                outfile, fullscreen);
    else if (0 == strcmp(argv[2], "test_radiation"))
        test_detector_radiation(datacfg, cfg, weights, filename, thresh,
                hier_thresh, outfile, fullscreen, argc, argv);
    else if (0 == strcmp(argv[2], "train"))
        train_detector(datacfg, cfg, weights, gpus, ngpus, clear);
    else if (0 == strcmp(argv[2], "valid"))
        validate_detector(datacfg, cfg, weights, outfile);
    else if (0 == strcmp(argv[2], "valid2"))
        validate_detector_flip(datacfg, cfg, weights, outfile);
    else if (0 == strcmp(argv[2], "recall"))
        validate_detector_recall(cfg, weights);
    else if (0 == strcmp(argv[2], "demo")) {
        list *options = read_data_cfg(datacfg);
        int classes = option_find_int(options, "classes", 20);
        char *name_list = option_find_str(options, "names", "data/names.list");
        char **names = get_labels(name_list);
        demo(cfg, weights, thresh, cam_index, filename, names, classes,
                frame_skip, prefix, avg, hier_thresh, width, height, fps,
                fullscreen);
    }
    //else if(0==strcmp(argv[2], "extract")) extract_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
    //else if(0==strcmp(argv[2], "censor")) censor_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
}
|
main.c | //===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// DEFINE / INCLUDE
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include "AVI/avilib.h"
#include "AVI/avimod.h"
#include <omp.h>
//#include "define.c"
#include "kernel.c"
//===============================================================================================================================================================================================================200
// WRITE DATA FUNCTION
//===============================================================================================================================================================================================================200
#include <omp.h>
/*
 * Dump the heart-wall tracking results to a text file.
 *
 * filename          output file (created/truncated)
 * frameNo           total frames in the AVI; also the row stride of the
 *                   result arrays — element (point i, frame j) lives at
 *                   index [j + i * frameNo]
 * frames_processed  number of frames actually tracked
 * endoPoints        number of endocardium tracking points
 * input_a/input_b   endo row/col locations (frameNo x endoPoints)
 * epiPoints         number of epicardium tracking points
 * input_2a/input_2b epi row/col locations (frameNo x epiPoints)
 *
 * Fixes vs. original: fopen mode "w" instead of "w+" (file is only
 * written), excess arguments dropped from format strings that have no
 * conversion for them, unused local removed, loops in idiomatic i < n form.
 */
void write_data(char *filename,int frameNo,int frames_processed,int endoPoints,int *input_a,int *input_b,int epiPoints,int *input_2a,int *input_2b)
{
    FILE *fid;
    int i;
    int j;
    fid = fopen(filename, "w");
    if (fid == NULL) {
        printf("The file was not opened for writing\n");
        return;
    }
    /* Header block. */
    fprintf(fid, "Total AVI Frames: %d\n", frameNo);
    fprintf(fid, "Frames Processed: %d\n", frames_processed);
    fprintf(fid, "endoPoints: %d\n", endoPoints);
    fprintf(fid, "epiPoints: %d", epiPoints);
    /* One section per processed frame: endo rows, endo cols, epi rows,
     * epi cols, each as a tab-separated line. */
    for (j = 0; j < frames_processed; j++) {
        fprintf(fid, "\n---Frame %d---", j);
        fprintf(fid, "\n--endo--\n");
        for (i = 0; i < endoPoints; i++) {
            fprintf(fid, "%d\t", input_a[j + i * frameNo]);
        }
        fprintf(fid, "\n");
        for (i = 0; i < endoPoints; i++) {
            fprintf(fid, "%d\t", input_b[j + i * frameNo]);
        }
        fprintf(fid, "\n--epi--\n");
        for (i = 0; i < epiPoints; i++) {
            fprintf(fid, "%d\t", input_2a[j + i * frameNo]);
        }
        fprintf(fid, "\n");
        for (i = 0; i < epiPoints; i++) {
            fprintf(fid, "%d\t", input_2b[j + i * frameNo]);
        }
    }
    fclose(fid);
}
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// MAIN FUNCTION
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
int main(int argc,char *argv[])
{
//======================================================================================================================================================
// VARIABLES
//======================================================================================================================================================
// counters
int i;
int frames_processed;
// parameters
public_struct public;
private_struct private[51];
//======================================================================================================================================================
// FRAMES
//======================================================================================================================================================
if (argc != 4) {
printf("ERROR: usage: heartwall <inputfile> <num of frames> <num of threads>\n");
exit(1);
}
char *video_file_name;
video_file_name = argv[1];
// added casting
avi_t *d_frames = (avi_t *)(AVI_open_input_file(video_file_name,1));
if (d_frames == ((void *)0)) {
AVI_print_error((char *)"Error with AVI_open_input_file");
return - 1;
}
public . d_frames = d_frames;
public . frames = (AVI_video_frames(public . d_frames));
public . frame_rows = AVI_video_height(public . d_frames);
public . frame_cols = AVI_video_width(public . d_frames);
public . frame_elem = public . frame_rows * public . frame_cols;
public . frame_mem = (sizeof(float ) * public . frame_elem);
//======================================================================================================================================================
// CHECK INPUT ARGUMENTS
//======================================================================================================================================================
frames_processed = atoi(argv[2]);
if (frames_processed < 0 || frames_processed > public . frames) {
printf("ERROR: %d is an incorrect number of frames specified, select in the range of 0-%d\n",frames_processed,public . frames);
return 0;
}
int omp_num_threads;
omp_num_threads = atoi(argv[3]);
if (omp_num_threads <= 0) {
printf("num of threads must be a positive integer");
return 0;
}
printf("num of threads: %d\n",omp_num_threads);
//======================================================================================================================================================
// INPUTS
//======================================================================================================================================================
//====================================================================================================
// ENDO POINTS
//====================================================================================================
public . endoPoints = 20;
public . d_endo_mem = (sizeof(int ) * public . endoPoints);
public . d_endoRow = ((int *)(malloc(public . d_endo_mem)));
public . d_endoRow[0] = 369;
public . d_endoRow[1] = 400;
public . d_endoRow[2] = 429;
public . d_endoRow[3] = 452;
public . d_endoRow[4] = 476;
public . d_endoRow[5] = 486;
public . d_endoRow[6] = 479;
public . d_endoRow[7] = 458;
public . d_endoRow[8] = 433;
public . d_endoRow[9] = 404;
public . d_endoRow[10] = 374;
public . d_endoRow[11] = 346;
public . d_endoRow[12] = 318;
public . d_endoRow[13] = 294;
public . d_endoRow[14] = 277;
public . d_endoRow[15] = 269;
public . d_endoRow[16] = 275;
public . d_endoRow[17] = 287;
public . d_endoRow[18] = 311;
public . d_endoRow[19] = 339;
public . d_endoCol = ((int *)(malloc(public . d_endo_mem)));
public . d_endoCol[0] = 408;
public . d_endoCol[1] = 406;
public . d_endoCol[2] = 397;
public . d_endoCol[3] = 383;
public . d_endoCol[4] = 354;
public . d_endoCol[5] = 322;
public . d_endoCol[6] = 294;
public . d_endoCol[7] = 270;
public . d_endoCol[8] = 250;
public . d_endoCol[9] = 237;
public . d_endoCol[10] = 235;
public . d_endoCol[11] = 241;
public . d_endoCol[12] = 254;
public . d_endoCol[13] = 273;
public . d_endoCol[14] = 300;
public . d_endoCol[15] = 328;
public . d_endoCol[16] = 356;
public . d_endoCol[17] = 383;
public . d_endoCol[18] = 401;
public . d_endoCol[19] = 411;
public . d_tEndoRowLoc = ((int *)(malloc((public . d_endo_mem * public . frames))));
public . d_tEndoColLoc = ((int *)(malloc((public . d_endo_mem * public . frames))));
//====================================================================================================
// EPI POINTS
//====================================================================================================
public . epiPoints = 31;
public . d_epi_mem = (sizeof(int ) * public . epiPoints);
public . d_epiRow = ((int *)(malloc(public . d_epi_mem)));
public . d_epiRow[0] = 390;
public . d_epiRow[1] = 419;
public . d_epiRow[2] = 448;
public . d_epiRow[3] = 474;
public . d_epiRow[4] = 501;
public . d_epiRow[5] = 519;
public . d_epiRow[6] = 535;
public . d_epiRow[7] = 542;
public . d_epiRow[8] = 543;
public . d_epiRow[9] = 538;
public . d_epiRow[10] = 528;
public . d_epiRow[11] = 511;
public . d_epiRow[12] = 491;
public . d_epiRow[13] = 466;
public . d_epiRow[14] = 438;
public . d_epiRow[15] = 406;
public . d_epiRow[16] = 376;
public . d_epiRow[17] = 347;
public . d_epiRow[18] = 318;
public . d_epiRow[19] = 291;
public . d_epiRow[20] = 275;
public . d_epiRow[21] = 259;
public . d_epiRow[22] = 256;
public . d_epiRow[23] = 252;
public . d_epiRow[24] = 252;
public . d_epiRow[25] = 257;
public . d_epiRow[26] = 266;
public . d_epiRow[27] = 283;
public . d_epiRow[28] = 305;
public . d_epiRow[29] = 331;
public . d_epiRow[30] = 360;
public . d_epiCol = ((int *)(malloc(public . d_epi_mem)));
public . d_epiCol[0] = 457;
public . d_epiCol[1] = 454;
public . d_epiCol[2] = 446;
public . d_epiCol[3] = 431;
public . d_epiCol[4] = 411;
public . d_epiCol[5] = 388;
public . d_epiCol[6] = 361;
public . d_epiCol[7] = 331;
public . d_epiCol[8] = 301;
public . d_epiCol[9] = 273;
public . d_epiCol[10] = 243;
public . d_epiCol[11] = 218;
public . d_epiCol[12] = 196;
public . d_epiCol[13] = 178;
public . d_epiCol[14] = 166;
public . d_epiCol[15] = 157;
public . d_epiCol[16] = 155;
public . d_epiCol[17] = 165;
public . d_epiCol[18] = 177;
public . d_epiCol[19] = 197;
public . d_epiCol[20] = 218;
public . d_epiCol[21] = 248;
public . d_epiCol[22] = 276;
public . d_epiCol[23] = 304;
public . d_epiCol[24] = 333;
public . d_epiCol[25] = 361;
public . d_epiCol[26] = 391;
public . d_epiCol[27] = 415;
public . d_epiCol[28] = 434;
public . d_epiCol[29] = 448;
public . d_epiCol[30] = 455;
public . d_tEpiRowLoc = ((int *)(malloc((public . d_epi_mem * public . frames))));
public . d_tEpiColLoc = ((int *)(malloc((public . d_epi_mem * public . frames))));
//====================================================================================================
// ALL POINTS
//====================================================================================================
public . allPoints = 51;
//======================================================================================================================================================
// CONSTANTS
//======================================================================================================================================================
public . tSize = 25;
public . sSize = 40;
public . maxMove = 10;
public . alpha = 0.87;
//======================================================================================================================================================
// SUMS
//======================================================================================================================================================
for (i = 0; i <= public . allPoints - 1; i += 1) {
private[i] . in_partial_sum = ((float *)(malloc(sizeof(float ) * 2 * public . tSize + 1)));
private[i] . in_sqr_partial_sum = ((float *)(malloc(sizeof(float ) * 2 * public . tSize + 1)));
private[i] . par_max_val = ((float *)(malloc(sizeof(float ) * (2 * public . tSize + 2 * public . sSize + 1))));
private[i] . par_max_coo = ((int *)(malloc(sizeof(int ) * (2 * public . tSize + 2 * public . sSize + 1))));
}
//======================================================================================================================================================
// INPUT 2 (SAMPLE AROUND POINT)
//======================================================================================================================================================
public . in2_rows = 2 * public . sSize + 1;
public . in2_cols = 2 * public . sSize + 1;
public . in2_elem = public . in2_rows * public . in2_cols;
public . in2_mem = (sizeof(float ) * public . in2_elem);
for (i = 0; i <= public . allPoints - 1; i += 1) {
private[i] . d_in2 = ((float *)(malloc(public . in2_mem)));
private[i] . d_in2_sqr = ((float *)(malloc(public . in2_mem)));
}
//======================================================================================================================================================
// INPUT (POINT TEMPLATE)
//======================================================================================================================================================
public . in_mod_rows = public . tSize + 1 + public . tSize;
public . in_mod_cols = public . in_mod_rows;
public . in_mod_elem = public . in_mod_rows * public . in_mod_cols;
public . in_mod_mem = (sizeof(float ) * public . in_mod_elem);
for (i = 0; i <= public . allPoints - 1; i += 1) {
private[i] . d_in_mod = ((float *)(malloc(public . in_mod_mem)));
private[i] . d_in_sqr = ((float *)(malloc(public . in_mod_mem)));
}
//======================================================================================================================================================
// ARRAY OF TEMPLATES FOR ALL POINTS
//======================================================================================================================================================
public . d_endoT = ((float *)(malloc((public . in_mod_mem * public . endoPoints))));
public . d_epiT = ((float *)(malloc((public . in_mod_mem * public . epiPoints))));
//======================================================================================================================================================
// SETUP private POINTERS TO ROWS, COLS AND TEMPLATE
//======================================================================================================================================================
#pragma omp parallel for private (i)
for (i = 0; i <= public . endoPoints - 1; i += 1) {
private[i] . point_no = i;
private[i] . in_pointer = private[i] . point_no * public . in_mod_elem;
// original row coordinates
private[i] . d_Row = public . d_endoRow;
// original col coordinates
private[i] . d_Col = public . d_endoCol;
// updated row coordinates
private[i] . d_tRowLoc = public . d_tEndoRowLoc;
// updated row coordinates
private[i] . d_tColLoc = public . d_tEndoColLoc;
// templates
private[i] . d_T = public . d_endoT;
}
#pragma omp parallel for private (i)
for (i = public . endoPoints; i <= public . allPoints - 1; i += 1) {
private[i] . point_no = i - public . endoPoints;
private[i] . in_pointer = private[i] . point_no * public . in_mod_elem;
private[i] . d_Row = public . d_epiRow;
private[i] . d_Col = public . d_epiCol;
private[i] . d_tRowLoc = public . d_tEpiRowLoc;
private[i] . d_tColLoc = public . d_tEpiColLoc;
private[i] . d_T = public . d_epiT;
}
//======================================================================================================================================================
// CONVOLUTION
//======================================================================================================================================================
public . ioffset = 0;
public . joffset = 0;
// number of rows in I
public . conv_rows = public . in_mod_rows + public . in2_rows - 1;
// number of columns in I
public . conv_cols = public . in_mod_cols + public . in2_cols - 1;
// number of elements
public . conv_elem = public . conv_rows * public . conv_cols;
public . conv_mem = (sizeof(float ) * public . conv_elem);
for (i = 0; i <= public . allPoints - 1; i += 1) {
private[i] . d_conv = ((float *)(malloc(public . conv_mem)));
}
//======================================================================================================================================================
// CUMULATIVE SUM
//======================================================================================================================================================
//====================================================================================================
// PAD ARRAY
//====================================================================================================
//====================================================================================================
// VERTICAL CUMULATIVE SUM
//====================================================================================================
public . in2_pad_add_rows = public . in_mod_rows;
public . in2_pad_add_cols = public . in_mod_cols;
public . in2_pad_rows = public . in2_rows + 2 * public . in2_pad_add_rows;
public . in2_pad_cols = public . in2_cols + 2 * public . in2_pad_add_cols;
public . in2_pad_elem = public . in2_pad_rows * public . in2_pad_cols;
public . in2_pad_mem = (sizeof(float ) * public . in2_pad_elem);
for (i = 0; i <= public . allPoints - 1; i += 1) {
private[i] . d_in2_pad = ((float *)(malloc(public . in2_pad_mem)));
}
//====================================================================================================
// SELECTION, SELECTION 2, SUBTRACTION
//====================================================================================================
//====================================================================================================
// HORIZONTAL CUMULATIVE SUM
//====================================================================================================
// (1 to n+1)
public . in2_pad_cumv_sel_rowlow = 1 + public . in_mod_rows;
public . in2_pad_cumv_sel_rowhig = public . in2_pad_rows - 1;
public . in2_pad_cumv_sel_collow = 1;
public . in2_pad_cumv_sel_colhig = public . in2_pad_cols;
public . in2_pad_cumv_sel2_rowlow = 1;
public . in2_pad_cumv_sel2_rowhig = public . in2_pad_rows - public . in_mod_rows - 1;
public . in2_pad_cumv_sel2_collow = 1;
public . in2_pad_cumv_sel2_colhig = public . in2_pad_cols;
public . in2_sub_rows = public . in2_pad_cumv_sel_rowhig - public . in2_pad_cumv_sel_rowlow + 1;
public . in2_sub_cols = public . in2_pad_cumv_sel_colhig - public . in2_pad_cumv_sel_collow + 1;
public . in2_sub_elem = public . in2_sub_rows * public . in2_sub_cols;
public . in2_sub_mem = (sizeof(float ) * public . in2_sub_elem);
for (i = 0; i <= public . allPoints - 1; i += 1) {
private[i] . d_in2_sub = ((float *)(malloc(public . in2_sub_mem)));
}
//====================================================================================================
// SELECTION, SELECTION 2, SUBTRACTION, SQUARE, NUMERATOR
//====================================================================================================
public . in2_sub_cumh_sel_rowlow = 1;
public . in2_sub_cumh_sel_rowhig = public . in2_sub_rows;
public . in2_sub_cumh_sel_collow = 1 + public . in_mod_cols;
public . in2_sub_cumh_sel_colhig = public . in2_sub_cols - 1;
public . in2_sub_cumh_sel2_rowlow = 1;
public . in2_sub_cumh_sel2_rowhig = public . in2_sub_rows;
public . in2_sub_cumh_sel2_collow = 1;
public . in2_sub_cumh_sel2_colhig = public . in2_sub_cols - public . in_mod_cols - 1;
public . in2_sub2_sqr_rows = public . in2_sub_cumh_sel_rowhig - public . in2_sub_cumh_sel_rowlow + 1;
public . in2_sub2_sqr_cols = public . in2_sub_cumh_sel_colhig - public . in2_sub_cumh_sel_collow + 1;
public . in2_sub2_sqr_elem = public . in2_sub2_sqr_rows * public . in2_sub2_sqr_cols;
public . in2_sub2_sqr_mem = (sizeof(float ) * public . in2_sub2_sqr_elem);
for (i = 0; i <= public . allPoints - 1; i += 1) {
private[i] . d_in2_sub2_sqr = ((float *)(malloc(public . in2_sub2_sqr_mem)));
}
//======================================================================================================================================================
// CUMULATIVE SUM 2
//======================================================================================================================================================
//====================================================================================================
// PAD ARRAY
//====================================================================================================
//====================================================================================================
// VERTICAL CUMULATIVE SUM
//====================================================================================================
//====================================================================================================
// SELECTION, SELECTION 2, SUBTRACTION
//====================================================================================================
//====================================================================================================
// HORIZONTAL CUMULATIVE SUM
//====================================================================================================
//====================================================================================================
// SELECTION, SELECTION 2, SUBTRACTION, DIFFERENTIAL LOCAL SUM, DENOMINATOR A, DENOMINATOR, CORRELATION
//====================================================================================================
//======================================================================================================================================================
// TEMPLATE MASK CREATE
//======================================================================================================================================================
public . tMask_rows = public . in_mod_rows + (public . sSize + 1 + public . sSize) - 1;
public . tMask_cols = public . tMask_rows;
public . tMask_elem = public . tMask_rows * public . tMask_cols;
public . tMask_mem = (sizeof(float ) * public . tMask_elem);
for (i = 0; i <= public . allPoints - 1; i += 1) {
private[i] . d_tMask = ((float *)(malloc(public . tMask_mem)));
}
//======================================================================================================================================================
// POINT MASK INITIALIZE
//======================================================================================================================================================
public . mask_rows = public . maxMove;
public . mask_cols = public . mask_rows;
public . mask_elem = public . mask_rows * public . mask_cols;
public . mask_mem = (sizeof(float ) * public . mask_elem);
//======================================================================================================================================================
// MASK CONVOLUTION
//======================================================================================================================================================
// number of rows in I
public . mask_conv_rows = public . tMask_rows;
// number of columns in I
public . mask_conv_cols = public . tMask_cols;
// number of elements
public . mask_conv_elem = public . mask_conv_rows * public . mask_conv_cols;
public . mask_conv_mem = (sizeof(float ) * public . mask_conv_elem);
public . mask_conv_ioffset = (public . mask_rows - 1) / 2;
if (((public . mask_rows - 1) % 2) > 0.5) {
public . mask_conv_ioffset = public . mask_conv_ioffset + 1;
}
public . mask_conv_joffset = (public . mask_cols - 1) / 2;
if (((public . mask_cols - 1) % 2) > 0.5) {
public . mask_conv_joffset = public . mask_conv_joffset + 1;
}
for (i = 0; i <= public . allPoints - 1; i += 1) {
private[i] . d_mask_conv = ((float *)(malloc(public . mask_conv_mem)));
}
//======================================================================================================================================================
// PRINT FRAME PROGRESS START
//======================================================================================================================================================
printf("frame progress: ");
fflush(((void *)0));
//======================================================================================================================================================
// KERNEL
//======================================================================================================================================================
for (public . frame_no = 0; public . frame_no < frames_processed; public . frame_no++) {
//====================================================================================================
// GETTING FRAME
//====================================================================================================
// Extract a cropped version of the first frame from the video file
// pointer to video file
public . d_frame = get_frame(public . d_frames,public . frame_no,0,0,1);
// number of frame that needs to be returned
// cropped?
// scaled?
// converted
//====================================================================================================
// PROCESSING
//====================================================================================================
omp_set_num_threads(omp_num_threads);
for (i = 0; i <= public . allPoints - 1; i += 1) {
kernel(public,private[i]);
}
//====================================================================================================
// FREE MEMORY FOR FRAME
//====================================================================================================
// free frame after each loop iteration, since AVI library allocates memory for every frame fetched
free(public . d_frame);
//====================================================================================================
// PRINT FRAME PROGRESS
//====================================================================================================
printf("%d ",public . frame_no);
fflush(((void *)0));
}
//======================================================================================================================================================
// PRINT FRAME PROGRESS END
//======================================================================================================================================================
printf("\n");
fflush(((void *)0));
//======================================================================================================================================================
// DEALLOCATION
//======================================================================================================================================================
//==================================================50
// DUMP DATA TO FILE
//==================================================50
#ifdef OUTPUT
#endif
//====================================================================================================
// COMMON
//====================================================================================================
free(public . d_endoRow);
free(public . d_endoCol);
free(public . d_tEndoRowLoc);
free(public . d_tEndoColLoc);
free(public . d_endoT);
free(public . d_epiRow);
free(public . d_epiCol);
free(public . d_tEpiRowLoc);
free(public . d_tEpiColLoc);
free(public . d_epiT);
//====================================================================================================
// POINTERS
//====================================================================================================
for (i = 0; i <= public . allPoints - 1; i += 1) {
free(private[i] . in_partial_sum);
free(private[i] . in_sqr_partial_sum);
free(private[i] . par_max_val);
free(private[i] . par_max_coo);
free(private[i] . d_in2);
free(private[i] . d_in2_sqr);
free(private[i] . d_in_mod);
free(private[i] . d_in_sqr);
free(private[i] . d_conv);
free(private[i] . d_in2_pad);
free(private[i] . d_in2_sub);
free(private[i] . d_in2_sub2_sqr);
free(private[i] . d_tMask);
free(private[i] . d_mask_conv);
}
}
//========================================================================================================================================================================================================
//========================================================================================================================================================================================================
// END OF FILE
//========================================================================================================================================================================================================
//========================================================================================================================================================================================================
|
GB_unaryop__lnot_uint8_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint8_int8
// op(A') function: GB_tran__lnot_uint8_int8
// C type: uint8_t
// A type: int8_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = !(aij != 0)
// A (input) matrix entry type
#define GB_ATYPE \
    int8_t

// C (output) matrix entry type
#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

// access an entry of the output array
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, x) \
    uint8_t z = (uint8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT8 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply z = !(x != 0) elementwise: Cx [p] = lnot ((uint8_t) Ax [p]).
// Entries are independent, so the loop is trivially parallel.
GrB_Info GB_unop__lnot_uint8_int8
(
    uint8_t *restrict Cx,
    const int8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // expansion of GB_CAST_OP (p, p): typecast, then logical-not
        uint8_t x = (uint8_t) Ax [p] ;
        Cx [p] = !(x != 0) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast int8_t -> uint8_t, and apply
// the logical-not operator. The actual loop lives in the shared template
// GB_unaryop_transpose.c, driven by the GB_* macros defined above.
GrB_Info GB_tran__lnot_uint8_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // phase 2 of the two-phase transpose: Rowcounts already holds the
    // per-row offsets, so the template scatters op(cast(aij)) into C
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
SpatialAdaptiveMaxPooling.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialAdaptiveMaxPooling.c"
#else
// Adaptive pooling window bounds: output index a of b output cells maps to
// input rows/cols [START_IND, END_IND) out of c input cells. floor/ceil make
// adjacent windows overlap when c is not a multiple of b, so every input
// element is covered.
#define START_IND(a,b,c) (int)floor((float)(a * c) / b)
#define END_IND(a,b,c) (int)ceil((float)((a + 1) * c) / b)
// integer-only alternatives (kept for reference):
// #define START_IND(a,b,c) a * c / b
// #define END_IND(a,b,c) (a + 1) * c / b + ((a + 1) * c % b > 0)?1:0
// 4d tensor B x D x H x W
/* Forward pass over one frame (one batch element): for every output cell
 * (d, oh, ow), scan its adaptive input window and record both the max value
 * and the flattened H*W input index of that max (for the backward pass).
 * Fix vs. original: removed the dead local `int64_t tcntr = 0;`, which was
 * never read or written after initialization. */
// 4d tensor B x D x H x W
static void THNN_(SpatialAdaptiveMaxPooling_updateOutput_frame)(
          real *input_p,
          real *output_p,
          THIndex_t *ind_p,
          int64_t sizeD,
          int64_t isizeH,
          int64_t isizeW,
          int64_t osizeH,
          int64_t osizeW,
          int64_t istrideD,
          int64_t istrideH,
          int64_t istrideW)
{
  int64_t d;
#pragma omp parallel for private(d)
  for (d = 0; d < sizeD; d++)
  {
    /* loop over output */
    int64_t oh, ow;
    for (oh = 0; oh < osizeH; oh++)
    {
      /* adaptive window rows covered by this output row */
      int istartH = START_IND(oh, osizeH, isizeH);
      int iendH = END_IND(oh, osizeH, isizeH);
      int kH = iendH - istartH;

      for (ow = 0; ow < osizeW; ow++)
      {
        int istartW = START_IND(ow, osizeW, isizeW);
        int iendW = END_IND(ow, osizeW, isizeW);
        int kW = iendW - istartW;

        /* local pointers into input / output / indices */
        real *ip = input_p + d*istrideD + istartH*istrideH + istartW*istrideW;
        real *op = output_p + d*osizeH*osizeW + oh*osizeW + ow;
        THIndex_t *indp = ind_p + d*osizeH*osizeW + oh*osizeW + ow;

        /* compute local max over the kH x kW window */
        int64_t maxindex = -1;
        /* NOTE(review): -FLT_MAX is a sentinel, not the true minimum when
           `real` is double — kept as-is to preserve existing behavior */
        real maxval = -FLT_MAX;
        int ih, iw;
        for (ih = 0; ih < kH; ih++)
        {
          for (iw = 0; iw < kW; iw++)
          {
            real val = *(ip + ih*istrideH + iw*istrideW);
            if (val > maxval)
            {
              maxval = val;
              /* flattened (row-major) index into the H x W input plane */
              maxindex = (ih+istartH)*isizeW + (iw+istartW);
            }
          }
        }

        /* set output to local max */
        *op = maxval;
        /* store location of max, shifted by the index base (e.g. 1 for Lua) */
        *indp = maxindex + TH_INDEX_BASE;
      }
    }
  }
}
/* Adaptive max pooling forward entry point.
 *
 * input:   3D (D x H x W) or 4D (B x D x H x W) tensor; need not be
 *          contiguous (the frame kernel walks it via strides).
 * output:  resized to (B x) D x osizeH x osizeW and filled with window maxima.
 * indices: resized like output; receives the flattened H*W position of each
 *          max (plus TH_INDEX_BASE), consumed by updateGradInput.
 */
void THNN_(SpatialAdaptiveMaxPooling_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          THIndexTensor *indices,
          int osizeW,
          int osizeH)
{
  int dimW = 2;
  int dimH = 1;
  int64_t sizeB = 1;
  int64_t sizeD;
  int64_t isizeH;
  int64_t isizeW;

  int64_t istrideD;
  int64_t istrideH;
  int64_t istrideW;
  int64_t istrideB;  /* only assigned in the 4D (batch) branch below */

  real *input_data;
  real *output_data;
  THIndex_t *indices_data;

  THNN_ARGCHECK(input->nDimension == 3 || input->nDimension == 4, 2, input,
                "3D or 4D (batch mode) tensor expected for input, but got: %s");

  /* batch mode: dim 0 is the batch, so D/H/W shift up by one */
  if (input->nDimension == 4)
  {
    istrideB = input->stride[0];
    sizeB = input->size[0];
    dimW++;
    dimH++;
  }

  /* sizes */
  sizeD = input->size[dimH-1];
  isizeH = input->size[dimH];
  isizeW = input->size[dimW];
  /* strides */
  istrideD = input->stride[dimH-1];
  istrideH = input->stride[dimH];
  istrideW = input->stride[dimW];

  /* resize output */
  if (input->nDimension == 3)
  {
    THTensor_(resize3d)(output, sizeD, osizeH, osizeW);
    /* indices will contain i,j locations for each output point */
    THIndexTensor_(resize3d)(indices, sizeD, osizeH, osizeW);

    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);
    indices_data = THIndexTensor_(data)(indices);

    THNN_(SpatialAdaptiveMaxPooling_updateOutput_frame)(input_data, output_data,
                                                        indices_data,
                                                        sizeD,
                                                        isizeH, isizeW,
                                                        osizeH, osizeW,
                                                        istrideD,
                                                        istrideH, istrideW);
  }
  else
  {
    int64_t b;

    THTensor_(resize4d)(output, sizeB, sizeD, osizeH, osizeW);
    /* indices will contain i,j locations for each output point */
    THIndexTensor_(resize4d)(indices, sizeB, sizeD, osizeH, osizeW);

    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);
    indices_data = THIndexTensor_(data)(indices);

    /* each batch element is an independent frame */
#pragma omp parallel for private(b)
    for (b = 0; b < sizeB; b++)
    {
      THNN_(SpatialAdaptiveMaxPooling_updateOutput_frame)(input_data+b*istrideB, output_data+b*sizeD*osizeH*osizeW,
                                                          indices_data+b*sizeD*osizeH*osizeW,
                                                          sizeD,
                                                          isizeH, isizeW,
                                                          osizeH, osizeW,
                                                          istrideD,
                                                          istrideH, istrideW);
    }
  }
}
/* Backward pass over one frame: route each output gradient back to the
 * input location that produced the forward max, as recorded in ind_p.
 * gradInput is assumed pre-zeroed by the caller. */
static void THNN_(SpatialAdaptiveMaxPooling_updateGradInput_frame)(
          real *gradInput_p,
          real *gradOutput_p,
          THIndex_t *ind_p,
          int64_t sizeD,
          int64_t isizeH,
          int64_t isizeW,
          int64_t osizeH,
          int64_t osizeW)
{
  int64_t d;
#pragma omp parallel for private(d)
  for (d = 0; d < sizeD; d++)
  {
    /* per-channel base pointers */
    real *gi = gradInput_p + d*isizeH*isizeW;
    real *go = gradOutput_p + d*osizeH*osizeW;
    THIndex_t *idx = ind_p + d*osizeH*osizeW;

    /* walk the output plane in row-major order (k == oh*osizeW + ow) */
    const int64_t osize = osizeH*osizeW;
    int64_t k;
    for (k = 0; k < osize; k++)
    {
      /* position of the forward max, undoing the index-base shift */
      int64_t maxp = idx[k] - TH_INDEX_BASE;
      /* accumulate the output gradient at that input position */
      gi[maxp] += go[k];
    }
  }
}
/* Adaptive max pooling backward entry point.
 *
 * Resizes gradInput to match input, zeroes it, then scatters gradOutput
 * back through the max indices recorded by updateOutput.
 * Fix vs. original: local sizes were declared `int` here while the sibling
 * updateOutput uses `int64_t`; made consistent (int64_t) so the per-batch
 * offset products b*sizeD*isizeH*isizeW cannot overflow 32 bits. */
void THNN_(SpatialAdaptiveMaxPooling_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          THIndexTensor *indices)
{
  int dimW = 2;
  int dimH = 1;
  int64_t sizeB = 1;
  int64_t sizeD;
  int64_t isizeH;
  int64_t isizeW;
  int64_t osizeH;
  int64_t osizeW;
  real *gradInput_data;
  real *gradOutput_data;
  THIndex_t *indices_data;

  /* get contiguous gradOutput (released at the end) */
  gradOutput = THTensor_(newContiguous)(gradOutput);

  /* resize and zero the gradient buffer */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  /* batch mode: dim 0 is the batch, so D/H/W shift up by one */
  if (input->nDimension == 4) {
    sizeB = input->size[0];
    dimW++;
    dimH++;
  }

  /* sizes */
  sizeD = input->size[dimH-1];
  isizeH = input->size[dimH];
  isizeW = input->size[dimW];
  osizeH = gradOutput->size[dimH];
  osizeW = gradOutput->size[dimW];

  /* get raw pointers */
  gradInput_data = THTensor_(data)(gradInput);
  gradOutput_data = THTensor_(data)(gradOutput);
  indices_data = THIndexTensor_(data)(indices);

  /* backprop */
  if (input->nDimension == 3)
  {
    THNN_(SpatialAdaptiveMaxPooling_updateGradInput_frame)(gradInput_data, gradOutput_data,
                                                           indices_data,
                                                           sizeD,
                                                           isizeH, isizeW,
                                                           osizeH, osizeW);
  }
  else
  {
    int64_t b;
    /* each batch element is an independent frame */
#pragma omp parallel for private(b)
    for (b = 0; b < sizeB; b++)
    {
      THNN_(SpatialAdaptiveMaxPooling_updateGradInput_frame)(gradInput_data+b*sizeD*isizeH*isizeW, gradOutput_data+b*sizeD*osizeH*osizeW,
                                                             indices_data+b*sizeD*osizeH*osizeW,
                                                             sizeD,
                                                             isizeH, isizeW,
                                                             osizeH, osizeW);
    }
  }

  /* cleanup: release the contiguous copy */
  THTensor_(free)(gradOutput);
}
#endif
|
task.c | #include <stdlib.h>
#include <stdio.h>
#include "omp.h"
#pragma omp requires unified_shared_memory
#include "../utilities/check.h"

// PRINT compiles out by default; swap definitions to enable debug output
#define PRINT(_args...)
//#define PRINT(_args...) printf(_args)

// problem size for all test arrays
#define N 64

// feature switches for the task bodies
#define TRY_TASK 1
#define TASK_COMPUTE 1
#define OFF 1

// per-test enable flags (T1..T12); set one to 0 to skip that test.
// Fix: the original defined T1 twice in a row; the redundant duplicate
// (a benign but noisy identical redefinition) was removed.
#define T1 1
#define T2 1
#define T3 1
#define T4 1
#define T5 1
#define T6 1
#define T7 1
#define T8 1
#define T9 1
#define T10 1
#define T11 1
#define T12 1
int main ()
{
int a[N], aa[N];
int b[N], bb[N];
int c[N], cc[N];
int d[N], dd[N];
int e[N], ee[N];
int i, errors;
int cond = 0;
#if OFF
check_offloading();
#endif
// Test: task within target
#if T1
// init
for(i=0; i<N; i++) {
a[i] = aa[i] = i+1;
b[i] = bb[i] = 2*i +1;
}
// target starts 1 team and many threads in it
#pragma omp target map(tofrom: a, b)
{
int id = omp_get_thread_num();
a[id]++;
#if TRY_TASK
#pragma omp task firstprivate(id) shared(b) default(none)
{
#if TASK_COMPUTE
PRINT("hi alex from %d\n", id);
b[id]++;
#endif
}
#pragma omp taskwait
#endif
}
// reproduce
aa[0]++;
#if TRY_TASK && TASK_COMPUTE
bb[0]++;
#endif
// verify
errors = 0;
for(i=0; i<N; i++) {
if (a[i] != aa[i]) printf("%4i: got a %d, expected %d, error %d\n", i, a[i], aa[i], ++errors);
if (b[i] != bb[i]) printf("%4i: got b %d, expected %d, error %d\n", i, b[i], bb[i], ++errors);
}
printf("#1 got %d errors\n", errors);
#endif
// Test: task within parallel
#if T2
// init
for(i=0; i<N; i++) {
a[i] = aa[i] = i+1;
b[i] = bb[i] = 2*i +1;
}
// target starts 1 team and many threads in it
#pragma omp target map(tofrom: a, b)
{
#pragma omp parallel num_threads(64)
{
int id = omp_get_thread_num();
a[id]++;
#if TRY_TASK
#pragma omp task firstprivate(id) shared(b)
{
#if TASK_COMPUTE
PRINT("hi alex from %d\n", id);
b[id]++;
#endif
}
#endif
}
}
// reproduce
for(i=0; i<N; i++) {
aa[i]++;
#if TRY_TASK && TASK_COMPUTE
bb[i]++;
#endif
}
// verify
errors = 0;
for(i=0; i<N; i++) {
if (a[i] != aa[i]) printf("%4i: got a %d, expected %d, error %d\n", i, a[i], aa[i], ++errors);
if (b[i] != bb[i]) printf("%4i: got b %d, expected %d, error %d\n", i, b[i], bb[i], ++errors);
}
printf("#2 got %d errors\n", errors);
#endif
// Test: multiple nested tasks in parallel region
#if T3
// init
for(i=0; i<N; i++) {
a[i] = aa[i] = i+1;
b[i] = bb[i] = 2*i +1;
}
// target starts 1 team and many threads in it
#pragma omp target map(tofrom: a, b)
{
#pragma omp parallel num_threads(64)
{
int id = omp_get_thread_num();
a[id]++;
#pragma omp task firstprivate(id) shared(b)
{
PRINT("hi alex from %d\n", id);
#pragma omp atomic
b[id]++;
#pragma omp task firstprivate(id) shared(b)
{
PRINT("hi alex from %d\n", id);
#pragma omp atomic
b[id]++;
#pragma omp task firstprivate(id) shared(b)
{
PRINT("hi alex from %d\n", id);
#pragma omp atomic
b[id]++;
}
}
}
}
}
// reproduce
for(i=0; i<N; i++) {
aa[i]++;
bb[i]+=3;
}
// verify
errors = 0;
for(i=0; i<N; i++) {
if (a[i] != aa[i]) printf("%4i: got a %d, expected %d, error %d\n", i, a[i], aa[i], ++errors);
if (b[i] != bb[i]) printf("%4i: got b %d, expected %d, error %d\n", i, b[i], bb[i], ++errors);
}
printf("#3 got %d errors\n", errors);
#endif
// Test: three successive tasks in a parallel region
#if T4
// init
for(i=0; i<N; i++) {
a[i] = aa[i] = i+1;
b[i] = bb[i] = 2*i +1;
}
// target starts 1 team and many threads in it
#pragma omp target map(tofrom: a, b)
{
#pragma omp parallel num_threads(64)
{
int id = omp_get_thread_num();
a[id]++;
#pragma omp task firstprivate(id) shared(b)
{
PRINT("hi alex from %d\n", id);
#pragma omp atomic
b[id]++;
}
#pragma omp task firstprivate(id) shared(b)
{
PRINT("hi alex from %d\n", id);
#pragma omp atomic
b[id]++;
}
#pragma omp task firstprivate(id) shared(b)
{
PRINT("hi alex from %d\n", id);
#pragma omp atomic
b[id]++;
}
}
}
// reproduce
for(i=0; i<N; i++) {
aa[i]++;
bb[i]+=3;
}
// verify
errors = 0;
for(i=0; i<N; i++) {
if (a[i] != aa[i]) printf("%4i: got a %d, expected %d, error %d\n", i, a[i], aa[i], ++errors);
if (b[i] != bb[i]) printf("%4i: got b %d, expected %d, error %d\n", i, b[i], bb[i], ++errors);
}
printf("#4 got %d errors\n", errors);
#endif
// Test: change of context when entering/exiting tasks
#if T5
// init
for(i=0; i<N; i++) {
a[i] = aa[i] = i+1;
b[i] = bb[i] = 2*i+1;
c[i] = cc[i] = 3*i+1;
d[i] = dd[i] = 4*i+1;
e[i] = ee[i] = 5*i+1;
}
// target starts 1 team and many threads in it
#pragma omp target map(tofrom: a, b, c, d, e)
{
omp_set_schedule(omp_sched_static, 1);
#pragma omp parallel num_threads(64)
{
omp_set_schedule(omp_sched_static, 2);
int id = omp_get_thread_num();
// task 1
#pragma omp task firstprivate(id) shared(b, c, d, e)
{
omp_set_schedule(omp_sched_static, 3);
PRINT("hi alex from %d\n", id);
// task 2
#pragma omp task firstprivate(id) shared(b, c, d, e)
{
omp_set_schedule(omp_sched_static, 4);
PRINT("hi alex from %d\n", id);
// task 3
#pragma omp task firstprivate(id) shared(b, c, d, e)
{
omp_set_schedule(omp_sched_static, 5);
PRINT("hi alex from %d\n", id);
// task 3
omp_sched_t s; int chunk;
omp_get_schedule(&s, &chunk);
if (s == omp_sched_static && chunk == 5) e[id]++;
}
// task 2
omp_sched_t s; int chunk;
omp_get_schedule(&s, &chunk);
if (s == omp_sched_static && chunk == 4) d[id]++;
}
// task 1
omp_sched_t s; int chunk;
omp_get_schedule(&s, &chunk);
if (s == omp_sched_static && chunk == 3) c[id]++;
}
// par
omp_sched_t s; int chunk;
omp_get_schedule(&s, &chunk);
if (s == omp_sched_static && chunk == 2) b[id]++;
}
// team
omp_sched_t s; int chunk;
omp_get_schedule(&s, &chunk);
if (s == omp_sched_static && chunk == 1) a[0]++;
}
// reproduce
aa[0]++;
for(i=0; i<N; i++) {
bb[i]++;
cc[i]++;
dd[i]++;
ee[i]++;
}
// verify
errors = 0;
for(i=0; i<N; i++) {
if (a[i] != aa[i]) printf("%4i: got a %d, expected %d, error %d\n", i, a[i], aa[i], ++errors);
if (b[i] != bb[i]) printf("%4i: got b %d, expected %d, error %d\n", i, b[i], bb[i], ++errors);
if (c[i] != cc[i]) printf("%4i: got c %d, expected %d, error %d\n", i, c[i], cc[i], ++errors);
if (d[i] != dd[i]) printf("%4i: got d %d, expected %d, error %d\n", i, d[i], dd[i], ++errors);
if (e[i] != ee[i]) printf("%4i: got e %d, expected %d, error %d\n", i, e[i], ee[i], ++errors);
}
printf("#5 got %d errors\n", errors);
#endif
// Test: change of context when using if clause
#if T6
// init
for(i=0; i<N; i++) {
a[i] = aa[i] = i+1;
b[i] = bb[i] = 2*i+1;
c[i] = cc[i] = 3*i+1;
d[i] = dd[i] = 4*i+1;
e[i] = ee[i] = 5*i+1;
}
// target starts 1 team and many threads in it
#pragma omp target map(tofrom: a, b, c, d, e, cond)
{
omp_set_schedule(omp_sched_static, 1);
#pragma omp parallel num_threads(64)
{
omp_set_schedule(omp_sched_static, 2);
int id = omp_get_thread_num();
// task 1
#pragma omp task firstprivate(id) shared(b, c, d, e) if(cond)
{
omp_set_schedule(omp_sched_static, 3);
PRINT("hi alex from %d\n", id);
// task 2
#pragma omp task firstprivate(id) shared(b, c, d, e) if(cond)
{
omp_set_schedule(omp_sched_static, 4);
PRINT("hi alex from %d\n", id);
// task 3
#pragma omp task firstprivate(id) shared(b, c, d, e) if(cond)
{
omp_set_schedule(omp_sched_static, 5);
PRINT("hi alex from %d\n", id);
// task 3
omp_sched_t s; int chunk;
omp_get_schedule(&s, &chunk);
if (s == omp_sched_static && chunk == 5) e[id]++;
}
// task 2
omp_sched_t s; int chunk;
omp_get_schedule(&s, &chunk);
if (s == omp_sched_static && chunk == 4) d[id]++;
}
// task 1
omp_sched_t s; int chunk;
omp_get_schedule(&s, &chunk);
if (s == omp_sched_static && chunk == 3) c[id]++;
}
// par
omp_sched_t s; int chunk;
omp_get_schedule(&s, &chunk);
if (s == omp_sched_static && chunk == 2) b[id]++;
}
// team
omp_sched_t s; int chunk;
omp_get_schedule(&s, &chunk);
if (s == omp_sched_static && chunk == 1) a[0]++;
}
// reproduce
aa[0]++;
for(i=0; i<N; i++) {
bb[i]++;
cc[i]++;
dd[i]++;
ee[i]++;
}
// verify
errors = 0;
for(i=0; i<N; i++) {
if (a[i] != aa[i]) printf("%4i: got a %d, expected %d, error %d\n", i, a[i], aa[i], ++errors);
if (b[i] != bb[i]) printf("%4i: got b %d, expected %d, error %d\n", i, b[i], bb[i], ++errors);
if (c[i] != cc[i]) printf("%4i: got c %d, expected %d, error %d\n", i, c[i], cc[i], ++errors);
if (d[i] != dd[i]) printf("%4i: got d %d, expected %d, error %d\n", i, d[i], dd[i], ++errors);
if (e[i] != ee[i]) printf("%4i: got e %d, expected %d, error %d\n", i, e[i], ee[i], ++errors);
}
printf("#6 got %d errors\n", errors);
#endif
// Test: final
#if T7
// init
for(i=0; i<N; i++) {
a[i] = aa[i] = i+1;
b[i] = bb[i] = 2*i +1;
}
// target starts 1 team and many threads in it
#pragma omp target map(tofrom: a, b)
{
#pragma omp parallel num_threads(64)
{
int id = omp_get_thread_num();
a[id]++;
#pragma omp task firstprivate(id) shared(b) final(1)
{
PRINT("hi alex from %d\n", id);
#pragma omp atomic
b[id]++;
#pragma omp task firstprivate(id) shared(b) final(1)
{
PRINT("hi alex from %d\n", id);
#pragma omp atomic
b[id]++;
#pragma omp task firstprivate(id) shared(b) final(1)
{
PRINT("hi alex from %d\n", id);
#pragma omp atomic
b[id]++;
}
}
}
}
}
// reproduce
for(i=0; i<N; i++) {
aa[i]++;
bb[i]+=3;
}
// verify
errors = 0;
for(i=0; i<N; i++) {
if (a[i] != aa[i]) printf("%4i: got a %d, expected %d, error %d\n", i, a[i], aa[i], ++errors);
if (b[i] != bb[i]) printf("%4i: got b %d, expected %d, error %d\n", i, b[i], bb[i], ++errors);
}
printf("#7 got %d errors\n", errors);
#endif
// Test: untied
#if T8 && 0
// init
for(i=0; i<N; i++) {
a[i] = aa[i] = i+1;
b[i] = bb[i] = 2*i +1;
}
// target starts 1 team and many threads in it
#pragma omp target map(tofrom: a, b)
{
#pragma omp parallel num_threads(64)
{
int id = omp_get_thread_num();
a[id]++;
#pragma omp task firstprivate(id) shared(b) untied
{
PRINT("hi alex from %d\n", id);
#pragma omp atomic
b[id]++;
#pragma omp task firstprivate(id) shared(b) untied
{
PRINT("hi alex from %d\n", id);
#pragma omp atomic
b[id]++;
#pragma omp task firstprivate(id) shared(b) untied
{
PRINT("hi alex from %d\n", id);
#pragma omp atomic
b[id]++;
}
}
}
}
}
// reproduce
for(i=0; i<N; i++) {
aa[i]++;
bb[i]+=3;
}
// verify
errors = 0;
for(i=0; i<N; i++) {
if (a[i] != aa[i]) printf("%4i: got a %d, expected %d, error %d\n", i, a[i], aa[i], ++errors);
if (b[i] != bb[i]) printf("%4i: got b %d, expected %d, error %d\n", i, b[i], bb[i], ++errors);
}
printf("#8 got %d errors\n", errors);
#endif
// Test: mergeable
#if T9
// init
for(i=0; i<N; i++) {
a[i] = aa[i] = i+1;
b[i] = bb[i] = 2*i +1;
}
// target starts 1 team and many threads in it
#pragma omp target map(tofrom: a, b)
{
#pragma omp parallel num_threads(64)
{
int id = omp_get_thread_num();
a[id]++;
#pragma omp task firstprivate(id) shared(b)
{
PRINT("hi alex from %d\n", id);
//#pragma omp atomic
b[id]++;
#pragma omp task firstprivate(id) shared(b) mergeable
{
PRINT("hi alex from %d\n", id);
//#pragma omp atomic
b[id]++;
#pragma omp task firstprivate(id) shared(b) mergeable
{
PRINT("hi alex from %d\n", id);
//#pragma omp atomic
b[id]++;
}
}
}
}
}
// reproduce
for(i=0; i<N; i++) {
aa[i]++;
bb[i]+=3;
}
// verify
errors = 0;
for(i=0; i<N; i++) {
if (a[i] != aa[i]) printf("%4i: got a %d, expected %d, error %d\n", i, a[i], aa[i], ++errors);
if (b[i] != bb[i]) printf("%4i: got b %d, expected %d, error %d\n", i, b[i], bb[i], ++errors);
}
printf("#9 got %d errors\n", errors);
#endif
// Test: private
#if T10 && 0
/*
Test disabled because this test only works on the GPU, where a
task is guaranteed to work on the same thread as it is
created. This is not true in general on the host. So we cannot use
this test generally. Thus I am disabling it here
*/
// init
for(i=0; i<N; i++) {
a[i] = aa[i] = i+1;
b[i] = bb[i] = 2*i +1;
}
// target starts 1 team and many threads in it
#pragma omp target map(tofrom: a, b)
{
#pragma omp parallel num_threads(64)
{
int id = omp_get_thread_num();
a[id]++;
#if TRY_TASK
#pragma omp task private(id) shared(b)
{
int id = omp_get_thread_num();
#if TASK_COMPUTE
PRINT("hi alex from %d\n", id);
b[id]++;
#endif
}
#endif
}
}
// reproduce
for(i=0; i<N; i++) {
aa[i]++;
#if TRY_TASK && TASK_COMPUTE
bb[i]++;
#endif
}
// verify
errors = 0;
for(i=0; i<N; i++) {
if (a[i] != aa[i]) printf("%4i: got a %d, expected %d, error %d\n", i, a[i], aa[i], ++errors);
if (b[i] != bb[i]) printf("%4i: got b %d, expected %d, error %d\n", i, b[i], bb[i], ++errors);
}
printf("#10 got %d errors\n", errors);
#endif
// Test: depend
#if T11
// init
int x;
for(i=0; i<N; i++) {
a[i] = aa[i] = i+1;
b[i] = bb[i] = 2*i +1;
}
// target starts 1 team and many threads in it
#pragma omp target map(tofrom: a, b)
{
#pragma omp parallel num_threads(64)
{
int id = omp_get_thread_num();
a[id]++;
#pragma omp task firstprivate(id) shared(b) depend(out:a[id])
{
PRINT("hi alex from %d\n", id);
b[id]++;
}
#pragma omp task firstprivate(id) shared(b) depend(inout:a[id])
{
PRINT("hi alex from %d\n", id);
b[id]++;
}
#pragma omp task firstprivate(id) shared(b) depend(in:a[id])
{
PRINT("hi alex from %d\n", id);
b[id]++;
}
}
}
// reproduce
for(i=0; i<N; i++) {
aa[i]++;
bb[i]+=3;
}
// verify
errors = 0;
for(i=0; i<N; i++) {
if (a[i] != aa[i]) printf("%4i: got a %d, expected %d, error %d\n", i, a[i], aa[i], ++errors);
if (b[i] != bb[i]) printf("%4i: got b %d, expected %d, error %d\n", i, b[i], bb[i], ++errors);
}
printf("#11 got %d errors\n", errors);
#endif
// Test: inverted priority
#if T12
// init
for(i=0; i<N; i++) {
a[i] = aa[i] = i+1;
b[i] = bb[i] = 2*i +1;
}
// target starts 1 team and many threads in it
#pragma omp target map(tofrom: a, b)
{
#pragma omp parallel num_threads(64)
{
int id = omp_get_thread_num();
a[id]++;
#pragma omp task firstprivate(id) shared(b) priority(0)
{
PRINT("hi alex from %d\n", id);
#pragma omp atomic
b[id]++;
}
#pragma omp task firstprivate(id) shared(b) priority(10)
{
PRINT("hi alex from %d\n", id);
#pragma omp atomic
b[id]++;
}
#pragma omp task firstprivate(id) shared(b) priority(20)
{
PRINT("hi alex from %d\n", id);
#pragma omp atomic
b[id]++;
}
}
}
// reproduce
for(i=0; i<N; i++) {
aa[i]++;
bb[i]+=3;
}
// verify
errors = 0;
for(i=0; i<N; i++) {
if (a[i] != aa[i]) printf("%4i: got a %d, expected %d, error %d\n", i, a[i], aa[i], ++errors);
if (b[i] != bb[i]) printf("%4i: got b %d, expected %d, error %d\n", i, b[i], bb[i], ++errors);
}
printf("#12 got %d errors\n", errors);
#endif
return 0;
}
|
multi_device.h | //
// Created by liqinbin on 10/14/20.
// ThunderGBM multi_device.h: https://github.com/Xtra-Computing/thundergbm/blob/master/include/thundergbm/util/multi_device.h
// Under Apache-2.0 license
// copyright (c) 2020 jiashuai
//
#ifndef FEDTREE_MULTI_DEVICE_H
#define FEDTREE_MULTI_DEVICE_H
#ifdef USE_CUDA
#include "FedTree/common.h"
//switch to specific device and do something, then switch back to the original device
//FIXME make this macro into a function?
#define DO_ON_DEVICE(device_id, something) \
do { \
int org_device_id = 0; \
CUDA_CHECK(cudaGetDevice(&org_device_id)); \
CUDA_CHECK(cudaSetDevice(device_id)); \
something; \
CUDA_CHECK(cudaSetDevice(org_device_id)); \
} while (false)
/**
* Do something on multiple devices, then switch back to the original device
*
*
* example:
*
* DO_ON_MULTI_DEVICES(n_devices, [&](int device_id){
* //do_something_on_device(device_id);
* });
*/
/**
 * Run a callback once per CUDA device, in parallel, then restore the
 * device that was selected on entry.
 *
 * @param n_devices    number of devices to visit (ids 0..n_devices-1)
 * @param do_something callable invoked as do_something(device_id) with the
 *                     corresponding device made current on that thread
 */
template<typename L>
void DO_ON_MULTI_DEVICES(int n_devices, L do_something) {
    // Remember which device was active so the caller's context survives.
    int previous_device = 0;
    CUDA_CHECK(cudaGetDevice(&previous_device));
    // One OpenMP thread per device: each thread binds its own GPU and
    // runs the callback for that device id.
#pragma omp parallel for num_threads(n_devices)
    for (int dev = 0; dev < n_devices; ++dev) {
        CUDA_CHECK(cudaSetDevice(dev));
        do_something(dev);
    }
    // Switch back to the originally selected device.
    CUDA_CHECK(cudaSetDevice(previous_device));
}
#endif
#endif //FEDTREE_MULTI_DEVICE_H
|
sudoku_task.c | #include <stdio.h>
#include "sudoku.h"
#include "ctimer.h"
#include <omp.h>
/*
 * Entry point: load a hard-coded "very hard" sudoku, build the mask of
 * fixed cells, and solve it with a task-parallel recursive search.
 * Prints the initial board, every solution found, and the elapsed time.
 */
int main( int argc, char *argv[] ) {
    int mascara[81];   /* mask: nonzero where the initial puzzle fixes a cell */
    int sol[81];       /* working board, 81 cells */
    int i, j;
    printf("sudoku inicial: \n");
    init_sudoku("muy dificil",sol);  /* fill sol with the chosen puzzle (sudoku.h) */
    prin_sudoku(sol);
    double tiempo, ucpu, scpu;
    ctimer( &tiempo, &ucpu, &scpu );  /* start wall/user/system timers */
    /* Mark every pre-filled cell as fixed.
       NOTE(review): mascara(i,j)/sol(i,j) are used as lvalues, so they are
       presumably 1-based accessor macros from sudoku.h over the flat
       81-element arrays -- confirm against sudoku.h. */
    for( i = 1; i <= 9; i++ ) {
        for( j = 1; j <= 9; j++ ) {
            mascara(i,j) = sol(i,j) != 0;
        }
    }
    /* A single thread seeds the recursive solver; the tasks it spawns are
       executed by the whole parallel team. */
    #pragma omp parallel
    #pragma omp single
    sudoku_sol( 1, 1, sol, mascara );
    ctimer( &tiempo, &ucpu, &scpu );  /* second call reads the elapsed time */
    printf("Tiempo = %f\n",tiempo);
    return 0;
}
/*
 * Recursive, task-parallel sudoku search over cell (i, j) (1-based).
 *
 * @param i, j     current cell; traversal is row-major, (9,9) is the last cell
 * @param sol1     board state inherited from the parent call
 * @param mascara  nonzero entries mark cells fixed by the initial puzzle
 *
 * Each invocation works on a private copy of the board, so sibling tasks
 * exploring different digits never interfere. Solutions are printed as a
 * side effect when the last cell is reached.
 */
void sudoku_sol( int i, int j, int sol1[81], int mascara[81] ) {
    int k;
    int sol[81];
    /* Private copy of the board: tasks below capture this local array
       (firstprivate by default for tasks), so each branch of the search
       proceeds on its own state. */
    for (k=0;k<81;k++) sol[k] =sol1[k];
    if( mascara(i, j) == 0 ) {  /* free cell: try every digit 1..9 */
        for( k = 1; k <= 9; k++ ) {
            sol( i, j ) = k;
            if( es_factible( i, j, sol ) ) {  /* digit consistent with row/col/box */
                if( i == 9 && j == 9 ) {  /* last cell filled -> full solution */
                    printf("Solucion: \n");
                    prin_sudoku(sol);
                }
                /* end of a row: continue at column 1 of the next row */
                if( i < 9 && j == 9 ) {
                    #pragma omp task
                    sudoku_sol ( i+1, 1, sol, mascara );
                }
                /* otherwise continue at the next column of this row */
                if( i <= 9 && j < 9 ) {
                    #pragma omp task
                    sudoku_sol( i, j+1, sol, mascara );
                }
            }
        }
        sol(i, j) = 0;  /* clear the cell in the local copy before returning */
    } else {  /* fixed cell: keep its value and just advance */
        if( i == 9 && j == 9 ) {
            printf("Solucion: \n");
            prin_sudoku(sol);
        }
        if( i < 9 && j == 9 ) {
            #pragma omp task
            sudoku_sol ( i+1, 1, sol, mascara );
        }
        if( i <= 9 && j < 9 ) {
            #pragma omp task
            sudoku_sol( i , j+1, sol, mascara );
        }
    }
}
|
Layer_Linear.h | /*
* Layers.h
*
* Created by Guido Novati on 29.10.18.
* Copyright 2018 ETH Zurich. All rights reserved.
*
*/
#pragma once
#include "Layers.h"
/**
 * Fully-connected (dense) layer: output = input * W + bias.
 * Geometry (nInputs columns in, nOutputs columns out) is fixed at
 * compile time via the template parameters.
 */
template<int nOutputs, int nInputs>
struct LinearLayer: public Layer
{
  /// Allocate this layer's parameter storage:
  /// nInputs*nOutputs weights plus nOutputs biases.
  Params* allocate_params() const override {
    return new Params(nInputs*nOutputs, nOutputs);
  }

  LinearLayer(const int _ID) : Layer(nOutputs, _ID)
  {
    printf("(%d) Linear Layer of Input:%d Output:%d\n", ID, nInputs, nOutputs);
    assert(nOutputs>0 && nInputs>0);
  }

  /// Forward pass over a whole batch:
  ///   output[b, :] = bias + inputs[b, :] * W   (row-major layout)
  void forward(const std::vector<Activation*>& act,
               const std::vector<Params*>& param) const override
  {
    const int batchSize = act[ID]->batchSize;
    const Real*const inputs = act[ID-1]->output; //size is batchSize * nInputs
    const Real*const weight = param[ID]->weights; //size is nInputs * nOutputs
    const Real*const bias = param[ID]->biases; //size is nOutputs
    Real*const output = act[ID]->output; //size is batchSize * nOutputs
    // Seed the output with the bias; the gemm below then accumulates
    // (beta = 1) the matrix product on top of it.
    #pragma omp parallel for schedule(static)
    for(int b=0; b<batchSize; b++)
      for(int n=0; n<nOutputs; n++) output[n + b*nOutputs] = bias[n];
    // output += inputs(batchSize x nInputs) * weight(nInputs x nOutputs)
    gemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
         batchSize, nOutputs, nInputs,
         (Real)1.0, inputs, nInputs,
         weight, nOutputs,
         (Real)1.0, output, nOutputs);
  }

  /// Backward pass: fills grad[ID] with dError/dBias and dError/dWeights,
  /// and writes dError/dInput into act[ID-1]->dError_dOutput.
  void bckward(const std::vector<Activation*>& act,
               const std::vector<Params*>& param,
               const std::vector<Params*>& grad) const override
  {
    // At this point, act[ID]->dError_dOutput contains the derivative of the
    // error with respect to the outputs of this layer.
    const Real* const deltas = act[ID]->dError_dOutput;
    const Real* const inputs = act[ID-1]->output;
    const Real* const weight = param[ID]->weights;
    const int batchSize = act[ID]->batchSize;
    // Bias gradient: sum the deltas over the batch dimension.
    {
      // This array will contain dError / dBias, has size nOutputs
      Real* const grad_B = grad[ID]->biases;
      std::fill(grad_B, grad_B + nOutputs, 0);
      #pragma omp parallel for schedule(static, 64/sizeof(Real))
      for(int n=0; n<nOutputs; n++)
        for(int b=0; b<batchSize; b++)
          grad_B[n] += deltas[n + b*nOutputs]; }
    // Weight gradient: grad_W = inputs^T * deltas.
    {
      // This array will contain dError / dWeights, has size nInputs * nOutputs
      Real* const grad_W = grad[ID]->weights;
      gemm(CblasRowMajor, CblasTrans, CblasNoTrans,
           nInputs, nOutputs, batchSize,
           (Real)1.0, inputs, nInputs,
           deltas, nOutputs,
           (Real)0.0, grad_W, nOutputs);
    }
    // Input gradient for the previous layer: errinp = deltas * W^T.
    {
      Real* const errinp = act[ID-1]->dError_dOutput;
      gemm(CblasRowMajor, CblasNoTrans, CblasTrans,
           batchSize, nInputs, nOutputs,
           (Real)1.0, deltas, nOutputs,
           weight, nOutputs,
           (Real)0.0, errinp, nInputs);
    }
  }

  /// Xavier (uniform) initialization of weights and biases.
  /// NOTE(review): 'size' is presumably Layer's output size (== nOutputs
  /// here, set in the constructor) -- confirm against Layers.h.
  void init(std::mt19937& gen, const std::vector<Params*>& param) const override
  {
    assert(param[ID] not_eq nullptr);
    // get pointers to layer's weights and bias
    Real *const W = param[ID]->weights, *const B = param[ID]->biases;
    assert(param[ID]->nWeights == nInputs*size && param[ID]->nBiases == size);
    // initialize weights with Xavier initialization
    const Real scale = std::sqrt( 6.0 / (nInputs + size) );
    std::uniform_real_distribution<Real> dis(-scale, scale);
    std::generate( W, W + nInputs*nOutputs, [&]() { return dis( gen ); } );
    std::generate( B, B + nOutputs, [&]() { return dis( gen ); } );
  }
};
|
Types.h | //===---------- Types.h - OpenMP types ---------------------------- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//
#ifndef OMPTARGET_TYPES_H
#define OMPTARGET_TYPES_H
/// Base type declarations for freestanding mode
///
///{
// Freestanding stand-ins for <cstdint>: the device runtime cannot include
// host headers, so the fixed-width names are declared by hand. The
// static_asserts below pin the widths these aliases must have on the
// supported targets.
// NOTE(review): int8_t is plain char, whose signedness is
// implementation-defined -- presumably only used for byte-sized storage;
// confirm no arithmetic relies on it being signed.
using int8_t = char;
using uint8_t = unsigned char;
using int16_t = short;
using uint16_t = unsigned short;
using int32_t = int;
using uint32_t = unsigned int;
// 'long' is 8 bytes on the LP64 device targets this header supports.
using int64_t = long;
using uint64_t = unsigned long;
static_assert(sizeof(int8_t) == 1, "type size mismatch");
static_assert(sizeof(uint8_t) == 1, "type size mismatch");
static_assert(sizeof(int16_t) == 2, "type size mismatch");
static_assert(sizeof(uint16_t) == 2, "type size mismatch");
static_assert(sizeof(int32_t) == 4, "type size mismatch");
static_assert(sizeof(uint32_t) == 4, "type size mismatch");
static_assert(sizeof(int64_t) == 8, "type size mismatch");
static_assert(sizeof(uint64_t) == 8, "type size mismatch");
///}
/// Thread affinity policies; values mirror the host-side omp_proc_bind_t
/// and must not be renumbered.
enum omp_proc_bind_t {
  omp_proc_bind_false = 0,
  omp_proc_bind_true = 1,
  omp_proc_bind_master = 2,
  omp_proc_bind_close = 3,
  omp_proc_bind_spread = 4
};
/// Loop schedule kinds as exposed by omp_set/get_schedule; values mirror
/// the host-side omp_sched_t and must not be renumbered.
enum omp_sched_t {
  omp_sched_static = 1,  /* chunkSize >0 */
  omp_sched_dynamic = 2, /* chunkSize >0 */
  omp_sched_guided = 3,  /* chunkSize >0 */
  omp_sched_auto = 4,    /* no chunkSize */
};
/// Internal schedule encodings passed through the kmpc dispatch interface.
/// The numeric values must stay in sync with the host runtime (kmp.h).
/// Note: the #define directives near the end appear inside the enum braces;
/// that is legal (preprocessor directives are not enum members) and matches
/// the layout of the host header.
enum kmp_sched_t {
  kmp_sched_static_chunk = 33,
  kmp_sched_static_nochunk = 34,
  kmp_sched_dynamic = 35,
  kmp_sched_guided = 36,
  kmp_sched_runtime = 37,
  kmp_sched_auto = 38,
  kmp_sched_static_balanced_chunk = 45,
  kmp_sched_static_ordered = 65,
  kmp_sched_static_nochunk_ordered = 66,
  kmp_sched_dynamic_ordered = 67,
  kmp_sched_guided_ordered = 68,
  kmp_sched_runtime_ordered = 69,
  kmp_sched_auto_ordered = 70,
  kmp_sched_distr_static_chunk = 91,
  kmp_sched_distr_static_nochunk = 92,
  kmp_sched_distr_static_chunk_sched_static_chunkone = 93,
  // Aliases delimiting the unordered/ordered/distribute ranges above.
  kmp_sched_default = kmp_sched_static_nochunk,
  kmp_sched_unordered_first = kmp_sched_static_chunk,
  kmp_sched_unordered_last = kmp_sched_auto,
  kmp_sched_ordered_first = kmp_sched_static_ordered,
  kmp_sched_ordered_last = kmp_sched_auto_ordered,
  kmp_sched_distribute_first = kmp_sched_distr_static_chunk,
  kmp_sched_distribute_last =
      kmp_sched_distr_static_chunk_sched_static_chunkone,
  /* Support for OpenMP 4.5 monotonic and nonmonotonic schedule modifiers.
   * Since we need to distinguish the three possible cases (no modifier,
   * monotonic modifier, nonmonotonic modifier), we need separate bits for
   * each modifier. The absence of monotonic does not imply nonmonotonic,
   * especially since 4.5 says that the behaviour of the "no modifier" case
   * is implementation defined in 4.5, but will become "nonmonotonic" in 5.0.
   *
   * Since we're passing a full 32 bit value, we can use a couple of high
   * bits for these flags; out of paranoia we avoid the sign bit.
   *
   * These modifiers can be or-ed into non-static schedules by the compiler
   * to pass the additional information. They will be stripped early in the
   * processing in __kmp_dispatch_init when setting up schedules, so
   * most of the code won't ever see schedules with these bits set.
   */
  kmp_sched_modifier_monotonic = (1 << 29),
  /**< Set if the monotonic schedule modifier was present */
  kmp_sched_modifier_nonmonotonic = (1 << 30),
  /**< Set if the nonmonotonic schedule modifier was present */
// Helpers to strip or test the modifier bits on a raw schedule value.
#define SCHEDULE_WITHOUT_MODIFIERS(s)                                          \
  (enum kmp_sched_t)(                                                          \
      (s) & ~(kmp_sched_modifier_nonmonotonic | kmp_sched_modifier_monotonic))
#define SCHEDULE_HAS_MONOTONIC(s) (((s)&kmp_sched_modifier_monotonic) != 0)
#define SCHEDULE_HAS_NONMONOTONIC(s)                                           \
  (((s)&kmp_sched_modifier_nonmonotonic) != 0)
#define SCHEDULE_HAS_NO_MODIFIERS(s)                                           \
  (((s) & (kmp_sched_modifier_nonmonotonic | kmp_sched_modifier_monotonic)) == \
   0)
};
struct TaskDescriptorTy;
/// Signature of an outlined task entry point, invoked with the calling
/// thread's global id and the task's descriptor.
using TaskFnTy = int32_t (*)(int32_t global_tid, TaskDescriptorTy *taskDescr);
/// Minimal task descriptor: the outlined function plus its captured data.
struct TaskDescriptorTy {
  void *Payload;
  TaskFnTy TaskFn;
};
/// SIMT lane-mask type. The first variant applies on amdgcn devices; the
/// second, via the match_none extension, applies everywhere amdgcn does
/// NOT match.
/// NOTE(review): both arms declare the same 64-bit type, which makes the
/// variant split redundant as written; if 32-wide-warp targets (nvptx) were
/// meant to get uint32_t, the second arm may be a typo -- confirm against
/// the upstream DeviceRTL header.
#pragma omp begin declare variant match(device = {arch(amdgcn)})
using LaneMaskTy = uint64_t;
#pragma omp end declare variant
#pragma omp begin declare variant match(                                       \
    device = {arch(amdgcn)}, implementation = {extension(match_none)})
using LaneMaskTy = uint64_t;
#pragma omp end declare variant
namespace lanes {
// Mask with every lane bit set.
enum : LaneMaskTy { All = ~(LaneMaskTy)0 };
} // namespace lanes
/// The ident structure that describes a source location. The struct is
/// identical to the one in the kmp.h file. We maintain the same data structure
/// for compatibility.
// Field layout must remain byte-identical to ident_t in the host's kmp.h,
// since these structs are passed across the host/device runtime boundary.
struct IdentTy {
  int32_t reserved_1;  /**< might be used in Fortran; see above */
  int32_t flags;       /**< also f.flags; KMP_IDENT_xxx flags; KMP_IDENT_KMPC
                            identifies this union member */
  int32_t reserved_2;  /**< not really used in Fortran any more; see above */
  int32_t reserved_3;  /**< source[4] in Fortran, do not use for C++ */
  char const *psource; /**< String describing the source location.
                            The string is composed of semi-colon separated
                            fields which describe the source file, the function
                            and a pair of line numbers that delimit the
                            construct. */
};
using __kmpc_impl_lanemask_t = LaneMaskTy;
/// Opaque pointer to an outlined parallel-region function.
using ParallelRegionFnTy = void *;
/// NOTE(review): presumably mirrors the host's kmp_critical_name
/// (8 x 32-bit words) -- confirm against kmp.h.
using CriticalNameTy = int32_t[8];
/// OpenMP lock object; the runtime stores its state behind this pointer.
struct omp_lock_t {
  void *Lock;
};
// Callback types used by the cross-lane/cross-team reduction machinery.
using InterWarpCopyFnTy = void (*)(void *src, int32_t warp_num);
using ShuffleReductFnTy = void (*)(void *rhsData, int16_t lane_id,
                                   int16_t lane_offset, int16_t shortCircuit);
using ListGlobalFnTy = void (*)(void *buffer, int idx, void *reduce_data);
/// Macros for allocating variables in different address spaces.
///{
// Follows the pattern in interface.h
/// Predefined memory-allocator handles; values mirror the host-side
/// omp_allocator_handle_t (see interface.h) and must not be renumbered.
typedef enum omp_allocator_handle_t {
  omp_null_allocator = 0,
  omp_default_mem_alloc = 1,
  omp_large_cap_mem_alloc = 2,
  omp_const_mem_alloc = 3,
  omp_high_bw_mem_alloc = 4,
  omp_low_lat_mem_alloc = 5,
  omp_cgroup_mem_alloc = 6,
  omp_pteam_mem_alloc = 7,
  omp_thread_mem_alloc = 8,
  KMP_ALLOCATOR_MAX_HANDLE = ~(0U)
} omp_allocator_handle_t;
/// Bit flags describing how a target region executes; kept as distinct
/// bits (presumably so a kernel image can advertise support for both
/// modes -- confirm against the plugin interface).
enum OMPTgtExecModeFlags : int8_t {
  OMP_TGT_EXEC_MODE_GENERIC = 1 << 0,
  OMP_TGT_EXEC_MODE_SPMD = 1 << 1,
};
// Helpers to build an `#pragma omp ...` from a token sequence.
#define __PRAGMA(STR) _Pragma(#STR)
#define OMP_PRAGMA(STR) __PRAGMA(omp STR)
// Declare a variable in team-shared storage, left uninitialized so no
// constructor runs on the device.
#define SHARED(NAME)                                                           \
  NAME [[clang::loader_uninitialized]];                                        \
  OMP_PRAGMA(allocate(NAME) allocator(omp_pteam_mem_alloc))
// TODO: clang should use address space 5 for omp_thread_mem_alloc, but right
// now that's not the case.
#define THREAD_LOCAL(NAME)                                                     \
  NAME [[clang::loader_uninitialized, clang::address_space(5)]]
// TODO: clang should use address space 4 for omp_const_mem_alloc, maybe it
// does?
#define CONSTANT(NAME)                                                         \
  NAME [[clang::loader_uninitialized, clang::address_space(4)]]
///}
#endif
|
Stmt.h | //===- Stmt.h - Classes for representing statements -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Stmt interface and subclasses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMT_H
#define LLVM_CLANG_AST_STMT_H
#include "clang/AST/DeclGroup.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <string>
namespace llvm {
class FoldingSetNodeID;
} // namespace llvm
namespace clang {
class ASTContext;
class Attr;
class CapturedDecl;
class Decl;
class Expr;
class LabelDecl;
class ODRHash;
class PrinterHelper;
struct PrintingPolicy;
class RecordDecl;
class SourceManager;
class StringLiteral;
class Token;
class VarDecl;
//===----------------------------------------------------------------------===//
// AST classes for statements.
//===----------------------------------------------------------------------===//
/// Stmt - This represents one statement.
///
class alignas(void *) Stmt {
public:
enum StmtClass {
NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
};
// Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
friend class ASTStmtReader;
friend class ASTStmtWriter;
void *operator new(size_t bytes) noexcept {
llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
}
void operator delete(void *data) noexcept {
llvm_unreachable("Stmts cannot be released with regular 'delete'.");
}
//===--- Statement bitfields classes ---===//
class StmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class Stmt;
/// The statement class.
unsigned sClass : 8;
/// This bit is set only for the Stmts that are the structured-block of
/// OpenMP executable directives. Directives that have a structured block
/// are called "non-standalone" directives.
/// I.e. those returned by OMPExecutableDirective::getStructuredBlock().
unsigned IsOMPStructuredBlock : 1;
};
enum { NumStmtBits = 9 };
class NullStmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class NullStmt;
unsigned : NumStmtBits;
/// True if the null statement was preceded by an empty macro, e.g:
/// @code
/// #define CALL(x)
/// CALL(0);
/// @endcode
unsigned HasLeadingEmptyMacro : 1;
/// The location of the semi-colon.
SourceLocation SemiLoc;
};
class CompoundStmtBitfields {
friend class ASTStmtReader;
friend class CompoundStmt;
unsigned : NumStmtBits;
unsigned NumStmts : 32 - NumStmtBits;
/// The location of the opening "{".
SourceLocation LBraceLoc;
};
class LabelStmtBitfields {
friend class LabelStmt;
unsigned : NumStmtBits;
SourceLocation IdentLoc;
};
class AttributedStmtBitfields {
friend class ASTStmtReader;
friend class AttributedStmt;
unsigned : NumStmtBits;
/// Number of attributes.
unsigned NumAttrs : 32 - NumStmtBits;
/// The location of the attribute.
SourceLocation AttrLoc;
};
class IfStmtBitfields {
friend class ASTStmtReader;
friend class IfStmt;
unsigned : NumStmtBits;
/// True if this if statement is a constexpr if.
unsigned IsConstexpr : 1;
/// True if this if statement has storage for an else statement.
unsigned HasElse : 1;
/// True if this if statement has storage for a variable declaration.
unsigned HasVar : 1;
/// True if this if statement has storage for an init statement.
unsigned HasInit : 1;
/// The location of the "if".
SourceLocation IfLoc;
};
class SwitchStmtBitfields {
friend class SwitchStmt;
unsigned : NumStmtBits;
/// True if the SwitchStmt has storage for an init statement.
unsigned HasInit : 1;
/// True if the SwitchStmt has storage for a condition variable.
unsigned HasVar : 1;
/// If the SwitchStmt is a switch on an enum value, records whether all
/// the enum values were covered by CaseStmts. The coverage information
/// value is meant to be a hint for possible clients.
unsigned AllEnumCasesCovered : 1;
/// The location of the "switch".
SourceLocation SwitchLoc;
};
class WhileStmtBitfields {
friend class ASTStmtReader;
friend class WhileStmt;
unsigned : NumStmtBits;
/// True if the WhileStmt has storage for a condition variable.
unsigned HasVar : 1;
/// The location of the "while".
SourceLocation WhileLoc;
};
class DoStmtBitfields {
friend class DoStmt;
unsigned : NumStmtBits;
/// The location of the "do".
SourceLocation DoLoc;
};
class ForStmtBitfields {
friend class ForStmt;
unsigned : NumStmtBits;
/// The location of the "for".
SourceLocation ForLoc;
};
class GotoStmtBitfields {
friend class GotoStmt;
friend class IndirectGotoStmt;
unsigned : NumStmtBits;
/// The location of the "goto".
SourceLocation GotoLoc;
};
class ContinueStmtBitfields {
friend class ContinueStmt;
unsigned : NumStmtBits;
/// The location of the "continue".
SourceLocation ContinueLoc;
};
class BreakStmtBitfields {
friend class BreakStmt;
unsigned : NumStmtBits;
/// The location of the "break".
SourceLocation BreakLoc;
};
class ReturnStmtBitfields {
friend class ReturnStmt;
unsigned : NumStmtBits;
/// True if this ReturnStmt has storage for an NRVO candidate.
unsigned HasNRVOCandidate : 1;
/// The location of the "return".
SourceLocation RetLoc;
};
class SwitchCaseBitfields {
friend class SwitchCase;
friend class CaseStmt;
unsigned : NumStmtBits;
/// Used by CaseStmt to store whether it is a case statement
/// of the form case LHS ... RHS (a GNU extension).
unsigned CaseStmtIsGNURange : 1;
/// The location of the "case" or "default" keyword.
SourceLocation KeywordLoc;
};
//===--- Expression bitfields classes ---===//
class ExprBitfields {
friend class ASTStmtReader; // deserialization
friend class AtomicExpr; // ctor
friend class BlockDeclRefExpr; // ctor
friend class CallExpr; // ctor
friend class CXXConstructExpr; // ctor
friend class CXXDependentScopeMemberExpr; // ctor
friend class CXXNewExpr; // ctor
friend class CXXUnresolvedConstructExpr; // ctor
friend class DeclRefExpr; // computeDependence
friend class DependentScopeDeclRefExpr; // ctor
friend class DesignatedInitExpr; // ctor
friend class Expr;
friend class InitListExpr; // ctor
friend class ObjCArrayLiteral; // ctor
friend class ObjCDictionaryLiteral; // ctor
friend class ObjCMessageExpr; // ctor
friend class OffsetOfExpr; // ctor
friend class OpaqueValueExpr; // ctor
friend class OverloadExpr; // ctor
friend class ParenListExpr; // ctor
friend class PseudoObjectExpr; // ctor
friend class ShuffleVectorExpr; // ctor
unsigned : NumStmtBits;
unsigned ValueKind : 2;
unsigned ObjectKind : 3;
unsigned TypeDependent : 1;
unsigned ValueDependent : 1;
unsigned InstantiationDependent : 1;
unsigned ContainsUnexpandedParameterPack : 1;
};
enum { NumExprBits = NumStmtBits + 9 };
class PredefinedExprBitfields {
friend class ASTStmtReader;
friend class PredefinedExpr;
unsigned : NumExprBits;
/// The kind of this PredefinedExpr. One of the enumeration values
/// in PredefinedExpr::IdentKind.
unsigned Kind : 4;
/// True if this PredefinedExpr has a trailing "StringLiteral *"
/// for the predefined identifier.
unsigned HasFunctionName : 1;
/// The location of this PredefinedExpr.
SourceLocation Loc;
};
class DeclRefExprBitfields {
friend class ASTStmtReader; // deserialization
friend class DeclRefExpr;
unsigned : NumExprBits;
unsigned HasQualifier : 1;
unsigned HasTemplateKWAndArgsInfo : 1;
unsigned HasFoundDecl : 1;
unsigned HadMultipleCandidates : 1;
unsigned RefersToEnclosingVariableOrCapture : 1;
/// The location of the declaration name itself.
SourceLocation Loc;
};
enum APFloatSemantics {
IEEEhalf,
IEEEsingle,
IEEEdouble,
x87DoubleExtended,
IEEEquad,
PPCDoubleDouble
};
class FloatingLiteralBitfields {
friend class FloatingLiteral;
unsigned : NumExprBits;
unsigned Semantics : 3; // Provides semantics for APFloat construction
unsigned IsExact : 1;
};
class StringLiteralBitfields {
friend class ASTStmtReader;
friend class StringLiteral;
unsigned : NumExprBits;
/// The kind of this string literal.
/// One of the enumeration values of StringLiteral::StringKind.
unsigned Kind : 3;
/// The width of a single character in bytes. Only values of 1, 2,
/// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps
/// the target + string kind to the appropriate CharByteWidth.
unsigned CharByteWidth : 3;
unsigned IsPascal : 1;
/// The number of concatenated token this string is made of.
/// This is the number of trailing SourceLocation.
unsigned NumConcatenated;
};
class CharacterLiteralBitfields {
friend class CharacterLiteral;
unsigned : NumExprBits;
unsigned Kind : 3;
};
/// Bit-fields and operator location for UnaryOperator.
class UnaryOperatorBitfields {
friend class UnaryOperator;
unsigned : NumExprBits;
unsigned Opc : 5;
unsigned CanOverflow : 1;
SourceLocation Loc;
};
/// Bit-fields for UnaryExprOrTypeTraitExpr.
class UnaryExprOrTypeTraitExprBitfields {
friend class UnaryExprOrTypeTraitExpr;
unsigned : NumExprBits;
unsigned Kind : 3;
unsigned IsType : 1; // true if operand is a type, false if an expression.
};
/// Bracket location for ArraySubscriptExpr.
class ArraySubscriptExprBitfields {
friend class ArraySubscriptExpr;
unsigned : NumExprBits;
SourceLocation RBracketLoc;
};
/// Bit-fields for CallExpr.
class CallExprBitfields {
friend class CallExpr;
unsigned : NumExprBits;
unsigned NumPreArgs : 1;
/// True if the callee of the call expression was found using ADL.
unsigned UsesADL : 1;
/// Padding used to align OffsetToTrailingObjects to a byte multiple.
unsigned : 24 - 2 - NumExprBits;
/// The offset in bytes from the this pointer to the start of the
/// trailing objects belonging to CallExpr. Intentionally byte sized
/// for faster access.
unsigned OffsetToTrailingObjects : 8;
};
/// Total bits consumed by CallExprBitfields; the bitfield classes of the
/// call-expression subclasses (e.g. CXXOperatorCallExprBitfields) reserve
/// this many bits.
enum { NumCallExprBits = 32 };
/// Bit-fields and operator location for MemberExpr.
class MemberExprBitfields {
friend class MemberExpr;
unsigned : NumExprBits;
/// IsArrow - True if this is "X->F", false if this is "X.F".
unsigned IsArrow : 1;
/// True if this member expression used a nested-name-specifier to
/// refer to the member, e.g., "x->Base::f", or found its member via
/// a using declaration. When true, a MemberExprNameQualifier
/// structure is allocated immediately after the MemberExpr.
unsigned HasQualifierOrFoundDecl : 1;
/// True if this member expression specified a template keyword
/// and/or a template argument list explicitly, e.g., x->f<int>,
/// x->template f, x->template f<int>.
/// When true, an ASTTemplateKWAndArgsInfo structure and its
/// TemplateArguments (if any) are present.
unsigned HasTemplateKWAndArgsInfo : 1;
/// True if this member expression refers to a method that
/// was resolved from an overloaded set having size greater than 1.
unsigned HadMultipleCandidates : 1;
/// This is the location of the -> or . in the expression.
SourceLocation OperatorLoc;
};
/// Bit-fields shared by CastExpr and ImplicitCastExpr.
class CastExprBitfields {
friend class CastExpr;
friend class ImplicitCastExpr;
unsigned : NumExprBits;
unsigned Kind : 6;
unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.
/// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough
/// here. ([implimits] Direct and indirect base classes [16384]).
unsigned BasePathSize;
};
/// Bit-fields and operator location for BinaryOperator.
class BinaryOperatorBitfields {
friend class BinaryOperator;
unsigned : NumExprBits;
unsigned Opc : 6;
/// This is only meaningful for operations on floating point
/// types and 0 otherwise.
unsigned FPFeatures : 3;
SourceLocation OpLoc;
};
/// Bit-fields for InitListExpr.
class InitListExprBitfields {
friend class InitListExpr;
unsigned : NumExprBits;
/// Whether this initializer list originally had a GNU array-range
/// designator in it. This is a temporary marker used by CodeGen.
unsigned HadArrayRangeDesignator : 1;
};
/// Bit-fields for ParenListExpr.
class ParenListExprBitfields {
friend class ASTStmtReader;
friend class ParenListExpr;
unsigned : NumExprBits;
/// The number of expressions in the paren list.
unsigned NumExprs;
};
/// Keyword location for GenericSelectionExpr.
class GenericSelectionExprBitfields {
friend class ASTStmtReader;
friend class GenericSelectionExpr;
unsigned : NumExprBits;
/// The location of the "_Generic".
SourceLocation GenericLoc;
};
/// Bit-fields for PseudoObjectExpr. NumSubExprs and ResultIndex together
/// fill exactly the 32 - NumExprBits remaining bits of the first word.
class PseudoObjectExprBitfields {
friend class ASTStmtReader; // deserialization
friend class PseudoObjectExpr;
unsigned : NumExprBits;
// These don't need to be particularly wide, because they're
// strictly limited by the forms of expressions we permit.
unsigned NumSubExprs : 8;
unsigned ResultIndex : 32 - 8 - NumExprBits;
};
//===--- C++ Expression bitfields classes ---===//
/// Bit-fields for CXXOperatorCallExpr; overlays the CallExpr bit-fields
/// (note NumCallExprBits rather than NumExprBits).
class CXXOperatorCallExprBitfields {
friend class ASTStmtReader;
friend class CXXOperatorCallExpr;
unsigned : NumCallExprBits;
/// The kind of this overloaded operator. One of the enumerator
/// value of OverloadedOperatorKind.
unsigned OperatorKind : 6;
// Only meaningful for floating point types.
unsigned FPFeatures : 3;
};
/// Bit-fields and location for CXXBoolLiteralExpr.
class CXXBoolLiteralExprBitfields {
friend class CXXBoolLiteralExpr;
unsigned : NumExprBits;
/// The value of the boolean literal.
unsigned Value : 1;
/// The location of the boolean literal.
SourceLocation Loc;
};
/// Location storage for CXXNullPtrLiteralExpr.
class CXXNullPtrLiteralExprBitfields {
friend class CXXNullPtrLiteralExpr;
unsigned : NumExprBits;
/// The location of the null pointer literal.
SourceLocation Loc;
};
/// Bit-fields and location for CXXThisExpr.
class CXXThisExprBitfields {
friend class CXXThisExpr;
unsigned : NumExprBits;
/// Whether this is an implicit "this".
unsigned IsImplicit : 1;
/// The location of the "this".
SourceLocation Loc;
};
/// Bit-fields and location for CXXThrowExpr.
class CXXThrowExprBitfields {
friend class ASTStmtReader;
friend class CXXThrowExpr;
unsigned : NumExprBits;
/// Whether the thrown variable (if any) is in scope.
unsigned IsThrownVariableInScope : 1;
/// The location of the "throw".
SourceLocation ThrowLoc;
};
/// Location storage for CXXDefaultArgExpr.
class CXXDefaultArgExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultArgExpr;
unsigned : NumExprBits;
/// The location where the default argument expression was used.
SourceLocation Loc;
};
/// Location storage for CXXDefaultInitExpr.
class CXXDefaultInitExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultInitExpr;
unsigned : NumExprBits;
/// The location where the default initializer expression was used.
SourceLocation Loc;
};
/// Location storage for CXXScalarValueInitExpr.
class CXXScalarValueInitExprBitfields {
friend class ASTStmtReader;
friend class CXXScalarValueInitExpr;
unsigned : NumExprBits;
SourceLocation RParenLoc;
};
/// Bit-fields for CXXNewExpr.
class CXXNewExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class CXXNewExpr;
unsigned : NumExprBits;
/// Was the usage ::new, i.e. is the global new to be used?
unsigned IsGlobalNew : 1;
/// Do we allocate an array? If so, the first trailing "Stmt *" is the
/// size expression.
unsigned IsArray : 1;
/// Should the alignment be passed to the allocation function?
unsigned ShouldPassAlignment : 1;
/// If this is an array allocation, does the usual deallocation
/// function for the allocated type want to know the allocated size?
unsigned UsualArrayDeleteWantsSize : 1;
/// What kind of initializer do we have? Could be none, parens, or braces.
/// In storage, we distinguish between "none, and no initializer expr", and
/// "none, but an implicit initializer expr".
unsigned StoredInitializationStyle : 2;
/// True if the allocated type was expressed as a parenthesized type-id.
unsigned IsParenTypeId : 1;
/// The number of placement new arguments.
unsigned NumPlacementArgs;
};
/// Bit-fields and location for CXXDeleteExpr.
class CXXDeleteExprBitfields {
friend class ASTStmtReader;
friend class CXXDeleteExpr;
unsigned : NumExprBits;
/// Is this a forced global delete, i.e. "::delete"?
unsigned GlobalDelete : 1;
/// Is this the array form of delete, i.e. "delete[]"?
unsigned ArrayForm : 1;
/// ArrayFormAsWritten can be different from ArrayForm if 'delete' is
/// applied to pointer-to-array type (ArrayFormAsWritten will be false
/// while ArrayForm will be true).
unsigned ArrayFormAsWritten : 1;
/// Does the usual deallocation function for the element type require
/// a size_t argument?
unsigned UsualArrayDeleteWantsSize : 1;
/// Location of the expression.
SourceLocation Loc;
};
/// Bit-fields for TypeTraitExpr. NumArgs occupies all remaining bits of
/// the first 32-bit word.
class TypeTraitExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class TypeTraitExpr;
unsigned : NumExprBits;
/// The kind of type trait, which is a value of a TypeTrait enumerator.
unsigned Kind : 8;
/// If this expression is not value-dependent, this indicates whether
/// the trait evaluated true or false.
unsigned Value : 1;
/// The number of arguments to this type trait.
unsigned NumArgs : 32 - 8 - 1 - NumExprBits;
};
/// Bit-fields for DependentScopeDeclRefExpr.
class DependentScopeDeclRefExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class DependentScopeDeclRefExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
};
/// Bit-fields and location for CXXConstructExpr.
class CXXConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXConstructExpr;
unsigned : NumExprBits;
unsigned Elidable : 1;
unsigned HadMultipleCandidates : 1;
unsigned ListInitialization : 1;
unsigned StdInitListInitialization : 1;
unsigned ZeroInitialization : 1;
unsigned ConstructionKind : 3;
SourceLocation Loc;
};
/// Bit-fields for ExprWithCleanups. NumObjects occupies all remaining bits
/// of the first 32-bit word.
class ExprWithCleanupsBitfields {
friend class ASTStmtReader; // deserialization
friend class ExprWithCleanups;
unsigned : NumExprBits;
// When false, it must not have side effects.
unsigned CleanupsHaveSideEffects : 1;
unsigned NumObjects : 32 - 1 - NumExprBits;
};
/// Bit-fields for CXXUnresolvedConstructExpr.
class CXXUnresolvedConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXUnresolvedConstructExpr;
unsigned : NumExprBits;
/// The number of arguments used to construct the type.
unsigned NumArgs;
};
/// Bit-fields and operator location for CXXDependentScopeMemberExpr.
class CXXDependentScopeMemberExprBitfields {
friend class ASTStmtReader;
friend class CXXDependentScopeMemberExpr;
unsigned : NumExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether this member expression has info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// See getFirstQualifierFoundInScope() and the comment listing
/// the trailing objects.
unsigned HasFirstQualifierFoundInScope : 1;
/// The location of the '->' or '.' operator.
SourceLocation OperatorLoc;
};
/// Bit-fields for OverloadExpr and (via the padding below) its subclasses.
class OverloadExprBitfields {
friend class ASTStmtReader;
friend class OverloadExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// Padding used by the derived classes to store various bits. If you
/// need to add some data here, shrink this padding and add your data
/// above. NumOverloadExprBits also needs to be updated.
unsigned : 32 - NumExprBits - 1;
/// The number of results.
unsigned NumResults;
};
/// Bits consumed by OverloadExprBitfields before its padding; reserved by
/// the bitfield classes of UnresolvedLookupExpr and UnresolvedMemberExpr.
enum { NumOverloadExprBits = NumExprBits + 1 };
class UnresolvedLookupExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedLookupExpr;
unsigned : NumOverloadExprBits;
/// True if these lookup results should be extended by
/// argument-dependent lookup if this is the operand of a function call.
unsigned RequiresADL : 1;
/// True if these lookup results are overloaded. This is pretty trivially
/// rederivable if we urgently need to kill this field.
unsigned Overloaded : 1;
};
static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4,
"UnresolvedLookupExprBitfields must be <= than 4 bytes to"
"avoid trashing OverloadExprBitfields::NumResults!");
class UnresolvedMemberExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedMemberExpr;
unsigned : NumOverloadExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether the lookup results contain an unresolved using declaration.
unsigned HasUnresolvedUsing : 1;
};
static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4,
"UnresolvedMemberExprBitfields must be <= than 4 bytes to"
"avoid trashing OverloadExprBitfields::NumResults!");
/// Bit-fields for CXXNoexceptExpr.
class CXXNoexceptExprBitfields {
friend class ASTStmtReader;
friend class CXXNoexceptExpr;
unsigned : NumExprBits;
unsigned Value : 1;
};
/// Location storage for SubstNonTypeTemplateParmExpr.
class SubstNonTypeTemplateParmExprBitfields {
friend class ASTStmtReader;
friend class SubstNonTypeTemplateParmExpr;
unsigned : NumExprBits;
/// The location of the non-type template parameter reference.
SourceLocation NameLoc;
};
//===--- C++ Coroutines TS bitfields classes ---===//
/// Bit-fields for CoawaitExpr.
class CoawaitExprBitfields {
friend class CoawaitExpr;
unsigned : NumExprBits;
unsigned IsImplicit : 1;
};
//===--- Obj-C Expression bitfields classes ---===//
/// Bit-fields for ObjCIndirectCopyRestoreExpr.
class ObjCIndirectCopyRestoreExprBitfields {
friend class ObjCIndirectCopyRestoreExpr;
unsigned : NumExprBits;
unsigned ShouldCopy : 1;
};
//===--- Clang Extensions bitfields classes ---===//
/// Bit-fields and location for OpaqueValueExpr.
class OpaqueValueExprBitfields {
friend class ASTStmtReader;
friend class OpaqueValueExpr;
unsigned : NumExprBits;
/// The OVE is a unique semantic reference to its source expression if this
/// bit is set to true.
unsigned IsUnique : 1;
SourceLocation Loc;
};
/// The anonymous union in which the bit-fields of every concrete Stmt
/// subclass live. Each member class above reserves the bits of its base
/// class(es) with an anonymous bit-field, so the members overlay one
/// another consistently and a node reads/writes only its own member.
union {
// Same order as in StmtNodes.td.
// Statements
StmtBitfields StmtBits;
NullStmtBitfields NullStmtBits;
CompoundStmtBitfields CompoundStmtBits;
LabelStmtBitfields LabelStmtBits;
AttributedStmtBitfields AttributedStmtBits;
IfStmtBitfields IfStmtBits;
SwitchStmtBitfields SwitchStmtBits;
WhileStmtBitfields WhileStmtBits;
DoStmtBitfields DoStmtBits;
ForStmtBitfields ForStmtBits;
GotoStmtBitfields GotoStmtBits;
ContinueStmtBitfields ContinueStmtBits;
BreakStmtBitfields BreakStmtBits;
ReturnStmtBitfields ReturnStmtBits;
SwitchCaseBitfields SwitchCaseBits;
// Expressions
ExprBitfields ExprBits;
PredefinedExprBitfields PredefinedExprBits;
DeclRefExprBitfields DeclRefExprBits;
FloatingLiteralBitfields FloatingLiteralBits;
StringLiteralBitfields StringLiteralBits;
CharacterLiteralBitfields CharacterLiteralBits;
UnaryOperatorBitfields UnaryOperatorBits;
UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
ArraySubscriptExprBitfields ArraySubscriptExprBits;
CallExprBitfields CallExprBits;
MemberExprBitfields MemberExprBits;
CastExprBitfields CastExprBits;
BinaryOperatorBitfields BinaryOperatorBits;
InitListExprBitfields InitListExprBits;
ParenListExprBitfields ParenListExprBits;
GenericSelectionExprBitfields GenericSelectionExprBits;
PseudoObjectExprBitfields PseudoObjectExprBits;
// C++ Expressions
CXXOperatorCallExprBitfields CXXOperatorCallExprBits;
CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits;
CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits;
CXXThisExprBitfields CXXThisExprBits;
CXXThrowExprBitfields CXXThrowExprBits;
CXXDefaultArgExprBitfields CXXDefaultArgExprBits;
CXXDefaultInitExprBitfields CXXDefaultInitExprBits;
CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits;
CXXNewExprBitfields CXXNewExprBits;
CXXDeleteExprBitfields CXXDeleteExprBits;
TypeTraitExprBitfields TypeTraitExprBits;
DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits;
CXXConstructExprBitfields CXXConstructExprBits;
ExprWithCleanupsBitfields ExprWithCleanupsBits;
CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits;
CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits;
OverloadExprBitfields OverloadExprBits;
UnresolvedLookupExprBitfields UnresolvedLookupExprBits;
UnresolvedMemberExprBitfields UnresolvedMemberExprBits;
CXXNoexceptExprBitfields CXXNoexceptExprBits;
SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits;
// C++ Coroutines TS expressions
CoawaitExprBitfields CoawaitBits;
// Obj-C Expressions
ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;
// Clang Extensions
OpaqueValueExprBitfields OpaqueValueExprBits;
};
public:
// Only allow allocation of Stmts using the allocator in ASTContext
// or by doing a placement new.
void* operator new(size_t bytes, const ASTContext& C,
unsigned alignment = 8);
void* operator new(size_t bytes, const ASTContext* C,
unsigned alignment = 8) {
// Convenience overload: forwards to the ASTContext-reference form.
return operator new(bytes, *C, alignment);
}
// Raw placement new: construct into caller-provided storage.
void *operator new(size_t bytes, void *mem) noexcept { return mem; }
// Matching operator deletes. They deliberately do nothing — the storage
// is not owned by the Stmt — and exist so the compiler has a matching
// deallocation function to call if a constructor throws.
void operator delete(void *, const ASTContext &, unsigned) noexcept {}
void operator delete(void *, const ASTContext *, unsigned) noexcept {}
void operator delete(void *, size_t) noexcept {}
void operator delete(void *, void *) noexcept {}
public:
/// A placeholder type used to construct an empty shell of a
/// type, that will be filled in later (e.g., by some
/// de-serialization).
struct EmptyShell {};
protected:
/// Iterator for iterating over Stmt * arrays that contain only T *.
///
/// This is needed because AST nodes use Stmt* arrays to store
/// references to children (to be compatible with StmtIterator).
template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *>
struct CastIterator
: llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *,
std::random_access_iterator_tag, TPtr> {
using Base = typename CastIterator::iterator_adaptor_base;
CastIterator() : Base(nullptr) {}
CastIterator(StmtPtr *I) : Base(I) {}
// Dereference casts the stored Stmt* down to T*; null entries stay null.
typename Base::value_type operator*() const {
return cast_or_null<T>(*this->I);
}
};
/// Const iterator for iterating over Stmt * arrays that contain only T *.
template <typename T>
using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>;
using ExprIterator = CastIterator<Expr>;
using ConstExprIterator = ConstCastIterator<Expr>;
private:
/// Whether statistic collection is enabled.
static bool StatisticsEnabled;
protected:
/// Construct an empty statement.
explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
/// Construct a statement of the given class. The static_asserts verify at
/// compile time that growing the bit-field classes has not grown Stmt
/// beyond a single pointer-aligned 8-byte word.
Stmt(StmtClass SC) {
static_assert(sizeof(*this) <= 8,
"changing bitfields changed sizeof(Stmt)");
static_assert(sizeof(*this) % alignof(void *) == 0,
"Insufficient alignment!");
StmtBits.sClass = SC;
StmtBits.IsOMPStructuredBlock = false;
if (StatisticsEnabled) Stmt::addStmtClass(SC);
}
StmtClass getStmtClass() const {
return static_cast<StmtClass>(StmtBits.sClass);
}
// Stmt nodes are allocated in the ASTContext and are neither copyable
// nor movable.
Stmt(const Stmt &) = delete;
Stmt(Stmt &&) = delete;
Stmt &operator=(const Stmt &) = delete;
Stmt &operator=(Stmt &&) = delete;
const char *getStmtClassName() const;
bool isOMPStructuredBlock() const { return StmtBits.IsOMPStructuredBlock; }
void setIsOMPStructuredBlock(bool IsOMPStructuredBlock) {
StmtBits.IsOMPStructuredBlock = IsOMPStructuredBlock;
}
/// SourceLocation tokens are not useful in isolation - they are low level
/// value objects created/interpreted by SourceManager. We assume AST
/// clients will have a pointer to the respective SourceManager.
SourceRange getSourceRange() const LLVM_READONLY;
SourceLocation getBeginLoc() const LLVM_READONLY;
SourceLocation getEndLoc() const LLVM_READONLY;
// global temp stats (until we have a per-module visitor)
static void addStmtClass(const StmtClass s);
static void EnableStatistics();
static void PrintStats();
/// Dumps the specified AST fragment and all subtrees to
/// \c llvm::errs().
void dump() const;
void dump(SourceManager &SM) const;
void dump(raw_ostream &OS, SourceManager &SM) const;
void dump(raw_ostream &OS) const;
/// \return Unique reproducible object identifier
int64_t getID(const ASTContext &Context) const;
/// dumpColor - same as dump(), but forces color highlighting.
void dumpColor() const;
/// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
/// back to its original source language syntax.
void dumpPretty(const ASTContext &Context) const;
void printPretty(raw_ostream &OS, PrinterHelper *Helper,
const PrintingPolicy &Policy, unsigned Indentation = 0,
StringRef NewlineSymbol = "\n",
const ASTContext *Context = nullptr) const;
/// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
/// works on systems with GraphViz (Mac OS X) or dot+gv installed.
void viewAST() const;
/// Skip no-op (attributed, compound) container stmts and skip captured
/// stmt at the top, if \a IgnoreCaptured is true.
Stmt *IgnoreContainers(bool IgnoreCaptured = false);
const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
// Const overload forwards to the non-const implementation.
return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
}
const Stmt *stripLabelLikeStatements() const;
Stmt *stripLabelLikeStatements() {
// Non-const overload forwards to the const implementation.
return const_cast<Stmt*>(
const_cast<const Stmt*>(this)->stripLabelLikeStatements());
}
/// Child Iterators: All subclasses must implement 'children'
/// to permit easy iteration over the substatements/subexpessions of an
/// AST node. This permits easy iteration over all nodes in the AST.
using child_iterator = StmtIterator;
using const_child_iterator = ConstStmtIterator;
using child_range = llvm::iterator_range<child_iterator>;
using const_child_range = llvm::iterator_range<const_child_iterator>;
child_range children();
const_child_range children() const {
auto Children = const_cast<Stmt *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_iterator child_begin() { return children().begin(); }
child_iterator child_end() { return children().end(); }
const_child_iterator child_begin() const { return children().begin(); }
const_child_iterator child_end() const { return children().end(); }
/// Produce a unique representation of the given statement.
///
/// \param ID once the profiling operation is complete, will contain
/// the unique representation of the given statement.
///
/// \param Context the AST context in which the statement resides
///
/// \param Canonical whether the profile should be based on the canonical
/// representation of this statement (e.g., where non-type template
/// parameters are identified by index/level rather than their
/// declaration pointers) or the exact representation of the statement as
/// written in the source.
void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
bool Canonical) const;
/// Calculate a unique representation for a statement that is
/// stable across compiler invocations.
///
/// \param ID profile information will be stored in ID.
///
/// \param Hash an ODRHash object which will be called where pointers would
/// have been used in the Profile function.
void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};
/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
/// The group of declarations wrapped by this statement.
DeclGroupRef DG;
/// Source extent of the declaration statement.
SourceLocation StartLoc, EndLoc;
public:
DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
: Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}
/// Build an empty declaration statement.
explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}
/// isSingleDecl - This method returns true if this DeclStmt refers
/// to a single Decl.
bool isSingleDecl() const { return DG.isSingleDecl(); }
const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
Decl *getSingleDecl() { return DG.getSingleDecl(); }
const DeclGroupRef getDeclGroup() const { return DG; }
DeclGroupRef getDeclGroup() { return DG; }
void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }
void setStartLoc(SourceLocation L) { StartLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == DeclStmtClass;
}
// Iterators over subexpressions.
child_range children() {
return child_range(child_iterator(DG.begin(), DG.end()),
child_iterator(DG.end(), DG.end()));
}
const_child_range children() const {
auto Children = const_cast<DeclStmt *>(this)->children();
return const_child_range(Children);
}
// Iteration over the declarations in the group.
using decl_iterator = DeclGroupRef::iterator;
using const_decl_iterator = DeclGroupRef::const_iterator;
using decl_range = llvm::iterator_range<decl_iterator>;
using decl_const_range = llvm::iterator_range<const_decl_iterator>;
decl_range decls() { return decl_range(decl_begin(), decl_end()); }
decl_const_range decls() const {
return decl_const_range(decl_begin(), decl_end());
}
decl_iterator decl_begin() { return DG.begin(); }
decl_iterator decl_end() { return DG.end(); }
const_decl_iterator decl_begin() const { return DG.begin(); }
const_decl_iterator decl_end() const { return DG.end(); }
using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;
reverse_decl_iterator decl_rbegin() {
return reverse_decl_iterator(decl_end());
}
reverse_decl_iterator decl_rend() {
return reverse_decl_iterator(decl_begin());
}
};
/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
public:
NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
: Stmt(NullStmtClass) {
setSemiLoc(L);
NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro;
}
/// Build an empty null statement.
explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}
/// Accessors for the location of the ';' (stored in NullStmtBits).
SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; }
void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; }
/// Value of the HasLeadingEmptyMacro flag recorded at construction.
bool hasLeadingEmptyMacro() const { return NullStmtBits.HasLeadingEmptyMacro; }
/// The statement begins and ends at the ';' itself.
SourceLocation getBeginLoc() const { return getSemiLoc(); }
SourceLocation getEndLoc() const { return getSemiLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == NullStmtClass;
}
/// A null statement has no children; both ranges are empty.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// CompoundStmt - This represents a group of statements like { stmt stmt }.
/// The statements are stored as trailing "Stmt *" objects after the node.
class CompoundStmt final : public Stmt,
private llvm::TrailingObjects<CompoundStmt, Stmt *> {
friend class ASTStmtReader;
friend TrailingObjects;
/// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits.
SourceLocation RBraceLoc;
CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);
explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}
void setStmts(ArrayRef<Stmt *> Stmts);
public:
static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
SourceLocation LB, SourceLocation RB);
// Build an empty compound statement with a location.
explicit CompoundStmt(SourceLocation Loc)
: Stmt(CompoundStmtClass), RBraceLoc(Loc) {
CompoundStmtBits.NumStmts = 0;
CompoundStmtBits.LBraceLoc = Loc;
}
// Build an empty compound statement.
static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);
bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
unsigned size() const { return CompoundStmtBits.NumStmts; }
// Mutable iteration over the trailing statement array.
using body_iterator = Stmt **;
using body_range = llvm::iterator_range<body_iterator>;
body_range body() { return body_range(body_begin(), body_end()); }
body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
body_iterator body_end() { return body_begin() + size(); }
Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }
Stmt *body_back() {
return !body_empty() ? body_begin()[size() - 1] : nullptr;
}
void setLastStmt(Stmt *S) {
assert(!body_empty() && "setLastStmt");
body_begin()[size() - 1] = S;
}
// Const iteration over the trailing statement array.
using const_body_iterator = Stmt *const *;
using body_const_range = llvm::iterator_range<const_body_iterator>;
body_const_range body() const {
return body_const_range(body_begin(), body_end());
}
const_body_iterator body_begin() const {
return getTrailingObjects<Stmt *>();
}
const_body_iterator body_end() const { return body_begin() + size(); }
const Stmt *body_front() const {
return !body_empty() ? body_begin()[0] : nullptr;
}
const Stmt *body_back() const {
return !body_empty() ? body_begin()[size() - 1] : nullptr;
}
// Reverse iteration over the body.
using reverse_body_iterator = std::reverse_iterator<body_iterator>;
reverse_body_iterator body_rbegin() {
return reverse_body_iterator(body_end());
}
reverse_body_iterator body_rend() {
return reverse_body_iterator(body_begin());
}
using const_reverse_body_iterator =
std::reverse_iterator<const_body_iterator>;
const_reverse_body_iterator body_rbegin() const {
return const_reverse_body_iterator(body_end());
}
const_reverse_body_iterator body_rend() const {
return const_reverse_body_iterator(body_begin());
}
SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
SourceLocation getEndLoc() const { return RBraceLoc; }
SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
SourceLocation getRBracLoc() const { return RBraceLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == CompoundStmtClass;
}
// Iterators
child_range children() { return child_range(body_begin(), body_end()); }
const_child_range children() const {
return const_child_range(body_begin(), body_end());
}
};
// SwitchCase is the base class for CaseStmt and DefaultStmt,
class SwitchCase : public Stmt {
protected:
/// The location of the ":".
SourceLocation ColonLoc;
// The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
// SourceLocation KeywordLoc;
/// A pointer to the following CaseStmt or DefaultStmt class,
/// used by SwitchStmt.
SwitchCase *NextSwitchCase = nullptr;
SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
: Stmt(SC), ColonLoc(ColonLoc) {
setKeywordLoc(KWLoc);
}
/// Build an empty switch case for deserialization.
SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
// Intrusive singly-linked list of cases, maintained by SwitchStmt.
const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }
SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }
// Defined inline after CaseStmt/DefaultStmt are complete.
inline Stmt *getSubStmt();
const Stmt *getSubStmt() const {
return const_cast<SwitchCase *>(this)->getSubStmt();
}
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
inline SourceLocation getEndLoc() const LLVM_READONLY;
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass ||
T->getStmtClass() == DefaultStmtClass;
}
};
/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
: public SwitchCase,
private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
friend TrailingObjects;
// CaseStmt is followed by several trailing objects, some of which optional.
// Note that it would be more convenient to put the optional trailing objects
// at the end but this would impact children().
// The trailing objects are in order:
//
// * A "Stmt *" for the LHS of the case statement. Always present.
//
// * A "Stmt *" for the RHS of the case statement. This is a GNU extension
// which allow ranges in cases statement of the form LHS ... RHS.
// Present if and only if caseStmtIsGNURange() is true.
//
// * A "Stmt *" for the substatement of the case statement. Always present.
//
// * A SourceLocation for the location of the ... if this is a case statement
// with a range. Present if and only if caseStmtIsGNURange() is true.
// Offsets into the trailing "Stmt *" array. The RHS slot exists only for
// GNU range cases, so the substatement slot is addressed relative to it.
enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
enum { NumMandatoryStmtPtr = 2 };
// LHS and substatement are always present; RHS adds one more for ranges.
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + caseStmtIsGNURange();
}
// One trailing SourceLocation (the "...") if and only if this is a range.
unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
return caseStmtIsGNURange();
}
unsigned lhsOffset() const { return LhsOffset; }
unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }
/// Build a case statement assuming that the storage for the
/// trailing objects has been properly allocated.
CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
SourceLocation ellipsisLoc, SourceLocation colonLoc)
: SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
// Handle GNU case statements of the form LHS ... RHS.
bool IsGNURange = rhs != nullptr;
SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
setLHS(lhs);
setSubStmt(nullptr);
if (IsGNURange) {
setRHS(rhs);
setEllipsisLoc(ellipsisLoc);
}
}
/// Build an empty switch case statement.
explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
: SwitchCase(CaseStmtClass, Empty) {
SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
}
public:
/// Build a case statement.
static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
SourceLocation caseLoc, SourceLocation ellipsisLoc,
SourceLocation colonLoc);
/// Build an empty case statement.
static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange);
/// True if this case statement is of the form case LHS ... RHS, which
/// is a GNU extension. In this case the RHS can be obtained with getRHS()
/// and the location of the ellipsis can be obtained with getEllipsisLoc().
bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; }
SourceLocation getCaseLoc() const { return getKeywordLoc(); }
void setCaseLoc(SourceLocation L) { setKeywordLoc(L); }
/// Get the location of the ... in a case statement of the form LHS ... RHS.
/// Returns an invalid (default) location for a non-range case.
SourceLocation getEllipsisLoc() const {
return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>()
: SourceLocation();
}
/// Set the location of the ... in a case statement of the form LHS ... RHS.
/// Assert that this case statement is of this form.
void setEllipsisLoc(SourceLocation L) {
assert(
caseStmtIsGNURange() &&
"setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!");
*getTrailingObjects<SourceLocation>() = L;
}
Expr *getLHS() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
}
const Expr *getLHS() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
}
void setLHS(Expr *Val) {
getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val);
}
Expr *getRHS() {
return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
getTrailingObjects<Stmt *>()[rhsOffset()])
: nullptr;
}
const Expr *getRHS() const {
return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
getTrailingObjects<Stmt *>()[rhsOffset()])
: nullptr;
}
void setRHS(Expr *Val) {
assert(caseStmtIsGNURange() &&
"setRHS but this is not a case stmt of the form LHS ... RHS!");
getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val);
}
Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; }
const Stmt *getSubStmt() const {
return getTrailingObjects<Stmt *>()[subStmtOffset()];
}
void setSubStmt(Stmt *S) {
getTrailingObjects<Stmt *>()[subStmtOffset()] = S;
}
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
// Handle deeply nested case statements with iteration instead of recursion.
const CaseStmt *CS = this;
while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
CS = CS2;
return CS->getSubStmt()->getEndLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass;
}
// Iterators
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
return const_child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
};
class DefaultStmt : public SwitchCase {
Stmt *SubStmt;
public:
DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
: SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}
/// Build an empty default statement.
explicit DefaultStmt(EmptyShell Empty)
: SwitchCase(DefaultStmtClass, Empty) {}
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setSubStmt(Stmt *S) { SubStmt = S; }
SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
return SubStmt->getEndLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == DefaultStmtClass;
}
// Iterators
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
const_child_range children() const {
return const_child_range(&SubStmt, &SubStmt + 1);
}
};
/// Dispatch to the end location of the concrete case/default statement.
SourceLocation SwitchCase::getEndLoc() const {
  if (const auto *Default = dyn_cast<DefaultStmt>(this))
    return Default->getEndLoc();
  if (const auto *Case = dyn_cast<CaseStmt>(this))
    return Case->getEndLoc();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
/// Dispatch to the sub-statement of the concrete case/default statement.
Stmt *SwitchCase::getSubStmt() {
  if (auto *Default = dyn_cast<DefaultStmt>(this))
    return Default->getSubStmt();
  if (auto *Case = dyn_cast<CaseStmt>(this))
    return Case->getSubStmt();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
/// Represents a statement that could possibly have a value and type. This
/// covers expression-statements, as well as labels and attributed statements.
///
/// Value statements have a special meaning when they are the last non-null
/// statement in a GNU statement expression, where they determine the value
/// of the statement expression.
class ValueStmt : public Stmt {
protected:
  using Stmt::Stmt;

public:
  const Expr *getExprStmt() const;
  Expr *getExprStmt() {
    // Delegate to the const overload and strip the constness off the result.
    return const_cast<Expr *>(
        static_cast<const ValueStmt *>(this)->getExprStmt());
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() >= firstValueStmtConstant &&
           T->getStmtClass() <= lastValueStmtConstant;
  }
};
/// LabelStmt - Represents a label, which has a substatement. For example:
/// foo: return;
class LabelStmt : public ValueStmt {
LabelDecl *TheDecl;
Stmt *SubStmt;
public:
/// Build a label statement.
LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
: ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
setIdentLoc(IL);
}
/// Build an empty label statement.
explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {}
SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }
void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }
LabelDecl *getDecl() const { return TheDecl; }
void setDecl(LabelDecl *D) { TheDecl = D; }
const char *getName() const;
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setSubStmt(Stmt *SS) { SubStmt = SS; }
SourceLocation getBeginLoc() const { return getIdentLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
const_child_range children() const {
return const_child_range(&SubStmt, &SubStmt + 1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == LabelStmtClass;
}
};
/// Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
///   [[omp::for(...)]] for (...) { ... }
class AttributedStmt final
    : public ValueStmt,
      private llvm::TrailingObjects<AttributedStmt, const Attr *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  /// The statement the attributes apply to.
  Stmt *SubStmt;

  /// Build an attributed statement. The attributes are stored as trailing
  /// objects; the attribute count and location live in the bit-fields of
  /// the base Stmt.
  AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
                 Stmt *SubStmt)
      : ValueStmt(AttributedStmtClass), SubStmt(SubStmt) {
    AttributedStmtBits.NumAttrs = Attrs.size();
    AttributedStmtBits.AttrLoc = Loc;
    std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
  }

  /// Build an empty attributed statement with room for NumAttrs attributes,
  /// all initially null.
  explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
      : ValueStmt(AttributedStmtClass, Empty) {
    AttributedStmtBits.NumAttrs = NumAttrs;
    AttributedStmtBits.AttrLoc = SourceLocation{};
    std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
  }

  /// Pointer to the trailing array of attributes.
  const Attr *const *getAttrArrayPtr() const {
    return getTrailingObjects<const Attr *>();
  }
  const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }

public:
  static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
                                ArrayRef<const Attr *> Attrs, Stmt *SubStmt);

  // Build an empty attributed statement.
  static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);

  SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
  ArrayRef<const Attr *> getAttrs() const {
    return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
  }

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }

  SourceLocation getBeginLoc() const { return getAttrLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}

  // Iterators over the single sub-statement; attributes are not children.
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == AttributedStmtClass;
  }
};
/// IfStmt - This represents an if/then/else.
class IfStmt final
    : public Stmt,
      private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // IfStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing
  // objects at the end but this would change the order of the children.
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact a "Expr *".
  //
  // * A "Stmt *" for the then statement.
  //   Always present.
  //
  // * A "Stmt *" for the else statement.
  //   Present if and only if hasElseStorage().
  //
  // * A "SourceLocation" for the location of the "else".
  //   Present if and only if hasElseStorage().
  enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
  enum { NumMandatoryStmtPtr = 2 };

  // Total trailing "Stmt *" count: condition and then-branch are mandatory;
  // else/var/init each add one slot when stored.
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
           hasInitStorage();
  }

  // The "else" keyword location is the only trailing SourceLocation.
  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return hasElseStorage();
  }

  // Offsets of the sub-statements in the trailing "Stmt *" array; optional
  // slots before a given slot shift its offset.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
  unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }

  /// Build an if/then/else statement.
  IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init,
         VarDecl *Var, Expr *Cond, Stmt *Then, SourceLocation EL, Stmt *Else);

  /// Build an empty if/then/else statement.
  explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);

public:
  /// Create an IfStmt.
  static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
                        bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
                        Stmt *Then, SourceLocation EL = SourceLocation(),
                        Stmt *Else = nullptr);

  /// Create an empty IfStmt optionally with storage for an else statement,
  /// condition variable and init expression.
  static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
                             bool HasInit);

  /// True if this IfStmt has the storage for an init statement.
  bool hasInitStorage() const { return IfStmtBits.HasInit; }

  /// True if this IfStmt has storage for a variable declaration.
  bool hasVarStorage() const { return IfStmtBits.HasVar; }

  /// True if this IfStmt has storage for an else statement.
  bool hasElseStorage() const { return IfStmtBits.HasElse; }

  // The condition is stored in a "Stmt *" slot but is in fact an Expr.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }
  const Stmt *getThen() const {
    return getTrailingObjects<Stmt *>()[thenOffset()];
  }

  void setThen(Stmt *Then) {
    getTrailingObjects<Stmt *>()[thenOffset()] = Then;
  }

  // The else branch only has storage when hasElseStorage() is true.
  Stmt *getElse() {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  const Stmt *getElse() const {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  void setElse(Stmt *Else) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    getTrailingObjects<Stmt *>()[elseOffset()] = Else;
  }

  /// Retrieve the variable declared in this "if" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// if (int x = foo()) {
  ///   printf("x is %d", x);
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<IfStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable for this if statement.
  /// The if statement must have storage for the condition variable.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this IfStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  // The init statement only has storage when hasInitStorage() is true.
  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This if statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
  void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }

  SourceLocation getElseLoc() const {
    return hasElseStorage() ? *getTrailingObjects<SourceLocation>()
                            : SourceLocation();
  }

  void setElseLoc(SourceLocation ElseLoc) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    *getTrailingObjects<SourceLocation>() = ElseLoc;
  }

  bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
  void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }

  bool isObjCAvailabilityCheck() const;

  SourceLocation getBeginLoc() const { return getIfLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // Prefer the else branch; fall back to the then branch.
    if (getElse())
      return getElse()->getEndLoc();
    return getThen()->getEndLoc();
  }

  // Iterators over subexpressions. The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};
/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
                         private llvm::TrailingObjects<SwitchStmt, Stmt *> {
  friend TrailingObjects;

  /// Points to a linked list of case and default statements.
  SwitchCase *FirstCase;

  // SwitchStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more convenient to
  // put the optional trailing objects at the end but this would change
  // the order in children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  enum { InitOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  // Condition and body are mandatory; init/var each add one slot when stored.
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage();
  }

  // Offsets of the sub-statements in the trailing "Stmt *" array.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  /// Build a switch statement.
  SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond);

  /// Build an empty switch statement.
  explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);

public:
  /// Create a switch statement.
  static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
                            Expr *Cond);

  /// Create an empty switch statement optionally with storage for
  /// an init expression and a condition variable.
  static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
                                 bool HasVar);

  /// True if this SwitchStmt has storage for an init statement.
  bool hasInitStorage() const { return SwitchStmtBits.HasInit; }

  /// True if this SwitchStmt has storage for a condition variable.
  bool hasVarStorage() const { return SwitchStmtBits.HasVar; }

  // The condition is stored in a "Stmt *" slot but is in fact an Expr.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  // The init statement only has storage when hasInitStorage() is true.
  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This switch statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  /// Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  ///   case 0: break;
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<SwitchStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable in this switch statement.
  /// The switch statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *VD);

  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SwitchCase *getSwitchCaseList() { return FirstCase; }
  const SwitchCase *getSwitchCaseList() const { return FirstCase; }
  void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }

  SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; }

  void setBody(Stmt *S, SourceLocation SL) {
    setBody(S);
    setSwitchLoc(SL);
  }

  // Prepends SC to the linked list of cases; SC must not already be chained.
  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase() &&
           "case/default already added to a switch");
    SC->setNextSwitchCase(FirstCase);
    FirstCase = SC;
  }

  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; }

  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const {
    return SwitchStmtBits.AllEnumCasesCovered;
  }

  SourceLocation getBeginLoc() const { return getSwitchLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // Fall back to the condition when the body is absent.
    return getBody() ? getBody()->getEndLoc()
                     : reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};
/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
                        private llvm::TrailingObjects<WhileStmt, Stmt *> {
  friend TrailingObjects;

  // WhileStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more
  // convenient to put the optional trailing object at the end
  // but this would affect children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  //
  enum { VarOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  // Offsets of the sub-statements in the trailing "Stmt *" array.
  unsigned varOffset() const { return VarOffset; }
  unsigned condOffset() const { return VarOffset + hasVarStorage(); }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  // Condition and body are mandatory; the condition variable adds one slot.
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasVarStorage();
  }

  /// Build a while statement.
  WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
            SourceLocation WL);

  /// Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty, bool HasVar);

public:
  /// Create a while statement.
  static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
                           Stmt *Body, SourceLocation WL);

  /// Create an empty while statement optionally with storage for
  /// a condition variable.
  static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar);

  /// True if this WhileStmt has storage for a condition variable.
  bool hasVarStorage() const { return WhileStmtBits.HasVar; }

  // The condition is stored in a "Stmt *" slot but is in fact an Expr.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  /// Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<WhileStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable of this while statement.
  /// The while statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }

  SourceLocation getBeginLoc() const { return getWhileLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};
/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
  // Indices into SubExprs. The order (body before condition) fixes the
  // order in which children() visits the sub-statements.
  enum { BODY, COND, END_EXPR };
  Stmt *SubExprs[END_EXPR];
  SourceLocation WhileLoc;
  SourceLocation RParenLoc; // Location of final ')' in do stmt condition.

public:
  /// Build a do/while statement.
  DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
      : Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) {
    setBody(Body);
    setCond(Cond);
    setDoLoc(DL);
  }

  /// Build an empty do-while statement.
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}

  // The condition slot holds a "Stmt *" but is in fact an Expr.
  Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(SubExprs[COND]);
  }
  void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); }

  const Stmt *getBody() const { return SubExprs[BODY]; }
  Stmt *getBody() { return SubExprs[BODY]; }
  void setBody(Stmt *Body) { SubExprs[BODY] = Body; }

  SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; }
  void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; }
  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getDoLoc(); }
  SourceLocation getEndLoc() const { return getRParenLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DoStmtClass;
  }

  // Iterators over body and condition.
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};
/// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
  // Indices into SubExprs; this order fixes the order of children().
  // SubExprs[INIT] is an expression or declstmt; CONDVAR holds the faux
  // DeclStmt for a condition variable, if any.
  enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
  Stmt *SubExprs[END_EXPR];
  SourceLocation LParenLoc, RParenLoc;

public:
  ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
          Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
          SourceLocation RP);

  /// Build an empty for statement.
  explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}

  Stmt *getInit() { return SubExprs[INIT]; }
  const Stmt *getInit() const { return SubExprs[INIT]; }
  void setInit(Stmt *S) { SubExprs[INIT] = S; }

  /// Retrieve the variable declared in this "for" statement, if any.
  ///
  /// In the following example, "y" is the condition variable.
  /// \code
  /// for (int x = random(); int y = mangle(x); ++x) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this ForStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt *>(SubExprs[CONDVAR]);
  }

  // The condition and increment slots hold "Stmt *"s but are in fact Exprs.
  Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(SubExprs[COND]);
  }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }

  Expr *getInc() { return reinterpret_cast<Expr *>(SubExprs[INC]); }
  const Expr *getInc() const { return reinterpret_cast<Expr *>(SubExprs[INC]); }
  void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt *>(E); }

  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getForLoc() const { return ForStmtBits.ForLoc; }
  void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getForLoc(); }
  SourceLocation getEndLoc() const { return getBody()->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ForStmtClass;
  }

  // Iterators over init, condvar, cond, inc and body.
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};
/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt {
  LabelDecl *Label;
  SourceLocation LabelLoc;

public:
  /// Build a goto statement jumping to the given label.
  GotoStmt(LabelDecl *D, SourceLocation GotoLoc, SourceLocation LabLoc)
      : Stmt(GotoStmtClass), Label(D), LabelLoc(LabLoc) {
    setGotoLoc(GotoLoc);
  }

  /// Build an empty goto statement.
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}

  LabelDecl *getLabel() const { return Label; }
  void setLabel(LabelDecl *D) { Label = D; }

  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }
  void setLabelLoc(SourceLocation L) { LabelLoc = L; }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const { return getLabelLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }

  // Iterators: a direct goto has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
SourceLocation StarLoc;
Stmt *Target;
public:
IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target)
: Stmt(IndirectGotoStmtClass), StarLoc(starLoc) {
setTarget(target);
setGotoLoc(gotoLoc);
}
/// Build an empty indirect goto statement.
explicit IndirectGotoStmt(EmptyShell Empty)
: Stmt(IndirectGotoStmtClass, Empty) {}
void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
void setStarLoc(SourceLocation L) { StarLoc = L; }
SourceLocation getStarLoc() const { return StarLoc; }
Expr *getTarget() { return reinterpret_cast<Expr *>(Target); }
const Expr *getTarget() const {
return reinterpret_cast<const Expr *>(Target);
}
void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); }
/// getConstantTarget - Returns the fixed target of this indirect
/// goto, if one exists.
LabelDecl *getConstantTarget();
const LabelDecl *getConstantTarget() const {
return const_cast<IndirectGotoStmt *>(this)->getConstantTarget();
}
SourceLocation getBeginLoc() const { return getGotoLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == IndirectGotoStmtClass;
}
// Iterators
child_range children() { return child_range(&Target, &Target + 1); }
const_child_range children() const {
return const_child_range(&Target, &Target + 1);
}
};
/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
public:
ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) {
setContinueLoc(CL);
}
/// Build an empty continue statement.
explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}
SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; }
void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; }
SourceLocation getBeginLoc() const { return getContinueLoc(); }
SourceLocation getEndLoc() const { return getContinueLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == ContinueStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
public:
BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) {
setBreakLoc(BL);
}
/// Build an empty break statement.
explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}
SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; }
void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; }
SourceLocation getBeginLoc() const { return getBreakLoc(); }
SourceLocation getEndLoc() const { return getBreakLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == BreakStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// ReturnStmt - This represents a return, optionally of an expression:
/// return;
/// return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void. We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
class ReturnStmt final
: public Stmt,
private llvm::TrailingObjects<ReturnStmt, const VarDecl *> {
friend TrailingObjects;
/// The return expression.
Stmt *RetExpr;
// ReturnStmt is followed optionally by a trailing "const VarDecl *"
// for the NRVO candidate. Present if and only if hasNRVOCandidate().
/// True if this ReturnStmt has storage for an NRVO candidate.
bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; }
// TrailingObjects protocol: the single optional VarDecl* slot exists exactly
// when NRVO-candidate storage was requested at creation time.
unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const {
return hasNRVOCandidate();
}
/// Build a return statement.
ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate);
/// Build an empty return statement.
explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate);
public:
/// Create a return statement.
static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E,
const VarDecl *NRVOCandidate);
/// Create an empty return statement, optionally with
/// storage for an NRVO candidate.
static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate);
// The return value is stored as a Stmt* so children() can hand out a range
// over it; the accessors cast between the Stmt and Expr views. A null
// RetExpr means a bare "return;".
Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); }
const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); }
void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); }
/// Retrieve the variable that might be used for the named return
/// value optimization.
///
/// The optimization itself can only be performed if the variable is
/// also marked as an NRVO object.
const VarDecl *getNRVOCandidate() const {
return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>()
: nullptr;
}
/// Set the variable that might be used for the named return value
/// optimization. The return statement must have storage for it,
/// which is the case if and only if hasNRVOCandidate() is true.
void setNRVOCandidate(const VarDecl *Var) {
assert(hasNRVOCandidate() &&
"This return statement has no storage for an NRVO candidate!");
*getTrailingObjects<const VarDecl *>() = Var;
}
/// Location of the 'return' keyword, stored in the Stmt bit-fields.
SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; }
void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; }
SourceLocation getBeginLoc() const { return getReturnLoc(); }
// End is the end of the returned expression if there is one, otherwise the
// keyword itself.
SourceLocation getEndLoc() const LLVM_READONLY {
return RetExpr ? RetExpr->getEndLoc() : getReturnLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == ReturnStmtClass;
}
// Iterators
// The only child is the return expression, when present.
child_range children() {
if (RetExpr)
return child_range(&RetExpr, &RetExpr + 1);
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
if (RetExpr)
return const_child_range(&RetExpr, &RetExpr + 1);
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
///
/// Operand expressions are stored in the single Exprs array with all outputs
/// first, followed by all inputs (see begin_inputs/begin_outputs below);
/// derived classes are responsible for populating it.
class AsmStmt : public Stmt {
protected:
friend class ASTStmtReader;
SourceLocation AsmLoc;
/// True if the assembly statement does not have any input or output
/// operands.
bool IsSimple;
/// If true, treat this inline assembly as having side effects.
/// This assembly statement should not be optimized, deleted or moved.
bool IsVolatile;
unsigned NumOutputs;
unsigned NumInputs;
unsigned NumClobbers;
// Outputs at [0, NumOutputs), inputs at [NumOutputs, NumOutputs+NumInputs).
Stmt **Exprs = nullptr;
AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
: Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
NumOutputs(numoutputs), NumInputs(numinputs),
NumClobbers(numclobbers) {}
public:
/// Build an empty inline-assembly statement.
explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}
SourceLocation getAsmLoc() const { return AsmLoc; }
void setAsmLoc(SourceLocation L) { AsmLoc = L; }
bool isSimple() const { return IsSimple; }
void setSimple(bool V) { IsSimple = V; }
bool isVolatile() const { return IsVolatile; }
void setVolatile(bool V) { IsVolatile = V; }
// The base class has no meaningful locations; GCCAsmStmt and MSAsmStmt
// shadow these with their real begin/end locations.
SourceLocation getBeginLoc() const LLVM_READONLY { return {}; }
SourceLocation getEndLoc() const LLVM_READONLY { return {}; }
//===--- Asm String Analysis ---===//
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
unsigned getNumOutputs() const { return NumOutputs; }
/// getOutputConstraint - Return the constraint string for the specified
/// output operand. All output constraints are known to be non-empty (either
/// '=' or '+').
StringRef getOutputConstraint(unsigned i) const;
/// isOutputPlusConstraint - Return true if the specified output constraint
/// is a "+" constraint (which is both an input and an output) or false if it
/// is an "=" constraint (just an output).
bool isOutputPlusConstraint(unsigned i) const {
return getOutputConstraint(i)[0] == '+';
}
const Expr *getOutputExpr(unsigned i) const;
/// getNumPlusOperands - Return the number of output operands that have a "+"
/// constraint.
unsigned getNumPlusOperands() const;
//===--- Input operands ---===//
unsigned getNumInputs() const { return NumInputs; }
/// getInputConstraint - Return the specified input constraint. Unlike output
/// constraints, these can be empty.
StringRef getInputConstraint(unsigned i) const;
const Expr *getInputExpr(unsigned i) const;
//===--- Other ---===//
unsigned getNumClobbers() const { return NumClobbers; }
StringRef getClobber(unsigned i) const;
// Matches both concrete subclasses; AsmStmt itself is never instantiated
// with its own StmtClass.
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass ||
T->getStmtClass() == MSAsmStmtClass;
}
// Input expr iterators.
using inputs_iterator = ExprIterator;
using const_inputs_iterator = ConstExprIterator;
using inputs_range = llvm::iterator_range<inputs_iterator>;
using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;
// Inputs follow the outputs in Exprs, hence the NumOutputs offset.
inputs_iterator begin_inputs() {
return &Exprs[0] + NumOutputs;
}
inputs_iterator end_inputs() {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }
const_inputs_iterator begin_inputs() const {
return &Exprs[0] + NumOutputs;
}
const_inputs_iterator end_inputs() const {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_const_range inputs() const {
return inputs_const_range(begin_inputs(), end_inputs());
}
// Output expr iterators.
using outputs_iterator = ExprIterator;
using const_outputs_iterator = ConstExprIterator;
using outputs_range = llvm::iterator_range<outputs_iterator>;
using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;
// Outputs occupy the leading portion of Exprs.
outputs_iterator begin_outputs() {
return &Exprs[0];
}
outputs_iterator end_outputs() {
return &Exprs[0] + NumOutputs;
}
outputs_range outputs() {
return outputs_range(begin_outputs(), end_outputs());
}
const_outputs_iterator begin_outputs() const {
return &Exprs[0];
}
const_outputs_iterator end_outputs() const {
return &Exprs[0] + NumOutputs;
}
outputs_const_range outputs() const {
return outputs_const_range(begin_outputs(), end_outputs());
}
// All operand expressions (outputs then inputs) are the children.
child_range children() {
return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
}
const_child_range children() const {
return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
}
};
/// This represents a GCC inline-assembly statement extension.
///
/// The Names and Constraints arrays parallel the inherited Exprs array:
/// entries [0, NumOutputs) describe outputs and entries starting at
/// NumOutputs describe inputs (see the i + NumOutputs indexing below).
class GCCAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation RParenLoc;
StringLiteral *AsmStr;
// FIXME: If we wanted to, we could allocate all of these in one big array.
StringLiteral **Constraints = nullptr;
StringLiteral **Clobbers = nullptr;
IdentifierInfo **Names = nullptr;
public:
GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
bool isvolatile, unsigned numoutputs, unsigned numinputs,
IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
StringLiteral *asmstr, unsigned numclobbers,
StringLiteral **clobbers, SourceLocation rparenloc);
/// Build an empty inline-assembly statement.
explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {}
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
//===--- Asm String Analysis ---===//
const StringLiteral *getAsmString() const { return AsmStr; }
StringLiteral *getAsmString() { return AsmStr; }
void setAsmString(StringLiteral *E) { AsmStr = E; }
/// AsmStringPiece - this is part of a decomposed asm string specification
/// (for use with the AnalyzeAsmString function below). An asm string is
/// considered to be a concatenation of these parts.
class AsmStringPiece {
public:
enum Kind {
String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
Operand // Operand reference, with optional modifier %c4.
};
private:
Kind MyKind;
std::string Str;
// Only meaningful when MyKind == Operand (see getOperandNo's assert).
unsigned OperandNo;
// Source range for operand references.
CharSourceRange Range;
public:
AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
SourceLocation End)
: MyKind(Operand), Str(S), OperandNo(OpNo),
Range(CharSourceRange::getCharRange(Begin, End)) {}
bool isString() const { return MyKind == String; }
bool isOperand() const { return MyKind == Operand; }
const std::string &getString() const { return Str; }
unsigned getOperandNo() const {
assert(isOperand());
return OperandNo;
}
CharSourceRange getRange() const {
assert(isOperand() && "Range is currently used only for Operands.");
return Range;
}
/// getModifier - Get the modifier for this operand, if present. This
/// returns '\0' if there was no modifier.
char getModifier() const;
};
/// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
/// it into pieces. If the asm string is erroneous, emit errors and return
/// true, otherwise return false. This handles canonicalization and
/// translation of strings from GCC syntax to LLVM IR syntax, and handles
/// flattening of named references like %[foo] to Operand AsmStringPiece's.
/// NOTE(review): despite the comment, the declared return type is unsigned,
/// not bool — confirm the exact error-reporting contract in Stmt.cpp.
unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
const ASTContext &C, unsigned &DiagOffs) const;
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }
// Returns the empty string when the output operand has no symbolic name.
StringRef getOutputName(unsigned i) const {
if (IdentifierInfo *II = getOutputIdentifier(i))
return II->getName();
return {};
}
StringRef getOutputConstraint(unsigned i) const;
const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
return Constraints[i];
}
StringLiteral *getOutputConstraintLiteral(unsigned i) {
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
// Input entries live after the outputs in the shared Names array.
IdentifierInfo *getInputIdentifier(unsigned i) const {
return Names[i + NumOutputs];
}
// Returns the empty string when the input operand has no symbolic name.
StringRef getInputName(unsigned i) const {
if (IdentifierInfo *II = getInputIdentifier(i))
return II->getName();
return {};
}
StringRef getInputConstraint(unsigned i) const;
const StringLiteral *getInputConstraintLiteral(unsigned i) const {
return Constraints[i + NumOutputs];
}
StringLiteral *getInputConstraintLiteral(unsigned i) {
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
}
private:
// Used by ASTStmtReader (a friend) to populate a deserialized statement.
void setOutputsAndInputsAndClobbers(const ASTContext &C,
IdentifierInfo **Names,
StringLiteral **Constraints,
Stmt **Exprs,
unsigned NumOutputs,
unsigned NumInputs,
StringLiteral **Clobbers,
unsigned NumClobbers);
public:
//===--- Other ---===//
/// getNamedOperand - Given a symbolic operand reference like %[foo],
/// translate this into a numeric value needed to reference the same operand.
/// This returns -1 if the operand name is invalid.
int getNamedOperand(StringRef SymbolicName) const;
StringRef getClobber(unsigned i) const;
StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
const StringLiteral *getClobberStringLiteral(unsigned i) const {
return Clobbers[i];
}
// Spans from the 'asm' keyword to the closing right paren.
SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass;
}
};
/// This represents a Microsoft inline-assembly statement extension.
///
/// Unlike GCCAsmStmt, the asm text and constraints are stored as StringRefs
/// and the raw tokens are kept (AsmToks); initialize() sets them up.
/// NOTE(review): the StringRef members presumably view storage owned by the
/// ASTContext passed to initialize() — confirm against the implementation.
class MSAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation LBraceLoc, EndLoc;
StringRef AsmStr;
unsigned NumAsmToks = 0;
Token *AsmToks = nullptr;
// Outputs first, then inputs — same layout as the inherited Exprs array.
StringRef *Constraints = nullptr;
StringRef *Clobbers = nullptr;
public:
MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
SourceLocation lbraceloc, bool issimple, bool isvolatile,
ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
ArrayRef<StringRef> constraints,
ArrayRef<Expr*> exprs, StringRef asmstr,
ArrayRef<StringRef> clobbers, SourceLocation endloc);
/// Build an empty MS-style inline-assembly statement.
explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}
SourceLocation getLBraceLoc() const { return LBraceLoc; }
void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
// Braces are optional in MS asm; an invalid LBraceLoc means none were used.
bool hasBraces() const { return LBraceLoc.isValid(); }
unsigned getNumAsmToks() { return NumAsmToks; }
Token *getAsmToks() { return AsmToks; }
//===--- Asm String Analysis ---===//
StringRef getAsmString() const { return AsmStr; }
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
StringRef getOutputConstraint(unsigned i) const {
assert(i < NumOutputs);
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
// Input constraints follow the output constraints in the shared array.
StringRef getInputConstraint(unsigned i) const {
assert(i < NumInputs);
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
}
//===--- Other ---===//
ArrayRef<StringRef> getAllConstraints() const {
return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
}
ArrayRef<StringRef> getClobbers() const {
return llvm::makeArrayRef(Clobbers, NumClobbers);
}
ArrayRef<Expr*> getAllExprs() const {
return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
NumInputs + NumOutputs);
}
StringRef getClobber(unsigned i) const { return getClobbers()[i]; }
private:
// Shared constructor helper that copies the operand data into the context.
void initialize(const ASTContext &C, StringRef AsmString,
ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);
public:
SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == MSAsmStmtClass;
}
child_range children() {
return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
}
const_child_range children() const {
return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
}
};
/// Represents a Windows structured exception handling '__except' handler:
/// the filter expression and the handler block, stored as the two children.
class SEHExceptStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
SourceLocation Loc;
// Children[FILTER_EXPR] is the filter expression, Children[BLOCK] the body.
Stmt *Children[2];
enum { FILTER_EXPR, BLOCK };
SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);
explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}
public:
static SEHExceptStmt* Create(const ASTContext &C,
SourceLocation ExceptLoc,
Expr *FilterExpr,
Stmt *Block);
SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); }
/// Location of the '__except' keyword.
SourceLocation getExceptLoc() const { return Loc; }
SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); }
// Stored as a Stmt* so children() can expose it; cast back to Expr here.
Expr *getFilterExpr() const {
return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
}
CompoundStmt *getBlock() const {
return cast<CompoundStmt>(Children[BLOCK]);
}
child_range children() {
return child_range(Children, Children+2);
}
const_child_range children() const {
return const_child_range(Children, Children + 2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHExceptStmtClass;
}
};
/// Represents a Windows structured exception handling '__finally' block.
class SEHFinallyStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
SourceLocation Loc;
// The compound statement forming the __finally body (see getBlock's cast).
Stmt *Block;
SEHFinallyStmt(SourceLocation Loc, Stmt *Block);
explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}
public:
static SEHFinallyStmt* Create(const ASTContext &C,
SourceLocation FinallyLoc,
Stmt *Block);
SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); }
/// Location of the '__finally' keyword.
SourceLocation getFinallyLoc() const { return Loc; }
SourceLocation getEndLoc() const { return Block->getEndLoc(); }
CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }
child_range children() {
return child_range(&Block,&Block+1);
}
const_child_range children() const {
return const_child_range(&Block, &Block + 1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHFinallyStmtClass;
}
};
/// Represents a Windows structured exception handling '__try' statement:
/// the try block plus exactly one handler, which is either a SEHExceptStmt
/// or a SEHFinallyStmt (see getExceptHandler/getFinallyHandler).
class SEHTryStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
bool IsCXXTry;
SourceLocation TryLoc;
// Children[TRY] is the try block, Children[HANDLER] the handler statement.
Stmt *Children[2];
enum { TRY = 0, HANDLER = 1 };
SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
SourceLocation TryLoc,
Stmt *TryBlock,
Stmt *Handler);
explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}
public:
static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); }
SourceLocation getTryLoc() const { return TryLoc; }
SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); }
/// True if spelled 'try', false if spelled '__try'.
bool getIsCXXTry() const { return IsCXXTry; }
CompoundStmt* getTryBlock() const {
return cast<CompoundStmt>(Children[TRY]);
}
Stmt *getHandler() const { return Children[HANDLER]; }
/// Returns 0 if not defined
SEHExceptStmt *getExceptHandler() const;
SEHFinallyStmt *getFinallyHandler() const;
child_range children() {
return child_range(Children, Children+2);
}
const_child_range children() const {
return const_child_range(Children, Children + 2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHTryStmtClass;
}
};
/// Represents a __leave statement.
///
/// Like break/continue, the statement is just the keyword; only its
/// location is stored.
class SEHLeaveStmt : public Stmt {
SourceLocation LeaveLoc;
public:
explicit SEHLeaveStmt(SourceLocation LL)
: Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}
/// Build an empty __leave statement.
explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}
SourceLocation getLeaveLoc() const { return LeaveLoc; }
void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }
// Begin and end coincide: the statement is exactly one keyword.
SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; }
SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHLeaveStmtClass;
}
// Iterators
// A __leave statement has no sub-statements; both ranges are empty.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
/// compute();
/// }
/// @endcode
///
/// Storage layout: the object is followed in memory by NumCaptures + 1
/// Stmt* slots (the capture-init expressions, then the captured statement
/// itself at index NumCaptures — see getStoredStmts/getCapturedStmt), and
/// then by the Capture descriptors (getStoredCaptures, defined out of line).
class CapturedStmt : public Stmt {
public:
/// The different capture forms: by 'this', by reference, capture for
/// variable-length array type etc.
enum VariableCaptureKind {
VCK_This,
VCK_ByRef,
VCK_ByCopy,
VCK_VLAType,
};
/// Describes the capture of either a variable, or 'this', or
/// variable-length array type.
class Capture {
// Packs the captured variable (null for 'this'/VLA captures) together
// with the 2-bit capture kind.
llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
SourceLocation Loc;
public:
friend class ASTStmtReader;
/// Create a new capture.
///
/// \param Loc The source location associated with this capture.
///
/// \param Kind The kind of capture (this, ByRef, ...).
///
/// \param Var The variable being captured, or null if capturing this.
Capture(SourceLocation Loc, VariableCaptureKind Kind,
VarDecl *Var = nullptr);
/// Determine the kind of capture.
VariableCaptureKind getCaptureKind() const;
/// Retrieve the source location at which the variable or 'this' was
/// first used.
SourceLocation getLocation() const { return Loc; }
/// Determine whether this capture handles the C++ 'this' pointer.
bool capturesThis() const { return getCaptureKind() == VCK_This; }
/// Determine whether this capture handles a variable (by reference).
bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }
/// Determine whether this capture handles a variable by copy.
bool capturesVariableByCopy() const {
return getCaptureKind() == VCK_ByCopy;
}
/// Determine whether this capture handles a variable-length array
/// type.
bool capturesVariableArrayType() const {
return getCaptureKind() == VCK_VLAType;
}
/// Retrieve the declaration of the variable being captured.
///
/// This operation is only valid if this capture captures a variable.
VarDecl *getCapturedVar() const;
};
private:
/// The number of variable captured, including 'this'.
unsigned NumCaptures;
/// The pointer part is the implicit the outlined function and the
/// int part is the captured region kind, 'CR_Default' etc.
llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind;
/// The record for captured variables, a RecordDecl or CXXRecordDecl.
RecordDecl *TheRecordDecl = nullptr;
/// Construct a captured statement.
CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);
/// Construct an empty captured statement.
CapturedStmt(EmptyShell Empty, unsigned NumCaptures);
// Trailing Stmt* storage begins immediately after the object itself.
Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }
Stmt *const *getStoredStmts() const {
return reinterpret_cast<Stmt *const *>(this + 1);
}
// Defined out of line; returns the trailing Capture descriptor array.
Capture *getStoredCaptures() const;
// The captured statement occupies the slot after the capture inits.
void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }
public:
friend class ASTStmtReader;
static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
CapturedRegionKind Kind,
ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits,
CapturedDecl *CD, RecordDecl *RD);
static CapturedStmt *CreateDeserialized(const ASTContext &Context,
unsigned NumCaptures);
/// Retrieve the statement being captured.
Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }
/// Retrieve the outlined function declaration.
CapturedDecl *getCapturedDecl();
const CapturedDecl *getCapturedDecl() const;
/// Set the outlined function declaration.
void setCapturedDecl(CapturedDecl *D);
/// Retrieve the captured region kind.
CapturedRegionKind getCapturedRegionKind() const;
/// Set the captured region kind.
void setCapturedRegionKind(CapturedRegionKind Kind);
/// Retrieve the record declaration for captured variables.
const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }
/// Set the record declaration for captured variables.
void setCapturedRecordDecl(RecordDecl *D) {
assert(D && "null RecordDecl");
TheRecordDecl = D;
}
/// True if this variable has been captured.
bool capturesVariable(const VarDecl *Var) const;
/// An iterator that walks over the captures.
using capture_iterator = Capture *;
using const_capture_iterator = const Capture *;
using capture_range = llvm::iterator_range<capture_iterator>;
using capture_const_range = llvm::iterator_range<const_capture_iterator>;
capture_range captures() {
return capture_range(capture_begin(), capture_end());
}
capture_const_range captures() const {
return capture_const_range(capture_begin(), capture_end());
}
/// Retrieve an iterator pointing to the first capture.
capture_iterator capture_begin() { return getStoredCaptures(); }
const_capture_iterator capture_begin() const { return getStoredCaptures(); }
/// Retrieve an iterator pointing past the end of the sequence of
/// captures.
capture_iterator capture_end() const {
return getStoredCaptures() + NumCaptures;
}
/// Retrieve the number of captures, including 'this'.
unsigned capture_size() const { return NumCaptures; }
/// Iterator that walks over the capture initialization arguments.
using capture_init_iterator = Expr **;
using capture_init_range = llvm::iterator_range<capture_init_iterator>;
/// Const iterator that walks over the capture initialization
/// arguments.
using const_capture_init_iterator = Expr *const *;
using const_capture_init_range =
llvm::iterator_range<const_capture_init_iterator>;
capture_init_range capture_inits() {
return capture_init_range(capture_init_begin(), capture_init_end());
}
const_capture_init_range capture_inits() const {
return const_capture_init_range(capture_init_begin(), capture_init_end());
}
/// Retrieve the first initialization argument.
// Capture inits are the first NumCaptures entries of the stored Stmt*s.
capture_init_iterator capture_init_begin() {
return reinterpret_cast<Expr **>(getStoredStmts());
}
const_capture_init_iterator capture_init_begin() const {
return reinterpret_cast<Expr *const *>(getStoredStmts());
}
/// Retrieve the iterator pointing one past the last initialization
/// argument.
capture_init_iterator capture_init_end() {
return capture_init_begin() + NumCaptures;
}
const_capture_init_iterator capture_init_end() const {
return capture_init_begin() + NumCaptures;
}
// Source extent is that of the captured statement itself.
SourceLocation getBeginLoc() const LLVM_READONLY {
return getCapturedStmt()->getBeginLoc();
}
SourceLocation getEndLoc() const LLVM_READONLY {
return getCapturedStmt()->getEndLoc();
}
SourceRange getSourceRange() const LLVM_READONLY {
return getCapturedStmt()->getSourceRange();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CapturedStmtClass;
}
// Defined out of line (they traverse the trailing storage).
child_range children();
const_child_range children() const;
};
} // namespace clang
#endif // LLVM_CLANG_AST_STMT_H
|
GB_bitmap_AxB_saxpy_A_sparse_B_bitmap_template.c | //------------------------------------------------------------------------------
// GB_bitmap_AxB_saxpy_A_sparse_B_bitmap: C<#M>+=A*B, C bitmap, M any format
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// C is bitmap or full. A is hyper/sparse, B is bitmap/full.
// if C is bitmap: no accumulator is used
// if C is full: C += A*B is computed with the accumulator identical to
// the monoid
{
if (use_coarse_tasks)
{
//----------------------------------------------------------------------
// C<#M> += A*B using coarse tasks
//----------------------------------------------------------------------
// number of columns in the workspace for each task
#define GB_PANEL_SIZE 4
if (B_iso)
{
// No special cases needed. GB_GETB handles the B iso case.
}
//----------------------------------------------------------------------
// allocate workspace for each task
//----------------------------------------------------------------------
GB_WERK_PUSH (H_slice, ntasks, int64_t) ;
if (H_slice == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
int64_t hwork = 0 ;
int tid ;
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t jstart, jend ;
GB_PARTITION (jstart, jend, bvdim, tid, ntasks) ;
int64_t jtask = jend - jstart ;
int64_t jpanel = GB_IMIN (jtask, GB_PANEL_SIZE) ;
H_slice [tid] = hwork ;
#if ( !GB_C_IS_BITMAP )
// bitmap case always needs Hx workspace; full case only needs it
// if jpanel > 1
if (jpanel > 1)
#endif
{
hwork += jpanel ;
}
}
//----------------------------------------------------------------------
int64_t cvlenx = (GB_IS_ANY_PAIR_SEMIRING ? 0 : cvlen) * GB_CSIZE ;
#if GB_C_IS_BITMAP
Wf = GB_MALLOC_WORK (hwork * cvlen, int8_t, &Wf_size) ;
#endif
Wcx = GB_MALLOC_WORK (hwork * cvlenx, GB_void, &Wcx_size) ;
if ((GB_C_IS_BITMAP && Wf == NULL) || Wcx == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// C<#M> += A*B
//----------------------------------------------------------------------
#if GB_C_IS_BITMAP
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:cnvals)
#else
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
#endif
for (tid = 0 ; tid < ntasks ; tid++)
{
//------------------------------------------------------------------
// determine the vectors of B and C for this coarse task
//------------------------------------------------------------------
int64_t jstart, jend ;
GB_PARTITION (jstart, jend, bvdim, tid, ntasks) ;
int64_t jtask = jend - jstart ;
int64_t jpanel = GB_IMIN (jtask, GB_PANEL_SIZE) ;
#if GB_C_IS_BITMAP
int64_t task_cnvals = 0 ;
#endif
//------------------------------------------------------------------
// get the workspace for this task
//------------------------------------------------------------------
// Hf and Hx workspace to compute the panel of C
#if GB_C_IS_BITMAP
int8_t *restrict Hf = Wf + (H_slice [tid] * cvlen) ;
#endif
#if ( !GB_IS_ANY_PAIR_SEMIRING )
GB_CTYPE *restrict Hx = (GB_CTYPE *) (Wcx + H_slice [tid] * cvlenx);
#endif
//------------------------------------------------------------------
// clear the panel
//------------------------------------------------------------------
#if GB_C_IS_BITMAP
memset (Hf, 0, jpanel * cvlen) ;
#endif
//------------------------------------------------------------------
// C<#M>(:,jstart:jend-1) += A * B(:,jstart:jend-1) by panel
//------------------------------------------------------------------
for (int64_t j1 = jstart ; j1 < jend ; j1 += jpanel)
{
//--------------------------------------------------------------
// get the panel of np vectors j1:j2-1
//--------------------------------------------------------------
int64_t j2 = GB_IMIN (jend, j1 + jpanel) ;
int64_t np = j2 - j1 ;
//--------------------------------------------------------------
// G = B(:,j1:j2-1), of size bvlen-by-np, in column major order
//--------------------------------------------------------------
int8_t *restrict Gb = (int8_t *) (Bb + (j1 * bvlen)) ;
#if ( !GB_IS_ANY_PAIR_SEMIRING )
GB_BTYPE *restrict Gx = (GB_BTYPE *)
(((GB_void *) (B->x)) +
(B_iso ? 0 : ((j1 * bvlen) * GB_BSIZE))) ;
#endif
//--------------------------------------------------------------
// clear the panel H to compute C(:,j1:j2-1)
//--------------------------------------------------------------
#if ( !GB_C_IS_BITMAP )
if (np == 1)
{
// Make H an alias to C(:,j1)
int64_t j = j1 ;
int64_t pC_start = j * cvlen ; // get pointer to C(:,j)
Hx = Cx + pC_start ;
}
else
{
// Hx = identity
int64_t nc = np * cvlen ;
#if GB_HAS_IDENTITY_BYTE
memset (Hx, GB_IDENTITY_BYTE, nc * GB_CSIZE) ;
#else
for (int64_t i = 0 ; i < nc ; i++)
{
Hx [i] = GB_IDENTITY ;
}
#endif
}
#endif
#if GB_IS_PLUS_FC32_MONOID
float *restrict Hx_real = (float *) Hx ;
float *restrict Hx_imag = Hx_real + 1 ;
#elif GB_IS_PLUS_FC64_MONOID
double *restrict Hx_real = (double *) Hx ;
double *restrict Hx_imag = Hx_real + 1 ;
#endif
//--------------------------------------------------------------
// H += A*G for one panel
//--------------------------------------------------------------
#undef GB_B_kj_PRESENT
#if GB_B_IS_BITMAP
#define GB_B_kj_PRESENT(b) b
#else
#define GB_B_kj_PRESENT(b) 1
#endif
#undef GB_MULT_A_ik_G_kj
#if GB_IS_PAIR_MULTIPLIER
// t = A(i,k) * B (k,j) is already #defined as 1
#define GB_MULT_A_ik_G_kj(gkj,jj)
#else
// t = A(i,k) * B (k,j)
#define GB_MULT_A_ik_G_kj(gkj,jj) \
GB_CIJ_DECLARE (t) ; \
GB_MULT (t, aik, gkj, i, k, j1 + jj)
#endif
#undef GB_HX_COMPUTE
#if GB_C_IS_BITMAP
#define GB_HX_COMPUTE(gkj,gb,jj) \
{ \
/* H (i,jj) += A(i,k) * B(k,j) */ \
if (GB_B_kj_PRESENT (gb)) \
{ \
/* t = A(i,k) * B (k,j) */ \
GB_MULT_A_ik_G_kj (gkj, jj) ; \
if (Hf [pH+jj] == 0) \
{ \
/* H(i,jj) is a new entry */ \
GB_HX_WRITE (pH+jj, t) ; /* Hx(i,jj)=t */ \
Hf [pH+jj] = 1 ; \
} \
else \
{ \
/* H(i,jj) is already present */ \
/* Hx(i,jj)+=t */ \
GB_HX_UPDATE (pH+jj, t) ; \
} \
} \
}
#else
#define GB_HX_COMPUTE(gkj,gb,jj) \
{ \
/* H (i,jj) += A(i,k) * B(k,j) */ \
if (GB_B_kj_PRESENT (gb)) \
{ \
/* t = A(i,k) * B (k,j) */ \
GB_MULT_A_ik_G_kj (gkj, jj) ; \
/* Hx(i,jj)+=t */ \
GB_HX_UPDATE (pH+jj, t) ; \
} \
}
#endif
switch (np)
{
case 4 :
for (int64_t kA = 0 ; kA < anvec ; kA++)
{
// get A(:,k)
const int64_t k = GBH (Ah, kA) ;
// get B(k,j1:j2-1)
#if GB_B_IS_BITMAP
const int8_t gb0 = Gb [k ] ;
const int8_t gb1 = Gb [k + bvlen] ;
const int8_t gb2 = Gb [k + 2*bvlen] ;
const int8_t gb3 = Gb [k + 3*bvlen] ;
if (!(gb0 || gb1 || gb2 || gb3)) continue ;
#endif
GB_GETB (gk0, Gx, k , B_iso) ;
GB_GETB (gk1, Gx, k + bvlen, B_iso) ;
GB_GETB (gk2, Gx, k + 2*bvlen, B_iso) ;
GB_GETB (gk3, Gx, k + 3*bvlen, B_iso) ;
// H += A(:,k)*B(k,j1:j2-1)
const int64_t pA_end = Ap [kA+1] ;
for (int64_t pA = Ap [kA] ; pA < pA_end ; pA++)
{
const int64_t i = Ai [pA] ;
const int64_t pH = i * 4 ;
GB_GETA (aik, Ax, pA, A_iso) ;
GB_HX_COMPUTE (gk0, gb0, 0) ;
GB_HX_COMPUTE (gk1, gb1, 1) ;
GB_HX_COMPUTE (gk2, gb2, 2) ;
GB_HX_COMPUTE (gk3, gb3, 3) ;
}
}
break ;
case 3 :
for (int64_t kA = 0 ; kA < anvec ; kA++)
{
// get A(:,k)
const int64_t k = GBH (Ah, kA) ;
// get B(k,j1:j2-1)
#if GB_B_IS_BITMAP
const int8_t gb0 = Gb [k ] ;
const int8_t gb1 = Gb [k + bvlen] ;
const int8_t gb2 = Gb [k + 2*bvlen] ;
if (!(gb0 || gb1 || gb2)) continue ;
#endif
GB_GETB (gk0, Gx, k , B_iso) ;
GB_GETB (gk1, Gx, k + bvlen, B_iso) ;
GB_GETB (gk2, Gx, k + 2*bvlen, B_iso) ;
// H += A(:,k)*B(k,j1:j2-1)
const int64_t pA_end = Ap [kA+1] ;
for (int64_t pA = Ap [kA] ; pA < pA_end ; pA++)
{
const int64_t i = Ai [pA] ;
const int64_t pH = i * 3 ;
GB_GETA (aik, Ax, pA, A_iso) ;
GB_HX_COMPUTE (gk0, gb0, 0) ;
GB_HX_COMPUTE (gk1, gb1, 1) ;
GB_HX_COMPUTE (gk2, gb2, 2) ;
}
}
break ;
case 2 :
for (int64_t kA = 0 ; kA < anvec ; kA++)
{
// get A(:,k)
const int64_t k = GBH (Ah, kA) ;
// get B(k,j1:j2-1)
#if GB_B_IS_BITMAP
const int8_t gb0 = Gb [k ] ;
const int8_t gb1 = Gb [k + bvlen] ;
if (!(gb0 || gb1)) continue ;
#endif
// H += A(:,k)*B(k,j1:j2-1)
GB_GETB (gk0, Gx, k , B_iso) ;
GB_GETB (gk1, Gx, k + bvlen, B_iso) ;
const int64_t pA_end = Ap [kA+1] ;
for (int64_t pA = Ap [kA] ; pA < pA_end ; pA++)
{
const int64_t i = Ai [pA] ;
const int64_t pH = i * 2 ;
GB_GETA (aik, Ax, pA, A_iso) ;
GB_HX_COMPUTE (gk0, gb0, 0) ;
GB_HX_COMPUTE (gk1, gb1, 1) ;
}
}
break ;
case 1 :
for (int64_t kA = 0 ; kA < anvec ; kA++)
{
// get A(:,k)
const int64_t k = GBH (Ah, kA) ;
// get B(k,j1:j2-1) where j1 == j2-1
#if GB_B_IS_BITMAP
const int8_t gb0 = Gb [k] ;
if (!gb0) continue ;
#endif
// H += A(:,k)*B(k,j1:j2-1)
GB_GETB (gk0, Gx, k, B_iso) ;
const int64_t pA_end = Ap [kA+1] ;
for (int64_t pA = Ap [kA] ; pA < pA_end ; pA++)
{
const int64_t i = Ai [pA] ;
const int64_t pH = i ;
GB_GETA (aik, Ax, pA, A_iso) ;
GB_HX_COMPUTE (gk0, 1, 0) ;
}
}
break ;
default:;
}
#undef GB_HX_COMPUTE
#undef GB_B_kj_PRESENT
#undef GB_MULT_A_ik_G_kj
//--------------------------------------------------------------
// C<#M>(:,j1:j2-1) = H
//--------------------------------------------------------------
#if ( !GB_C_IS_BITMAP )
if (np == 1)
{
// Hx is already aliased to Cx; no more work to do
continue ;
}
#endif
for (int64_t jj = 0 ; jj < np ; jj++)
{
//----------------------------------------------------------
// C<#M>(:,j) = H (:,jj)
//----------------------------------------------------------
int64_t j = j1 + jj ;
int64_t pC_start = j * cvlen ; // get pointer to C(:,j)
for (int64_t i = 0 ; i < cvlen ; i++)
{
int64_t pC = pC_start + i ; // pointer to C(i,j)
int64_t pH = i * np + jj ; // pointer to H(i,jj)
#if GB_C_IS_BITMAP
if (!Hf [pH]) continue ;
Hf [pH] = 0 ; // clear the panel
int8_t cb = Cb [pC] ;
#endif
//------------------------------------------------------
// check M(i,j)
//------------------------------------------------------
#if GB_MASK_IS_SPARSE_OR_HYPER
// M is sparse or hypersparse
bool mij = ((cb & 2) != 0) ^ Mask_comp ;
if (!mij) continue ;
cb = (cb & 1) ;
#elif GB_MASK_IS_BITMAP_OR_FULL
// M is bitmap or full
GB_GET_M_ij (pC) ;
mij = mij ^ Mask_comp ;
if (!mij) continue ;
#endif
//------------------------------------------------------
// C(i,j) += H(i,jj)
//------------------------------------------------------
#if GB_C_IS_BITMAP
if (cb == 0)
{
// C(i,j) = H(i,jj)
GB_CIJ_GATHER (pC, pH) ;
Cb [pC] = keep ;
task_cnvals++ ;
}
else
{
// Currently, the matrix C is a newly allocated
// matrix, not the C_in input matrix to GrB_mxm.
// As a result, this condition is not used. It
// will be in the future when this method is
// modified to modify C in-place.
ASSERT (GB_DEAD_CODE) ;
// C(i,j) += H(i,jj)
GB_CIJ_GATHER_UPDATE (pC, pH) ;
}
#else
{
// C(i,j) = H(i,jj)
GB_CIJ_GATHER_UPDATE (pC, pH) ;
}
#endif
}
}
}
#if GB_C_IS_BITMAP
cnvals += task_cnvals ;
#endif
}
#undef GB_PANEL_SIZE
}
else if (use_atomics)
{
//----------------------------------------------------------------------
// C<#M> += A*B using fine tasks and atomics
//----------------------------------------------------------------------
if (B_iso)
{
// No special cases needed. GB_GET_B_kj (bkj = B(k,j))
// handles the B iso case.
}
int tid ;
#if GB_C_IS_BITMAP
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:cnvals)
#else
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
#endif
for (tid = 0 ; tid < ntasks ; tid++)
{
//------------------------------------------------------------------
// determine the vector of B and C for this fine task
//------------------------------------------------------------------
// The fine task operates on C(:,j) and B(:,j). Its fine task
// id ranges from 0 to nfine_tasks_per_vector-1, and determines
// which slice of A to operate on.
int64_t j = tid / nfine_tasks_per_vector ;
int fine_tid = tid % nfine_tasks_per_vector ;
int64_t kfirst = A_slice [fine_tid] ;
int64_t klast = A_slice [fine_tid + 1] ;
int64_t pB_start = j * bvlen ; // pointer to B(:,j)
int64_t pC_start = j * cvlen ; // pointer to C(:,j)
GB_GET_T_FOR_SECONDJ ; // t = j or j+1 for SECONDJ*
#if GB_C_IS_BITMAP
int64_t task_cnvals = 0 ;
#endif
// for Hx Gustavason workspace: use C(:,j) in-place:
#if ( !GB_IS_ANY_PAIR_SEMIRING )
GB_CTYPE *restrict Hx = (GB_CTYPE *)
(((GB_void *) Cx) + (pC_start * GB_CSIZE)) ;
#endif
#if GB_IS_PLUS_FC32_MONOID || GB_IS_ANY_FC32_MONOID
float *restrict Hx_real = (float *) Hx ;
float *restrict Hx_imag = Hx_real + 1 ;
#elif GB_IS_PLUS_FC64_MONOID || GB_IS_ANY_FC64_MONOID
double *restrict Hx_real = (double *) Hx ;
double *restrict Hx_imag = Hx_real + 1 ;
#endif
//------------------------------------------------------------------
// C<#M>(:,j) += A(:,k1:k2) * B(k1:k2,j)
//------------------------------------------------------------------
for (int64_t kk = kfirst ; kk < klast ; kk++)
{
//--------------------------------------------------------------
// C<#M>(:,j) += A(:,k) * B(k,j)
//--------------------------------------------------------------
int64_t k = GBH (Ah, kk) ; // k in range k1:k2
int64_t pB = pB_start + k ; // get pointer to B(k,j)
#if GB_B_IS_BITMAP
if (!GBB (Bb, pB)) continue ;
#endif
int64_t pA = Ap [kk] ;
int64_t pA_end = Ap [kk+1] ;
GB_GET_B_kj ; // bkj = B(k,j)
for ( ; pA < pA_end ; pA++)
{
//----------------------------------------------------------
// get A(i,k) and C(i,j)
//----------------------------------------------------------
int64_t i = Ai [pA] ; // get A(i,k) index
int64_t pC = pC_start + i ; // get C(i,j) pointer
//----------------------------------------------------------
// C<#M>(i,j) += A(i,k) * B(k,j)
//----------------------------------------------------------
#if ( !GB_C_IS_BITMAP )
{
//------------------------------------------------------
// C is full: the monoid is always atomic
//------------------------------------------------------
GB_MULT_A_ik_B_kj ; // t = A(i,k) * B(k,j)
GB_ATOMIC_UPDATE_HX (i, t) ; // C(i,j) += t
}
#elif GB_MASK_IS_SPARSE_OR_HYPER
{
//------------------------------------------------------
// M is sparse, and scattered into the C bitmap
//------------------------------------------------------
// finite-state machine in Cb [pC]:
// 0: cij not present, mij zero
// 1: cij present, mij zero (keep==1 for !M)
// 2: cij not present, mij one
// 3: cij present, mij one (keep==3 for M)
// 7: cij is locked
int8_t cb ;
#if GB_HAS_ATOMIC
{
// if C(i,j) is already present and can be modified
// (cb==keep), and the monoid can be done
// atomically, then do the atomic update. No need
// to modify Cb [pC].
GB_ATOMIC_READ
cb = Cb [pC] ; // grab the entry
if (cb == keep)
{
#if !GB_IS_ANY_MONOID
GB_MULT_A_ik_B_kj ; // t = A(i,k) * B(k,j)
GB_ATOMIC_UPDATE_HX (i, t) ; // C(i,j) += t
#endif
continue ; // C(i,j) has been updated
}
}
#endif
do // lock the entry
{
// do this atomically:
// { cb = Cb [pC] ; Cb [pC] = 7 ; }
GB_ATOMIC_CAPTURE_INT8 (cb, Cb [pC], 7) ;
} while (cb == 7) ; // lock owner gets 0, 1, 2, or 3
if (cb == keep-1)
{
// C(i,j) is a new entry
GB_MULT_A_ik_B_kj ; // t = A(i,k)*B(k,j)
GB_ATOMIC_WRITE_HX (i, t) ; // C(i,j) = t
task_cnvals++ ;
cb = keep ; // keep the entry
}
else if (cb == keep)
{
// C(i,j) is already present
#if !GB_IS_ANY_MONOID
GB_MULT_A_ik_B_kj ; // t = A(i,k)*B(k,j)
GB_ATOMIC_UPDATE_HX (i, t) ; // C(i,j) += t
#endif
}
GB_ATOMIC_WRITE
Cb [pC] = cb ; // unlock the entry
}
#else
{
//------------------------------------------------------
// M is not present, or bitmap/full
//------------------------------------------------------
// finite-state machine in Cb [pC]:
// 0: cij not present; can be written
// 1: cij present; can be updated
// 7: cij is locked
#if GB_MASK_IS_BITMAP_OR_FULL
{
// M is bitmap or full, and not in C bitmap.
// Do not modify C(i,j) if not permitted by the mask
GB_GET_M_ij (pC) ;
mij = mij ^ Mask_comp ;
if (!mij) continue ;
}
#endif
//------------------------------------------------------
// C(i,j) += A(i,j) * B(k,j)
//------------------------------------------------------
int8_t cb ;
#if GB_HAS_ATOMIC
{
// if C(i,j) is already present (cb==1), and the
// monoid can be done atomically, then do the
// atomic update. No need to modify Cb [pC].
GB_ATOMIC_READ
cb = Cb [pC] ; // grab the entry
if (cb == 1)
{
#if !GB_IS_ANY_MONOID
GB_MULT_A_ik_B_kj ; // t = A(i,k) * B(k,j)
GB_ATOMIC_UPDATE_HX (i, t) ; // C(i,j) += t
#endif
continue ; // C(i,j) has been updated
}
}
#endif
do // lock the entry
{
// do this atomically:
// { cb = Cb [pC] ; Cb [pC] = 7 ; }
GB_ATOMIC_CAPTURE_INT8 (cb, Cb [pC], 7) ;
} while (cb == 7) ; // lock owner gets 0 or 1
if (cb == 0)
{
// C(i,j) is a new entry
GB_MULT_A_ik_B_kj ; // t = A(i,k)*B(k,j)
GB_ATOMIC_WRITE_HX (i, t) ; // C(i,j) = t
task_cnvals++ ;
}
else // cb == 1
{
// C(i,j) is already present
#if !GB_IS_ANY_MONOID
GB_MULT_A_ik_B_kj ; // t = A(i,k)*B(k,j)
GB_ATOMIC_UPDATE_HX (i, t) ; // C(i,j) += t
#endif
}
GB_ATOMIC_WRITE
Cb [pC] = 1 ; // unlock the entry
}
#endif
}
}
#if GB_C_IS_BITMAP
cnvals += task_cnvals ;
#endif
}
}
else
{
//----------------------------------------------------------------------
// C<#M> += A*B using fine tasks and workspace, with no atomics
//----------------------------------------------------------------------
// Each fine task is given size-cvlen workspace to compute its result
// in the first phase, W(:,tid) = A(:,k1:k2) * B(k1:k2,j), where k1:k2
// is defined by the fine_tid of the task. The workspaces are then
// summed into C in the second phase.
if (B_iso)
{
// No special cases needed. GB_GET_B_kj (bkj = B(k,j))
// handles the B iso case.
}
//----------------------------------------------------------------------
// allocate workspace
//----------------------------------------------------------------------
size_t workspace = cvlen * ntasks ;
size_t cxsize = (GB_IS_ANY_PAIR_SEMIRING) ? 0 : GB_CSIZE ;
#if GB_C_IS_BITMAP
Wf = GB_MALLOC_WORK (workspace, int8_t, &Wf_size) ;
#endif
Wcx = GB_MALLOC_WORK (workspace * cxsize, GB_void, &Wcx_size) ;
if ((GB_C_IS_BITMAP && Wf == NULL) || Wcx == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// first phase: W (:,tid) = A (:,k1:k2) * B (k2:k2,j) for each fine task
//----------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
//------------------------------------------------------------------
// determine the vector of B and C for this fine task
//------------------------------------------------------------------
// The fine task operates on C(:,j) and B(:,j). Its fine task
// id ranges from 0 to nfine_tasks_per_vector-1, and determines
// which slice of A to operate on.
int64_t j = tid / nfine_tasks_per_vector ;
int fine_tid = tid % nfine_tasks_per_vector ;
int64_t kfirst = A_slice [fine_tid] ;
int64_t klast = A_slice [fine_tid + 1] ;
int64_t pB_start = j * bvlen ; // pointer to B(:,j)
int64_t pC_start = j * cvlen ; // pointer to C(:,j), for bitmap
int64_t pW_start = tid * cvlen ; // pointer to W(:,tid)
GB_GET_T_FOR_SECONDJ ; // t = j or j+1 for SECONDJ*
#if GB_C_IS_BITMAP
int64_t task_cnvals = 0 ;
#endif
// for Hf and Hx Gustavason workspace: use W(:,tid):
#if GB_C_IS_BITMAP
int8_t *restrict Hf = Wf + pW_start ;
#endif
#if ( !GB_IS_ANY_PAIR_SEMIRING )
GB_CTYPE *restrict Hx = (GB_CTYPE *) (Wcx + (pW_start * cxsize)) ;
#endif
#if GB_IS_PLUS_FC32_MONOID
float *restrict Hx_real = (float *) Hx ;
float *restrict Hx_imag = Hx_real + 1 ;
#elif GB_IS_PLUS_FC64_MONOID
double *restrict Hx_real = (double *) Hx ;
double *restrict Hx_imag = Hx_real + 1 ;
#endif
//------------------------------------------------------------------
// clear the panel
//------------------------------------------------------------------
#if GB_C_IS_BITMAP
{
memset (Hf, 0, cvlen) ;
}
#else
{
// set Hx to identity
#if GB_HAS_IDENTITY_BYTE
memset (Hx, GB_IDENTITY_BYTE, cvlen * GB_CSIZE) ;
#else
for (int64_t i = 0 ; i < cvlen ; i++)
{
Hx [i] = GB_IDENTITY ;
}
#endif
}
#endif
//------------------------------------------------------------------
// W<#M> = A(:,k1:k2) * B(k1:k2,j)
//------------------------------------------------------------------
for (int64_t kk = kfirst ; kk < klast ; kk++)
{
//--------------------------------------------------------------
// W<#M>(:,tid) += A(:,k) * B(k,j)
//--------------------------------------------------------------
int64_t k = GBH (Ah, kk) ; // k in range k1:k2
int64_t pB = pB_start + k ; // get pointer to B(k,j)
#if GB_B_IS_BITMAP
if (!GBB (Bb, pB)) continue ;
#endif
int64_t pA = Ap [kk] ;
int64_t pA_end = Ap [kk+1] ;
GB_GET_B_kj ; // bkj = B(k,j)
for ( ; pA < pA_end ; pA++)
{
//----------------------------------------------------------
// get A(i,k)
//----------------------------------------------------------
int64_t i = Ai [pA] ; // get A(i,k) index
//----------------------------------------------------------
// check M(i,j)
//----------------------------------------------------------
#if GB_MASK_IS_SPARSE_OR_HYPER
{
// M is sparse or hypersparse
int64_t pC = pC_start + i ;
int8_t cb = Cb [pC] ;
bool mij = ((cb & 2) != 0) ^ Mask_comp ;
if (!mij) continue ;
}
#elif GB_MASK_IS_BITMAP_OR_FULL
{
// M is bitmap or full
int64_t pC = pC_start + i ;
GB_GET_M_ij (pC) ;
mij = mij ^ Mask_comp ;
if (!mij) continue ;
}
#endif
//----------------------------------------------------------
// W<#M>(i) += A(i,k) * B(k,j)
//----------------------------------------------------------
#if GB_IS_ANY_PAIR_SEMIRING
{
Hf [i] = 1 ;
}
#else
{
GB_MULT_A_ik_B_kj ; // t = A(i,k)*B(k,j)
#if GB_C_IS_BITMAP
if (Hf [i] == 0)
{
// W(i) is a new entry
GB_HX_WRITE (i, t) ; // Hx(i) = t
Hf [i] = 1 ;
}
else
#endif
{
// W(i) is already present
GB_HX_UPDATE (i, t) ; // Hx(i) += t
}
}
#endif
}
}
}
//----------------------------------------------------------------------
// second phase: C<#M> += reduce (W)
//----------------------------------------------------------------------
#if GB_C_IS_BITMAP
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:cnvals)
#else
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
#endif
for (tid = 0 ; tid < ntasks ; tid++)
{
//------------------------------------------------------------------
// determine the W and C for this fine task
//------------------------------------------------------------------
// The fine task operates on C(i1:i2,j) and W(i1:i2,w1:w2), where
// i1:i2 is defined by the fine task id. Its fine task id ranges
// from 0 to nfine_tasks_per_vector-1.
// w1:w2 are the updates to C(:,j), where w1:w2 =
// [j*nfine_tasks_per_vector : (j+1)*nfine_tasks_per_vector-1].
int64_t j = tid / nfine_tasks_per_vector ;
int fine_tid = tid % nfine_tasks_per_vector ;
int64_t istart, iend ;
GB_PARTITION (istart, iend, cvlen, fine_tid,
nfine_tasks_per_vector) ;
int64_t pC_start = j * cvlen ; // pointer to C(:,j)
int64_t wstart = j * nfine_tasks_per_vector ;
int64_t wend = (j + 1) * nfine_tasks_per_vector ;
#if GB_C_IS_BITMAP
int64_t task_cnvals = 0 ;
#endif
// Hx = (typecasted) Wcx workspace, use Wf as-is
#if ( !GB_IS_ANY_PAIR_SEMIRING )
GB_CTYPE *restrict Hx = ((GB_CTYPE *) Wcx) ;
#endif
#if GB_IS_PLUS_FC32_MONOID
float *restrict Hx_real = (float *) Hx ;
float *restrict Hx_imag = Hx_real + 1 ;
#elif GB_IS_PLUS_FC64_MONOID
double *restrict Hx_real = (double *) Hx ;
double *restrict Hx_imag = Hx_real + 1 ;
#endif
//------------------------------------------------------------------
// C<#M>(i1:i2,j) += reduce (W (i2:i2, wstart:wend))
//------------------------------------------------------------------
for (int64_t w = wstart ; w < wend ; w++)
{
//--------------------------------------------------------------
// C<#M>(i1:i2,j) += W (i1:i2,w)
//--------------------------------------------------------------
int64_t pW_start = w * cvlen ; // pointer to W (:,w)
for (int64_t i = istart ; i < iend ; i++)
{
//----------------------------------------------------------
// get pointer and bitmap C(i,j) and W(i,w)
//----------------------------------------------------------
int64_t pW = pW_start + i ; // pointer to W(i,w)
#if GB_C_IS_BITMAP
if (Wf [pW] == 0) continue ; // skip if not present
#endif
int64_t pC = pC_start + i ; // pointer to C(i,j)
#if GB_C_IS_BITMAP
int8_t cb = Cb [pC] ; // bitmap status of C(i,j)
#endif
//----------------------------------------------------------
// M(i,j) already checked, but adjust Cb if M is sparse
//----------------------------------------------------------
#if GB_MASK_IS_SPARSE_OR_HYPER
{
// M is sparse or hypersparse
cb = (cb & 1) ;
}
#endif
//----------------------------------------------------------
// C(i,j) += W (i,w)
//----------------------------------------------------------
#if GB_C_IS_BITMAP
if (cb == 0)
{
// C(i,j) = W(i,w)
GB_CIJ_GATHER (pC, pW) ;
Cb [pC] = keep ;
task_cnvals++ ;
}
else
#endif
{
// C(i,j) += W(i,w)
GB_CIJ_GATHER_UPDATE (pC, pW) ;
}
}
}
#if GB_C_IS_BITMAP
cnvals += task_cnvals ;
#endif
}
}
}
|
transforms.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <complex.h>
#include <omp.h>
#include "logger.h"
//void morlet(int ndata, int n_nu, int n_eta, double conv_ext, double fourier_b,
// double *data, double *nu, double *eta, double complex *out){
// // Discrete Morlet Wavelet transform, using Morlet basis from Goupillaud 1984 (Eq. 5, 6 - with b=2pi)
//
// int ix, jnuc,jeta, jnu, thisn;
// double exponent, mag, extent, dt;
//
// dt = nu[1] - nu[0];
//
// double sqrt2 = sqrt(2.0);
// int index = 0;
//
// for (ix=0;ix<ndata;ix++){
// for (jnuc=0;jnuc<n_nu;jnuc++){
// for (jeta=0; jeta<n_eta;jeta++){
// extent = 1/(eta[jeta]*sqrt2);
// thisn = ceil(conv_ext*extent/dt);
//
// for (jnu=fmax(0, jnuc-thisn); jnu<fmin(jnuc+thisn, n_nu); jnu++){
// exponent = eta[jeta]*(nu[jnu] - nu[jnuc]);
// out[index] += data[ix*n_nu + jnu]*cexp(-exponent*(exponent/2 + fourier_b*I));
// }
// index++;
// }
// }
// }
//}
// Return the larger of two ints.
// `static` is required here: under C99/C11 semantics a plain `inline`
// function provides no external definition, so any call the compiler
// chooses not to inline becomes an unresolved symbol at link time.
// `static inline` is the portable idiom for a file-local helper.
static inline int max(int a, int b) {
    return a > b ? a : b;
}
// Return the smaller of two ints.
// `static` is required here for the same reason as max() above would need
// it: a plain C99 `inline` function emits no external definition, so a
// non-inlined call site fails to link.  `static inline` makes the helper
// self-contained in this translation unit.
static inline int min(int a, int b) {
    return a < b ? a : b;
}
void cmorlet(unsigned int ndata, unsigned int n_nu, unsigned int n_eta,
             double conv_ext, double fourier_b,
             double complex *data, double *nu, double *eta, int nthreads,
             double complex *out){
    /*
    Discrete Morlet Wavelet transform
    =================================
    Uses Morlet basis from Goupillaud 1984 (Eq. 5, 6 - with b=2pi)

    Notes
    -----
    The SHAPE of any of the below args indicates the *ordering* of the
    raveled array, with last axis moving first.  `nu` must be uniformly
    spaced with at least two points (the grid step is taken from
    nu[1] - nu[0]), and every eta[j] must be non-zero (thisn would
    otherwise overflow via division by zero).

    Args
    ----
    ndata (unsigned int) :
        Number of different data sets to be transformed (each is independent)
    n_nu (unsigned int) :
        Number of real-space cells (eg. frequencies, in terms of visibilities)
    n_eta (unsigned int) :
        Number of fourier-space cells to transform to, should be ~1/2 n_nu
    conv_ext (double) :
        Convergence extent. The number of Morlet kernel sigma "widths" to
        actually perform integration for. Should be ~5 or more.
    fourier_b (double) :
        The Fourier convention, i.e. the Fourier kernel is e^{-bi nu*eta}.
    data (double complex, SHAPE=[n_nu, ndata]) :
        The input (complex) data.
    nu (double, SHAPE=[n_nu]):
        Real-space co-ordinates (i.e. frequencies, in terms of visibilities)
    eta (double, SHAPE=[n_eta]):
        Fourier-space co-ordinates (dual of nu).
    nthreads (int) :
        Number of threads to use in OMP.

    Returns
    -------
    out (double complex, SHAPE=[n_eta, n_nu, ndata]):
        The resulting Morlet transform.  Results are accumulated with +=,
        so the caller must zero-initialise this array.
    */
    unsigned int ix, jnuc, jeta, jnu, jidx, jmin, jmax;
    double exponent;
    double complex xx;
    int thisn;

    double sqrt2 = sqrt(2.0);
    unsigned int out_idx = 0;
    unsigned int data_idx = 0;
    double sqrt2dnu = sqrt2*(nu[1] - nu[0]);

    omp_set_num_threads(nthreads);

    // Each jeta iteration writes only to the disjoint out[jeta,:,:] slice,
    // so the outer loop parallelises safely; all scratch state is private.
    #pragma omp parallel for private(thisn, jidx, out_idx, jnuc, jmin, jmax, data_idx, exponent, xx, jnu, ix)
    for (jeta=0; jeta<n_eta;jeta++){ // Loop through eta
        // Half-width (in grid cells) of the truncated Morlet kernel.
        thisn = ceil(conv_ext/(eta[jeta]*sqrt2dnu));

        // Base offset of this thread's output slice.
        // We do this to be able to multi-thread
        jidx = jeta * n_nu * ndata;
        out_idx = 0;

        LOG_DEBUG("jeta=%u, jidx=%u, thisn=%d", jeta, jidx, thisn);

        for (jnuc=0;jnuc<n_nu;jnuc++){ // Loop through nu_centre
            // Clamp the kernel support [jnuc-thisn, jnuc+thisn) to [0, n_nu).
            // jnuc is unsigned, so test before subtracting: forming
            // `jnuc - thisn` when thisn > jnuc wraps around modulo 2^32, and
            // converting that huge unsigned value back to int inside
            // max(0, ...) (as the previous code did) is implementation-
            // defined behaviour.  This form is fully defined and equivalent.
            jmin = (jnuc > (unsigned int) thisn) ? jnuc - (unsigned int) thisn : 0u;
            jmax = min(jnuc+thisn, n_nu);
            data_idx = jmin*ndata;

            LOG_SUPER_DEBUG("jnuc=%u, jmin=%u, jmax=%u", jnuc, jmin, jmax);

            for (jnu=jmin; jnu<jmax; jnu++){ // Loop through nu (i.e. do the FT)
                exponent = eta[jeta]*(nu[jnu] - nu[jnuc]);
                // Morlet kernel: Gaussian envelope times Fourier phase.
                xx = cexp(-exponent*(exponent/2 + fourier_b*I));

                for (ix=0;ix<ndata;ix++){ // Loop through different data
                    out[jidx + out_idx] += data[data_idx]*xx;
                    if(jeta==(n_eta-1) && jnuc==(n_nu-1) && ix==(ndata-1))
                        // Pass real/imag parts explicitly: handing a
                        // double complex to a %g conversion is undefined
                        // behaviour.
                        LOG_ULTRA_DEBUG("\t\tjnu=%u ix=%u indx=%u jidx=%u, out_idx=%u, data=%g + %gi xx=%g + %gi out=%g + %gi", jnu, ix, jidx+out_idx, jidx, out_idx, creal(data[data_idx]), cimag(data[data_idx]), creal(xx), cimag(xx), creal(out[jidx + out_idx]), cimag(out[jidx + out_idx]));
                    data_idx++;
                    out_idx++;
                }
                out_idx -= ndata; // out_idx should not contain jnu, so reset it.
            }
            out_idx += ndata;
        }
    }
}
void BlackmanHarris_cmorlet(unsigned int ndata, unsigned int n_nu, unsigned int n_eta,
                            double conv_ext, double *BlackmanHarrisFilter,
                            double complex *data, double *nu, double *eta, int nthreads,
                            double complex *out){
    /*
    Discrete Morlet Wavelet transform but replacing Gaussian filter with BlackmanHarris.
    BlackmanHarris form taken from scipy.signal.windows.blackmanharris (copied below)
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.windows.blackmanharris.html#scipy.signal.windows.blackmanharris
    Otherwise as cmorlet above: same array shapes, same accumulate-into-out
    semantics (caller must zero-initialise `out`), with `fourier_b` replaced
    by the pre-computed filter values `BlackmanHarrisFilter[n_nu]`.
    =================================
    */
    unsigned int ix, jnuc, jeta, jnu, jidx, jmin, jmax;
    double exponent;
    double complex xx;
    int thisn;

    double sqrt2 = sqrt(2.0);
    unsigned int out_idx = 0;
    unsigned int data_idx = 0;
    double sqrt2dnu = sqrt2*(nu[1] - nu[0]);

    omp_set_num_threads(nthreads);

    // Each jeta iteration writes only to the disjoint out[jeta,:,:] slice,
    // so the outer loop parallelises safely; all scratch state is private.
    #pragma omp parallel for private(thisn, jidx, out_idx, jnuc, jmin, jmax, data_idx, exponent, xx, jnu, ix)
    for (jeta=0; jeta<n_eta;jeta++){ // Loop through eta
        // Half-width (in grid cells) of the truncated kernel.
        thisn = ceil(conv_ext/(eta[jeta]*sqrt2dnu));

        // Base offset of this thread's output slice.
        // We do this to be able to multi-thread
        jidx = jeta * n_nu * ndata;
        out_idx = 0;

        LOG_DEBUG("jeta=%u, jidx=%u, thisn=%d", jeta, jidx, thisn);

        for (jnuc=0;jnuc<n_nu;jnuc++){ // Loop through nu_centre
            // Clamp the kernel support [jnuc-thisn, jnuc+thisn) to [0, n_nu).
            // jnuc is unsigned, so test before subtracting: forming
            // `jnuc - thisn` when thisn > jnuc wraps around modulo 2^32, and
            // converting that back to int inside max(0, ...) (as the previous
            // code did) is implementation-defined.  This form is equivalent
            // and fully defined.
            jmin = (jnuc > (unsigned int) thisn) ? jnuc - (unsigned int) thisn : 0u;
            jmax = min(jnuc+thisn, n_nu);
            data_idx = jmin*ndata;

            LOG_SUPER_DEBUG("jnuc=%u, jmin=%u, jmax=%u", jnuc, jmin, jmax);

            for (jnu=jmin; jnu<jmax; jnu++){ // Loop through nu (i.e. do the FT)
                exponent = eta[jeta]*(nu[jnu] - nu[jnuc]);
                //xx = cexp(-exponent*(exponent/2 + fourier_b*I)); // --Filter change Fourier to blackmanharris.
                //general_cosine(M, [0.35875, 0.48829, 0.14128, 0.01168], sym)
                // NOTE(review): the filter is indexed by the window centre
                // jnuc, not the sample jnu — confirm this is the intended
                // windowing convention against the Python caller.
                xx = cexp(-exponent*exponent/2 - BlackmanHarrisFilter[jnuc]);

                for (ix=0;ix<ndata;ix++){ // Loop through different data
                    out[jidx + out_idx] += data[data_idx]*xx;
                    if(jeta==(n_eta-1) && jnuc==(n_nu-1) && ix==(ndata-1))
                        // Pass real/imag parts explicitly: handing a
                        // double complex to a %g conversion is undefined
                        // behaviour.
                        LOG_ULTRA_DEBUG("\t\tjnu=%u ix=%u indx=%u jidx=%u, out_idx=%u, data=%g + %gi xx=%g + %gi out=%g + %gi", jnu, ix, jidx+out_idx, jidx, out_idx, creal(data[data_idx]), cimag(data[data_idx]), creal(xx), cimag(xx), creal(out[jidx + out_idx]), cimag(out[jidx + out_idx]));
                    data_idx++;
                    out_idx++;
                }
                out_idx -= ndata; // out_idx should not contain jnu, so reset it.
            }
            out_idx += ndata;
        }
    }
}
/* def blackmanharris(M, sym=True):
"""Return a minimum 4-term Blackman-Harris window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.windows.blackmanharris(51)
>>> plt.plot(window)
>>> plt.title("Blackman-Harris window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Blackman-Harris window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
return general_cosine(M, [0.35875, 0.48829, 0.14128, 0.01168], sym)
def general_cosine(M, a, sym=True):
r"""
Generic weighted sum of cosine terms window
Parameters
----------
M : int
Number of points in the output window
a : array_like
Sequence of weighting coefficients. This uses the convention of being
centered on the origin, so these will typically all be positive
numbers, not alternating sign.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
References
----------
.. [1] A. Nuttall, "Some windows with very good sidelobe behavior," IEEE
Transactions on Acoustics, Speech, and Signal Processing, vol. 29,
no. 1, pp. 84-91, Feb 1981. :doi:`10.1109/TASSP.1981.1163506`.
.. [2] Heinzel G. et al., "Spectrum and spectral density estimation by the
Discrete Fourier transform (DFT), including a comprehensive list of
window functions and some new flat-top windows", February 15, 2002
https://holometer.fnal.gov/GH_FFT.pdf
Examples
--------
Heinzel describes a flat-top window named "HFT90D" with formula: [2]_
.. math:: w_j = 1 - 1.942604 \cos(z) + 1.340318 \cos(2z)
- 0.440811 \cos(3z) + 0.043097 \cos(4z)
where
.. math:: z = \frac{2 \pi j}{N}, j = 0...N - 1
Since this uses the convention of starting at the origin, to reproduce the
window, we need to convert every other coefficient to a positive number:
>>> HFT90D = [1, 1.942604, 1.340318, 0.440811, 0.043097]
The paper states that the highest sidelobe is at -90.2 dB. Reproduce
Figure 42 by plotting the window and its frequency response, and confirm
the sidelobe level in red:
>>> from scipy.signal.windows import general_cosine
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = general_cosine(1000, HFT90D, sym=False)
>>> plt.plot(window)
>>> plt.title("HFT90D window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 10000) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = np.abs(fftshift(A / abs(A).max()))
>>> response = 20 * np.log10(np.maximum(response, 1e-10))
>>> plt.plot(freq, response)
>>> plt.axis([-50/1000, 50/1000, -140, 0])
>>> plt.title("Frequency response of the HFT90D window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
>>> plt.axhline(-90.2, color='red')
>>> plt.show()
"""
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
fac = np.linspace(-np.pi, np.pi, M)
w = np.zeros(M)
for k in range(len(a)):
w += a[k] * np.cos(k * fac)
return _truncate(w, needs_trunc)
*/
|
GB_unaryop__abs_int8_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int8_fp64
// op(A') function: GB_tran__abs_int8_fp64
// C type: int8_t
// A type: double
// cast: int8_t cij ; GB_CAST_SIGNED(cij,aij,8)
// unaryop: cij = GB_IABS (aij)
// type of the A (input) matrix entries
#define GB_ATYPE \
double
// type of the C (output) matrix entries
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// access the p-th entry of the output array Cx
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
// declares z as int8_t and casts x (double) into it with signed saturation
#define GB_CASTING(z, x) \
int8_t z ; GB_CAST_SIGNED(z,x,8) ;
// cij = op (cast (aij))
// full pipeline for one entry: load aij, cast double -> int8_t, then abs
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT8 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): for each of the anz entries, cast the double value
// Ax [p] to int8_t (GB_CASTING) and store its integer absolute value
// (GB_IABS) into Cx [p].  Work is split statically across nthreads OpenMP
// threads.  Returns GrB_NO_VALUE when this operator is compiled out
// (GB_DISABLE), GrB_SUCCESS otherwise.
GrB_Info GB_unop__abs_int8_fp64
(
int8_t *restrict Cx,
const double *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// Cx [p] = GB_IABS ((int8_t) Ax [p]) -- expansion of GB_CAST_OP above
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast double -> int8_t, and apply the
// abs unary operator, all in one pass.  The implementation lives in the
// template GB_unaryop_transpose.c, specialised via the GB_* macros defined
// earlier in this file.  Returns GrB_NO_VALUE when this operator is
// compiled out (GB_DISABLE), GrB_SUCCESS otherwise.
GrB_Info GB_tran__abs_int8_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
sc35_fixedRepulse.c | /*MODIFICATIONS:
 when wl=3 (rotation of the 0th particle) the particle is allowed to move
 we tried to remove a bug where, with wl=1, a particle occasionally gets stuck;
 it is caused by the motion of the center of mass:
 apparently, due to accumulation of numerical errors, the system's center of mass is recomputed from time to time,
 and that recomputation used periodic boundary conditions, which can spoil everything, because a particle that drifted into the neighbouring box
 shifts the center of mass when it is wrapped back into the primary box (-z/2 to z/2)
 under test - we removed the center-of-mass recomputation out of concern about numerical errors
 "F I N I S H E D" is printed to stdout once the WL conditions are met
*/
/*TODO in the future:
- Non-equilibrium candidate moves
- check scaling of particles of different sizes - should scale with contact area!
- cell list - divide simulation box in cells where
particles interact with each other and outside is definitely 0 - safe time
better scaling with system size, possibly long spherovylinders could be in several
celles to keep good scaling
- better cluster algorithm - put in wang-landau
- cluster list work for spherocylinders only now
*/
/*------------------------------------------------------------------------------
Version 3.5
- linear bond at spherocylinders, where second spherocilinder is harmonicaly
attached to a point that is in distance of bondlength from the first spherocylinder
and it follows the direction of spherocylinder
- bonded particles belong to the same cluster
- print energy at statistical reports
- have particles of different lengths
- interaction scaling back to v1+v2 (no addition of 1.0) - more physical
*/
/*------------------------------------------------------------------------------
Version 3.4
- New handling of the option file
- reaction coordinate radius around z axis for a pore calculations
- reaction coordinate as number of particles in contact (defined by distance of CMs)
- 2D Wang-Landau method
- New Wang-Landau coordinate - radius pore in vesicle around begining of xy plane
- New models TPSC, TCPSC, TCHPSC, TCHCPSC- models with two patches
note that switch function on sides of patch are linear in cos angle not in angle
as a results two patches with overlaping sides do not compensate easily to a flat profile
- FIX chirality was doubled (angle twice as large)
- Added posibility of exluded interactions [EXCLUDE] in topology file
- MPI replica exchange with different temperatures and pressure (paraltemp paralpress)
input configuration is #{number of process}config.init, if it does not exist config.init is used
each replica is with different random seed = seed+mpirank
- config.init can look like movie snapshot
- MPI exchange with Wang-Landau
- added angular interaction between neighboring spherocylinders (in chain)
angle1 is angle between sc directions and angle2 ins angle between the patches
*/
/*-------------------------------------------------------------------------------
Version 3.3
-external potential can be added as a part of the topology - it can be a hard or an attractive wall
*/
/**
* Changes made by Noah S. Bieler and Robert Vacha:
*
* New version 3.2
*
* - The length has now to be specified in the topology file, but they are not
* allowed to differ from each other. The option file shall no longer contain
* a length option.
* - The particles can now switch their type based on the chemical potential
* delta_mu (= energy difference from state 2 to state 1).
* - For that a new option was introduced: Average attempts per sweep to switch
* a type.
* - A lot of variables are now combined in either topo, sim or conf. The rule
* should be:
* > topo: Everything that belongs to the topology and that should not change
* during the game.
* > sim: Options and stuff, that has to do with the simulation. (Maybe the
* current target and so should be saved in there as well)
* > conf: What changes every step concerning the particles and the box or
* in other words: what has been read from conf.init
* - added a cluster determing routine => sim->clusterlist + sim->clusters
* - added macros for TRUE and FALSE
* - Added Option for the random seed
* - Basic Neighbour list implemented
* - New types: chiral CPSC (CHCPSC) and chiral PSC (CHPSC) and their interactions
*/
/*--------------------------------------------------------------------------------
sc31.c
Patchy Spherocylinder Version 3.1
Wang-Landau method of free energy calculations
It is set in options file as:
0 = none, 1 = z-distance of 1st particle from system CM, 2 = hole in xy-plane of SCA = membrane hole
It reads a file wl.dat and write wl-new at the end. There is value of alpha at the first line and then
there are three columns:
1- order parameter, 2- weights, 3- histogram
Interaction of spherocylinders is scaled based on the volume of attractive patch, the unit of one
is that two spheres of diameter sigma =1.0 are attracting each other by 1.0. Using this in interaction
among lipids and spherocylinders should be consistent.
Start up configuration "config.init" file has a box size at the first line now.
(I tested performance: compilation with optimization -O2 speed up 10%
rest has negligible effect including usage of static arrays instead of dynamic
most of the time consumes paire function.
6,519,638,177 :simulate
6,492,411,300 :energyone
5,705,685,593 :paire
542,561,887 :bondenergy
489,463,361 :eattractive11
450,443,970 :image
115,126,519 :erepulsive
*/
/* --------------------------------------------------------------------------------
sc3.c
Patchy Spherocylinder Version 3.0
Beads were added to the particle list.
bead(10) - repulsive
bead(11) - isotropocally attractive
-It is necessary to provide also a topology file (top.init)
-Particles are placed in chains according to the topology order including connections
-Particle arryas are allocated dynamicly on heap now
-dispacement and rotation are optimized for highest RMSD performace
-NPT ensemble with isotropic and anisotropic couplings, in pressure moves all
particles are rescaled with their center (chains are not rescaled with CM)
0 - anisotropic coupling, 1 - isotropic coupling, 2 - isotropic in xy z=const
bead types and their interactions
repulsive(10) purely repulsive shpere with WCA potential on closest distance
parameters: Patch repulsion sigma - defined where repulsion reaches zero
isotropic(11) - isotropic cos^2 potential is acting isotropicaly dependent only on
closest distance between obejcts.
Parameters: distance of attractivity (should be at least
sigma*2^(1/6)) defines how far is attraction constant -e. After this distance
follows switch length on which attraction goes to zero as cos^2.
Rest as repulsive model.
sc2.c
Patchy Spherocylinder Version 2.0
It is possible to make chains of spherocylinders that are connected through
hemispherical caps by harmonic bond. There are two parameters eq distance and
strength of harmonic spring, note that units are in 1 kT/e, the MC strength of bond
is changing with parameter temperature..
Patchy Spherocylinder Version 1.0
Includes diffferent types of possible interactions:
repulsive(0) - purely repulsive spherocylinder with WCA potential on closest distance.
parameters: Patch repulsion sigma - defined where repulsion reaches zero.
isotropic(1) - isotropic cos^2 potential is acting isotropicaly dependent only on
closest distance between spherocylinders.
Parameters: distance of patch, Interaction distance of patch (should be at least
sigma*2^(1/6)) defines how far is attraction constant -e. After this distance
follows Switch length on which attraction goes to zero as cos^2. Rest as repulsive model.
patchy(2) - Attractive potential in limited to an angular wedge on spherocylinder. Patch
goes all the way through, making also hemispherical caps on end attractive.
Parameters:Anglular part has a parameter defining it size "Angular size of patch
(degrees)" and witdh of switch function "Angular switch off of patch (degrees)" on which
attraction reaches zero - it is a linear function. Rest as isotropic model.
cylindrical(3) - Attractive potential in limited to an angular wedge on cylindrical part
of spherocylinders. The hemispherical caps on ends are repulsive. Rest as
patchy model.
Note particles are inside numbered from 0, there is prealocated size of particles MAXN
because in future there can be grand canonical ensamble and number of particles may vary
Follows mc of hard wall spherocylinder version 7 by Mark Miller -description below
sc.c
Version 1
Performs basic constant volume MC simulation of hard spherocylinders with rigid
cuboidal boundary conditions.
Run parameters are read in from the file "options". The template for this file
appears at the end of the code. The values must be inserted before the colons.
The initial configuration is read from the file "config.init". The first line contain size
of box The format for the file is nine columns: three for the positions and three for the
direction vector and three for direction of pathc. The direction vectors are normalised
after being read in. The configuration is checked for particle overlaps.
The unit of length is taken as the spherocylinder diameter. Hence the ratio
L/D is equal to the length of the cylinder.
Order parameters for nematic and smectic order are evaluated. The nematic order
parameter is related to the coefficient of the quadratic term in the Legendre
expansion of the orientational distribution function. Any smectic order is
assumed to be directed along the z axis, and is detected by the coefficients
of the Fourier expansion of the position distribution function.
MM 12.vii.01
..................................................................................
Version 2
The aspect ratio of the box may now fluctuate, keeping the volume constant.
Two new parameters are required in the options file to specify the average number
of attempted shape changes per sweep, and the initial maximum trial change in
a box dimension.
Shape changes are made by picking one of the three box lengths at random,
making a random change, evenly distributed between plus and minus a finite
interval, choosing a second direction and doing the same, then determining
the new length in the remaining direction from the condition of constant
volume.
The step-size equilibration period is now split into three parts: displacement,
rotation, and shape change.
The most important change to the code is that the particle coordinates are
now stored as fractions of the box dimensions. However, input and output
configurations are still communicated in units of the cylinder diameter, D=1.
Note that the displacement maximum step size is now specified as a fraction of
the box length, not as an absolute distance.
MM 18.vii.01
..................................................................................
Version 3
Constant pressure MC. The volume may fluctuate. Volume changes are attempted
by altering just one box length at a time, chosen at random. The running
average of the density is calculated and reported.
MM 24.vii.01
..................................................................................
Version 7
The composite translation-plus-rotation moves have been split into separate
move types, each of which is attempted with equal probability. This enables
acceptance ratios to be accumulated separately for these degrees of freedom, so
that maximum step sizes can be adjusted more sensibly.
A few other things have been tidied up, such as defining structures for the
book-keeping of statistics and acceptance ratios.
MM 9.v.02
--------------------------------------------------------------------------------*/
#ifndef _GNU_SOURCE
# define _GNU_SOURCE
#endif
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#ifdef MACOS
# include "getline.h"
#endif
#ifdef MPI
# include <mpi.h>
#endif
/* Macros for DEBUG messages.
 * Each macro is wrapped in do { ... } while (0) so that a call followed by a
 * semicolon acts as exactly one statement: the original multi-statement
 * expansion mis-parsed inside an un-braced if/else.  When the corresponding
 * DEBUGGING* symbol is not defined, the macro expands to a no-op statement. */
#ifdef DEBUGGING_INIT
#define DEBUG_INIT(...) do { fprintf(stderr, "DB in INIT: "); fprintf(stderr, __VA_ARGS__); fprintf(stderr, "\n"); fflush(stderr); } while (0)
#else
#define DEBUG_INIT(...) do { } while (0)
#endif
#ifdef DEBUGGING_SIM
#define DEBUG_SIM(...) do { fprintf(stderr, "DB in SIM: "); fprintf(stderr, __VA_ARGS__); fprintf(stderr, "\n"); fflush(stderr); } while (0)
#else
#define DEBUG_SIM(...) do { } while (0)
#endif
#ifdef DEBUGGING
#define DEBUG(...) do { fprintf(stderr, "DB: "); fprintf(stderr, __VA_ARGS__); fprintf(stderr, "\n"); fflush(stderr); } while (0)
#else
#define DEBUG(...) do { } while (0)
#endif
/* End of DEBUG macros */
/* Compile with neighbour pair list support? */
#define WITH_PAIRLIST
/* Boolean Macros */
#define BOOL int
#define TRUE 1
#define FALSE 0
/* End of Boolean Macros */
#define MAXF 20 /* Maximum number of Fourier terms */
#define MAXN 14000 /* Maximum number of particles */
#define MAXCHL 10 /* Maximum length of a chain */
#define ZEROTOL 1.0e-12 /* Dot products below ZEROTOL are deemed zero */
#define ZEROTOL2 1.0e-8 /* Numbers below ZEROTOL2 are deemed zero */
#define PI 3.141592653589793238462643383279 /* pi */
#define PIH 1.57079632679489661923132169163975 /* pi half */
/* Particle types: spherocylinders are SC+0 .. SC+9, spheres start at SP */
#define SC 10 /* spherocylinder base type */
#define SCN SC+0 /* spherocylinder, non-attractive */
#define SCA SC+1 /* spherocylinder, isotropically attractive */
#define PSC SC+2 /* spherocylinder with patchy attraction */
#define CPSC SC+3 /* spherocylinder with cylindrical patchy attraction */
#define CHPSC SC+4 /* chiral PSC */
#define CHCPSC SC+5 /* chiral CPSC */
#define TPSC SC+6 /* spherocylinder with two patches */
#define TCPSC SC+7 /* spherocylinder with two cylindrical patches */
#define TCHPSC SC+8 /* chiral two-patch PSC */
#define TCHCPSC SC+9 /* chiral two-patch CPSC */
#define SP 30 /* sphere base type - must be above all spherocylinder types */
#define SPN SP+0 /* sphere, non-attractive */
#define SPA SP+1 /* sphere, isotropically attractive */
#define MAXT 30 /* Maximum number of particle types */
#define MAXMT 100 /* Maximum number of molecular types */
/* Reading topology */
#define SMSTR 64 /* Small string length */
#define STRLEN 400 /* Maximum length of a line */
#define CONTINUE '\\' /* Symbol for line continuation */
#define COMMENTSIGN '#' /* Symbol for a comment */
#define OPENKEY '[' /* Starting sign of a keyword */
#define CLOSEKEY ']' /* Ending sign of a keyword */
#define SEPARATOR ':' /* Separator sign */
#define OPENMOL '{' /* Starting sign of a molecule block */
#define CLOSEMOL '}' /* Ending sign of a molecule block */
#define BOXSEP 'x' /* Separator for extraction of the box dimensions */
/* Wang-Landau method */
#define WL_GERR 0.0001 /* Maximum roughness allowed in the histogram */
#define WL_ALPHATOL 0.0000001 /* Convergence criterion for detailed balance */
#define WL_MINHIST 1000 /* Minimum histogram sampling before roughness is considered */
#define WL_ZERO 0.000000000000 /* Zero for the histogram with current weights */
#define WL_CONTACTS 36.0 /* Squared distance under which particles are in contact */
/* Math macros.  NOTE: arguments may be evaluated more than once - do not pass
 * expressions with side effects. */
#define DOT(a,b) ((a).x * (b).x + (a).y * (b).y + (a).z * (b).z) /* Dot product */
#define AVER(a,b) (((a) + (b)) * 0.5) /* Arithmetic average */
/* Round a double to the nearest integer value (halves away from zero).
 * BUGFIX: the original definition carried a trailing ';' and left the body
 * unparenthesized, so ROUND could not be used inside a larger expression;
 * it is now a proper parenthesized expression. */
#define ROUND(a) (((a) > 0.0) ? floor((a) + 0.5) : ceil((a) - 0.5))
#define PMONE(a) (1 - 2 * (a)) /* Takes 1 or 0, returns -1 or +1 */
/* Acceptance ratio: accepted / total attempts, or 0 if nothing was attempted */
#define RATIO(a) ( ((a).acc+(a).rej) > 0 ? 1.0*(a).acc/((a).acc+(a).rej) : 0.0 )
/* Fractional part of a, mapped into [0,1); the integral part is stored in b */
#define INBOX(a,b) ( (a) > 0 ? modf((a), &(b)) : modf((a), &(b)) + 1 )
/*................................................................
Structure definitions
*/
struct vector { /* 3D Cartesian vector */
    double x;
    double y;
    double z;
};
struct quat { /* Quaternion (w + x*i + y*j + z*k), used for rotations */
    double w;
    double x;
    double y;
    double z;
};
struct particles { /* One particle of the simulation */
    struct vector pos;           /* Position vector */
    struct vector dir;           /* Unit direction vector of the axis */
    struct vector patchdir[2];   /* Vectors defining the orientation of the patches */
    struct vector patchsides[4]; /* Vectors defining the sides of the patches */
    struct vector chdir[2];      /* Directions for chirality - kept in memory to increase speed */
    long chaint;                 /* Chain type */
    long chainn;                 /* Chain number */
    int type;                    /* Type of the particle (SCN, SCA, PSC, ... see the defines above) */
    int switchtype;              /* Particle type to switch to (type-switch moves) */
    double delta_mu;             /* Chemical potential for the switch */
    int switched;                /* 0: in the initial state; 1: in the switched state */
};
struct ia_param{ /* Contains properties and parameters of particle types */
    char name[SMSTR];       /* The name of the particle type */
    char other_name[SMSTR]; /* The name of the other particle type of the pair — TODO confirm against init code */
    int geotype[2];         /* The geometrical type: spherocylinder (0-repulsive, 1-isotropic, 2-patchy, 3-cylindrical)
                               or sphere (0-repulsive, 1-isotropic) */
    double sigma;           /* WCA repulsion diameter */
    double epsilon;         /* Repulsion strength */
    double pdis;            /* Interaction distance of the patch */
    double pswitch;         /* Switch-off distance of the patch */
    double pangl[4];        /* Angular size of the patch as specified in the input */
    double panglsw[4];      /* Angular size of the patch switch as specified in the input */
    double pcangl[4];       /* Cosine of the half-size angle - rotation from the patch direction to the side */
    double pcanglsw[4];     /* Cosine of the half-size angle plus switch - rotation from the patch direction to the side */
    double rcut;            /* Cutoff for attraction */
    double rcutwca;         /* Cutoff for repulsion */
    double pcoshalfi[4];    /* Cosine of the half angle going to the side of the interaction */
    double psinhalfi[4];    /* Sine of the half angle going to the side of the interaction - useful for quaternion rotation */
    double csecpatchrot[2]; /* Cosine of the rotation of the second patches in the two-patch (2psc) models */
    double ssecpatchrot[2]; /* Sine of the rotation of the second patches in the two-patch (2psc) models */
    double volume;          /* Volume of the particle, for geometrical center calculations */
    double pvolscale;       /* Scale of the patch volume size */
    double len[2];          /* Length of the PSC */
    double half_len[2];     /* Half length of the PSC */
    double chiral_cos[2];   /* Contains the cosine for the chiral rotation of the patch */
    double chiral_sin[2];   /* Contains the sine for the chiral rotation of the patch */
};
struct interacts { /* Parameters passed to the interaction-calculation functions */
    double dist;              /* Closest distance */
    struct vector distvec;    /* Vector of the closest distance */
    struct particles * part1; /* Particle 1 */
    struct particles * part2; /* Particle 2 */
    struct vector box;        /* Box size */
    struct ia_param * param;  /* Interaction parameters */
    struct vector r_cm;       /* Vector connecting the centers of mass */
    double distcm;            /* Distance between the centers of mass */
    double dotrcm;            /* Squared size of r_cm */
    double contt;             /* Closest point on the spherocylinder to the sphere */
};
struct chainparams { /* Parameters for the inner interactions in chains */
    double bond1eq;  /* Equilibrium distance of the harmonic bond between nearest neighbours */
    double bond1c;   /* Spring constant of the harmonic bond between nearest neighbours */
    double bond2eq;  /* Equilibrium distance of the harmonic bond between second-nearest neighbours */
    double bond2c;   /* Spring constant of the harmonic bond between second-nearest neighbours */
    double bonddeq;  /* Equilibrium distance of the directional harmonic bond between nearest neighbours */
    double bonddc;   /* Spring constant of the directional harmonic bond between nearest neighbours */
    double angle1eq; /* Equilibrium angle between two spherocylinders - nearest neighbours */
    double angle1c;  /* Spring constant of the angle between two spherocylinders - nearest neighbours */
    double angle2eq; /* Equilibrium angle between two spherocylinder patches - nearest neighbours */
    double angle2c;  /* Spring constant of the angle between two spherocylinder patches - nearest neighbours */
};
struct molecule { /* Used by the topology I/O only */
    char * name;       /* The name of the molecule */
    long * type;       /* The types of the particles */
    long * switchtype; /* The switch types of the particles */
    double * delta_mu; /* The chemical potentials for the switch */
};
struct disp { /* Step size and acceptance-ratio statistics for one move type */
    double mx;      /* Maximum value of displacement, cos(angle), etc. */
    double angle;   /* Maximum angle, since cos(angle) is saved in .mx */
    long acc;       /* Number of accepted steps */
    long rej;       /* Number of rejected steps */
    double oldrmsd; /* Averaged mx value in the previous equilibration round */
    double oldmx;   /* Change of mx in the last equilibration step */
};
struct stat { /* Statistics counters: running accumulators (sum, sum2, samples)
                 and the derived mean and rms values */
    double sum;
    double sum2;
    long samples;
    double mean;
    double rms;
};
struct meshs { /* Mesh for the hole order parameter */
    int dim[2]; /* Mesh dimensions */
    int *data;  /* Mesh data */
    int *tmp;   /* Temporary list for the hole search */
};
struct wls { /* State of the Wang-Landau (wl) method */
    double *weights;       /* Array of weights for the wl method */
    long *hist;            /* Array of the histogram for the wl method */
    long length[2];        /* Lengths of the above arrays (one per wl dimension) */
    double dorder[2];      /* Increments of the order parameter */
    double minorder[2];    /* Minimum of the order parameter */
    double alpha;          /* Current modification factor of the weights */
    long currorder[2];     /* Value of the current order parameter */
    long neworder[2];      /* wl order parameter in the new (trial) step */
    long max;              /* wl maximum of the histogram */
    long min;              /* wl minimum of the histogram */
    double wmin;           /* Minimum of the weights */
    int wlmdim;            /* Dimensionality of the Wang-Landau method */
    int wlmtype;           /* Atom type for the Wang-Landau method (wl) */
    double wl_meshsize;    /* Size of a mesh bin for the hole order parameter */
    struct meshs mesh;     /* Mesh for the hole order parameter */
    struct meshs origmesh; /* Stored mesh for rejected moves */
    long * radiushole;     /* Array for the hole radius around the origin */
    long * radiusholeold;  /* Array for the hole radius around the origin - big moves */
    long radiusholemax;    /* Size of the array for the hole radius */
    long partincontact;    /* Number of particles in contact */
    long partincontactold; /* Number of particles in contact - old value for a move */
};
struct pairs{ /* Holds the particle numbers of the pairs and the number of pairs */
    long num_pairs; /* The number of pairs */
    long * pairs;   /* The particle numbers of the pairs */
};
struct pairlist{ /* NOTE: this is done too complicated: just sim->pairs[npart] should be enough */
    struct pairs * list; /* Contains the pair list of all particles */
};
struct cluster{ /* Contains all the particles of one cluster */
    long npart;       /* Number of particles in the cluster */
    long * particles; /* Indices of the particles in the cluster */
};
struct exters{ /* External potential (wall) */
    BOOL exist;        /* Existence of the external potential */
    double thickness;  /* External wall thickness */
    double epsilon;    /* Depth of the attraction */
    double attraction; /* Distance of the attraction */
    double sqmaxcut;   /* Squared distance beyond which nothing can interact */
    struct ia_param interactions[MAXT]; /* Interaction parameters with particle types, generated from the above parameters */
};
struct topo{ /* It would be nice if this struct would contain all the topology data in the end */
    long * switchlist;  /* List containing the numbers of all the particles with switch types */
    long n_switch_part; /* Number of particles with a switch type */
    double sqmaxcut;    /* Square of the distance over which even spherocylinders cannot interact (distance between CMs) */
    double maxcut;      /* Distance over which even spherocylinders cannot interact (distance between CMs) */
    long conlist[MAXN][4]; /* Connectivity list: connections to tail, head, and second neighbours so far */
    long chainlist[MAXN][MAXCHL]; /* List of chains */
    long chainnum;      /* Number of chains */
    struct chainparams chainparam[MAXMT]; /* Parameters for chains */
    struct ia_param ia_params[MAXT][MAXT]; /* Parametrization of particles for all interactions */
    long npart;         /* Number of particles */
    struct exters exter; /* External potential - wall */
};
struct sim{ /* Should contain mostly all the simulation options and variables that can change in every step */
    double press;        /* Pressure */
    double paralpress;   /* Parallel pressure for replica exchange */
    double dpress;       /* Pressure change for replica exchange */
    double shave;        /* Average number of volume changes to attempt per sweep */
    double shprob;       /* Probability of attempting a volume change */
    double chainprob;    /* Average number of chain-move attempts per sweep */
    double switchprob;   /* Average number of type-switch attempts per sweep */
    int pairlist_update; /* Number of sweeps between updates of the pair list */
    double temper;       /* Temperature */
    double paraltemper;  /* Temperature for parallel tempering */
    double dtemp;        /* Temperature step */
    int ptype;           /* Type of pressure coupling */
    long adjust;         /* Number of sweeps between step-size adjustments */
    long movie;          /* Number of sweeps between movie frames */
    long nequil;         /* Number of equilibration sweeps */
    long nsweeps;        /* Number of production sweeps */
    long paramfrq;       /* Number of sweeps between order-parameter samples */
    long report;         /* Number of sweeps between statistics reports */
    // long terms;       /* Number of Fourier terms as smectic order parameters */
    long nrepchange;     /* Number of sweeps between replica exchanges */
    int wlm[2];          /* Wang-Landau method (wl) switches */
    struct disp edge;    /* Maximum box-length change and statistics */
    struct disp rot[MAXT];   /* Maximum rotation and statistics */
    struct disp trans[MAXT]; /* Maximum translation and statistics */
    struct disp chainm[MAXMT]; /* Maximum translation for chains and statistics */
    struct disp chainr[MAXMT]; /* Maximum rotation for chains and statistics */
    struct disp mpiexch; /* MPI exchange statistics */
    struct pairs * pairlist; /* The pair list */
    long write_cluster;  /* Number of sweeps between writing out cluster info */
    long * clusterlist;  /* clusterlist[i] = cluster index of particle i */
    struct cluster * clusters; /* Information about the single clusters */
    double *clustersenergy; /* List of the energies of the clusters */
    long num_cluster;    /* Number of single clusters */
    long * clusterstat;  /* Statistics about the sizes of the clusters */
    long max_clust;      /* Maximal cluster size */
    struct wls wl;       /* Wang-Landau data */
    int mpirank;         /* MPI rank of this process */
    int mpinprocs;       /* MPI number of processes */
};
typedef enum { /* Holds the type of a variable in struct Option */
    Int,
    Int2,
    Long,
    Double
} Type;
typedef struct { /* One entry for reading in the options file */
    char *id;  /* The name of the value in the options file */
    Type type; /* The type (Int, Int2, Long or Double) */
    BOOL set;  /* Whether the variable has been set */
    void *var; /* Pointer to the variable to fill in */
} Option;
struct conf{ /* Configuration of the system */
    struct particles * particle; /* All particles */
    struct vector box;           /* Box size */
    double sysvolume;            /* Something like the total mass — TODO confirm exact meaning against init code */
    struct vector syscm;         /* System center of mass */
};
struct filenames { /* Names of all input and output files; defaults are set in main().
                      NOTE(review): 30-byte buffers are filled with sprintf("%d...", rank)
                      under MPI - verify they cannot overflow for very large ranks. */
    /* input files */
    char configurationinfile[30];  /* Initial configuration (config.init) */
    char topologyfile[30];         /* Topology (top.init) */
    char optionsfile[30];          /* Run options (options) */
    char wlinfile[30];             /* Wang-Landau input (wl.dat) */
    /* output files */
    char configurationoutfile[30]; /* Final configuration (config.last) */
    char moviefile[30];            /* Trajectory movie (movie) */
    char wloutfile[30];            /* Wang-Landau output (wl-new.dat) */
    char statfile[30];             /* Statistics (stat.dat) */
    char clusterfile[30];          /* Cluster info (cluster.dat) */
    char clusterstatfile[30];      /* Cluster statistics (cluster_stat.dat) */
    char energyfile[30];           /* Energies (energy.dat) */
};
struct mpiexchangedata{ /* Extra type for MPI communication (replica exchange) */
    struct vector box;   /* Box of the configuration */
    double energy;       /* Energy of the configuration */
    double volume;       /* Volume of the configuration */
    int accepted;        /* Bool: whether the exchange was accepted */
    struct vector syscm; /* System CM of the configuration */
    long radiusholemax;  /* Size of the array for WL */
    long wl_order[2];    /* Wang-Landau order parameters */
};
#ifdef MPI
/* Derived MPI datatypes for exchanging vectors, particles, and exchange data */
MPI_Datatype MPI_vector, MPI_Particle, MPI_exchange;
#endif
const struct stat nullstat = {0.0, 0.0, 0, 0.0, 0.0}; /* All-zero statistics, used to reset counters */
long seed = 6; /* Seed for the random number generator (an option exists to set it; see the v3.2 changelog above) */
/*..............................................................................*/
int main(int argc, char **argv)
{
    /* Program entry point: reads the options, topology, and starting
       configuration, equilibrates the maximum step sizes, runs the
       production Monte Carlo sweeps, and writes the final configuration.
       Under MPI, every rank works on its own set of files prefixed with
       the rank number. */
    DEBUG("start");
    FILE *outfile,*mov;           /* Handles for writing configurations / emptying the movie file */
    double (* intfce[MAXT][MAXT])(struct interacts *); /* Array of interaction functions, indexed by the two particle types */
    struct topo topo;             /* Will maybe contain all the topo stuff in future */
    struct sim sim;               /* Should contain the simulation options */
    struct conf conf;             /* Should contain fast changing particle and box(?) information */
    struct filenames files;       /* All input/output file names */
    /* Prototypes of functions defined later in this file */
    int memoryalloc(struct conf * conf);
    int memorydealloc(struct conf * conf, struct topo * topo, struct sim * sim);
    void read_options(struct sim* sim, char filename[30]);
    void init_top(struct topo *, struct conf * conf, struct sim * sim, char filename[30]);
    void init_config(struct topo * topo, struct conf * conf, struct sim * sim, char filename[30]);
    void init_intfce(double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo);
    void draw(FILE *, struct conf * conf, struct topo * topo);
    void printeqstat(struct disp *, double, int);
    void simulate(long nsweeps, long adjust, long paramfrq, long report,
                  double (* intfce[MAXT][MAXT])(struct interacts *),
                  struct topo * topo, struct sim * sim, struct conf * conf, struct filenames *files);
    void init_pairlist(struct topo * topo, struct sim * sim);
    void gen_pairlist(struct topo * topo, struct sim * sim, struct conf * conf);
    void print_pairlist(FILE * stream, struct sim * sim, struct topo * topo);
    int gen_clusterlist(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *) );
    int print_clusterlist(FILE * stream, BOOL decor, struct topo * topo, struct sim * sim, struct conf * conf);
    int print_clusters(FILE * stream, BOOL decor, struct sim * sim);
    int print_clusterstat(FILE * stream, BOOL decor, struct sim * sim);
    int sort_clusterlist(struct topo * topo, struct sim * sim);

    printf ("\nPatchy Spherocylinders version 3.5 ");
    /* Default (non-MPI) file names */
    sprintf(files.configurationinfile, "config.init");
    sprintf(files.configurationoutfile, "config.last");
    sprintf(files.optionsfile, "options");
    sprintf(files.topologyfile, "top.init");
    sprintf(files.moviefile, "movie");
    sprintf(files.wlinfile, "wl.dat");
    sprintf(files.wloutfile, "wl-new.dat");
    sprintf(files.statfile, "stat.dat");
    sprintf(files.clusterfile, "cluster.dat");
    sprintf(files.clusterstatfile, "cluster_stat.dat");
    sprintf(files.energyfile, "energy.dat");
#ifdef MPI
    FILE *infile;
    printf(" MPI version");
    MPI_Init(&argc,&argv);
    MPI_Comm_size(MPI_COMM_WORLD, &(sim.mpinprocs) );
    MPI_Comm_rank(MPI_COMM_WORLD, &(sim.mpirank) );
    /* Prefix all output file names with the MPI rank */
    sprintf(files.configurationoutfile, "%dconfig.last", sim.mpirank);
    sprintf(files.moviefile, "%dmovie", sim.mpirank);
    sprintf(files.wloutfile, "%dwl-new.dat", sim.mpirank);
    sprintf(files.clusterfile, "%dcluster.dat", sim.mpirank);
    sprintf(files.clusterstatfile, "%dcluster_stat.dat", sim.mpirank);
    sprintf(files.energyfile, "%denergy.dat", sim.mpirank);
    sprintf(files.statfile, "%dstat.dat", sim.mpirank);
    /* Test if there is a rank-specific input configuration for the MPI run;
       fall back to the shared config.init otherwise */
    sprintf(files.configurationinfile, "%dconfig.init", sim.mpirank);
    infile = fopen(files.configurationinfile, "r");
    if (infile != NULL)
        fclose (infile);
    else sprintf(files.configurationinfile, "config.init");
    /* Same for a rank-specific Wang-Landau input file */
    sprintf(files.wlinfile, "%dwl.dat", sim.mpirank);
    infile = fopen(files.wlinfile, "r");
    if (infile != NULL)
        fclose (infile);
    else sprintf(files.wlinfile, "wl.dat");
#endif
    printf ("\n-------------------------------------\n");
    printf ("Reading options...\n");
    read_options(&sim,files.optionsfile);
    init_top(&topo, &conf, &sim,files.topologyfile);
    if (topo.chainnum ==0) {
        /* No chains: make the probability of moving them 0 */
        if (sim.chainprob > 0)
            printf ("No chains... chain move probability set to 0.\n");
        sim.chainprob = 0;
    }
    printf ("\nReading configuration...\n");
    init_config(&topo, &conf, &sim, files.configurationinfile);
    printf ("Equilibration of maximum step sizes: %ld sweeps\n", sim.nequil/2);
    fflush (stdout);
    if ( sim.wlm[0] > 0 ) {
        /* Wang-Landau requested: the input weight file must exist */
        outfile = fopen(files.wlinfile, "r");
        if (outfile == NULL) {
            printf ("ERROR: Cannot open file for Wang-Landau method (%s).\n",files.wlinfile);
            memorydealloc(&conf, &topo, &sim);
            exit(1);
        }
        fclose (outfile);
    }
    /* Empty the movie file.
       BUGFIX: use files.moviefile instead of the hard-coded "movie"; under
       MPI the movie file is "%dmovie", so the original truncated the wrong
       file (and left stale per-rank movies in place). */
    mov = fopen(files.moviefile, "w");
    if (mov != NULL)
        fclose (mov);
    else
        printf ("ERROR: Cannot open movie file (%s).\n", files.moviefile);
    printf ("\nInitializing energy functions...\n");
    init_intfce(intfce, &topo);
    if (sim.pairlist_update) {
        init_pairlist(&topo, &sim);
    }
    if (sim.nequil) {
        printf("\nStart equilibration...\n");
        /* First half with step-size adjustment, second half with fixed steps */
        simulate(sim.nequil/2, sim.adjust, 0, 0, intfce, &topo, &sim, &conf,&files);
        simulate(sim.nequil/2, 0, 0, 0, intfce, &topo, &sim, &conf,&files);
        printf (" Equilibrated maximum displacement / acceptance ratio: \n");
        printeqstat(sim.trans,2.0,MAXT);
        printf (" Equilibrated maximum rotation / acceptance ratio: \n");
        printeqstat(sim.rot,1.0,MAXT);
        printf (" Equilibrated maximum box length change / acceptance ratio: \n");
        printf (" %.6le / %.6le\n", sim.edge.mx/2.0,RATIO(sim.edge));
        printf (" Equilibrated maximum displacement of chain / acceptance ratio: \n");
        printeqstat(sim.chainm,2.0,MAXMT);
        printf (" Equilibrated maximum rotation of chain / acceptance ratio: \n");
        printeqstat(sim.chainr,1.0,MAXMT);
        printf ("\n");
        printf ("Further equilibration of configuration: %ld sweeps\n", sim.nequil/2);
        fflush (stdout);
        /* Snapshot of the equilibrated configuration */
        outfile = fopen("config.eq", "w");
        if (outfile != NULL) {
            fprintf (outfile, "%15.8le %15.8le %15.8le\n", conf.box.x, conf.box.y, conf.box.z);
            draw (outfile, &conf, &topo);
            fclose (outfile);
            printf (" Equilibrated configuration written to config.eq\n");
        } else {
            printf ("ERROR: Cannot open file config.eq for writing.\n");
        }
        printf (" Box dimensions: %.10lf, %.10lf, %.10lf\n\n", conf.box.x, conf.box.y, conf.box.z);
    }
    printf ("Production run: %ld sweeps\n\n", sim.nsweeps);
    fflush (stdout);
    simulate(sim.nsweeps, 0, sim.paramfrq, sim.report, intfce, &topo, &sim, &conf,&files);
#ifdef MPI
    printf (" MPI replica changeT / changeP / acceptance ratio: \t %.6lf / %.6lf / %.6lf\n\n", sim.mpiexch.mx,sim.mpiexch.angle,RATIO(sim.mpiexch));
#endif
    /* Write the final configuration */
    outfile = fopen(files.configurationoutfile, "w");
    if (outfile != NULL) {
        fprintf (outfile, "%15.8le %15.8le %15.8le\n", conf.box.x, conf.box.y, conf.box.z);
        draw (outfile, &conf, &topo);
        fclose (outfile);
    } else {
        printf ("ERROR: Cannot open file for final configuration (%s).\n", files.configurationoutfile);
    }
    // For testing the pairlist
    //gen_pairlist(&topo, &sim, &conf);
    //FILE * fpairlist;
    //fpairlist = fopen("pairlist.dat", "w");
    //print_pairlist(fpairlist, &sim, &topo);
    //fclose(fpairlist);
    //printf("sqmaxcut = %lf\n", topo.sqmaxcut);
    //// For testing the cluster algorithm
    //gen_clusterlist(&topo, &sim, &conf);
    //print_clusterlist(stdout, TRUE, &topo, &sim, &conf);
    //sort_clusterlist(&topo, &sim);
    //print_clusters(stdout, TRUE, &sim);
    //print_clusterstat(stdout, TRUE, &sim);
    if (memorydealloc(&conf, &topo, &sim))
        exit(1);
#ifdef MPI
    MPI_Finalize();
#endif
    printf ("\nDone\n\n");
    return 0;
}
/*..............................................................................*/
/*.........................SIMULATION RUN.......................................*/
/*..............................................................................*/
void simulate(long nsweeps, long adjust, long paramfrq, long report,
double (* intfce[MAXT][MAXT])(struct interacts *),
struct topo * topo, struct sim * sim, struct conf * conf, struct filenames *files)
{
long i,j,wli;
long next_adjust; /* Next sweep number for step size adjustment */
long next_calc; /* Next sweep number for order parameter calculation */
long next_dump; /* Next sweep number for reporting statistics */
long next_frame; /* Next sweep number for dumping a movie frame */
long step; /* Step number within a given sweep */
long sweep; /* Current sweep number */
//struct stat nem; /* Nematic order parameter */
//struct stat vol; /* Volume statistics */
//struct stat shapex, shapey, shapez; /* Box shape statistics */
//struct stat smec[MAXF]; /* Smectic order parameters (Fourier coeeficients) */
FILE *mf; /* Handle for movie file */
FILE *cl_stat, *cl, *cl_list; /* Handle for cluster statistics */
FILE *ef, *statf; /* Handle for energy file and statistical file*/
double edriftstart; /* Energy drift calculation - start */
double edriftchanges; /* Energy drift calculation - accumulate all changes through moves */
double edriftend; /* Energy drift calculation - end */
double pvdriftstart; /* PV drift calculation - start */
double pvdriftend; /* PV drift calculation - end */
double volume; /* volume of box*/
double moveprobab; /* random number selecting the move*/
/* function declarations */
//double nematic(long, struct particles *);
double ran2(long *);
//double smectic(long, struct particles *, long);
double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *),
int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn);
void accumulate(struct stat *, double);
void draw(FILE *, struct conf * conf, struct topo * topo);
void optimizestep(struct disp *, double, double);
void optimizerot(struct disp *, double, double);
void partvecinit(struct topo * topo, struct sim * sim, struct conf * conf );
int wlinit(struct wls *, char filename[30]);
int wlwrite(struct wls *, char filename[30]);
int wlend(struct wls *);
int mesh_init(struct meshs *, double, long, struct conf * conf, struct sim * sim);
int mesh_end(struct meshs *);
long z_order(struct wls *, struct conf * conf,int);
long twopartdist(struct wls *, struct conf * conf,int);
void mesh_print (struct meshs *);
void masscenter(long, struct ia_param [MAXT][MAXT], struct conf * conf);
void gen_pairlist(struct topo * topo, struct sim * sim, struct conf * conf);
int write_cluster(FILE * cl_stat, FILE * cl, FILE * cl_list,
BOOL decor, long sweep, struct sim * sim, struct topo * topo,
struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *));
double particlemove(struct topo * topo, struct sim * sim, struct conf * conf,
double (* intfce[MAXT][MAXT])(struct interacts *));
double chainmove(struct topo * topo, struct sim * sim, struct conf * conf,
double (* intfce[MAXT][MAXT])(struct interacts *));
double switchtypemove(struct topo * topo, struct sim * sim, struct conf * conf,
double (* intfce[MAXT][MAXT])(struct interacts *));
double pressuremove(struct topo * topo, struct sim * sim, struct conf * conf,
double (* intfce[MAXT][MAXT])(struct interacts *));
double replicaexchangemove(struct topo * topo, struct sim * sim, struct conf * conf,
double (* intfce[MAXT][MAXT])(struct interacts *), long sweep);
long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *);
long radiushole_position(double, struct sim *,int);
long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli);
double alignment_order(struct conf * conf, struct topo * topo);
int memorydealloc(struct conf * conf, struct topo * topo, struct sim * sim);
double paire(long, long, double (* intfce[MAXT][MAXT])(struct interacts *),
struct topo * topo, struct conf * conf);
/* Opening files for cluster statistics */
cl_stat = cl = cl_list = ef = statf = NULL;
if(sim->write_cluster){
// Empty file
cl_stat = fopen(files->clusterstatfile, "w");
fclose(cl_stat);
cl_stat = fopen(files->clusterstatfile, "a");
// Empty file
cl = fopen(files->clusterfile, "w");
fclose(cl);
cl = fopen(files->clusterfile, "a");
}
/* write energy*/
if (report <= nsweeps){
// Empty file
ef = fopen(files->energyfile, "w");
fclose(ef);
ef = fopen(files->energyfile, "a");
fprintf (ef, "# sweep energy\n");
statf = fopen(files->statfile, "w");
fclose(statf);
statf = fopen(files->statfile, "a");
fprintf (statf, "# sweep volume\n");
}
/*=== Initialise counters etc. ===*/
// double pvolume; /* Volume of all particles*/
/* pvolume =0.0;
for (i=0;i < topo->npart;i++) {
if (conf->particle[i].type>=0 )
pvolume += topo->ia_params[conf->particle[i].type][conf->particle[i].type].volume;
}*/
sim->shprob = sim->shave/(double)topo->npart;
for (i=0;i<MAXT;i++){
sim->rot[i].acc = 0;
sim->rot[i].rej = 0;
sim->rot[i].oldrmsd = 0;
sim->rot[i].oldmx = 0;
sim->trans[i].acc = 0;
sim->trans[i].rej = 0;
sim->trans[i].oldrmsd = 0;
sim->trans[i].oldmx = 0;
}
for (i=0;i<MAXMT;i++){
sim->chainm[i].acc = 0;
sim->chainm[i].rej = 0;
sim->chainm[i].oldrmsd = 0;
sim->chainm[i].oldmx = 0;
sim->chainr[i].acc = 0;
sim->chainr[i].rej = 0;
sim->chainr[i].oldrmsd = 0;
sim->chainr[i].oldmx = 0;
}
//(*edge).acc = (*edge).rej = (*edge).oldrmsd = (*edge).oldmx = 0;
sim->edge.acc = sim->edge.rej = sim->edge.oldrmsd = sim->edge.oldmx = 0;
sim->mpiexch.acc = sim->mpiexch.rej = sim->mpiexch.oldrmsd = sim->mpiexch.oldmx = 0;
/*Initialize some values at begining*/
partvecinit(topo,sim,conf);
next_adjust = adjust;
next_calc = paramfrq;
next_dump = report;
next_frame = sim->movie;
//nem = vol = shapex = shapey = shapez = nullstat;
//for (i=0; i<MAXF; i++) smec[i] = nullstat;
if (sim->movie > 0) {
mf = fopen(files->moviefile, "a");
} else {
mf = NULL;
}
sim->wl.wl_meshsize = 0;
sim->wl.radiushole = NULL;
sim->wl.radiusholeold = NULL;
sim->wl.radiusholemax = 0;
sim->wl.partincontactold = 0;
sim->wl.partincontact = 0;
sim->wl.wlmdim = 0;
sim->wl.wlmdim = 0;
sim->wl.length[0]=0;
sim->wl.length[1]=0;
sim->wl.currorder[0]=0;
sim->wl.currorder[1]=0;
sim->wl.neworder[0]=0;
sim->wl.neworder[1]=0;
sim->wl.weights = NULL;
sim->wl.hist = NULL;
masscenter(topo->npart,topo->ia_params, conf);
/* Initialization of wang-landaou method*/
if ( sim->wlm[0] >0 ) {
if (wlinit(&sim->wl,files->wlinfile) != 0)
return;
sim->wl.wlmdim = 1 ;
if ( sim->wlm[1] > 0 )
sim->wl.wlmdim = 2 ;
for (wli=0;wli<sim->wl.wlmdim;wli++) {
switch (sim->wlm[wli]) {
case 1:
masscenter(topo->npart,topo->ia_params, conf);
sim->wl.currorder[wli] = z_order(&sim->wl,conf,wli);
break;
case 2:
sim->wl.wl_meshsize = (topo->ia_params[sim->wl.wlmtype][sim->wl.wlmtype].sigma) / 3.0; // TODO
sim->wl.mesh.data = NULL;
sim->wl.mesh.tmp = NULL;
sim->wl.origmesh.data = NULL;
sim->wl.origmesh.tmp = NULL;
sim->wl.currorder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize, topo->npart, conf, sim) - sim->wl.minorder[wli]);
break;
case 3:
sim->wl.currorder[wli] = (long) floor( (conf->particle[0].dir.z - sim->wl.minorder[wli])/ sim->wl.dorder[wli] );
break;
case 4:
sim->wl.currorder[wli] = twopartdist(&sim->wl,conf,wli);
break;
case 5:
masscenter(topo->npart,topo->ia_params, conf);
sim->wl.radiusholemax = 0;
sim->wl.radiushole = NULL;
sim->wl.radiusholeold = NULL;
sim->wl.currorder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm));
break;
case 6:
sim->wl.radiusholemax = 0;
sim->wl.radiushole = NULL;
sim->wl.radiusholeold = NULL;
sim->wl.currorder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
break;
case 7:
sim->wl.currorder[wli] = contparticles_all(topo,conf,sim,wli);
break;
default:
sim->wl.currorder[wli] = 0;
break;
}
if ( (sim->wl.currorder[wli] >= sim->wl.length[wli] ) || (sim->wl.currorder[wli] < 0) ) {
printf("Error: starting Wang-Landau method with order parameter %f out of range(%f - %f)\n\n", sim->wl.dorder[wli]*sim->wl.currorder[wli] + \
sim->wl.minorder[wli], sim->wl.minorder[wli], sim->wl.minorder[wli]+sim->wl.dorder[wli]*sim->wl.length[wli] );
wlend(&sim->wl);
if (memorydealloc(conf, topo, sim))
exit(1);
exit(2);
return;
}
}
if (sim->wl.alpha < WL_ALPHATOL/100) sim->wl.alpha = WL_ZERO;
fflush (stdout);
}
double e,e2;
for(int i=0; i< 1/*topo->npart-1*/; ++i) {
for(int j=i+1; j< topo->npart; ++j) {
e = paire(i, j, intfce, topo, conf);
e2 = paire(j, i, intfce, topo, conf);
if(e < 1000.0)
printf("%.5lf %.5lf\n", e, e2);
else printf("%lf\n", 1000.0);
}
printf("\n");
}
exit(0);
/*do moves - START OF REAL MC*/
if(sim->pairlist_update){
gen_pairlist(topo, sim, conf); // Does that solve the problem?
}
/*do energy drift check - start calculation*/
volume = conf->box.x * conf->box.y * conf->box.z;
edriftstart = calc_energy(0, intfce, 0, topo, conf, sim,0);
pvdriftstart = sim->press * volume - (double)topo->npart * log(volume) / sim->temper;
//printf("starting energy: %.15f \n",calc_energy(0, intfce, 0, topo, conf, sim,0));
//printf("press: %.15f\n",sim->press * volume - (double)topo->npart * log(volume) / sim->temper);
edriftchanges = 0.0;
for (sweep=1; sweep <= nsweeps; sweep++) {
// Try replica exchange
if((sim->nrepchange) && (sweep % sim->nrepchange == 0)){
edriftchanges += replicaexchangemove(topo,sim,conf,intfce,sweep);
}
// Generate the pairlist
if((sim->pairlist_update) && (sweep % sim->pairlist_update == 0)){
gen_pairlist(topo, sim, conf);
}
//normal moves
for (step=1; step <= topo->npart; step++) {
moveprobab = ran2(&seed);
if ( moveprobab < sim->shprob) {
/* pressure moves*/
edriftchanges += pressuremove(topo,sim,conf,intfce);
} else {
if (moveprobab < sim->shprob + sim->chainprob) {
/* single particle moves*/
edriftchanges += chainmove(topo,sim,conf,intfce);
}
else if (moveprobab < sim->shprob + sim->chainprob + sim->switchprob){
/*=== This is an attempt to switch a type ===*/
edriftchanges += switchtypemove(topo,sim,conf,intfce);
} else {
/* single particle moves*/
edriftchanges += particlemove(topo,sim,conf,intfce);
} /* end of else next to chain moves */
} /* end of else next to volume moves */
}
/**** End of step loop for this sweep ****/
/*=== Start of end-of-sweep housekeeping ===*/
/* Adjustment of maximum step sizes during equilibration */
if (sweep == next_adjust) {
for (i = 0; i < MAXT ;i++) {
if ((sim->trans[i].acc > 0)||(sim->trans[i].rej >0))
optimizestep (sim->trans + i, 1.5, 0.0);
if ((sim->rot[i].acc > 0)||(sim->rot[i].rej >0))
optimizerot (sim->rot + i, 5.0, 0.01);
}
for (i = 0; i < MAXMT; i++) {
if ((sim->chainm[i].acc > 0)||(sim->chainm[i].rej > 0))
optimizestep (sim->chainm + i, 1.5, 0.0);
if ((sim->chainr[i].acc > 0)||(sim->chainr[i].rej > 0))
optimizerot (sim->chainr + i, 5.0, 0.01);
}
optimizestep (&(sim->edge), 1.0, 0.0);
next_adjust += adjust;
}
if ( (sim->wlm[0] > 0) && (sim->wl.alpha > WL_ZERO) && !(sweep % 1000) ) {
sim->wl.min = sim->wl.hist[0];
sim->wl.max = sim->wl.hist[0];
for (i=0;i < sim->wl.length[0];i++) {
j=0;
if ( sim->wl.hist[i+j*sim->wl.length[0]] > sim->wl.max ) sim->wl.max = sim->wl.hist[i+j*sim->wl.length[0]];
if ( sim->wl.hist[i+j*sim->wl.length[0]] < sim->wl.min ) sim->wl.min = sim->wl.hist[i+j*sim->wl.length[0]];
for (j=1;j < sim->wl.length[1];j++) {
if ( sim->wl.hist[i+j*sim->wl.length[0]] > sim->wl.max ) sim->wl.max = sim->wl.hist[i+j*sim->wl.length[0]];
if ( sim->wl.hist[i+j*sim->wl.length[0]] < sim->wl.min ) sim->wl.min = sim->wl.hist[i+j*sim->wl.length[0]];
}
}
if ( sim->wl.min > WL_MINHIST ) {
if ( sim->temper * log(sim->wl.max/sim->wl.min) < WL_GERR ) {
/*DEBUG
for (i=1;i<wl.length;i++) {
printf (" %15.8le %15ld %15.8f\n",sim->wl.weights[i],sim->wl.hist[i],particle[0].pos.z);
fflush(stdout);
}
*/
if ( sim->wl.alpha < WL_ALPHATOL) {
printf("\nF I N I S H E D\n\n");
fflush (stdout);
break;
}
sim->wl.alpha/=2;
printf("%f \n", sim->wl.alpha);
fflush (stdout);
sim->wl.wmin = sim->wl.weights[0];
for (i=0;i < sim->wl.length[0];i++) {
j=0;
sim->wl.hist[i+j*sim->wl.length[0]] = 0;
sim->wl.weights[i+j*sim->wl.length[0]] -= sim->wl.wmin;
for (j=1;j < sim->wl.length[1];j++) {
sim->wl.hist[i+j*sim->wl.length[0]] = 0;
sim->wl.weights[i+j*sim->wl.length[0]] -= sim->wl.wmin;
}
}
}
}
}
if (!(sweep % 10000)) {
/*reinitialize pach vectors to avoid cummulation of errors*/
partvecinit(topo,sim,conf);
}
/* Sampling of statistics */
if (sweep == next_calc)
{
/*s2 = nematic(npart, particle);
accumulate (&nem, s2);
for (i=0; i<terms; i++) {
ci = smectic(npart, particle, i+1);
accumulate (&smec[i], ci);
}
accumulate (&shapex, (*box).x);
accumulate (&shapey, (*box).y);
accumulate (&shapez, (*box).z);
volume = (*box).x * (*box).y * (*box).z;
accumulate (&vol, volume);
next_calc += paramfrq;
*/
}
/* Writing of statistics */
if (sweep == next_dump) {
/*printf ("Statistics after %ld sweeps:\n", sweep);
printf (" Mean and RMS fluctuation of S2: %13.8lf %13.8lf\n",
nem.mean, nem.rms);
for (i=0; i<terms; i++) {
printf (" Mean & fluc. Fourier coeff. %3ld: %13.8lf %13.8lf\n",
i+1, smec[i].mean, smec[i].rms);
}
printf (" Mean & fluc box dimensions: x %13.8lf %13.8lf\n",
shapex.mean, shapex.rms);
printf (" y %13.8lf %13.8lf\n",
shapey.mean, shapey.rms);
printf (" z %13.8lf %13.8lf\n",
shapez.mean, shapez.rms);
printf (" Mean & fluctuation volume: %13.8lf %13.8lf\n",
vol.mean, vol.rms);
printf (" Mean & fluc. volume over volume of particles: %13.8lf %13.8lf\n",
vol.mean/pvolume, vol.rms/pvolume);
printf ("\n");
fflush (stdout);
*/
fprintf (statf, " %ld; %.10lf\n", sweep, conf->box.x * conf->box.y * conf->box.z);
fprintf (ef, " %ld; %.10lf %f \n", sweep, calc_energy(0, intfce, 0, topo, conf, sim,0), alignment_order(conf,topo));
if (sim->wlm[0] > 0) {
wlwrite(&sim->wl,files->wloutfile);
}
next_dump += report;
}
/* Writing of movie frame */
if (sweep == next_frame) {
fprintf (mf, "%ld\n", topo->npart);
fprintf (mf, "sweep %ld; box %.10lf %.10lf %.10lf\n", sweep, conf->box.x, conf->box.y, conf->box.z);
draw (mf, conf, topo);
fflush (mf);
next_frame += sim->movie;
}
/* Writing out cluster statistics */
if(sim->write_cluster && (sweep % sim->write_cluster == 0)){
write_cluster(cl_stat, cl, cl_list, FALSE, sweep, sim, topo, conf, intfce);
}
/*=== End of housekeeping ===*/
}
/**** End of sweeps loop ****/
/*do energy drift check - at the end calculation*/
volume = conf->box.x * conf->box.y * conf->box.z;
edriftend = calc_energy(0, intfce, 0, topo, conf, sim,0);
pvdriftend = sim->press * volume - (double)topo->npart * log(volume) / sim->temper;
printf("Energy drift: %.15lf \n",edriftend - edriftstart - edriftchanges +pvdriftend -pvdriftstart);
printf("Starting energy+pv: %.8lf \n",edriftstart+pvdriftstart);
printf("Starting energy: %.8lf \n",edriftstart);
fflush(stdout);
/* End wang-landau*/
if (sim->wlm[0] > 0) {
sim->wl.min = sim->wl.hist[0];
for (i=0;i < sim->wl.length[0];i++) {
j=0;
if ( sim->wl.hist[i+j*sim->wl.length[0]] < sim->wl.min ) sim->wl.min = sim->wl.hist[i+j*sim->wl.length[0]];
for (j=1;j < sim->wl.length[1];j++) {
if ( sim->wl.hist[i+j*sim->wl.length[0]] < sim->wl.min ) sim->wl.min = sim->wl.hist[i+j*sim->wl.length[0]];
}
}
sim->wl.wmin = sim->wl.weights[0];
for (i=0;i < sim->wl.length[0];i++) {
j=0;
sim->wl.weights[i+j*sim->wl.length[0]] -= sim->wl.wmin;
for (j=1;j < sim->wl.length[1];j++) {
sim->wl.weights[i+j*sim->wl.length[0]] -= sim->wl.wmin;
}
}
wlwrite(&sim->wl,files->wloutfile);
wlend(&sim->wl);
if ( (sim->wlm[0] == 2)||(sim->wlm[1] == 2) ) {
mesh_end(&sim->wl.mesh);
mesh_end(&sim->wl.origmesh);
}
if ( (sim->wlm[0] == 5)||(sim->wlm[1] == 5)||(sim->wlm[0] == 6)||(sim->wlm[1] == 6) ) {
if ( sim->wl.radiushole != NULL ) free(sim->wl.radiushole);
if ( sim->wl.radiusholeold != NULL ) free(sim->wl.radiusholeold);
}
}
/*end movie*/
if (sim->movie > 0)
fclose (mf);
/*end cluster*/
if(sim->write_cluster){
fclose(cl_stat);
fclose(cl);
}
if (report < nsweeps) {
fclose(ef);
fclose(statf);
}
}
/*..................................MOVES.........................................*/
/*................................................................................*/
/*..............................PARTICLE MOVES....................................*/
/* Attempt one single-particle Monte-Carlo move.
 * A particle is drawn uniformly at random; with probability 1/2 it is
 * translated, otherwise rotated.  Purely spherical particles (geotype >= SP)
 * carry no orientation and are therefore always translated.
 * Returns the resulting change in total energy (0 if the move was rejected
 * inside the called routine). */
double particlemove(struct topo * topo, struct sim * sim, struct conf * conf,
                    double (* intfce[MAXT][MAXT])(struct interacts *))
{
    double ran2(long *);
    double partdisplace(struct topo * topo, struct sim * sim, struct conf * conf,
                        double (* intfce[MAXT][MAXT])(struct interacts *),long target);
    double partrotate(struct topo * topo, struct sim * sim, struct conf * conf,
                      double (* intfce[MAXT][MAXT])(struct interacts *),long target);
    long chosen;
    int  spherical;

    /*=== This is a particle move step ===*/
    chosen = ran2(&seed) * topo->npart;
    /* spheres have no internal direction -> rotation would be meaningless */
    spherical = (topo->ia_params[conf->particle[chosen].type][conf->particle[chosen].type].geotype[0] >= SP);
    if ( (ran2(&seed) < 0.5) || spherical )
        return partdisplace(topo,sim,conf,intfce,chosen);
    /*=== Rotation step ===*/
    return partrotate(topo,sim,conf,intfce,chosen);
}
/*................................................................................*/
/*
 * Attempt a random translational displacement of particle `target`.
 *
 * The trial vector is a random direction scaled by the per-type maximum
 * displacement (sim->trans[type].mx) in fractional box coordinates.  If a
 * Wang-Landau method is active, the new order parameter is computed first
 * and the move is rejected outright when it would leave the WL window.
 * Acceptance is decided by movetry() on (old energy + WL bias) vs the new
 * energy; on rejection the old position (and, for methods 1/5, the system
 * centre of mass) is restored.
 *
 * Returns the energy change produced by the move (0.0 when rejected).
 */
double partdisplace(struct topo * topo, struct sim * sim, struct conf * conf,
double (* intfce[MAXT][MAXT])(struct interacts *),long target)
{
double edriftchanges,energy,enermove,wlener;
struct vector orig, dr, origsyscm;
int reject=0,wli;
double radiusholemax_orig=0;
/* forward declarations of helpers defined elsewhere in this file */
double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *),
int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn);
int movetry(double, double, double);
void wlreject(struct sim *, long);
void wlaccept(int, struct wls *);
long meshorder_moveone(struct vector, struct vector, struct meshs *, long, long, \
struct conf * conf, struct sim * sim, int wli);
int mesh_cpy(struct meshs *, struct meshs *);
//void mesh_print (struct meshs *);
long z_order(struct wls *, struct conf * conf, int wli);
long twopartdist(struct wls *, struct conf *conf, int wli);
struct vector ranvec(void);
int longarray_cpy (long **, long **, long, long);
long radiusholeorder_moveone(struct vector *oldpos, struct conf *conf, struct sim * sim, long target, int wli,struct vector *);
long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *);
long contparticles_moveone(struct vector *oldpos, struct conf *conf, struct sim * sim, long target,int wli);
long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli);
/*=== Displacement step ===*/
edriftchanges =0.0;
origsyscm.x = 0;
origsyscm.y = 0;
origsyscm.z = 0;
/* energy of the particle in the OLD position (mode 1 = single particle) */
energy = calc_energy(target, intfce, 1, topo, conf, sim,0);
orig = conf->particle[target].pos;
dr = ranvec();
//ran = sqrt(ran2(&seed));
/* scale the unit random vector by the per-type max displacement, converted
   to fractional coordinates (positions are stored relative to the box) */
dr.x *= sim->trans[conf->particle[target].type].mx/conf->box.x;
dr.y *= sim->trans[conf->particle[target].type].mx/conf->box.y;
dr.z *= sim->trans[conf->particle[target].type].mx/conf->box.z;
conf->particle[target].pos.x += dr.x;
conf->particle[target].pos.y += dr.y;
conf->particle[target].pos.z += dr.z;
//} while (conf->particle[target].pos.x < 0.25 || conf->particle[target].pos.x > 0.50);
reject = 0;
wlener = 0.0;
if (sim->wlm[0] > 0) {  /* get new neworder for wang-landau */
for (wli=0;wli<sim->wl.wlmdim;wli++) {
switch (sim->wlm[wli]) {
/* method 1: z-order of system centre of mass -- update syscm by the
   volume-weighted shift of the moved particle, remember the old value */
case 1: origsyscm = conf->syscm;
conf->syscm.x += dr.x * topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume;
conf->syscm.y += dr.y * topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume;
conf->syscm.z += dr.z * topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume;
sim->wl.neworder[wli] = z_order(&sim->wl, conf,wli);
break;
/* method 2: pore mesh -- back up the mesh, then update incrementally */
case 2: mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh);
sim->wl.neworder[wli] = meshorder_moveone(orig, conf->particle[target].pos, &sim->wl.mesh, topo->npart, target, conf, sim,wli);
break;
case 4:
sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli);
break;
/* method 5: radius of hole around syscm -- back up radiushole array */
case 5:
radiusholemax_orig = sim->wl.radiusholemax;
origsyscm = conf->syscm;
conf->syscm.x += dr.x * topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume;
conf->syscm.y += dr.y * topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume;
conf->syscm.z += dr.z * topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume;
longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm));
break;
/* method 6: radius of hole around particle 0 -- full recompute only when
   particle 0 itself moved, incremental update otherwise */
case 6:
radiusholemax_orig = sim->wl.radiusholemax;
longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
if ( target == 0 )
sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
else
sim->wl.neworder[wli] = radiusholeorder_moveone(&orig, conf, sim,target,wli,&(conf->particle[0].pos));
break;
/* method 7: number of particles in contact with particle 0 */
case 7:
sim->wl.partincontactold = sim->wl.partincontact;
if ( target == 0 )
sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli);
else
sim->wl.neworder[wli] = contparticles_moveone(&orig,conf,sim,target,wli);
break;
default:
sim->wl.neworder[wli] = sim->wl.currorder[wli];
break;
}
/* reject moves that would step outside the WL window */
if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1;
}
if (!reject) {
/* WL bias: weight difference between the new and current 2D bins */
wlener += sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]];
energy += wlener;
}
}
if (!reject) {  /* wang-landaou ok, try move - calcualte energy */
enermove = calc_energy(target, intfce, 1, topo, conf, sim,0);
}
/* NOTE: short-circuit keeps uninitialized enermove from being read on reject */
if ( reject || movetry(energy, enermove, sim->temper) ) {  /* probability acceptance */
conf->particle[target].pos = orig;
sim->trans[conf->particle[target].type].rej++;
if ( (sim->wlm[0] == 1) || (sim->wlm[0] == 5) || (sim->wlm[1] == 1) || (sim->wlm[1] == 5) )
conf->syscm = origsyscm;
wlreject(sim,radiusholemax_orig);
} else {  /* move was accepted */
sim->trans[conf->particle[target].type].acc++;
wlaccept(sim->wlm[0],&sim->wl);
edriftchanges = enermove - energy + wlener;
//printf("%lf\t%lf\n", conf->particle[0].pos.z * conf->box.z , enermove);
//printf("%.12f\t%.12f\t%.12f\n", energy , enermove,edriftchanges);
}
return edriftchanges;
}
/*................................................................................*/
/*
 * Attempt a random rotation of particle `target`.
 *
 * The whole particle record is backed up, then psc_rotate() applies a random
 * rotation limited by the per-type maximum angle; direction and patch vectors
 * are re-normalised/orthogonalised afterwards for numerical safety.  Only
 * Wang-Landau method 3 (z-component of particle 0's direction) is affected
 * by a pure rotation.  On rejection the saved particle is restored.
 *
 * Returns the energy change produced by the move (0.0 when rejected).
 */
double partrotate(struct topo * topo, struct sim * sim, struct conf * conf,
double (* intfce[MAXT][MAXT])(struct interacts *),long target)
{
double edriftchanges,energy,enermove,wlener;
struct particles origpart;
int reject=0,wli;
/* forward declarations of helpers defined elsewhere in this file */
double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *),
int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn);
int movetry(double, double, double);
void wlreject(struct sim *,long);
void wlaccept(int, struct wls *);
void normalise(struct vector *);
void ortogonalise(struct vector *,struct vector);
void psc_rotate(struct particles *,double,int);
/*=== Rotation step ===*/
//printf ("rotation %ld npart %ld\n\n",target,npart);
/* energy of the particle in the OLD orientation (mode 1 = single particle) */
energy = calc_energy(target, intfce, 1, topo, conf, sim,0);
origpart = conf->particle[target];
psc_rotate(&conf->particle[target],sim->rot[conf->particle[target].type].angle, topo->ia_params[conf->particle[target].type][conf->particle[target].type].geotype[0]);
/*should be normalised and ortogonal but we do for safety*/
normalise (&conf->particle[target].dir);
ortogonalise(&conf->particle[target].patchdir[0],conf->particle[target].dir);
reject = 0;
edriftchanges =0.0;
wlener = 0.0;
if (sim->wlm[0] > 0) {  /* get new neworder for wang-landau */
for (wli=0;wli<sim->wl.wlmdim;wli++) {
switch (sim->wlm[wli]) {
/* method 3 tracks particle 0's direction z-component; all other
   methods are unaffected by a pure rotation */
case 3:
if (target == 0) sim->wl.neworder[wli] = (long) floor( (conf->particle[0].dir.z - sim->wl.minorder[wli])/ sim->wl.dorder[wli] );
else sim->wl.neworder[wli] = sim->wl.currorder[wli];
/* only rotation change direction */
break;
default:
sim->wl.neworder[wli] = sim->wl.currorder[wli];
break;
}
/* reject moves that would step outside the WL window */
if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1;
}
if (!reject) {
/* WL bias: weight difference between the new and current 2D bins */
wlener += sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]];
energy += wlener;
}
}
if (!reject) {  /* wang-landaou ok, try move - calcualte energy */
enermove = calc_energy(target, intfce, 1, topo, conf, sim,0);
}
/* NOTE: short-circuit keeps uninitialized enermove from being read on reject */
if ( reject || movetry(energy,enermove,sim->temper) ) {  /* probability acceptance */
conf->particle[target] = origpart;
sim->rot[conf->particle[target].type].rej++;
wlreject(sim,sim->wl.radiusholemax);
} else {  /* move was accepted */
// DEBUG
//fprintf(fenergy, "%lf\t%lf\n", conf->particle[1].pos.x * conf->box.x , enermove);
sim->rot[conf->particle[target].type].acc++;
wlaccept(sim->wlm[0],&sim->wl);
edriftchanges = enermove - energy + wlener;
//printf("%lf\t%lf\n", conf->particle[0].patchdir[0].z, enermove);
}
return edriftchanges;
}
/*..................... This is an attempt to switch a type.................................*/
/*
 * Attempt to switch the type of a randomly chosen switchable particle.
 *
 * A particle is drawn from topo->switchlist; its `type` and `switchtype`
 * fields are swapped and `switched` is stepped by +/-1 (PMONE).  The move
 * energy includes the chemical-potential term delta_mu * pmone.  Active
 * Wang-Landau order parameters that depend on particle types (mesh, hole
 * radius, contact count) are fully recomputed.  On rejection the swap is
 * undone and the interaction vectors are rebuilt.
 *
 * Returns the energy change produced by the move (0.0 when rejected).
 */
double switchtypemove(struct topo * topo, struct sim * sim, struct conf * conf,
double (* intfce[MAXT][MAXT])(struct interacts *) )
{
double edriftchanges,energy,enermove,wlener;
int reject=0,wli;
long target;
double radiusholemax_orig=0;
/* forward declarations of helpers defined elsewhere in this file */
double ran2(long *);
double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *),
int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn);
int movetry(double, double, double);
void wlreject(struct sim *,long);
void wlaccept(int, struct wls *);
void int_partvec(long, struct ia_param *, struct conf *);
int mesh_init(struct meshs *, double, long, struct conf * conf, struct sim * sim);
int mesh_cpy(struct meshs *, struct meshs *);
long z_order(struct wls *, struct conf * conf,int);
long twopartdist(struct wls *, struct conf *conf,int);
long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *);
int longarray_cpy (long **target, long **source,long,long);
long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli);
/*=== This is an attempt to switch a type ===*/
edriftchanges =0.0;
wlener = 0.0;
/* pick uniformly among the particles allowed to switch type */
target = ran2(&seed) * topo->n_switch_part;
target = topo->switchlist[target];
DEBUG_SIM("Switching the particle type");
DEBUG_SIM("PARTICLE: %ld", target);
/* energy of the particle with its OLD type */
energy = calc_energy(target, intfce, 1, topo, conf, sim,0);
// Start switching the type
int switched = conf->particle[target].switched;
int pmone = PMONE(switched);
DEBUG_SIM("switched = %d", switched);
DEBUG_SIM("pmone = %d", pmone);
/* swap type <-> switchtype and flip the switched counter by +/-1 */
int tmp_type = conf->particle[target].type;
conf->particle[target].type = conf->particle[target].switchtype;
conf->particle[target].switchtype = tmp_type;
conf->particle[target].switched += pmone;
/* rebuild cached interaction vectors for the new type */
int_partvec(target,&(topo->ia_params[conf->particle[target].type][conf->particle[target].type]),conf);
DEBUG_SIM("Particle %ld is %d switched", target, switched);
//DEBUG
#ifdef DEBUGGING_SIM
if ((abs(pmone) != 1) || (conf->particle[target].type == conf->particle[target].switchtype)){
fprintf(stderr, "ERROR: Something went wrong, when switching the type of particle %ld\n", target);
exit(1);
}
#endif
if (sim->wlm[0] > 0) {  /* get new neworder for wang-landau */
for (wli=0;wli<sim->wl.wlmdim;wli++) {
switch (sim->wlm[wli]) {
/*case 1: sim->wl.neworder = z_order(&sim->wl, conf,wli);
break;*/
/* method 2: pore mesh must be fully re-initialised after a type change */
case 2: mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh);
sim->wl.neworder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize,topo->npart, conf, sim) - sim->wl.minorder[wli]);
break;
/*case 4:
sim->wl.neworder = twopartdist(&sim->wl,conf,wli);
break;*/
/* methods 5/6: back up radiushole array, recompute from scratch */
case 5:
radiusholemax_orig = sim->wl.radiusholemax;
longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm));
break;
case 6:
radiusholemax_orig = sim->wl.radiusholemax;
longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
break;
case 7:
sim->wl.partincontactold = sim->wl.partincontact;
sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli);
break;
default:
sim->wl.neworder[wli] = sim->wl.currorder[wli];
break;
}
/* reject moves that would step outside the WL window */
if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1;
}
if (!reject) {
/* WL bias: weight difference between the new and current 2D bins */
wlener += sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]];
energy += wlener;
}
}
if (!reject) {
/* chemical-potential contribution of the switch, then the new interaction
   energy with the NEW type */
enermove = conf->particle[target].delta_mu * pmone;
// DEBUG
//double dmu = enermove;
//particle[target].switched += pmone;
enermove += calc_energy( target, intfce, 1, topo, conf, sim,0);
//printf("energy: %lf \t %lf\t%lf\n",particle[target].delta_mu, dmu, enermove);
}
// If not accepted: switch back
if ( reject || movetry(energy,enermove,sim->temper) ) {  /* probability acceptance */
DEBUG_SIM("Did NOT switch it\n");
conf->particle[target].switchtype = conf->particle[target].type;
conf->particle[target].type = tmp_type;
conf->particle[target].switched -= pmone;
int_partvec(target,&(topo->ia_params[conf->particle[target].type][conf->particle[target].type]),conf);
wlreject(sim,radiusholemax_orig);
} else {  /* move was accepted */
wlaccept(sim->wlm[0],&sim->wl);
edriftchanges = enermove - energy + wlener;
}
return edriftchanges;
}
/*.................................CHAIN MOVES....................................*/
/*................................................................................*/
/* Attempt one chain (cluster) Monte-Carlo move.
 * A chain is drawn uniformly at random; with probability 1/2 the whole chain
 * is displaced, otherwise it is rotated.
 * Returns the resulting change in total energy (0 if the move was rejected
 * inside the called routine). */
double chainmove(struct topo * topo, struct sim * sim, struct conf * conf,
                 double (* intfce[MAXT][MAXT])(struct interacts *))
{
    double ran2(long *);
    double chaindisplace(struct topo * topo, struct sim * sim, struct conf * conf,
                         double (* intfce[MAXT][MAXT])(struct interacts *), long target);
    double chainrotate(struct topo * topo, struct sim * sim, struct conf * conf,
                       double (* intfce[MAXT][MAXT])(struct interacts *), long target);
    long which;

    /*=== This is a chain move step ===*/
    which = ran2(&seed) * topo->chainnum;
    /* coin flip: displacement or rotation of the whole cluster/chain */
    return (ran2(&seed) < 0.5)
        ? chaindisplace(topo,sim,conf,intfce,which)
        : chainrotate(topo,sim,conf,intfce,which);
}
/*................................................................................*/
/*
 * Trial move: rigid translation of an entire chain (cluster) by one random
 * displacement vector applied to every particle of the chain.
 *
 * target  index of the chain in topo->chainlist (chain is terminated by a
 *         negative particle index).
 * Returns the change to the running energy-drift accumulator:
 *         enermove - energy + wlener when accepted, 0.0 when rejected
 *         (edriftchanges is initialized to 0.0 and only set on acceptance).
 *
 * Flow: store old positions and old energy -> displace all particles ->
 * recompute Wang-Landau order parameters (may force rejection if out of
 * histogram range) -> compute new energy -> Metropolis test via movetry()
 * (nonzero return means reject) -> on rejection restore positions, system
 * CM and WL state; on acceptance commit WL state.
 */
double chaindisplace(struct topo * topo, struct sim * sim, struct conf * conf,
double (* intfce[MAXT][MAXT])(struct interacts *), long target)
{
double edriftchanges,energy,enermove,wlener;
struct vector dr, origsyscm;
int reject=0,wli;
struct vector cluscm;
long current,i;
struct particles chorig[MAXCHL];      /* saved positions of the chain's particles */
double radiusholemax_orig=0;
/* prototypes for helpers defined elsewhere in this file */
double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *),
int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn);
int movetry(double, double, double);
void wlreject(struct sim *,long);
void wlaccept(int, struct wls *);
long meshorder_movechain(long [MAXN], struct meshs *, long, struct conf * conf, \
struct sim * sim, struct particles chorig[MAXCHL],int);
int mesh_cpy(struct meshs *, struct meshs *);
//void mesh_print (struct meshs *);
long z_order(struct wls *, struct conf * conf,int);
long twopartdist(struct wls *, struct conf *conf,int);
struct vector ranvec(void);
int longarray_cpy (long **target, long **source,long,long);
long radiusholeorder_movechain(long chain[MAXN], struct conf * conf, \
struct sim * sim,struct particles chorig[MAXCHL],int,struct vector *);
long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *);
long contparticles_movechain(long chain[MAXN], struct conf * conf, struct sim * sim,struct particles chorig[MAXCHL],int wli);
long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli);
/*=== Displacement step of cluster/chain ===*/
//printf ("move chain\n\n");
energy =0.0;
wlener = 0.0;
edriftchanges=0.0;
i=0;
current = topo->chainlist[target][0];
cluscm.x = 0;
cluscm.y = 0;
cluscm.z = 0;
origsyscm.x = 0;
origsyscm.y = 0;
origsyscm.z = 0;
while (current >=0 ) { /* store old configuration calculate energy*/
chorig[i].pos = conf->particle[current].pos;
/* mode 2 = energy of one particle within its chain context — TODO confirm meaning of mode */
energy += calc_energy(current, intfce, 2, topo, conf, sim, target);
i++;
current = topo->chainlist[target][i];
}
/* random direction scaled by the chain-type displacement amplitude mx,
 * converted to box-relative units per axis.
 * NOTE(review): conf->particle[target] indexes by CHAIN number, not by a
 * particle number — presumably relies on particle[target] belonging to
 * chain target; TODO confirm. */
dr = ranvec();
dr.x *= sim->chainm[conf->particle[target].chaint].mx/conf->box.x;
dr.y *= sim->chainm[conf->particle[target].chaint].mx/conf->box.y;
dr.z *= sim->chainm[conf->particle[target].chaint].mx/conf->box.z;
i=0;
current = topo->chainlist[target][0];
while (current >=0 ) { /* move chaine to new position */
if ( (sim->wlm[0] == 1) || (sim->wlm[0] == 5) || (sim->wlm[1] == 1) || (sim->wlm[1] == 5) ) { /* calculate move of center of mass */
/* volume-weighted CM shift, needed to update the system CM for WL types 1 and 5 */
cluscm.x += dr.x*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
cluscm.y += dr.y*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
cluscm.z += dr.z*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
}
conf->particle[current].pos.x += dr.x;
conf->particle[current].pos.y += dr.y;
conf->particle[current].pos.z += dr.z;
i++;
current = topo->chainlist[target][i];
}
enermove = 0.0;
reject = 0;
if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */
for (wli=0;wli<sim->wl.wlmdim;wli++) {
switch (sim->wlm[wli]) {
/* type 1: order = z position of the system CM (updated by the cluster shift) */
case 1: origsyscm = conf->syscm;
conf->syscm.x += cluscm.x / conf->sysvolume;
conf->syscm.y += cluscm.y / conf->sysvolume;
conf->syscm.z += cluscm.z / conf->sysvolume;
sim->wl.neworder[wli] = z_order(&sim->wl, conf,wli);
break;
/* type 2: mesh order parameter — back up the mesh, then update it incrementally */
case 2: mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh);
sim->wl.neworder[wli] = meshorder_movechain(topo->chainlist[target], &sim->wl.mesh, topo->npart, conf, sim, chorig,wli);
break;
/* type 4: distance between two tagged particles */
case 4:
sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli);
break;
/* type 5: radius of hole around the system CM — full recalculation */
case 5:
radiusholemax_orig = sim->wl.radiusholemax;
origsyscm = conf->syscm;
conf->syscm.x += cluscm.x / conf->sysvolume;
conf->syscm.y += cluscm.y / conf->sysvolume;
conf->syscm.z += cluscm.z / conf->sysvolume;
longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm));
break;
/* type 6: radius of hole around particle 0; full recalc only if chain 0 itself moved */
case 6:
radiusholemax_orig = sim->wl.radiusholemax;
longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
if ( target == 0 )
sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
else
sim->wl.neworder[wli] = radiusholeorder_movechain(topo->chainlist[target], conf, sim, chorig,wli,&(conf->particle[0].pos));
break;
/* type 7: number of particles in contact; full recalc only if chain 0 moved */
case 7:
sim->wl.partincontactold = sim->wl.partincontact;
if ( target == 0 )
sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli);
else
sim->wl.neworder[wli] = contparticles_movechain(topo->chainlist[target],conf,sim,chorig,wli);
break;
default:
sim->wl.neworder[wli] = sim->wl.currorder[wli];
break;
}
/* order parameter left the histogram range -> reject unconditionally */
if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1;
}
if (!reject) {
/* WL bias: new minus current weight, 2-D histogram flattened as
 * order[0] + order[1]*length[0] (assumes order[1]==0 when wlmdim==1 — TODO confirm) */
wlener += sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]];
energy += wlener;
}
}
if (!reject) { /* wang-landaou ok, try move - calcualte energy */
i=0;
current = topo->chainlist[target][0];
while (current >=0 ) {
enermove += calc_energy(current, intfce, 2, topo, conf, sim,target);
i++;
current = topo->chainlist[target][i];
}
}
/* movetry() nonzero means the Metropolis test rejects the move */
if ( reject || movetry(energy, enermove, sim->temper) ) { /* probability acceptance */
/* restore original particle positions */
i=0;
current = topo->chainlist[target][0];
while (current >=0 ) {
conf->particle[current].pos = chorig[i].pos;
i++;
current = topo->chainlist[target][i];
}
sim->chainm[conf->particle[target].chaint].rej++;
/* system CM was modified only for these WL types — restore it */
if ( (sim->wlm[0] == 1) || (sim->wlm[0] == 5) || (sim->wlm[1] == 1) || (sim->wlm[1] == 5) )
conf->syscm = origsyscm;
wlreject(sim,radiusholemax_orig);
} else { /* move was accepted */
sim->chainm[conf->particle[target].chaint].acc++;
wlaccept(sim->wlm[0],&sim->wl);
edriftchanges = enermove - energy + wlener;
}
return edriftchanges;
}
/*................................................................................*/
/*
 * Trial move: rigid rotation of an entire chain (cluster) about its
 * volume-weighted geometric center.
 *
 * target  index of the chain in topo->chainlist (terminated by a negative
 *         particle index).
 * Returns the change to the running energy-drift accumulator:
 *         enermove - energy + wlener when accepted, 0.0 when rejected.
 *
 * Unlike chaindisplace(), the full particle records (positions AND
 * orientations) are saved in chorig, because a rotation also changes the
 * particles' direction/patch vectors.
 */
double chainrotate(struct topo * topo, struct sim * sim, struct conf * conf,
double (* intfce[MAXT][MAXT])(struct interacts *), long target)
{
double edriftchanges,energy,enermove,wlener;
int reject=0,wli;
struct vector cluscm;      /* volume-weighted center of the chain */
double chainvolume;        /* total excluded volume of the chain (CM weight normalizer) */
long current, i;
struct particles chorig[MAXCHL];   /* saved full particle records of the chain */
double radiusholemax_orig=0;
/* prototypes for helpers defined elsewhere in this file */
double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *),
int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn);
int movetry(double, double, double);
void wlreject(struct sim *,long);
void wlaccept(int, struct wls *);
long meshorder_movechain(long [MAXN], struct meshs *, long, struct conf * conf, \
struct sim * sim, struct particles chorig[MAXCHL],int);
int mesh_cpy(struct meshs *, struct meshs *);
void cluster_rotate(long, struct vector, double, struct topo * topo, struct conf * conf);
long z_order(struct wls *, struct conf * conf,int);
long twopartdist(struct wls *, struct conf *conf,int);
int longarray_cpy (long **target, long **source,long,long);
long radiusholeorder_movechain(long chain[MAXN], struct conf * conf, struct sim * sim,\
struct particles chorig[MAXCHL],int,struct vector *);
long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *);
long contparticles_movechain(long chain[MAXN], struct conf * conf, struct sim * sim,struct particles chorig[MAXCHL],int wli);
long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli);
/*=== Rotation step of cluster/chain ===*/
//printf ("rotation of chain\n\n");
energy=0.0; /* set values to zero*/
edriftchanges=0.0;
wlener = 0.0;
/* first particle of the chain seeds the CM accumulation */
current = topo->chainlist[target][0];
cluscm.x = conf->particle[current].pos.x*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
cluscm.y = conf->particle[current].pos.y*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
cluscm.z = conf->particle[current].pos.z*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
chorig[0] = conf->particle[current];
chainvolume = topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
energy += calc_energy(current, intfce, 2, topo, conf, sim,target);
i=1;
current = topo->chainlist[target][i];
while (current >=0 ) { /* store old configuration calculate energy*/
chorig[i] = conf->particle[current];
/*We have chains whole! don't have to do PBC*/
/*r_cm.x = conf->particle[current].pos.x - conf->particle[first].pos.x;
r_cm.y = conf->particle[current].pos.y - conf->particle[first].pos.y;
r_cm.z = conf->particle[current].pos.z - conf->particle[first].pos.z;
if ( r_cm.x < 0 )
r_cm.x -= (double)( (long)(r_cm.x-0.5) );
else
r_cm.x -= (double)( (long)(r_cm.x+0.5) );
if ( r_cm.y < 0 )
r_cm.y -= (double)( (long)(r_cm.y-0.5) );
else
r_cm.y -= (double)( (long)(r_cm.y+0.5) );
if ( r_cm.z < 0 )
r_cm.z -= (double)( (long)(r_cm.z-0.5) );
else
r_cm.z -= (double)( (long)(r_cm.z+0.5) );
*/
cluscm.x += conf->particle[current].pos.x*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
cluscm.y += conf->particle[current].pos.y*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
cluscm.z += conf->particle[current].pos.z*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
chainvolume += topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
energy += calc_energy(current, intfce, 2, topo, conf, sim,target);
i++;
current = topo->chainlist[target][i];
}
/* normalize to get the volume-weighted center */
cluscm.x = cluscm.x/chainvolume;
cluscm.y = cluscm.y/chainvolume;
cluscm.z = cluscm.z/chainvolume;
/*do actual rotations around geometrical center*/
/* NOTE(review): conf->particle[target] indexes by CHAIN number, not by a
 * particle number — presumably relies on particle[target] belonging to
 * chain target; TODO confirm (same pattern as in chaindisplace). */
cluster_rotate(target, cluscm, sim->chainr[conf->particle[target].chaint].angle, topo, conf);
enermove=0.0;
reject = 0;
if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */
for (wli=0;wli<sim->wl.wlmdim;wli++) {
switch (sim->wlm[wli]) {
/* type 1: z of system CM — only chain 0 can change it here */
case 1:
if (target == 0) sim->wl.neworder[wli] = z_order(&sim->wl, conf,wli);
else sim->wl.neworder[wli] = sim->wl.currorder[wli];
/* if we rotated cluster it is around its CM so no change*/
break;
/* type 2: mesh order parameter — back up mesh, update incrementally */
case 2:
mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh);
sim->wl.neworder[wli] = meshorder_movechain(topo->chainlist[target], &sim->wl.mesh, topo->npart, conf, sim, chorig,wli);
break;
/* type 3: z component of particle 0's direction — rotation of chain 0 changes it */
case 3:
if (target == 0) sim->wl.neworder[wli] = (long) floor( (conf->particle[0].dir.z - sim->wl.minorder[wli])/ sim->wl.dorder[wli] );
else sim->wl.neworder[wli] = sim->wl.currorder[wli];
/* only rotation change direction */
break;
/* type 4: distance between two tagged particles */
case 4:
sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli);
break;
/* type 5: radius of hole around the system CM — full recalculation */
case 5:
radiusholemax_orig = sim->wl.radiusholemax;
longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm));
break;
/* type 6: radius of hole around particle 0; full recalc only if chain 0 moved */
case 6:
radiusholemax_orig = sim->wl.radiusholemax;
longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
if ( target == 0 )
sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
else
sim->wl.neworder[wli] = radiusholeorder_movechain(topo->chainlist[target], conf, sim, chorig,wli,&(conf->particle[0].pos));
break;
/* type 7: number of particles in contact; full recalc only if chain 0 moved */
case 7:
sim->wl.partincontactold = sim->wl.partincontact;
if ( target == 0 )
sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli);
else
sim->wl.neworder[wli] = contparticles_movechain(topo->chainlist[target],conf,sim,chorig,wli);
break;
default:
sim->wl.neworder[wli] = sim->wl.currorder[wli];
break;
}
/* order parameter left the histogram range -> reject unconditionally */
if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1;
}
if (!reject) {
/* WL bias: new minus current weight (2-D histogram flattened by length[0]) */
wlener += sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]];
energy += wlener;
}
}
if (!reject) { /* wang-landaou ok, try move - calcualte energy */
i=0;
current = topo->chainlist[target][0];
while (current >=0 ) {
enermove += calc_energy(current, intfce, 2, topo, conf, sim,target);
i++;
current = topo->chainlist[target][i];
}
}
/* movetry() nonzero means the Metropolis test rejects the move */
if ( reject || movetry(energy, enermove, sim->temper) ) { /* probability acceptance */
/* restore full particle records (positions and orientations) */
i=0;
current = topo->chainlist[target][0];
while (current >=0 ) {
conf->particle[current] = chorig[i];
i++;
current = topo->chainlist[target][i];
}
sim->chainr[conf->particle[target].chaint].rej++;
wlreject(sim,radiusholemax_orig);
} else { /* move was accepted */
sim->chainr[conf->particle[target].chaint].acc++;
wlaccept(sim->wlm[0],&sim->wl);
edriftchanges = enermove - energy + wlener;
}
return edriftchanges;
}
/*..............................PRESSURE MOVES....................................*/
/*................................................................................*/
/*
 * Volume-change (constant-pressure) trial move.  sim->ptype selects the
 * coupling scheme:
 *   0 - anisotropic: one randomly chosen box edge is rescaled
 *   1 - isotropic:  all three edges change by the same increment
 *   2 - isotropic in xy, z kept constant
 *   3 - xy changed, z adjusted so the total volume stays constant
 * Any other value is a fatal configuration error (exit(1)).
 *
 * For each case the trial "energy" of the move is the new system energy
 * plus the NPT work/entropy term built directly in the code
 * (P*dV - N*ln(V'/V)/T, or its per-edge analogue for case 0); case 3 has
 * no such term because the volume is unchanged.  Wang-Landau order
 * parameters are recomputed and may force rejection when they leave the
 * histogram range.  Returns the energy-drift change on acceptance, 0.0 on
 * rejection.
 */
double pressuremove(struct topo * topo, struct sim * sim, struct conf * conf,
double (* intfce[MAXT][MAXT])(struct interacts *))
{
double edriftchanges,energy,enermove,wlener;
int reject=0,wli;
double old_side; /* Box length before attempted change */
double *side; /* Box dimension to try changing */
double psch; /* Size of a box change during pressure */
double pvol; /* Size of a volume during pressure */
double pvoln; /* Size of a new volume during pressure */
double rsave; /* Saved random number */
double area;
double radiusholemax_orig=0;
/* prototypes for helpers defined elsewhere in this file */
double ran2(long *);
double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *),
int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn);
int movetry(double, double, double);
void wlreject(struct sim *,long);
void wlaccept(int, struct wls *);
int mesh_init(struct meshs *, double, long, struct conf * conf, struct sim * sim);
int mesh_cpy(struct meshs *, struct meshs *);
long z_order(struct wls *, struct conf * conf,int);
long twopartdist(struct wls *, struct conf *conf,int);
long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *);
int longarray_cpy (long **target, long **source,long,long);
long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli);
/*=== This is a volume change step ===*/
/*calculate energy*/
edriftchanges=0.0;
wlener = 0.0;
/* mode 0 = total system energy — TODO confirm meaning of mode */
energy = calc_energy(0, intfce, 0, topo, conf, sim,0);
/* Choose an edge */
switch (sim->ptype) {
case 0:
/* Anisotropic pressure coupling */
/* pick one of the three box edges with equal probability */
rsave = ran2(&seed);
if (rsave < 1.0/3.0) {
side = &(conf->box.x);
area = conf->box.y * conf->box.z;
} else if (rsave < 2.0/3.0) {
side = &(conf->box.y);
area = conf->box.x * conf->box.z;
} else {
side = &(conf->box.z);
area = conf->box.x * conf->box.y;
}
old_side = *side;
/* symmetric random edge change in [-mx/2, mx/2) */
*side += sim->edge.mx * (ran2(&seed) - 0.5);
reject = 0;
if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */
for (wli=0;wli<sim->wl.wlmdim;wli++) {
switch (sim->wlm[wli]) {
case 1:
sim->wl.neworder[wli] = z_order(&sim->wl, conf,wli);
break;
/* box changed -> mesh must be rebuilt from scratch */
case 2:
mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh);
sim->wl.neworder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize, topo->npart, conf, sim) - sim->wl.minorder[wli]);
break;
case 4:
sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli);
break;
case 5:
radiusholemax_orig = sim->wl.radiusholemax;
longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm));
break;
case 6:
radiusholemax_orig = sim->wl.radiusholemax;
longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
break;
case 7:
sim->wl.partincontactold = sim->wl.partincontact;
sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli);
break;
default:
sim->wl.neworder[wli] = sim->wl.currorder[wli];
break;
}
/* order parameter left the histogram range -> reject unconditionally */
if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1;
}
if (!reject) {
/* WL bias: new minus current weight (2-D histogram flattened by length[0]) */
wlener = sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]];
energy += wlener;
}
}
if (!reject) { /* wang-landaou ok, try move - calculate energy */
/* P*A*d(side) work term minus N*ln(side'/side)/T volume-entropy term */
enermove = sim->press * area * (*side - old_side) - (double)topo->npart * log(*side/old_side) / sim->temper;
enermove += calc_energy(0, intfce, 0, topo, conf, sim,0);
}
/* also reject outright if the edge collapsed to non-positive length */
if ( reject || *side <= 0.0 || ( movetry(energy,enermove,sim->temper) ) ) { /* probability acceptance */
*side = old_side;
sim->edge.rej++;
wlreject(sim,radiusholemax_orig);
} else { /* move was accepted */
sim->edge.acc++;
wlaccept(sim->wlm[0],&sim->wl);
edriftchanges = enermove - energy + wlener;
}
break;
case 1:
/* Isotropic pressure coupling */
/* same increment applied to all three edges */
psch = sim->edge.mx * (ran2(&seed) - 0.5);
pvol = conf->box.x * conf->box.y * conf->box.z;
conf->box.x += psch;
conf->box.y += psch;
conf->box.z += psch;
pvoln = conf->box.x * conf->box.y * conf->box.z;
reject = 0;
if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */
for (wli=0;wli<sim->wl.wlmdim;wli++) {
switch (sim->wlm[wli]) {
case 1: sim->wl.neworder[wli] = z_order(&sim->wl,conf,wli);
break;
/* box changed -> mesh must be rebuilt from scratch */
case 2: mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh);
sim->wl.neworder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize, topo->npart, conf, sim) - sim->wl.minorder[wli]);
break;
case 4:
sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli);
break;
case 5:
radiusholemax_orig = sim->wl.radiusholemax;
longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm));
break;
case 6:
radiusholemax_orig = sim->wl.radiusholemax;
longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
break;
case 7:
sim->wl.partincontactold = sim->wl.partincontact;
sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli);
break;
default:
sim->wl.neworder[wli] = sim->wl.currorder[wli];
break;
}
/* order parameter left the histogram range -> reject unconditionally */
if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1;
}
if (!reject) {
wlener = sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]];
energy += wlener;
}
}
if (!reject) { /* wang-landaou ok, try move - calcualte energy */
/* P*dV work term minus N*ln(V'/V)/T volume-entropy term */
enermove = sim->press * (pvoln - pvol) - (double)topo->npart * log(pvoln/pvol) / sim->temper;
enermove += calc_energy(0, intfce, 0, topo, conf, sim,0);
}
if ( reject || movetry(energy,enermove,sim->temper) ) { /* probability acceptance */
/* undo the edge change */
conf->box.x -= psch;
conf->box.y -= psch;
conf->box.z -= psch;
sim->edge.rej++;
wlreject(sim,radiusholemax_orig);
} else { /* move was accepted */
sim->edge.acc++;
wlaccept(sim->wlm[0],&sim->wl);
edriftchanges = enermove - energy + wlener;
}
break;
case 2:
/* Isotropic pressure coupling in xy, z constant */
psch = sim->edge.mx * (ran2(&seed) - 0.5);
pvol = conf->box.x * conf->box.y;   /* here pvol/pvoln are xy AREAS, volume = area * box.z */
conf->box.x += psch;
conf->box.y += psch;
pvoln = conf->box.x * conf->box.y;
reject = 0;
if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */
for (wli=0;wli<sim->wl.wlmdim;wli++) {
switch (sim->wlm[wli]) {
/*no change in case 1, it does not change box.z*/
case 2: mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh);
sim->wl.neworder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize,topo->npart, conf, sim) - sim->wl.minorder[wli]);
break;
case 4:
sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli);
break;
case 5:
radiusholemax_orig = sim->wl.radiusholemax;
longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm));
break;
case 6:
radiusholemax_orig = sim->wl.radiusholemax;
longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
break;
case 7:
sim->wl.partincontactold = sim->wl.partincontact;
sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli);
break;
default:
sim->wl.neworder[wli] = sim->wl.currorder[wli];
break;
}
/* order parameter left the histogram range -> reject unconditionally */
if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1;
}
if (!reject) {
wlener = sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]];
energy += wlener;
}
}
if (!reject) { /* wang-landaou ok, try move - calculate energy */
/* P*box.z*d(area) = P*dV; ln term uses area ratio (== volume ratio, z fixed) */
enermove = sim->press * conf->box.z * (pvoln - pvol) - (double)topo->npart * log(pvoln/pvol) / sim->temper;
enermove += calc_energy(0, intfce, 0, topo, conf, sim,0);
}
if ( reject || movetry(energy,enermove,sim->temper) ) { /* probability acceptance */
conf->box.x -= psch;
conf->box.y -= psch;
sim->edge.rej++;
wlreject(sim,radiusholemax_orig);
} else { /* move was accepted */
sim->edge.acc++;
wlaccept(sim->wlm[0],&sim->wl);
edriftchanges = enermove - energy + wlener;
}
break;
case 3:
/* Isotropic pressure coupling in xy, z coupled to have fixed volume */
psch = sim->edge.mx * (ran2(&seed) - 0.5);
pvol = conf->box.x * conf->box.y * conf->box.z;
conf->box.x += psch;
conf->box.y += psch;
/* z chosen so total volume stays exactly pvol */
conf->box.z = pvol / conf->box.x / conf->box.y;
reject = 0;
if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */
for (wli=0;wli<sim->wl.wlmdim;wli++) {
switch (sim->wlm[wli]) {
case 1: sim->wl.neworder[wli] = z_order(&sim->wl, conf,wli);
break;
case 2: mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh);
sim->wl.neworder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize,topo->npart, conf, sim) - sim->wl.minorder[wli]);
break;
case 4:
sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli);
break;
case 5:
radiusholemax_orig = sim->wl.radiusholemax;
longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm));
break;
case 6:
radiusholemax_orig = sim->wl.radiusholemax;
longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
break;
case 7:
sim->wl.partincontactold = sim->wl.partincontact;
sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli);
break;
default:
sim->wl.neworder[wli] = sim->wl.currorder[wli];
break;
}
/* order parameter left the histogram range -> reject unconditionally */
if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1;
}
if (!reject) {
wlener = sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]];
energy += wlener;
}
}
if (!reject) { /* wang-landaou ok, try move - calculate energy */
/* volume unchanged -> no P*dV or ln(V) term, only the new energy */
enermove = calc_energy(0, intfce, 0, topo, conf, sim,0);
}
if ( reject || movetry(energy,enermove,sim->temper) ) { /* probability acceptance */
/* undo xy change and restore z from the conserved volume */
conf->box.x -= psch;
conf->box.y -= psch;
conf->box.z = pvol / conf->box.x / conf->box.y;
sim->edge.rej++;
wlreject(sim,radiusholemax_orig);
} else { /* move was accepted */
sim->edge.acc++;
wlaccept(sim->wlm[0],&sim->wl);
edriftchanges = enermove - energy + wlener;
}
break;
default:
fprintf (stderr, "ERROR: unknown type of pressure coupling %d",sim->ptype);
exit(1);
}
/*=== End volume change step ===*/
return edriftchanges;
}
/*..................... Switch replicas move in MPI ..............................*/
/*.................................................................................*/
double replicaexchangemove(struct topo * topo, struct sim * sim, struct conf * conf,
double (* intfce[MAXT][MAXT])(struct interacts *), long sweep )
{
double edriftchanges=0.0;
#ifdef MPI
double change, *recwlweights;
MPI_Status status;
int oddoreven,count,wli,sizewl = 0;
struct mpiexchangedata localmpi,receivedmpi;
BOOL reject;
long localwl,receivedwl;
double ran2(long *);
void gen_pairlist(struct topo * topo, struct sim * sim, struct conf * conf);
int longarray_cpy (long **target, long **source,long,long);
int mesh_init(struct meshs *, double, long, struct conf * conf, struct sim * sim);
double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *),
int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn);
void wlaccept(int, struct wls *);
//int mpi_newdatatypes();
//mpi_newdatatypes();
int i;
struct vector vec;
struct particles part;
struct mpiexchangedata exch;
MPI_Aint dispstart;
MPI_Datatype MPI_vector;
MPI_Datatype type[3] = {MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE};
int blocklen[3] = {1, 1, 1};
MPI_Aint disp[3];
MPI_Address( &vec, &dispstart);
MPI_Address( &(vec.x), &disp[0]);
MPI_Address( &(vec.y), &disp[1]);
MPI_Address( &(vec.z), &disp[2]);
for (i=0; i <3; i++) disp[i] -= dispstart;
MPI_Type_struct( 3, blocklen, disp, type, &MPI_vector);
MPI_Type_commit( &MPI_vector);
MPI_Datatype MPI_Particle;
MPI_Datatype type2[11] = {MPI_vector,MPI_vector,MPI_vector,MPI_vector,MPI_vector, MPI_LONG, MPI_LONG, MPI_INT,MPI_INT,MPI_DOUBLE, MPI_INT};
int blocklen2[11] = {1, 1, 2,4,2,1,1,1,1,1,1,};
MPI_Aint disp2[11];
MPI_Address( &part, &dispstart);
MPI_Address( &(part.pos), &disp2[0]);
MPI_Address( &(part.dir), &disp2[1]);
MPI_Address( &(part.patchdir), &disp2[2]);
MPI_Address( &(part.patchsides), &disp2[3]);
MPI_Address( &(part.chdir), &disp2[4]);
MPI_Address( &(part.chaint), &disp2[5]);
MPI_Address( &(part.chainn), &disp2[6]);
MPI_Address( &(part.type), &disp2[7]);
MPI_Address( &(part.switchtype), &disp2[8]);
MPI_Address( &(part.delta_mu), &disp2[9]);
MPI_Address( &(part.switched), &disp2[10]);
for (i=0; i <11; i++) disp2[i] -= dispstart;
MPI_Type_struct( 11, blocklen2, disp2, type2, &MPI_Particle);
MPI_Type_commit( &MPI_Particle);
if (sim->wl.length[1] > 0) {
sizewl = sim->wl.length[1] * sim->wl.length[0];
} else {
sizewl = sim->wl.length[0];
}
MPI_Datatype MPI_exchange;
MPI_Datatype type3[7] = {MPI_vector, MPI_DOUBLE, MPI_DOUBLE, MPI_INT, MPI_vector, MPI_LONG, MPI_LONG};
int blocklen3[7] = {1, 1, 1, 1, 1, 1, 2};
MPI_Aint disp3[7];
MPI_Address( &exch, &dispstart);
MPI_Address( &(exch.box), &disp3[0]);
MPI_Address( &(exch.energy), &disp3[1]);
MPI_Address( &(exch.volume), &disp3[2]);
MPI_Address( &(exch.accepted), &disp3[3]);
MPI_Address( &(exch.syscm), &disp3[4]);
MPI_Address( &(exch.radiusholemax), &disp3[5]);
MPI_Address( &(exch.wl_order), &disp3[6]);
for (i=0; i <7; i++) disp3[i] -= dispstart;
MPI_Type_struct(7, blocklen3, disp3, type3, &MPI_exchange);
MPI_Type_commit( &MPI_exchange);
/*=== This is an attempt to switch replicas ===*/
localmpi.box = conf->box;
localmpi.energy = calc_energy(0, intfce, 0, topo, conf, sim,0);
localmpi.volume = conf->box.x * conf->box.y * conf->box.z;
localmpi.accepted = 0;
localmpi.syscm = conf->syscm;
localmpi.radiusholemax = sim->wl.radiusholemax;
recwlweights = malloc( sizeof(double) * sizewl );
for (wli=0;wli<2;wli++) {
localmpi.wl_order[wli] = 0;
receivedmpi.wl_order[wli] = 0;
}
for (wli=0;wli<sim->wl.wlmdim;wli++) {
localmpi.wl_order[wli] = sim->wl.currorder[wli];
//fprintf(stdout,"wli %d %ld %ld\n\n", wli, localmpi.wl_order[wli], sim->wl.currorder[wli] );
}
if ( (sweep % (2*sim->nrepchange)) == 0)
/* exchange odd ones with even ones*/
oddoreven=1;
else
/* exchange even ones with odd ones*/
oddoreven=0;
if (sim->mpinprocs == 2)
oddoreven=1;
count = 1;
if (sim->mpirank % 2 == oddoreven) {
if (sim->mpirank > 0) {
MPI_Send(&localmpi, 1, MPI_exchange, sim->mpirank-1, count, MPI_COMM_WORLD);
MPI_Send(sim->wl.weights, sizewl, MPI_DOUBLE, sim->mpirank-1, count, MPI_COMM_WORLD);
//printf("send data: rank: %d energy: %f volume: %f pressure: %f \n",sim->mpirank,localmpi.energy,localmpi.volume,localmpi.pressure);
MPI_Recv(&receivedmpi, 1, MPI_exchange, sim->mpirank-1, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
/*decision of accepting or rejecting the exchange was done on other process
here we took received configuration (if move was accepted))*/
//printf("received data: rank: %d energy: %f volume: %f pressure: %f \n",sim->mpirank,receivedmpi.energy,receivedmpi.volume,receivedmpi.pressure);
if (receivedmpi.accepted == 1) {
sim->mpiexch.acc++;
struct particles *temppart;
temppart = malloc(topo->npart*sizeof(struct particles));
MPI_Recv(temppart, topo->npart, MPI_Particle, sim->mpirank-1, MPI_ANY_TAG, MPI_COMM_WORLD,&status);
/* printf("received data: rank: %d\n", sim->mpirank);
printf("part0 x %f y %f z %f\n",temppart[0].pos.x, temppart[0].pos.y, temppart[0].pos.z);
printf("part1 x %f y %f z %f\n",temppart[1].pos.x, temppart[1].pos.y, temppart[1].pos.z);
printf("part0 chaint %ld chainn %ld type %d\n",temppart[0].chaint,temppart[0].chainn,temppart[0].type);
*/
MPI_Send(conf->particle, topo->npart, MPI_Particle, sim->mpirank-1, count, MPI_COMM_WORLD);
/* printf("send data: rank: %d\n",sim->mpirank);
printf("part0 x %f y %f z %f\n",conf->particle[0].pos.x,conf->particle[0].pos.y,conf->particle[0].pos.z);
printf("part1 x %f y %f z %f\n",conf->particle[1].pos.x,conf->particle[1].pos.y,conf->particle[1].pos.z);
printf("part0 chaint %ld chainn %ld type %d\n",conf->particle[0].chaint,conf->particle[0].chainn,conf->particle[0].type);
*/
localmpi.accepted = receivedmpi.accepted;
conf->box = receivedmpi.box;
conf->syscm = receivedmpi.syscm;
memcpy(conf->particle,temppart,topo->npart*sizeof(struct particles));
edriftchanges = receivedmpi.energy - localmpi.energy;
edriftchanges += sim->press * (receivedmpi.volume - localmpi.volume) - (double)topo->npart * log(receivedmpi.volume / localmpi.volume) / sim->temper;
if ( sim->wlm[0] >0 ) {
for (wli=0;wli<sim->wl.wlmdim;wli++) {
sim->wl.neworder[wli] = receivedmpi.wl_order[wli];
}
wlaccept(sim->wlm[0],&sim->wl);
//exchange wl data mesh size and radius hole s
for (wli=0;wli<sim->wl.wlmdim;wli++) {
switch (sim->wlm[wli]) {
case 2:
/*it is complicated to send because of different sizes
we would have to send sizes first and realocate corrrect mesh size and then send data
it is better to recalculate (a bit slower though)*/
mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize, topo->npart, conf, sim);
break;
case 5:
//radiushole_all(topo,conf,sim,wli,&(conf->syscm));
sim->wl.radiusholeold = (long*) realloc(sim->wl.radiusholeold,sizeof(long)*receivedmpi.radiusholemax);
MPI_Recv(sim->wl.radiusholeold,receivedmpi.radiusholemax, MPI_LONG, sim->mpirank-1, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
MPI_Send(sim->wl.radiushole,sim->wl.radiusholemax, MPI_LONG, sim->mpirank-1, count, MPI_COMM_WORLD);
longarray_cpy(&sim->wl.radiushole,&sim->wl.radiusholeold,sim->wl.radiusholemax,receivedmpi.radiusholemax);
sim->wl.radiusholemax=receivedmpi.radiusholemax;
break;
case 6:
//radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
sim->wl.radiusholeold = (long*) realloc(sim->wl.radiusholeold,sizeof(long)*receivedmpi.radiusholemax);
MPI_Recv(sim->wl.radiusholeold,receivedmpi.radiusholemax, MPI_LONG, sim->mpirank-1, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
MPI_Send(sim->wl.radiushole,sim->wl.radiusholemax, MPI_LONG, sim->mpirank-1, count, MPI_COMM_WORLD);
longarray_cpy(&sim->wl.radiushole,&sim->wl.radiusholeold,sim->wl.radiusholemax,receivedmpi.radiusholemax);
sim->wl.radiusholemax=receivedmpi.radiusholemax;
break;
case 7:
//contparticles_all(topo,conf,sim,wli);
MPI_Recv(&(sim->wl.partincontactold),1, MPI_LONG, sim->mpirank-1, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
MPI_Send(&(sim->wl.partincontact),1, MPI_LONG, sim->mpirank-1, count, MPI_COMM_WORLD);
sim->wl.partincontact=sim->wl.partincontactold;
break;
}
}
}
free(temppart);
} else {
sim->mpiexch.rej++;
if ( sim->wlm[0] > 0 ) {
sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]] -= sim->wl.alpha;
sim->wl.hist[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]++;
}
}
}
} else {
if (sim->mpirank+1 < sim->mpinprocs) {
/*there is above process*/
MPI_Recv(&receivedmpi, 1, MPI_exchange, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
MPI_Recv(recwlweights, sizewl, MPI_DOUBLE, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
/*we got new configuration*/
//printf("received data: rank: %d energy: %f volume: %f \n",sim->mpirank,receivedmpi.energy,receivedmpi.volume);
/*evaluate if accepte or reject the configuration*/
/*acc = exp( (1/sim->temper - 1/(sim->temper + sim.dtemp)) * (E_here - E_received) +
(sim->press /sim->temper - pressure_received /(sim.temper + sim->dtemp)) * (V_here - V_received)
if pressure the same it it simplier*/
reject = FALSE;
change = (1/sim->temper - 1/(sim->temper + sim->dtemp)) * (localmpi.energy - receivedmpi.energy);
//printf("acceptance decision: change: %f localE: %f receivedE: %f tempf: %f \n",change,localmpi.energy,receivedmpi.energy,(1/sim->temper - 1/(sim->temper + sim->dtemp)));
change += (sim->press/sim->temper - (sim->press + sim->dpress)/(sim->temper + sim->dtemp)) * (localmpi.volume - receivedmpi.volume);
//printf("pressf: %f \n",(sim->press/sim->temper - (sim->press + sim->dpress)/(sim->temper + sim->dtemp)));
if (sim->wlm[0] > 0) {
localwl = sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0];
receivedwl = receivedmpi.wl_order[0] + receivedmpi.wl_order[1]*sim->wl.length[0];
//fprintf(stdout,"decide wl %ld %ld %ld energychange: %f \n", receivedmpi.wl_order[0], receivedmpi.wl_order[1], receivedwl, change );
//fprintf(stdout,"local weights %ld %f %ld %f \n",localwl,sim->wl.weights[localwl],receivedwl,sim->wl.weights[receivedwl]);
change += (-sim->wl.weights[localwl] + sim->wl.weights[receivedwl] )/sim->temper + ( -recwlweights[receivedwl] + recwlweights[localwl])/(sim->temper + sim->dtemp) ;
//fprintf(stdout,"wlchange %f \n\n",change);
}
if ( (!(reject)) && ( (change > 0) || (ran2(&seed) < exp(change)) ) ) {
/* Exchange ACCEPTED send local stuff*/
//printf("exchange accepted \n");
sim->mpiexch.acc++;
localmpi.accepted = 1;
conf->box = receivedmpi.box;
conf->syscm = receivedmpi.syscm;
edriftchanges = receivedmpi.energy - localmpi.energy;
edriftchanges += sim->press * (receivedmpi.volume - localmpi.volume) - (double)topo->npart * log(receivedmpi.volume / localmpi.volume) / sim->temper;
//printf("edrift %f\n",edriftchanges);
if ( sim->wlm[0] > 0 ) {
for (wli=0;wli<sim->wl.wlmdim;wli++) {
sim->wl.neworder[wli] = receivedmpi.wl_order[wli];
}
wlaccept(sim->wlm[0],&sim->wl);
}
MPI_Send(&localmpi, 1, MPI_exchange, sim->mpirank+1, count, MPI_COMM_WORLD);
//printf("send data: rank: %d energy: %f volume: %f pressure: %f \n",sim->mpirank,localmpi.energy,localmpi.volume,localmpi.pressure);
/*send and receive configuration*/
MPI_Send(conf->particle, topo->npart, MPI_Particle, sim->mpirank+1, count, MPI_COMM_WORLD);
/* printf("send data: rank: %d\n",sim->mpirank);
printf("part0 x %f y %f z %f\n",conf->particle[0].pos.x,conf->particle[0].pos.y,conf->particle[0].pos.z);
printf("part1 x %f y %f z %f\n",conf->particle[1].pos.x,conf->particle[1].pos.y,conf->particle[1].pos.z);
printf("part0 chaint %ld chainn %ld type %d\n",conf->particle[0].chaint,conf->particle[0].chainn,conf->particle[0].type);
*/
MPI_Recv(conf->particle, topo->npart, MPI_Particle, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD,&status);
/* printf("recieved data: rank: %d\n",sim->mpirank);
printf("part0 x %f y %f z %f\n",conf->particle[0].pos.x,conf->particle[0].pos.y,conf->particle[0].pos.z);
printf("part1 x %f y %f z %f\n",conf->particle[1].pos.x,conf->particle[1].pos.y,conf->particle[1].pos.z);
printf("part0 chaint %ld chainn %ld type %d\n",conf->particle[0].chaint,conf->particle[0].chainn,conf->particle[0].type);
*/
if ( sim->wlm[0] > 0 ) {
//exchange wl data mesh size and radius hole s
for (wli=0;wli<sim->wl.wlmdim;wli++) {
switch (sim->wlm[wli]) {
case 2:
/*it is complicated to send because of different sizes
we would have to send sizes first and reallocate the correct mesh size and then send data
it is better to recalculate (a bit slower though)*/
mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize, topo->npart, conf, sim);
break;
case 5:
//radiushole_all(topo,conf,sim,wli,&(conf->syscm));
sim->wl.radiusholeold = (long*) realloc(sim->wl.radiusholeold,sizeof(long)*receivedmpi.radiusholemax);
MPI_Send(sim->wl.radiushole,sim->wl.radiusholemax, MPI_LONG, sim->mpirank+1, count, MPI_COMM_WORLD);
MPI_Recv(sim->wl.radiusholeold,receivedmpi.radiusholemax, MPI_LONG, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
longarray_cpy(&sim->wl.radiushole,&sim->wl.radiusholeold,sim->wl.radiusholemax,receivedmpi.radiusholemax);
sim->wl.radiusholemax=receivedmpi.radiusholemax;
break;
case 6:
//radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
sim->wl.radiusholeold = (long*) realloc(sim->wl.radiusholeold,sizeof(long)*receivedmpi.radiusholemax);
MPI_Send(sim->wl.radiushole,sim->wl.radiusholemax, MPI_LONG, sim->mpirank+1, count, MPI_COMM_WORLD);
MPI_Recv(sim->wl.radiusholeold,receivedmpi.radiusholemax, MPI_LONG, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
longarray_cpy(&sim->wl.radiushole,&sim->wl.radiusholeold,sim->wl.radiusholemax,receivedmpi.radiusholemax);
sim->wl.radiusholemax=receivedmpi.radiusholemax;
break;
case 7:
//contparticles_all(topo,conf,sim,wli);
MPI_Send(&(sim->wl.partincontact),1, MPI_LONG, sim->mpirank+1, count, MPI_COMM_WORLD);
MPI_Recv(&(sim->wl.partincontact),1, MPI_LONG, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
break;
}
}
}
} else {
/*if exchange rejected send back info */
//printf("exchange rejected\n");
sim->mpiexch.rej++;
MPI_Send(&localmpi, 1, MPI_exchange, sim->mpirank+1, count, MPI_COMM_WORLD);
if ( sim->wlm[0] > 0 ) {
sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]] -= sim->wl.alpha;
sim->wl.hist[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]++;
}
}
}
}
if ( (localmpi.accepted) && (sim->pairlist_update) )
gen_pairlist(topo, sim, conf);
MPI_Type_free(&MPI_exchange);
MPI_Type_free(&MPI_Particle);
MPI_Type_free(&MPI_vector);
free(recwlweights);
#endif
return edriftchanges;
}
/*int mpi_newdatatypes()
{
int i;
struct vector vec;
struct particles part;
struct mpiexchangedata exch;
MPI_Aint dispstart;
MPI_Datatype MPI_vector;
MPI_Datatype type[3] = {MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE};
int blocklen[3] = {1, 1, 1};
MPI_Aint disp[3];
MPI_Address( &vec, &dispstart);
MPI_Address( &(vec.x), &disp[0]);
MPI_Address( &(vec.y), &disp[1]);
MPI_Address( &(vec.z), &disp[2]);
for (i=0; i <3; i++) disp[i] -= dispstart;
MPI_Type_struct( 3, blocklen, disp, type, &MPI_vector);
MPI_Type_commit( &MPI_vector);
MPI_Datatype MPI_Particle;
MPI_Datatype type2[11] = {MPI_vector,MPI_vector,MPI_vector,MPI_vector,MPI_vector, MPI_LONG, MPI_LONG, MPI_INT,MPI_INT,MPI_DOUBLE, MPI_INT};
int blocklen2[11] = {1, 1, 2,4,2,1,1,1,1,1,1,};
MPI_Aint disp2[11];
MPI_Address( &part, &dispstart);
MPI_Address( &(part.pos), &disp2[0]);
MPI_Address( &(part.dir), &disp2[1]);
MPI_Address( &(part.patchdir), &disp2[2]);
MPI_Address( &(part.patchsides), &disp2[3]);
MPI_Address( &(part.chdir), &disp2[4]);
MPI_Address( &(part.chaint), &disp2[5]);
MPI_Address( &(part.chainn), &disp2[6]);
MPI_Address( &(part.type), &disp2[7]);
MPI_Address( &(part.switchtype), &disp2[8]);
MPI_Address( &(part.delta_mu), &disp2[9]);
MPI_Address( &(part.switched), &disp2[10]);
for (i=0; i <11; i++) disp2[i] -= dispstart;
MPI_Type_struct( 11, blocklen2, disp2, type2, &MPI_Particle);
MPI_Type_commit( &MPI_Particle);
MPI_Datatype MPI_exchange;
MPI_Datatype type3[5] = {MPI_vector, MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE, MPI_INT};
int blocklen3[5] = {1, 1, 1, 1, 1};
MPI_Aint disp3[5];
MPI_Address( &exch, &dispstart);
MPI_Address( &(exch.box), &disp3[0]);
MPI_Address( &(exch.energy), &disp3[1]);
MPI_Address( &(exch.volume), &disp3[2]);
MPI_Address( &(exch.pressure), &disp3[3]);
MPI_Address( &(exch.accepted), &disp3[4]);
for (i=0; i <5; i++) disp3[i] -= dispstart;
MPI_Type_struct( 5, blocklen3, disp3, type3, &MPI_exchange);
MPI_Type_commit( &MPI_exchange);
return 0;
}*/
/*................................................................................*/
/*................................................................................*/
/*....................END OF MOVES, INTERACTION FUNCTIONS FOLLOW..................*/
/*................................................................................*/
/*..............................................................................*/
/*
Determines total energy of two spherocylinders type PSC PSC
*/
/*
 * Total pair energy (repulsive + attractive) of two patchy spherocylinders
 * of type PSC.
 *
 * For chiral variants (CHPSC/TCHPSC) the particle axis `dir` is temporarily
 * replaced by the chiral patch direction `chdir[]` (and the closest distance
 * recomputed) before each attraction term is evaluated.  For two-patch
 * variants (TPSC/TCHPSC) the attraction of every relevant patch pairing
 * (0-0, 1-0, 1-1, 0-1) is accumulated.  The original particle axes are
 * restored before returning, so the particles are left unmodified.
 *
 * Returns repulsive + attractive energy for the pair described by `interact`.
 */
double e_psc_psc(struct interacts * interact)
{
double atrenergy, repenergy;
/* local prototypes of project-wide helpers defined elsewhere in this file */
void closestdist(struct interacts *);
double erepulsive(struct interacts *);
double eattractive_psc_psc(struct interacts *,int,int);
/* repulsion is always evaluated at the true closest distance */
closestdist(interact);
repenergy = erepulsive(interact);
/* beyond the cutoff (or with epsilon == 0) there is no attraction at all */
if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) )
atrenergy = 0.0;
else {
BOOL firstCH=FALSE, secondCH=FALSE;
/* remember the original axes so they can be restored at the end */
struct vector olddir1 = interact->part1->dir;
struct vector olddir2 = interact->part2->dir;
if ( (interact->param->geotype[0] == CHPSC) || (interact->param->geotype[0] == TCHPSC) )
firstCH = TRUE;
if ( (interact->param->geotype[1] == CHPSC) || (interact->param->geotype[1] == TCHPSC) )
secondCH = TRUE;
/* chiral particles: evaluate patch 0 along its chiral direction */
if (firstCH)
interact->part1->dir = interact->part1->chdir[0];
if (secondCH)
interact->part2->dir = interact->part2->chdir[0];
if ( (firstCH) || (secondCH) ) {
closestdist(interact);
}
/* patch 0 of particle 1 with patch 0 of particle 2 */
atrenergy = eattractive_psc_psc(interact,0,0);
/*addition of interaction of second patches*/
if ( (interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) ||
(interact->param->geotype[1] == TPSC) ||(interact->param->geotype[1] == TCHPSC) ) {
BOOL firstT=FALSE, secondT=FALSE;
if ( (interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) )
firstT = TRUE;
if ( (interact->param->geotype[1] == TPSC) ||(interact->param->geotype[1] == TCHPSC) )
secondT = TRUE;
/* patch 1 of particle 1 with patch 0 of particle 2; the CH cases set the
   chiral axes for this specific pairing before recomputing the distance */
if (firstT) {
if (firstCH && secondCH) {
interact->part1->dir = interact->part1->chdir[1];
interact->part2->dir = interact->part2->chdir[0];
closestdist(interact);
}
if (firstCH && !secondCH) {
interact->part1->dir = interact->part1->chdir[1];
closestdist(interact);
}
if (!firstCH && secondCH) {
interact->part2->dir = interact->part2->chdir[0];
closestdist(interact);
}
atrenergy += eattractive_psc_psc(interact,1,0);
}
/* patch 1 with patch 1 (both particles are two-patch) */
if ( (firstT) && (secondT) ) {
if (firstCH && secondCH) {
interact->part1->dir = interact->part1->chdir[1];
interact->part2->dir = interact->part2->chdir[1];
closestdist(interact);
}
if (firstCH && !secondCH) {
interact->part1->dir = interact->part1->chdir[1];
closestdist(interact);
}
if (!firstCH && secondCH) {
interact->part2->dir = interact->part2->chdir[1];
closestdist(interact);
}
atrenergy += eattractive_psc_psc(interact,1,1);
}
/* patch 0 of particle 1 with patch 1 of particle 2; note the non-chiral
   axis is explicitly reset to the saved original here */
if (secondT) {
if (firstCH && secondCH) {
interact->part1->dir = interact->part1->chdir[0];
interact->part2->dir = interact->part2->chdir[1];
closestdist(interact);
}
if (firstCH && !secondCH) {
interact->part1->dir = interact->part1->chdir[0];
interact->part2->dir = olddir2;
closestdist(interact);
}
if (!firstCH && secondCH) {
interact->part1->dir = olddir1;
interact->part2->dir = interact->part2->chdir[1];
closestdist(interact);
}
atrenergy += eattractive_psc_psc(interact,0,1);
}
}
/* restore the original axes of any chiral particle */
if (firstCH)
interact->part1->dir = olddir1;
if (secondCH)
interact->part2->dir = olddir2;
}
return repenergy+atrenergy;
}
/*
Determines attractive energy of two spherocylinders type PSC PSC
*/
/*
 * Attractive energy between patch `patchnum1` of spherocylinder 1 and patch
 * `patchnum2` of spherocylinder 2 (both of type PSC).
 *
 * Algorithm (steps numbered in the body):
 *  1,2 - find the portion of each rod lying inside the other rod's patch
 *        wedge within the cutoff (psc_intersect); if either overlap has
 *        fewer than two intersection points the attraction is zero,
 *  3   - length scaling f0 from the average overlap length,
 *  4   - distance-dependent well from the closest approach of the two
 *        overlap segments (flat within pdis, cosine-squared switch beyond),
 *  5,6 - angular scalings f1, f2 from the orientation of each patch
 *        relative to the inter-segment vector,
 *  7   - product of all factors.
 *
 * Returns the (non-positive) attractive energy contribution.
 */
double eattractive_psc_psc(struct interacts * interact,int patchnum1,int patchnum2)
{
int i, intrs;
double rcut, atrenergy, ndist;
double v1, v2, f0, f1, f2, T1, T2, S1, S2, a;
double intersections[5];
struct vector vec1, vec2, vec_intrs, vec_mindist;
/* local prototypes of vector helpers defined elsewhere in this file */
struct vector vec_sub(struct vector, struct vector);
struct vector vec_sum(struct vector, struct vector);
struct vector vec_create(double, double, double);
struct vector vec_scale(struct vector, double);
struct vector vec_perpproject(struct vector *, struct vector*);
struct vector mindist_segments(struct vector, double, struct vector, double, struct vector);
void normalise(struct vector *);
int psc_intersect(struct particles *, struct particles *,
double, double, struct vector, double *,double, struct ia_param *, int which, int patchnum);
double fanglscale(double, struct ia_param *, int which);
rcut = interact->param->rcut;
//interact->halfl = interact->param->half_len[0];
//DEBUG_SIM("halfl = %lf", interact->halfl);
for(i=0;i<5;i++)
intersections[i]=0;
//cospatch = param.pcanglsw;
//cospatchinr = param.pcangl;
/*1- do intersections of spherocylinder2 with patch of spherocylinder1 at
cut distance C*/
//DEBUG_SIM("first intersection");
intrs=psc_intersect(interact->part1,interact->part2,interact->param->half_len[0],interact->param->half_len[1],interact->r_cm, intersections, rcut, interact->param,0, patchnum1);
if (intrs <2){
//DEBUG_SIM("No intersection :(");
return 0.0; /*sc is all outside patch, attractive energy is 0*/
}
T1=intersections[0]; /*points on sc2*/
T2=intersections[1];
/*2- now do the same the opposite way: psc1 in patch of psc2*/
for(i=0;i<5;i++)
intersections[i]=0;
//DEBUG_SIM("get vector");
vec1=vec_scale(interact->r_cm,-1.0);
//DEBUG_SIM("second intersection");
intrs=psc_intersect(interact->part2,interact->part1,interact->param->half_len[1],interact->param->half_len[0],vec1, intersections, rcut, interact->param,1, patchnum2);
if (intrs <2)
return 0.0; /*sc is all outside patch, attractive energy is 0*/
S1=intersections[0]; /*points on sc1*/
S2=intersections[1];
/*3- scaling function1: dependence on the length of intersections*/
v1=fabs(S1-S2);
v2=fabs(T1-T2);
f0=0.5*(v1+v2);
/*4a- with the two intersection pieces calculate the vector between their CM
-this is for angular orientation*/
vec1=vec_scale(interact->part1->dir,(S1+S2)*0.5);
vec2=vec_scale(interact->part2->dir,(T1+T2)*0.5);
vec_intrs.x=vec2.x-vec1.x-interact->r_cm.x;
vec_intrs.y=vec2.y-vec1.y-interact->r_cm.y;
vec_intrs.z=vec2.z-vec1.z-interact->r_cm.z;
/*vec_intrs should be from sc1 to sc2*/
//fprintf (stderr, "segments_CM: %.8f %.8f %.8f \n",vec_intrs.x,vec_intrs.y,vec_intrs.z);
/*4b - calculate closest distance attractive energy from it*/
vec_mindist = mindist_segments(interact->part1->dir,v1,interact->part2->dir,v2,vec_intrs);
//fprintf (stderr, "segments closest dist: %.8f %.8f %.8f \n",vec_mindist.x,vec_mindist.y,vec_mindist.z);
ndist=sqrt(DOT(vec_mindist,vec_mindist));
//dist=DOT(vec_intrs,vec_intrs);
/* square-well within pdis, cos^2 switching over pswitch beyond it */
if (ndist < interact->param->pdis)
atrenergy = -interact->param->epsilon;
//atrenergy = -1.0;
else {
atrenergy = cos(PIH*(ndist-interact->param->pdis)/interact->param->pswitch);
atrenergy *= -atrenergy*interact->param->epsilon ;
}
/*5- scaling function2: angular dependence of patch1*/
vec1=vec_scale(vec_intrs,1.0);
//vec1=vec_scale(vec_mindist,-1.0);
vec1=vec_perpproject(&vec1, &interact->part1->dir);
normalise(&vec1);
a = DOT(vec1,interact->part1->patchdir[patchnum1]);
f1 = fanglscale(a,interact->param, 0+2*patchnum1);
/*6- scaling function3: angular dependence of patch2*/
vec1=vec_scale(vec_intrs,-1.0);
//vec1=vec_scale(vec_mindist,1.0);
vec1=vec_perpproject(&vec1, &interact->part2->dir);
normalise(&vec1);
a = DOT(vec1,interact->part2->patchdir[patchnum2]);
f2 = fanglscale(a,interact->param, 1+2*patchnum2);
//printf("v1: %f v2: %f f0: %f f1: %f f2: %f ener: %f\n",v1,v2,f0,f1,f2,atrenergy);
/*7- put it all together*/
atrenergy *=f0*f1*f2;
//if (atrenergy < 0) printf ("atraction %f\n",atrenergy);
// fprintf (stderr, "attraction %.8f \n",atrenergy);
// exit(1);
return atrenergy;
}
/* a = r_ij * n_i */
/*
 * Angular scaling factor for a patch: 0 outside the switching cone,
 * 1 inside the full-strength cone, and a linear ramp in between.
 * `a` is the cosine of the angle (a = r_ij * n_i); `which` selects the
 * patch entry in the interaction parameter tables.
 */
double fanglscale(double a, struct ia_param * param, int which)
{
// TODO for different types
const double sw = param->pcanglsw[which];
const double in = param->pcangl[which];
if (a <= sw)
return 0.0;
if (a >= in)
return 1.0;
/* linear interpolation centred between the two cone cosines */
return 0.5 - ((sw + in)*0.5 - a)/(in - sw);
}
/*CPSC..............................................................................*/
/*
Determines total energy of two spherocylinders of type 3 -cylindrical psc -CPSC
*/
/*
 * Total pair energy (repulsive + attractive) of two cylindrical patchy
 * spherocylinders (CPSC).  Mirrors e_psc_psc: chiral variants
 * (CHCPSC/TCHCPSC) temporarily replace the particle axis with chdir[] and
 * recompute the closest distance; two-patch variants (TCPSC/TCHCPSC) add
 * the attraction of the extra patch pairings.  Original axes are restored
 * before returning.
 */
double e_cpsc_cpsc(struct interacts * interact)
{
double atrenergy, repenergy;
/* local prototypes of project-wide helpers defined elsewhere in this file */
void closestdist(struct interacts *);
double erepulsive(struct interacts *);
double eattractive_cpsc_cpsc(struct interacts *,int,int);
//DEBUG_SIM("do energy 33") ;
closestdist(interact);
repenergy = erepulsive(interact);
//DEBUG_SIM("got the rep. energy");
/* no attraction beyond the cutoff or with zero epsilon */
if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) )
atrenergy = 0.0;
else {
BOOL firstCH=FALSE, secondCH=FALSE;
/* remember original axes for restoration at the end */
struct vector olddir1 = interact->part1->dir;
struct vector olddir2 = interact->part2->dir;
if ( (interact->param->geotype[0] == CHCPSC)||(interact->param->geotype[0] == TCHCPSC) )
firstCH = TRUE;
if ( (interact->param->geotype[1] == CHCPSC)||(interact->param->geotype[1] == TCHCPSC) )
secondCH = TRUE;
if(firstCH)
interact->part1->dir = interact->part1->chdir[0];
if(secondCH)
interact->part2->dir = interact->part2->chdir[0];
if ((firstCH) || (secondCH) ) {
closestdist(interact);
}
/* patch 0 - patch 0 attraction */
atrenergy = eattractive_cpsc_cpsc(interact,0,0);
/*addition of interaction of second patches*/
if ( (interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC) ||
(interact->param->geotype[1] == TCPSC) ||(interact->param->geotype[1] == TCHCPSC) ) {
BOOL firstT=FALSE, secondT=FALSE;
if ( (interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC) )
firstT = TRUE;
if ( (interact->param->geotype[1] == TCPSC) ||(interact->param->geotype[1] == TCHCPSC) )
secondT = TRUE;
/* patch 1 of particle 1 with patch 0 of particle 2 */
if (firstT) {
if (firstCH) {
interact->part1->dir = interact->part1->chdir[1];
closestdist(interact);
}
atrenergy += eattractive_cpsc_cpsc(interact,1,0);
}
/* patch 1 with patch 1 */
if ( (firstT) && (secondT) ) {
if (secondCH) {
interact->part2->dir = interact->part2->chdir[1];
closestdist(interact);
}
atrenergy += eattractive_cpsc_cpsc(interact,1,1);
}
/* patch 0 of particle 1 with patch 1 of particle 2.
   NOTE(review): unlike e_psc_psc, this branch never sets
   part2->dir = chdir[1] itself - for secondT && !firstT && secondCH the
   axis left over from the patch-0 evaluation is reused while patchnum2
   is 1.  Looks intentional only for the firstT case; verify against the
   PSC variant. */
if (secondT) {
if (firstT && firstCH ) {
interact->part1->dir = interact->part1->chdir[0];
closestdist(interact);
}
atrenergy += eattractive_cpsc_cpsc(interact,0,1);
}
}
/* restore original axes of chiral particles */
if (firstCH)
interact->part1->dir = olddir1;
if (secondCH)
interact->part2->dir = olddir2;
}
return repenergy+atrenergy;
}
/*
Determines attractive energy of two spherocylinders of type 3 -cylindrical psc -CPSC
*/
/*
 * Attractive energy between patch `patchnum1` of spherocylinder 1 and patch
 * `patchnum2` of spherocylinder 2, both of cylindrical type CPSC.  Same
 * seven-step scheme as eattractive_psc_psc, but the patch-overlap segments
 * are found with cpsc_intersect (cylindrical patch geometry).
 * Returns the (non-positive) attractive energy contribution.
 */
double eattractive_cpsc_cpsc(struct interacts * interact, int patchnum1, int patchnum2)
{
int i, intrs;
double rcut, atrenergy, v1, v2, f0, f1, f2, T1, T2, S1, S2, a, ndist;
double intersections[5];
struct vector vec1, vec2, vec_intrs, vec_mindist;
/* local prototypes of vector helpers defined elsewhere in this file */
struct vector vec_sub(struct vector, struct vector);
struct vector vec_sum(struct vector, struct vector);
struct vector vec_create(double, double, double);
struct vector vec_scale(struct vector, double);
struct vector vec_perpproject(struct vector*, struct vector*);
struct vector mindist_segments(struct vector, double, struct vector, double, struct vector);
void normalise(struct vector *);
int cpsc_intersect(struct particles *, struct particles *,
double, double, struct vector, double *,double, struct ia_param *, int which, int patchnum);
double fanglscale(double, struct ia_param *, int which);
rcut = interact->param->rcut;
// interact->halfl = interact->param->half_len[0];
for(i=0;i<5;i++)
intersections[i]=0;
/*1- do intersections of spherocylinder2 with patch of spherocylinder1 at
cut distance C*/
intrs=cpsc_intersect(interact->part1,interact->part2,interact->param->half_len[0],interact->param->half_len[1],interact->r_cm, intersections, rcut, interact->param,0, patchnum1);
if (intrs <2)
return 0.0; /*sc is all outside patch, attractive energy is 0*/
T1=intersections[0]; /*points on sc2*/
T2=intersections[1];
/*2- now do the same the opposite way: psc1 in patch of psc2*/
for(i=0;i<5;i++)
intersections[i]=0;
vec1=vec_scale(interact->r_cm,-1.0);
intrs=cpsc_intersect(interact->part2,interact->part1,interact->param->half_len[1],interact->param->half_len[0],vec1, intersections, rcut, interact->param,1, patchnum2);
if (intrs <2)
return 0.0; /*sc is all outside patch, attractive energy is 0*/
S1=intersections[0]; /*points on sc1*/
S2=intersections[1];
/*3- scaling function1: dependence on the length of intersections*/
v1=fabs(S1-S2);
v2=fabs(T1-T2);
f0=0.5*(v1+v2);
/*4a- with the two intersection pieces calculate the vector between their CM
-this is for angular orientation*/
vec1=vec_scale(interact->part1->dir,(S1+S2)*0.5);
vec2=vec_scale(interact->part2->dir,(T1+T2)*0.5);
vec_intrs.x=vec2.x-vec1.x-interact->r_cm.x;
vec_intrs.y=vec2.y-vec1.y-interact->r_cm.y;
vec_intrs.z=vec2.z-vec1.z-interact->r_cm.z;
/*vec_intrs should be from sc1 to sc2*/
// fprintf (stderr, "segments_CM: %.8f %.8f %.8f \n",vec_intrs.x,vec_intrs.y,vec_intrs.z);
/*4b - calculate closest distance attractive energy from it*/
vec_mindist = mindist_segments(interact->part1->dir,v1,interact->part2->dir,v2,vec_intrs);
// fprintf (stderr, "segments closest dist: %.8f %.8f %.8f \n",vec_mindist.x,vec_mindist.y,vec_mindist.z);
ndist=sqrt(DOT(vec_mindist,vec_mindist));
//dist=DOT(vec_intrs,vec_intrs);
/* square-well within pdis, cos^2 switching over pswitch beyond it */
if (ndist < interact->param->pdis)
atrenergy = -interact->param->epsilon;
else {
atrenergy = cos(PIH*(ndist-interact->param->pdis)/interact->param->pswitch);
atrenergy *= -atrenergy*interact->param->epsilon ;
}
/*5- scaling function2: angular dependence of patch1*/
vec1=vec_scale(vec_intrs,1.0);
//vec1=vec_scale(vec_mindist,-1.0);
vec1=vec_perpproject(&vec1, &interact->part1->dir);
normalise(&vec1);
a = DOT(vec1,interact->part1->patchdir[patchnum1]);
f1 = fanglscale(a,interact->param, 0+2*patchnum1);
/*6- scaling function3: angular dependence of patch2*/
vec1=vec_scale(vec_intrs,-1.0);
//vec1=vec_scale(vec_mindist,1.0);
vec1=vec_perpproject(&vec1, &interact->part2->dir);
normalise(&vec1);
a = DOT(vec1,interact->part2->patchdir[patchnum2]);
f2 = fanglscale(a,interact->param, 1+2*patchnum2);
/*7- put it all together*/
atrenergy *=f0*f1*f2;
//if (atrenergy < 0) printf ("atraction %f\n",atrenergy);
// fprintf (stderr, "attraction %.8f \n",atrenergy);
// exit(1);
return atrenergy;
}
/*..............................................................................*/
/*
Determines total energy of spherocylinders type PSC and CPSC
*/
/*
 * Total pair energy (repulsive + attractive) of a mixed pair: one PSC and
 * one CPSC spherocylinder.  Chiral variants (CHPSC/CHCPSC/TCHPSC/TCHCPSC)
 * temporarily replace the particle axis with the chiral patch direction
 * chdir[] (recomputing the closest distance); two-patch variants add the
 * attraction of every relevant patch pairing, as in e_psc_psc.  Original
 * axes are restored before returning.
 */
double e_psc_cpsc(struct interacts * interact)
{
double atrenergy, repenergy;
/* local prototypes of project-wide helpers defined elsewhere in this file */
void closestdist(struct interacts *);
double erepulsive(struct interacts *);
double eattractive_psc_cpsc(struct interacts *,int,int);
//DEBUG_SIM("do energy 23") ;
closestdist(interact);
repenergy = erepulsive(interact);
//DEBUG_SIM("got the rep. energy");
/* no attraction beyond the cutoff or with zero epsilon */
if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) )
atrenergy = 0.0;
else {
BOOL firstCH=FALSE, secondCH=FALSE;
/* remember original axes for restoration at the end */
struct vector olddir1 = interact->part1->dir;
struct vector olddir2 = interact->part2->dir;
if ((interact->param->geotype[0] == CHPSC) || (interact->param->geotype[0] == CHCPSC)||
(interact->param->geotype[0] == TCHPSC) || (interact->param->geotype[0] == TCHCPSC) )
firstCH = TRUE;
if ((interact->param->geotype[1] == CHPSC) || (interact->param->geotype[1] == CHCPSC)||
(interact->param->geotype[1] == TCHPSC) || (interact->param->geotype[1] == TCHCPSC) )
secondCH = TRUE;
if(firstCH)
interact->part1->dir = interact->part1->chdir[0];
if(secondCH)
interact->part2->dir = interact->part2->chdir[0];
if ((firstCH) || (secondCH) ) {
closestdist(interact);
}
/* patch 0 - patch 0 attraction */
atrenergy = eattractive_psc_cpsc(interact,0,0);
/*addition of interaction of second patches*/
if ( (interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC) ||
(interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) ||
(interact->param->geotype[1] == TCPSC) ||(interact->param->geotype[1] == TCHCPSC) ||
(interact->param->geotype[1] == TPSC) || (interact->param->geotype[1] == TCHPSC) ) {
BOOL firstT=FALSE, secondT=FALSE;
if ( (interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC) ||
(interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) )
firstT = TRUE;
if ( (interact->param->geotype[1] == TCPSC) || (interact->param->geotype[1] == TCHCPSC) ||
(interact->param->geotype[1] == TPSC) || (interact->param->geotype[1] == TCHPSC) )
secondT = TRUE;
/* patch 1 of particle 1 with patch 0 of particle 2 */
if (firstT) {
if (firstCH && secondCH) {
interact->part1->dir = interact->part1->chdir[1];
interact->part2->dir = interact->part2->chdir[0];
closestdist(interact);
}
if (firstCH && !secondCH) {
interact->part1->dir = interact->part1->chdir[1];
closestdist(interact);
}
if (!firstCH && secondCH) {
interact->part2->dir = interact->part2->chdir[0];
closestdist(interact);
}
atrenergy += eattractive_psc_cpsc(interact,1,0);
}
/* patch 1 with patch 1 (both particles are two-patch) */
if ( (firstT) && (secondT) ) {
if (firstCH && secondCH) {
interact->part1->dir = interact->part1->chdir[1];
interact->part2->dir = interact->part2->chdir[1];
closestdist(interact);
}
if (firstCH && !secondCH) {
interact->part1->dir = interact->part1->chdir[1];
closestdist(interact);
}
if (!firstCH && secondCH) {
interact->part2->dir = interact->part2->chdir[1];
closestdist(interact);
}
atrenergy += eattractive_psc_cpsc(interact,1,1);
}
/* patch 0 of particle 1 with patch 1 of particle 2; the non-chiral axis
   is explicitly reset to the saved original here */
if (secondT) {
if (firstCH && secondCH) {
interact->part1->dir = interact->part1->chdir[0];
interact->part2->dir = interact->part2->chdir[1];
closestdist(interact);
}
if (firstCH && !secondCH) {
interact->part1->dir = interact->part1->chdir[0];
interact->part2->dir = olddir2;
closestdist(interact);
}
if (!firstCH && secondCH) {
interact->part1->dir = olddir1;
interact->part2->dir = interact->part2->chdir[1];
closestdist(interact);
}
atrenergy += eattractive_psc_cpsc(interact,0,1);
}
}
/* restore original axes of chiral particles */
if (firstCH)
interact->part1->dir = olddir1;
if (secondCH)
interact->part2->dir = olddir2;
}
return repenergy+atrenergy;
}
/*
Determines attractive energy of spherocylinders type PSC and CPSC
*/
/*
 * Attractive energy between patch `patchnum1` of particle 1 and patch
 * `patchnum2` of particle 2 for a mixed PSC/CPSC pair.  Same seven-step
 * scheme as eattractive_psc_psc; the only difference is that each overlap
 * is computed with the intersection routine matching that particle's
 * geometry (psc_intersect for the PSC member, cpsc_intersect for the CPSC
 * member), chosen via the `first` flag.
 * Returns the (non-positive) attractive energy contribution.
 */
double eattractive_psc_cpsc(struct interacts * interact,int patchnum1,int patchnum2)
{
int i, intrs;
double rcut, atrenergy, ndist;
double v1, v2, f0, f1, f2, T1, T2, S1, S2, a;
double intersections[5];
struct vector vec1, vec2, vec_intrs, vec_mindist;
/* local prototypes of vector helpers defined elsewhere in this file */
struct vector vec_sub(struct vector, struct vector);
struct vector vec_sum(struct vector, struct vector);
struct vector vec_create(double, double, double);
struct vector vec_scale(struct vector, double);
struct vector vec_perpproject(struct vector*, struct vector*);
struct vector mindist_segments(struct vector, double, struct vector, double, struct vector);
void normalise(struct vector *);
int psc_intersect(struct particles *, struct particles *,
double, double, struct vector, double *,double, struct ia_param *, int which,int patchnum);
int cpsc_intersect(struct particles *, struct particles *,
double, double, struct vector, double *,double, struct ia_param *, int which,int patchnum);
double fanglscale(double, struct ia_param *, int which);
rcut = interact->param->rcut;
//interact->halfl = interact->param->half_len[0];
//DEBUG_SIM("halfl = %lf", interact->halfl);
for(i=0;i<5;i++)
intersections[i]=0;
/* `first` == TRUE means particle 1 is the PSC of the pair */
BOOL first;
if ( (interact->param->geotype[0] == PSC)||(interact->param->geotype[0] == CHPSC)||(interact->param->geotype[0] == TPSC)||(interact->param->geotype[0] == TCHPSC) ){
first = TRUE;
} else {
first = FALSE;
}
//cospatch = param.pcanglsw;
//cospatchinr = param.pcangl;
/*1- do intersections of spherocylinder2 with patch of spherocylinder1 at
cut distance C*/
//DEBUG_SIM("first intersection");
if (first) {
intrs=psc_intersect(interact->part1,interact->part2,interact->param->half_len[0],interact->param->half_len[1],interact->r_cm, intersections, rcut, interact->param,0, patchnum1);
} else {
intrs=cpsc_intersect(interact->part1,interact->part2,interact->param->half_len[0],interact->param->half_len[1],interact->r_cm, intersections, rcut, interact->param,0, patchnum1);
}
//DEBUG_SIM("first intersection: done");
if (intrs <2){
//DEBUG_SIM("No intersection :(");
return 0.0; /*sc is all outside patch, attractive energy is 0*/
}
T1=intersections[0]; /*points on sc2*/
T2=intersections[1];
/*2- now do the same the opposite way: psc1 in patch of psc2*/
for(i=0;i<5;i++)
intersections[i]=0;
//DEBUG_SIM("get vector");
vec1=vec_scale(interact->r_cm,-1.0);
//DEBUG_SIM("second intersection");
/* the roles swap here, so the opposite intersection routine is used */
if (first) {
intrs=cpsc_intersect(interact->part2,interact->part1,interact->param->half_len[1],interact->param->half_len[0],vec1, intersections, rcut, interact->param,1, patchnum2);
} else {
intrs=psc_intersect(interact->part2,interact->part1,interact->param->half_len[1],interact->param->half_len[0],vec1, intersections, rcut, interact->param,1, patchnum2);
}
if (intrs <2)
return 0.0; /*sc is all outside patch, attractive energy is 0*/
S1=intersections[0]; /*points on sc1*/
S2=intersections[1];
/*3- scaling function1: dependence on the length of intersections*/
v1=fabs(S1-S2);
v2=fabs(T1-T2);
f0=0.5*(v1+v2);
/*4a- with the two intersection pieces calculate the vector between their CM
-this is for angular orientation*/
vec1=vec_scale(interact->part1->dir,(S1+S2)*0.5);
vec2=vec_scale(interact->part2->dir,(T1+T2)*0.5);
vec_intrs.x=vec2.x-vec1.x-interact->r_cm.x;
vec_intrs.y=vec2.y-vec1.y-interact->r_cm.y;
vec_intrs.z=vec2.z-vec1.z-interact->r_cm.z;
/*vec_intrs should be from sc1 to sc2*/
// fprintf (stderr, "segments_CM: %.8f %.8f %.8f \n",vec_intrs.x,vec_intrs.y,vec_intrs.z);
/*4b - calculate closest distance attractive energy from it*/
vec_mindist = mindist_segments(interact->part1->dir,v1,interact->part2->dir,v2,vec_intrs);
// fprintf (stderr, "segments closest dist: %.8f %.8f %.8f \n",vec_mindist.x,vec_mindist.y,vec_mindist.z);
ndist=sqrt(DOT(vec_mindist,vec_mindist));
//dist=DOT(vec_intrs,vec_intrs);
/* square-well within pdis, cos^2 switching over pswitch beyond it */
if (ndist < interact->param->pdis)
atrenergy = -interact->param->epsilon;
//atrenergy = -1.0;
else {
atrenergy = cos(PIH*(ndist-interact->param->pdis)/interact->param->pswitch);
atrenergy *= -atrenergy*interact->param->epsilon ;
}
/*5- scaling function2: angular dependence of patch1*/
vec1=vec_scale(vec_intrs,1.0);
//vec1=vec_scale(vec_mindist,-1.0);
vec1=vec_perpproject(&vec1, &interact->part1->dir);
normalise(&vec1);
a = DOT(vec1,interact->part1->patchdir[patchnum1]);
f1 = fanglscale(a,interact->param, 0+2*patchnum1);
/*6- scaling function3: angular dependence of patch2*/
vec1=vec_scale(vec_intrs,-1.0);
//vec1=vec_scale(vec_mindist,1.0);
vec1=vec_perpproject(&vec1, &interact->part2->dir);
normalise(&vec1);
a = DOT(vec1,interact->part2->patchdir[patchnum2]);
f2 = fanglscale(a,interact->param, 1+2*patchnum2);
/*7- put it all together*/
atrenergy *=f0*f1*f2;
//if (atrenergy < 0) printf ("atraction %f\n",atrenergy);
// fprintf (stderr, "attraction %.8f \n",atrenergy);
// exit(1);
return atrenergy;
}
/*..............................................................................*/
/*
* Determines total energy of spherocylinder type 1 and sphere type 11
*/
/*
 * Total pair energy (repulsive + attractive) of an attractive spherocylinder
 * (type sca) with an attractive sphere (type spa).  The attraction is the
 * closest-distance well (flat within pdis, cos^2 switch over pswitch)
 * multiplied by the length of the spherocylinder axis that lies inside the
 * cutoff sphere around the sphere.
 */
double e_spa_sca(struct interacts * interact)
{
/* local prototypes of project-wide helpers defined elsewhere in this file */
void closestdist(struct interacts *);
double erepulsive(struct interacts *);
double repenergy, atrenergy = 0.0;

closestdist(interact);
repenergy = erepulsive(interact);

/* attraction only inside the cutoff and with a nonzero well depth */
if ( (interact->dist <= interact->param->rcut) && (interact->param->epsilon != 0.0) ) {
    /* closest-distance attractive well */
    if (interact->dist < interact->param->pdis) {
        atrenergy = -interact->param->epsilon;
    } else {
        double c = cos(PIH*(interact->dist - interact->param->pdis)/interact->param->pswitch);
        atrenergy = -c*c*interact->param->epsilon;
    }
    /* half-length of whichever partner is the spherocylinder */
    double halfl = (interact->param->geotype[0] < SP)
        ? interact->param->half_len[0]
        : interact->param->half_len[1];
    /* chord half-length of the axis inside the cutoff sphere, clamped
       to the physical ends of the spherocylinder */
    double b = sqrt(interact->param->rcut*interact->param->rcut
                    - interact->dist*interact->dist);
    double hi = (interact->contt + b > halfl) ? halfl : interact->contt + b;
    double lo = (interact->contt - b < -halfl) ? -halfl : interact->contt - b;
    atrenergy *= (hi - lo);
    //if (atrenergy < 0) printf ("atraction %f\n",atrenergy);
    //fprintf (stderr, "attraction211 %.8f x: %.8f y: %.8f z: %.8f \n",atrenergy,vec1.x,vec1.y,vec1.z);
    //exit(1);
}
return repenergy + atrenergy;
}
/*..............................................................................*/
/*
* Determines total energy of spherocylinder type 2 and sphere type 11
*/
/*
 * Total pair energy (repulsive + attractive) of a patchy spherocylinder
 * (type PSC) with an attractive sphere (type spa).  Chiral PSC variants
 * temporarily use the chiral patch direction; two-patch variants add the
 * second patch's attraction.  Each attraction term is only evaluated if
 * the (possibly recomputed) distance is still within the cutoff.  Original
 * axes are restored unconditionally before returning.
 */
double e_psc_spa(struct interacts * interact)
{
double atrenergy = 0.0, repenergy;
/* local prototypes of project-wide helpers defined elsewhere in this file */
void closestdist(struct interacts *);
double erepulsive(struct interacts *);
double eattractive_psc_spa(struct interacts *, int);
//DEBUG_SIM("do energy 211") ;
closestdist(interact);
repenergy = erepulsive(interact);
//DEBUG_SIM("got the rep. energy");
/* no attraction beyond the cutoff or with zero epsilon */
if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) )
atrenergy = 0.0;
else {
BOOL firstCH=FALSE, secondCH=FALSE;
/* remember original axes; restored unconditionally below */
struct vector olddir1 = interact->part1->dir;
struct vector olddir2 = interact->part2->dir;
if ( (interact->param->geotype[0] == CHPSC) || (interact->param->geotype[0] == TCHPSC) )
firstCH = TRUE;
if ( (interact->param->geotype[1] == CHPSC) || (interact->param->geotype[1] == TCHPSC) )
secondCH = TRUE;
if(firstCH)
interact->part1->dir = interact->part1->chdir[0];
if(secondCH)
interact->part2->dir = interact->part2->chdir[0];
if ((firstCH) || (secondCH) ) {
closestdist(interact);
}
/* distance may have grown past the cutoff after the chiral swap */
if(interact->dist < interact->param->rcut)
atrenergy = eattractive_psc_spa(interact,0);
//addition of interaction of second patches
if ( (interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) ||
(interact->param->geotype[1] == TPSC) ||(interact->param->geotype[1] == TCHPSC) ) {
BOOL firstT=FALSE, secondT=FALSE;
if ( (interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) )
firstT = TRUE;
if ( (interact->param->geotype[1] == TPSC) ||(interact->param->geotype[1] == TCHPSC) )
secondT = TRUE;
/* second patch of whichever partner is the PSC (exactly one of the two
   flags can be set; both set is an error handled below) */
if (firstT) {
if (firstCH) {
interact->part1->dir = interact->part1->chdir[1];
closestdist(interact);
}
if(interact->dist < interact->param->rcut)
atrenergy += eattractive_psc_spa(interact,1);
}
if (secondT) {
if(secondCH) {
interact->part2->dir = interact->part2->chdir[1];
closestdist(interact);
}
if(interact->dist < interact->param->rcut)
atrenergy += eattractive_psc_spa(interact,1);
}
/* a sphere partner cannot be a two-patch PSC as well - parameter error */
if ( (firstT) && (secondT) ) {
fprintf (stderr, "ERROR PSC should interact s SPA but got two PSC \n");
exit(1);
}
}
interact->part1->dir = olddir1;
interact->part2->dir = olddir2;
}
return repenergy+atrenergy;
}
/*
* Determines attractive energy of spherocylinder type 2 and sphere type 11
*/
double eattractive_psc_spa(struct interacts * interact, int patchnum1)
{
double atrenergy, a, b, f0, halfl;
struct vector vec1;
struct vector vec_perpproject(struct vector *, struct vector*);
void normalise(struct vector *);
double fanglscale(double, struct ia_param *, int which);
int which;
/*calculate closest distance attractive energy*/
if (interact->dist < interact->param->pdis)
atrenergy = -interact->param->epsilon;
else {
atrenergy = cos(PIH*(interact->dist-interact->param->pdis)/interact->param->pswitch);
atrenergy *= -atrenergy*interact->param->epsilon ;
}
/*scaling function: angular dependence of patch1*/
if (interact->param->geotype[0] < SP) {
which = 0;
vec1=vec_perpproject(&interact->distvec, &interact->part1->dir);
normalise(&vec1);
a = DOT(vec1,interact->part1->patchdir[patchnum1]);
halfl=interact->param->half_len[0];
} else {
which = 1;
vec1=vec_perpproject(&interact->distvec, &interact->part2->dir);
normalise(&vec1);
a = DOT(vec1,interact->part2->patchdir[patchnum1]);
halfl=interact->param->half_len[1];
}
// caling function for the length of spherocylinder within cutoff
b = sqrt(interact->param->rcut*interact->param->rcut - interact->dist*interact->dist);
if ( interact->contt + b > halfl )
f0 = halfl;
else
f0 = interact->contt + b;
if ( interact->contt - b < -halfl )
f0 -= -halfl;
else
f0 -= interact->contt - b;
atrenergy *= fanglscale(a,interact->param, which)*f0;
//if (atrenergy < 0) printf ("atraction %f\n",atrenergy);
//fprintf (stderr, "attraction211 %.8f x: %.8f y: %.8f z: %.8f \n",atrenergy,vec1.x,vec1.y,vec1.z);
//exit(1);
return atrenergy;
}
/*..............................................................................*/
/*
Determines total energy of spherocylinder type 3 and sphere type 11
*/
/*
 * Total energy of a cylindrical patchy spherocylinder (type 3) and an
 * attractive sphere (type 11): WCA repulsion plus the attraction of up
 * to two patches.  For CPSC the patch covers only the cylindrical body,
 * so the attraction vanishes when the axial contact coordinate lies
 * beyond the cylinder ends (|contt| > halfl).  Chiral variants
 * (CHCPSC/TCHCPSC) temporarily swap the particle axis for the chiral
 * patch axis before re-evaluating the closest distance.
 *
 * BUGFIX: the original cutoff condition tested `dist > rcut` twice
 * (duplicated term removed; no behaviour change).
 * BUGFIX: the two-patch sanity-check message referred to PSC; it now
 * names CPSC, the geometry this function actually handles.
 */
double e_cpsc_spa(struct interacts * interact)
{
    double atrenergy = 0.0, repenergy, halfl;
    void closestdist(struct interacts *);
    double erepulsive(struct interacts *);
    double eattractive_cpsc_spa(struct interacts *,int);

    closestdist(interact);
    repenergy = erepulsive(interact);

    /* half length of whichever particle is the spherocylinder */
    if (interact->param->geotype[0] < SP) {
        halfl = interact->param->half_len[0];
    } else {
        halfl = interact->param->half_len[1];
    }

    if ((interact->dist > interact->param->rcut) || (interact->param->epsilon == 0.0)
        || (interact->contt > halfl) || (interact->contt < -halfl)) {
        atrenergy = 0.0;
    } else {
        BOOL firstCH = FALSE, secondCH = FALSE;
        struct vector olddir1 = interact->part1->dir;
        struct vector olddir2 = interact->part2->dir;

        /* chiral variants: evaluate with the chiral axis of patch 0 */
        if ((interact->param->geotype[0] == CHCPSC) || (interact->param->geotype[0] == TCHCPSC))
            firstCH = TRUE;
        if ((interact->param->geotype[1] == CHCPSC) || (interact->param->geotype[1] == TCHCPSC))
            secondCH = TRUE;
        if (firstCH)
            interact->part1->dir = interact->part1->chdir[0];
        if (secondCH)
            interact->part2->dir = interact->part2->chdir[0];
        if (firstCH || secondCH)
            closestdist(interact);

        if (interact->dist < interact->param->rcut)
            atrenergy = eattractive_cpsc_spa(interact, 0);

        /* addition of the interaction of second patches (T* variants) */
        if ((interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC) ||
            (interact->param->geotype[1] == TCPSC) || (interact->param->geotype[1] == TCHCPSC)) {
            BOOL firstT = FALSE, secondT = FALSE;

            if ((interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC))
                firstT = TRUE;
            if ((interact->param->geotype[1] == TCPSC) || (interact->param->geotype[1] == TCHCPSC))
                secondT = TRUE;
            if (firstT) {
                if (firstCH) {
                    interact->part1->dir = interact->part1->chdir[1];
                    closestdist(interact);
                }
                if (interact->dist < interact->param->rcut)
                    atrenergy += eattractive_cpsc_spa(interact, 1);
            }
            if (secondT) {
                if (secondCH) {
                    interact->part2->dir = interact->part2->chdir[1];
                    closestdist(interact);
                }
                if (interact->dist < interact->param->rcut)
                    atrenergy += eattractive_cpsc_spa(interact, 1);
            }
            /* the sphere partner cannot itself be a two-patch spherocylinder */
            if (firstT && secondT) {
                fprintf (stderr, "ERROR CPSC should interact s SPA but got two CPSC \n");
                exit(1);
            }
        }
        /* restore the true particle axes */
        if (firstCH)
            interact->part1->dir = olddir1;
        if (secondCH)
            interact->part2->dir = olddir2;
    }
    return repenergy + atrenergy;
}
/*
Determines attractive energy of spherocylinder type 3 and sphere type 11
*/
double eattractive_cpsc_spa(struct interacts * interact,int patchnum1)
{
double atrenergy, a, b, f0, halfl;
struct vector vec1;
int which;
struct vector vec_perpproject(struct vector *, struct vector*);
void normalise(struct vector *);
double fanglscale(double, struct ia_param *, int which);
/*if it is in cylindrical part c>-halfl and c<halfl*/
/*calculate closest distance attractive energy*/
if (interact->dist < interact->param->pdis)
atrenergy = -interact->param->epsilon;
else {
atrenergy = cos(PIH*(interact->dist-interact->param->pdis)/interact->param->pswitch);
atrenergy *= -atrenergy*interact->param->epsilon ;
}
/*scaling function: angular dependence of patch1*/
if (interact->param->geotype[0] < SP) {
which = 0;
vec1=vec_perpproject(&interact->distvec, &interact->part1->dir);
normalise(&vec1);
a = DOT(vec1,interact->part1->patchdir[patchnum1]);
halfl = interact->param->half_len[0];
} else {
which = 1;
vec1=vec_perpproject(&interact->distvec, &interact->part2->dir);
normalise(&vec1);
a = DOT(vec1,interact->part2->patchdir[patchnum1]);
halfl = interact->param->half_len[1];
}
/*scaling function for the length of spherocylinder within cutoff*/
b = sqrt(interact->param->rcut*interact->param->rcut-interact->dist*interact->dist);
if ( interact->contt + b > halfl )
f0 = halfl;
else
f0 = interact->contt + b;
if ( interact->contt - b < -halfl )
f0 -= -halfl;
else
f0 -= interact->contt - b;
atrenergy *= fanglscale(a,interact->param, which)*f0;
//if (atrenergy < 0) printf ("atraction %f\n",atrenergy);
//fprintf (stderr, "attraction311 %.8f a: %.8f\n",atrenergy,a);
//exit(1);
return atrenergy;
}
/*..............................................................................*/
/*
Determines total energy of two spherocylinders type 11
*/
/*
 * Total energy of two attractive spheres (type 11) or two attractive
 * bare spherocylinders: WCA repulsion plus an isotropic attraction in
 * the closest distance (flat well to pdis, cos^2 shoulder to rcut).
 */
double e_2sca_or_2spa(struct interacts * interact)
{
    double repenergy, atrenergy;
    double erepulsive(struct interacts *);
    void closestdist(struct interacts *);

    closestdist(interact);
    repenergy = erepulsive(interact);

    if ((interact->dist > interact->param->rcut) || (interact->param->epsilon == 0.0)) {
        atrenergy = 0.0;
    } else if (interact->dist < interact->param->pdis) {
        /* inside the full-strength well */
        atrenergy = -interact->param->epsilon;
    } else {
        /* cos^2 switching region between pdis and rcut */
        atrenergy = cos(PIH*(interact->dist-interact->param->pdis)/interact->param->pswitch);
        atrenergy *= -atrenergy*interact->param->epsilon ;
    }
    return repenergy + atrenergy;
}
/*..............................................................................*/
/*
Determines total energy with purely repulsive types
*/
/*
 * Total energy for purely repulsive particle types: only the WCA
 * repulsion evaluated at the closest distance contributes.
 */
double e_spn_or_scn(struct interacts * interact)
{
    double erepulsive(struct interacts *);
    void closestdist(struct interacts *);

    closestdist(interact);
    return erepulsive(interact);
}
/*..............................................................................*/
/*
Determines repulsive energy of two spherocylinders
*/
/*
 * WCA (Weeks-Chandler-Andersen) repulsion evaluated at the closest
 * distance between the two particles; identically zero beyond the WCA
 * cutoff rcutwca.
 */
double erepulsive(struct interacts * interact)
{
    double repenergy;

    if (interact->dist > interact->param->rcutwca) {
        repenergy = 0.0;
    } else {
        /* shifted Lennard-Jones: 4*eps*(x^12 - x^6) + eps, x = sigma/dist */
        double en6 = pow(interact->param->sigma/interact->dist, 6);
        repenergy = interact->param->epsilon * ( 4*en6*(en6-1) + 1.0);
    }
    return repenergy;
}
/*..............................................................................*/
/*
Indicates not yet programmed interaction
*/
/*
 * Called for particle-type pairs whose interaction has not been
 * implemented: reports the offending types on stderr and aborts.
 */
double enoexist(struct interacts * interact)
{
    fprintf (stderr, "ERROR: We have not programed interaction of types %d and %d\n",
             interact->part1->type,interact->part2->type);
    exit (1);
    return 0.0;  /* unreachable; satisfies the return type */
}
/* function for calculation of harmonic potential*/
/* Harmonic potential: E = 0.5 * k * (x - x0)^2 */
double harmonic(double aktualvalue, double eqvalue, double springconst)
{
    double displacement = aktualvalue - eqvalue;
    return springconst * displacement * displacement * 0.5;
}
/*..............................................................................*/
/*
Determines bond energy
*/
/*
 * Harmonic bond energy between particles num1 and num2.
 *
 * Three bond types, each active when its spring constant in chainparam
 * is enabled:
 *   bond1c >= 0 : nearest-neighbour bond, measured between the facing
 *                 spherocylinder tips (centre for a sphere, halfl = 0).
 *   bond2c >= 0 : second-nearest-neighbour bond, centre-to-centre.
 *   bonddc >  0 : "direct" bond whose equilibrium length bonddeq is
 *                 built into the anchor point, so the spring pulls the
 *                 shifted anchors to zero separation.
 * conlist[num1][0]/[1] are head/tail nearest neighbours, [2]/[3] the
 * second-nearest ones (as used below -- confirm against the topology
 * builder).
 *
 * NOTE(review): positions appear to be stored in box-scaled units,
 * hence the direction offsets are divided by conf->box before image()
 * -- verify against image()'s convention.
 *
 * BUGFIX: in both head-connected branches the second endpoint belongs
 * to particle num2, but the original tested geotype[0]/half_len[0]
 * (copy-paste from the first endpoint); they now use geotype[1]/
 * half_len[1], mirroring the tail-connected branches.
 * BUGFIX: the sphere-sphere case of the direct bond used bond1eq/bond1c;
 * it now uses bonddeq/bonddc like its tail-connected counterpart.
 */
double bondenergy(long num1, long num2, struct interacts * interact, struct topo * topo, struct conf * conf)
{
    double energy=0.0, bondlength, halfl;
    struct vector vec1, vec2, vecbond;
    int * geotype = interact->param->geotype;
    struct vector image(struct vector, struct vector, struct vector);
    double harmonic(double, double, double);

    /* interaction with nearest neighbours - harmonic */
    if ((topo->chainparam[conf->particle[num1].chaint]).bond1c >= 0) {
        if (num2 == topo->conlist[num1][1]) {
            /* num1 is connected to num2 by tail */
            if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
                energy = harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bond1eq,topo->chainparam[conf->particle[num1].chaint].bond1c);
            else {
                if (geotype[0] < SP)
                    halfl = interact->param->half_len[0];
                else
                    halfl = 0.0;
                vec1.x = conf->particle[num1].pos.x - conf->particle[num1].dir.x * halfl /conf->box.x;
                vec1.y = conf->particle[num1].pos.y - conf->particle[num1].dir.y * halfl /conf->box.y;
                vec1.z = conf->particle[num1].pos.z - conf->particle[num1].dir.z * halfl /conf->box.z;
                if (geotype[1] < SP)
                    halfl = interact->param->half_len[1];
                else
                    halfl = 0.0;
                vec2.x = conf->particle[num2].pos.x + conf->particle[num2].dir.x * halfl /conf->box.x;
                vec2.y = conf->particle[num2].pos.y + conf->particle[num2].dir.y * halfl /conf->box.y;
                vec2.z = conf->particle[num2].pos.z + conf->particle[num2].dir.z * halfl /conf->box.z;
                vecbond = image(vec1, vec2, conf->box);
                bondlength = sqrt(DOT(vecbond,vecbond));
                energy = harmonic(bondlength,topo->chainparam[conf->particle[num1].chaint].bond1eq,topo->chainparam[conf->particle[num1].chaint].bond1c);
            }
        } else {
            if (num2 == topo->conlist[num1][0]) {
                /* num1 is connected to num2 by head */
                if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
                    energy = harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bond1eq,topo->chainparam[conf->particle[num1].chaint].bond1c);
                else {
                    if (geotype[0] < SP)
                        halfl = interact->param->half_len[0];
                    else
                        halfl = 0.0;
                    vec1.x = conf->particle[num1].pos.x + conf->particle[num1].dir.x * halfl /conf->box.x;
                    vec1.y = conf->particle[num1].pos.y + conf->particle[num1].dir.y * halfl /conf->box.y;
                    vec1.z = conf->particle[num1].pos.z + conf->particle[num1].dir.z * halfl /conf->box.z;
                    /* BUGFIX: endpoint of num2 -> use geotype[1]/half_len[1] */
                    if (geotype[1] < SP)
                        halfl = interact->param->half_len[1];
                    else
                        halfl = 0.0;
                    vec2.x = conf->particle[num2].pos.x - conf->particle[num2].dir.x * halfl /conf->box.x;
                    vec2.y = conf->particle[num2].pos.y - conf->particle[num2].dir.y * halfl /conf->box.y;
                    vec2.z = conf->particle[num2].pos.z - conf->particle[num2].dir.z * halfl /conf->box.z;
                    vecbond = image(vec1, vec2, conf->box);
                    bondlength = sqrt(DOT(vecbond,vecbond));
                    energy = harmonic(bondlength,topo->chainparam[conf->particle[num1].chaint].bond1eq,topo->chainparam[conf->particle[num1].chaint].bond1c);
                }
            }
        }
    }

    /* interaction with second nearest neighbours - harmonic, centre-to-centre */
    if (topo->chainparam[conf->particle[num1].chaint].bond2c >= 0) {
        if (num2 == topo->conlist[num1][2]) {
            /* num1 is connected to num2 by tail */
            if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
                energy = harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bond2eq,topo->chainparam[conf->particle[num1].chaint].bond2c);
            else {
                vecbond = image(conf->particle[num1].pos, conf->particle[num2].pos, conf->box);
                bondlength = sqrt(DOT(vecbond,vecbond));
                energy = harmonic(bondlength,topo->chainparam[conf->particle[num1].chaint].bond2eq,topo->chainparam[conf->particle[num1].chaint].bond2c);
            }
        } else {
            if (num2 == topo->conlist[num1][3]) {
                /* num1 is connected to num2 by head */
                if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
                    energy = harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bond2eq,topo->chainparam[conf->particle[num1].chaint].bond2c);
                else {
                    vecbond = image(conf->particle[num1].pos, conf->particle[num2].pos, conf->box);
                    bondlength = sqrt(DOT(vecbond,vecbond));
                    energy = harmonic(bondlength,topo->chainparam[conf->particle[num1].chaint].bond2eq,topo->chainparam[conf->particle[num1].chaint].bond2c);
                }
            }
        }
    }

    /* interaction with nearest neighbours - direct harmonic bond,
       equilibrium length folded into the anchor point */
    if ((topo->chainparam[conf->particle[num1].chaint]).bonddc > 0) {
        if (num2 == topo->conlist[num1][1]) {
            /* num1 is connected to num2 by tail */
            if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
                energy = harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bonddeq,topo->chainparam[conf->particle[num1].chaint].bonddc);
            else {
                if (geotype[0] < SP)
                    halfl = interact->param->half_len[0];
                else
                    halfl = 0.0;
                vec1.x = conf->particle[num1].pos.x - conf->particle[num1].dir.x * halfl /conf->box.x;
                vec1.y = conf->particle[num1].pos.y - conf->particle[num1].dir.y * halfl /conf->box.y;
                vec1.z = conf->particle[num1].pos.z - conf->particle[num1].dir.z * halfl /conf->box.z;
                if (geotype[1] < SP)
                    halfl = interact->param->half_len[1];
                else
                    halfl = 0.0;
                vec2.x = conf->particle[num2].pos.x + conf->particle[num2].dir.x * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.x ;
                vec2.y = conf->particle[num2].pos.y + conf->particle[num2].dir.y * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.y ;
                vec2.z = conf->particle[num2].pos.z + conf->particle[num2].dir.z * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.z ;
                vecbond = image(vec1, vec2, conf->box);
                bondlength = sqrt(DOT(vecbond,vecbond));
                energy = harmonic(bondlength,0.0,topo->chainparam[conf->particle[num1].chaint].bonddc);
            }
        } else {
            if (num2 == topo->conlist[num1][0]) {
                /* num1 is connected to num2 by head */
                if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
                    /* BUGFIX: was bond1eq/bond1c (copy-paste) */
                    energy = harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bonddeq,topo->chainparam[conf->particle[num1].chaint].bonddc);
                else {
                    if (geotype[0] < SP)
                        halfl = interact->param->half_len[0];
                    else
                        halfl = 0.0;
                    vec1.x = conf->particle[num1].pos.x + conf->particle[num1].dir.x * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.x ;
                    vec1.y = conf->particle[num1].pos.y + conf->particle[num1].dir.y * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.y ;
                    vec1.z = conf->particle[num1].pos.z + conf->particle[num1].dir.z * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.z ;
                    /* BUGFIX: endpoint of num2 -> use geotype[1]/half_len[1] */
                    if (geotype[1] < SP)
                        halfl = interact->param->half_len[1];
                    else
                        halfl = 0.0;
                    vec2.x = conf->particle[num2].pos.x - conf->particle[num2].dir.x * halfl /conf->box.x;
                    vec2.y = conf->particle[num2].pos.y - conf->particle[num2].dir.y * halfl /conf->box.y;
                    vec2.z = conf->particle[num2].pos.z - conf->particle[num2].dir.z * halfl /conf->box.z;
                    vecbond = image(vec1, vec2, conf->box);
                    bondlength = sqrt(DOT(vecbond,vecbond));
                    energy = harmonic(bondlength,0.0,topo->chainparam[conf->particle[num1].chaint].bonddc);
                }
            }
        }
    }
    return energy;
}
/*..............................................................................*/
/*
Determines angle energy between spherocylinders
*/
/*
 * Harmonic angular energies between chain neighbours num1 and num2.
 * Two contributions, each gated by its spring constant in chainparam:
 *   angle1c >= 0 : angle between the two particle axes; for a sphere the
 *                  "axis" is replaced by the vector from the partner
 *                  spherocylinder's end to the sphere centre.
 *   angle2c >= 0 : angle derived from the patch orientations of two
 *                  spherocylinders.
 * Returns the summed energy (0.0 when neither term applies).
 *
 * NOTE(review): positions look box-scaled (direction offsets divided by
 * conf->box) -- confirm against image()'s convention.
 */
double angleenergy(long num1, long num2, struct interacts * interact, struct topo * topo, struct conf * conf)
{
double energy=0.0, currangle, halfl;
struct vector vec1, vec2;
int * geotype = interact->param->geotype;
struct vector image(struct vector, struct vector, struct vector);
void normalise(struct vector *);
double harmonic(double, double, double);
/*angle interaction with nearest neighbours -harmonic*/
if ((topo->chainparam[conf->particle[num1].chaint]).angle1c >= 0) {
if (num2 == topo->conlist[num1][0]) {
/*num1 is connected to num2 by tail*/
if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
/*spheres do not have this interaction*/
energy += 0.0;
else {
/* vec1: axis of num1, or (num1 sphere) vector from the tail end of
   spherocylinder num2 to the sphere centre */
if (geotype[0] < SP)
vec1 = conf->particle[num1].dir;
else {
halfl=interact->param->half_len[1];
//sphere angle is defined versus the end of spherocylinder
vec1.x=conf->particle[num2].pos.x - conf->particle[num2].dir.x * halfl /conf->box.x;
vec1.y=conf->particle[num2].pos.y - conf->particle[num2].dir.y * halfl /conf->box.y;
vec1.z=conf->particle[num2].pos.z - conf->particle[num2].dir.z * halfl /conf->box.z;
vec1 = image(vec1, conf->particle[num1].pos, conf->box);
}
/* vec2: axis of num2, or (num2 sphere) vector from the head end of
   spherocylinder num1 to the sphere centre */
if (geotype[1] < SP)
vec2 = conf->particle[num2].dir;
else {
halfl=interact->param->half_len[0];
vec2.x=conf->particle[num1].pos.x + conf->particle[num1].dir.x * halfl /conf->box.x;
vec2.y=conf->particle[num1].pos.y + conf->particle[num1].dir.y * halfl /conf->box.y;
vec2.z=conf->particle[num1].pos.z + conf->particle[num1].dir.z * halfl /conf->box.z;
vec2 = image(vec2, conf->particle[num2].pos, conf->box);
}
/* angle between the two (unit) vectors against the equilibrium angle1eq */
normalise(&vec1);
normalise(&vec2);
currangle = acos(DOT(vec1,vec2));
energy += harmonic(currangle,topo->chainparam[conf->particle[num1].chaint].angle1eq,topo->chainparam[conf->particle[num1].chaint].angle1c);
}
} else {
if (num2 == topo->conlist[num1][1]) {
/*num1 is connected to num2 by head*/
if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
/*spheres do not have this interaction*/
energy += 0.0;
else {
/* mirror of the tail case with the end signs flipped; note the spring
   parameters are taken from num2's chain type here */
if (geotype[0] < SP)
vec1 = conf->particle[num1].dir;
else {
halfl=interact->param->half_len[1];
//sphere angle is defined versus the end of spherocylinder
vec1.x=conf->particle[num2].pos.x + conf->particle[num2].dir.x * halfl /conf->box.x;
vec1.y=conf->particle[num2].pos.y + conf->particle[num2].dir.y * halfl /conf->box.y;
vec1.z=conf->particle[num2].pos.z + conf->particle[num2].dir.z * halfl /conf->box.z;
vec1 = image(vec1, conf->particle[num1].pos, conf->box);
}
if (geotype[1] < SP)
vec2 = conf->particle[num2].dir;
else {
halfl=interact->param->half_len[0];
vec2.x=conf->particle[num1].pos.x - conf->particle[num1].dir.x * halfl /conf->box.x;
vec2.y=conf->particle[num1].pos.y - conf->particle[num1].dir.y * halfl /conf->box.y;
vec2.z=conf->particle[num1].pos.z - conf->particle[num1].dir.z * halfl /conf->box.z;
vec2 = image(vec2, conf->particle[num2].pos, conf->box);
}
normalise(&vec1);
normalise(&vec2);
currangle = acos(DOT(vec1,vec2));
energy += harmonic(currangle,topo->chainparam[conf->particle[num2].chaint].angle1eq,topo->chainparam[conf->particle[num2].chaint].angle1c);
}
}
}
}
/*interaction between the orientation of spherocylinders patches -harmonic*/
if (topo->chainparam[conf->particle[num1].chaint].angle2c >= 0) {
if (num2 == topo->conlist[num1][0]) {
/*num1 is connected to num2 by tail*/
if ( (geotype[0] < SP) && (geotype[1] < SP) ) {
/* NOTE(review): the argument mixes patch-patch and axis-patch dot
   products before acos -- confirm the intended angle definition */
currangle = acos(DOT(conf->particle[num1].patchdir[0],conf->particle[num2].patchdir[0]) - DOT(conf->particle[num1].dir,conf->particle[num2].patchdir[0]) );
energy += harmonic(currangle,topo->chainparam[conf->particle[num1].chaint].angle2eq,topo->chainparam[conf->particle[num1].chaint].angle2c);
} else {
/* patch-orientation term exists only for two spherocylinders */
energy += 0.0;
}
} else {
if (num2 == topo->conlist[num1][1]) {
/*num1 is connected to num2 by head*/
if ( (geotype[0] < SP) && (geotype[1] < SP) ) {
currangle = acos(DOT(conf->particle[num2].patchdir[0],conf->particle[num1].patchdir[0]) - DOT(conf->particle[num2].dir,conf->particle[num1].patchdir[0]) );
energy += harmonic(currangle,topo->chainparam[conf->particle[num2].chaint].angle2eq,topo->chainparam[conf->particle[num2].chaint].angle2c);
} else {
energy += 0.0;
}
}
}
}
return energy;
}
/* Closest distance calculation */
/*
 * Closest-distance computation for the current particle pair.
 * Fills interact->distvec and interact->dist; additionally sets
 * distcm (sphere-sphere) and contt, the axial coordinate of the
 * contact point on the spherocylinder (spherocylinder-sphere cases).
 */
void closestdist(struct interacts * interact)
{
    double axproj, clamped, halfl;
    struct vector mindist_segments(struct vector dir1, double halfl1,
                                   struct vector dir2, double halfl2, struct vector r_cm);

    if ((interact->param->geotype[0] >= SP) && (interact->param->geotype[1] >= SP)) {
        /* sphere-sphere (most common): the centre-centre vector is the answer */
        interact->distvec = interact->r_cm;
        interact->dist = sqrt(interact->dotrcm);
        interact->distcm = interact->dist;
    } else if ((interact->param->geotype[0] < SP) && (interact->param->geotype[1] < SP)) {
        /* spherocylinder-spherocylinder: minimum distance of two segments */
        interact->distvec = mindist_segments(interact->part1->dir, interact->param->half_len[0],
                                             interact->part2->dir, interact->param->half_len[1],
                                             interact->r_cm);
        interact->dist = sqrt(DOT(interact->distvec, interact->distvec));
    } else if (interact->param->geotype[0] < SP) {
        /* first particle is the spherocylinder: project the CM vector onto
           its axis and clamp to the segment ends */
        halfl = interact->param->half_len[0];
        axproj = DOT(interact->part1->dir, interact->r_cm);
        if (axproj >= halfl)
            clamped = halfl;
        else if (axproj > -halfl)
            clamped = axproj;
        else
            clamped = -halfl;
        interact->contt = axproj;
        interact->distvec.x = interact->part1->dir.x * clamped - interact->r_cm.x;
        interact->distvec.y = interact->part1->dir.y * clamped - interact->r_cm.y;
        interact->distvec.z = interact->part1->dir.z * clamped - interact->r_cm.z;
        interact->dist = sqrt(DOT(interact->distvec, interact->distvec));
    } else {
        /* last option: first particle is the sphere, second the spherocylinder */
        halfl = interact->param->half_len[1];
        axproj = DOT(interact->part2->dir, interact->r_cm);
        if (axproj >= halfl)
            clamped = halfl;
        else if (axproj > -halfl)
            clamped = axproj;
        else
            clamped = -halfl;
        interact->contt = -axproj;
        interact->distvec.x = interact->r_cm.x - interact->part2->dir.x * clamped;
        interact->distvec.y = interact->r_cm.y - interact->part2->dir.y * clamped;
        interact->distvec.z = interact->r_cm.z - interact->part2->dir.z * clamped;
        interact->dist = sqrt(DOT(interact->distvec, interact->distvec));
    }
}
/*..............................................................................*/
/*
Determines energy of two particles
*/
/*
 * Pair energy of particles num1 and num2: the type-pair interaction
 * dispatched through intfce, plus bond and angle contributions.
 * Returns 0.0 immediately when the squared centre-centre distance
 * already exceeds the global maximum cutoff (sqmaxcut).
 *
 * NOTE: positions are stored in box-scaled units; the branchy code
 * below is an inlined nearest-image computation of the CM-CM vector
 * (it replaces the commented-out image() call, kept for reference).
 *
 * BUGFIX: when no interaction function was defined for the type pair,
 * the original printed a diagnostic and then called through the NULL
 * pointer anyway (undefined behaviour / crash); it now aborts cleanly,
 * consistent with enoexist().
 */
double paire(long num1, long num2, double (* intfce[MAXT][MAXT])(struct interacts *),
struct topo * topo, struct conf * conf)
{
    double energy=0.0;            /* energy */
    struct vector r_cm;           /* vector between centres of mass, from part2 to part1 */
    struct interacts interact;    /* interaction parameters */
    double bondenergy(long, long, struct interacts *, struct topo * topo, struct conf * conf);
    double angleenergy(long, long, struct interacts *, struct topo * topo, struct conf * conf);

    /*Placing interactin particle in unit box and finding vector connecting CM*/
    /*r_cm = image(part1.pos, part2.pos, box); explicit statement below for performance optimization*/
    r_cm.x = conf->particle[num1].pos.x - conf->particle[num2].pos.x;
    r_cm.y = conf->particle[num1].pos.y - conf->particle[num2].pos.y;
    r_cm.z = conf->particle[num1].pos.z - conf->particle[num2].pos.z;
    if ( r_cm.x < 0 )
        r_cm.x = conf->box.x * (r_cm.x - (double)( (long)(r_cm.x-0.5) ) );
    else
        r_cm.x = conf->box.x * (r_cm.x - (double)( (long)(r_cm.x+0.5) ) );
    if ( r_cm.y < 0 )
        r_cm.y = conf->box.y * (r_cm.y - (double)( (long)(r_cm.y-0.5) ) );
    else
        r_cm.y = conf->box.y * (r_cm.y - (double)( (long)(r_cm.y+0.5) ) );
    if ( r_cm.z < 0 )
        r_cm.z = conf->box.z * (r_cm.z - (double)( (long)(r_cm.z-0.5) ) );
    else
        r_cm.z = conf->box.z * (r_cm.z - (double)( (long)(r_cm.z+0.5) ) );

    interact.dotrcm = DOT(r_cm,r_cm);
    if ( interact.dotrcm > topo->sqmaxcut) return 0.0; /* distance so far that even spherocylinders cannot be within cutoff */

    interact.r_cm=r_cm;
    interact.contt = 0;
    interact.distvec.x = 0;
    interact.distvec.y = 0;
    interact.distvec.z = 0;
    interact.box = conf->box;
    interact.part1 = &conf->particle[num1];
    interact.part2 = &conf->particle[num2];
    interact.param = topo->ia_params[conf->particle[num1].type] + conf->particle[num2].type;

    if(intfce[conf->particle[num1].type][conf->particle[num2].type] == NULL){
        fprintf(stderr, "interaction function for type %d and %d not defined!\n",
                conf->particle[num1].type, conf->particle[num2].type);
        exit(1); /* BUGFIX: do not fall through and dereference NULL */
    }
    energy = (*intfce[conf->particle[num1].type][conf->particle[num2].type])( &interact);
    energy += bondenergy ( num1, num2, &interact, topo, conf);
    energy += angleenergy ( num1, num2, &interact, topo, conf);
    return energy;
}
/*...........................................................................*/
/*Calculates interaction of target particle and external field version 2
calculate projection of spherocylinder in direction of patch and calculate
interacting line segment within cutoff
*/
/*
 * Interaction of the target particle with the external field (wall),
 * version 2: projects the spherocylinder towards the wall plane and
 * integrates the attraction over the line segment within the cutoff.
 * Returns WCA repulsion plus the (up to two-patch) attraction.
 *
 * NOTE(review): interact.r_cm is read below (distvec.z assignment)
 * without ever being initialised in this function -- looks like an
 * uninitialised read; confirm intent.
 * NOTE(review): exter2_atre is declared with int* orientin while the
 * local orientin is BOOL -- relies on BOOL being int-compatible.
 */
double extere2 (long target, struct topo * topo, struct conf * conf)
{
double repenergy=0.0,atrenergy=0.0; /* energy*/
double rcmz; /* z distance between*/
double ndist; /* distance for CM of interacting line segment*/
double interendz; /* z coordinate of interaction end*/
struct interacts interact; /* interaction parameters*/
double orient;
double halfl;
BOOL positive, orientin;
struct vector olddir;
struct vector project; /*vector for projection down to plane */
double erepulsive(struct interacts *);
// struct vector vec_perpproject(struct vector*, struct vector*);
// void normalise(struct vector *);
double fanglscale(double, struct ia_param *, int which);
void exter2_closestdist(struct interacts * interact, BOOL *positive, BOOL *orientin, double *orient,
double *rcmz,double *interendz, struct vector *project);
double exter2_atre(struct interacts * interact,int *orientin, double *rcmz, double *interendz, BOOL *positive,
double orient,struct vector *project, double *ndist,int, double );
/* calcualte distance to center of mass*/
/* nearest-image z coordinate in real units (positions are box-scaled) */
if ( conf->particle[target].pos.z < 0 ) {
rcmz = conf->box.z * (conf->particle[target].pos.z - (double)( (long)(conf->particle[target].pos.z - 0.5) ) );
} else {
rcmz = conf->box.z * (conf->particle[target].pos.z - (double)( (long)(conf->particle[target].pos.z + 0.5) ) );
}
/* projection direction points from the particle towards the wall plane */
project.x=0;
project.y=0;
if (rcmz < 0) {
interact.dist = -rcmz;
positive = FALSE;
interendz = -1.0;
project.z = 1.0;
} else {
interact.dist = rcmz;
positive = TRUE;
interendz = 1.0;
project.z = -1.0;
}
interact.dotrcm = rcmz * rcmz;
if ( interact.dotrcm > topo->exter.sqmaxcut) return 0.0; /* distance so far that even spherocylinders cannot be within cutoff */
interact.distvec.z = interact.r_cm.z;
interact.distcm = interact.dist;
interact.box = conf->box;
interact.part1 = &conf->particle[target];
interact.param = &topo->exter.interactions[conf->particle[target].type];
halfl = 0.5* topo->exter.interactions[conf->particle[target].type].len[0];
ndist = interact.dist;
orientin = TRUE;
orient = 0.0;
exter2_closestdist(&interact,&positive,&orientin,&orient,&rcmz,&interendz,&project);
/* now we have closest distance so we can calculate repulsion*/
repenergy = erepulsive(&interact);
/*save chiral stuff*/
olddir = interact.part1->dir;
if ((interact.param->geotype[0] == CHCPSC)||(interact.param->geotype[0] == CHPSC)) {
/* chiral variants: use the chiral axis of patch 0 and recompute */
interact.part1->dir = interact.part1->chdir[0];
exter2_closestdist(&interact,&positive,&orientin,&orient,&rcmz,&interendz,&project);
}
/* patch 0 attracts only when it faces the wall */
if (( interact.dist > interact.param->rcut ) || (interact.param->epsilon == 0.0 ) ||
( (interact.part1->patchdir[0].z >0)&&(positive) ) || ( (interact.part1->patchdir[0].z <0)&&(!(positive)) ) )
atrenergy = 0.0;
else {
atrenergy = exter2_atre(&interact,&orientin,&rcmz,&interendz,&positive,orient,&project,&ndist,0,halfl);
}
/* second patch of the two-patch (T*) variants */
if ((interact.param->geotype[0] == TCPSC)||(interact.param->geotype[0] == TPSC)||
(interact.param->geotype[0] == TCHCPSC)||(interact.param->geotype[0] == TCHPSC)) {
if ((interact.param->geotype[0] == TCHCPSC)||(interact.param->geotype[0] == TCHPSC)) {
interact.part1->dir = interact.part1->chdir[1];
exter2_closestdist(&interact,&positive,&orientin,&orient,&rcmz,&interendz,&project);
}
/* NOTE(review): exter2_closestdist is called again right below, so the
   chiral branch recomputes twice in a row -- looks redundant; confirm */
exter2_closestdist(&interact,&positive,&orientin,&orient,&rcmz,&interendz,&project);
if (( interact.dist > interact.param->rcut ) || (interact.param->epsilon == 0.0 ) ||
( (interact.part1->patchdir[1].z >0)&&(positive) ) || ( (interact.part1->patchdir[1].z <0)&&(!(positive)) ) )
atrenergy += 0.0;
else {
atrenergy += exter2_atre(&interact,&orientin,&rcmz,&interendz,&positive,orient,&project,&ndist,1,halfl);
}
}
/* restore the true particle axis after any chiral substitution */
if ((interact.param->geotype[0] == CHCPSC)||(interact.param->geotype[0] == CHPSC)||
(interact.param->geotype[0] == TCHCPSC)||(interact.param->geotype[0] == TCHPSC) ) {
interact.part1->dir = olddir;
}
return repenergy+atrenergy;
}
double exter2_atre(struct interacts * interact,int *orientin, double *rcmz, double *interendz, BOOL *positive, double orient,struct vector *project, double *ndist,int numpatch, double halfl)
{
struct vector pbeg,pend; /* projected spherocylinder begining and end*/
double a,length1,length2, f0,f1;
struct vector cm1,cm2; /* centrar of interacting segments */
int line;
struct vector partbeg,partend; /*closest and furthest point of particle*/
struct vector inters;
double atrenergy=0.0;
int cpsc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir,
struct vector* partdir, double* halfl,BOOL* orientin,BOOL* positive, double* rcmz,
double * cut, struct vector* partbeg, struct vector* partend);
int psc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir,
struct vector* partdir, BOOL* positive, double * cut, struct vector* partbeg, struct vector* partend);
/*interaction with PATCHY SPHEROCYLINDERS*/
if ((interact->param->geotype[0] < SP)&&(interact->param->geotype[0] > SCA)) {
//printf("partdir: %f %f %f \n ",interact->part1->dir.x,interact->part1->dir.y,interact->part1->dir.z);
//printf("patchdir: %f %f %f \n ",interact->part1->patchdir[0].x,interact->part1->patchdir[0].y,interact->part1->patchdir[0].z);
/* calculate position of closest and furthest point (begining and end of spherocylinder)*/
a = (*orientin-0.5)*2; /*if orientin a =1 else a=-1 */
partbeg.x = a * interact->part1->dir.x * halfl;
partbeg.y = a * interact->part1->dir.y * halfl;
partbeg.z = *rcmz + a * interact->part1->dir.z *halfl;
partend.x = - a * interact->part1->dir.x * halfl;
partend.y = - a * interact->part1->dir.y * halfl;
partend.z = *rcmz - a * interact->part1->dir.z * halfl;
//printf("partbeg %f %f %f partend %f %f %f \n",partbeg.x,partbeg.y,partbeg.z,partend.x,partend.y,partend.z);
/*calculate interacting line segment and its cm of spherocylinder*/
/*calculate end point z*/
if ( (interact->param->rcut - interact->dist)/fabs(interact->part1->dir.z) < 2.0*halfl ){
/*if cutoff goes through spherocylinder the end point is at cutoff*/
*interendz *= interact->param->rcut;
} else {
/*endpoint is at the end of spherocylinders*/
*interendz = partend.z;
}
/*calculate CM of interacting line segment of spherocylinder*/
if (*positive) {
cm1.z = AVER(*interendz,interact->dist);
} else {
cm1.z = AVER(*interendz,-interact->dist);
}
if (interact->part1->dir.z != 0.0 ) {
a = (*interendz - cm1.z ) / interact->part1->dir.z;
length1= -orient*2.0*a;
a = a + orient*halfl;
} else {
a = 0.0;
length1 = 2.0*halfl;
}
//printf("len1: %f rcm %f interz %f cutoff %f \n",length1,rcmz, interendz,interact.dist);
cm1.x = interact->part1->dir.x * a;
cm1.y = interact->part1->dir.y * a;
/* we have interacting segment*/
if ((interact->param->geotype[0] == CPSC)||(interact->param->geotype[0] == CHCPSC)) {
/*CPSC type*/
if ( ((*interendz >= interact->dist)&&(*positive)) || ((*interendz <= -interact->dist)&&(!(*positive))) ){
/*test if projection is not all out of interaction*/
line = cpsc_wall(&pbeg,&pend,project,&interact->part1->dir, \
&interact->param->half_len[0],orientin,positive,rcmz,&interact->param->rcut,&partbeg,&partend);
//printf("line: %d beg %f %f end %f %f \n",line,pbeg.x,pbeg.y,pend.x,pend.y);
} else {
line = 0;
}
} else {
/*PSC and CHPSC interaction with wall */
line = psc_wall(&pbeg,&pend,project,&interact->part1->dir, \
positive,&interact->param->rcut,&partbeg,&partend);
//printf("line: %d beg %f %f end %f %f \n",line,pbeg.x,pbeg.y,pend.x,pend.y);
}
if (line > 0) {
/*cm2 by average begining and end*/
cm2.x = AVER(pbeg.x,pend.x);
cm2.y = AVER(pbeg.y,pend.y);
cm2.z = 0.0;
/*length by size of end-benining*/
length2 = sqrt( (pend.x-pbeg.x)*(pend.x-pbeg.x)+(pend.y-pbeg.y)*(pend.y-pbeg.y) );
inters.x = cm2.x - cm1.x;
inters.y = cm2.y - cm1.y;
inters.z = cm2.z - cm1.z;
//printf("cm2 %f %f %f inters %f %f %f \n",cm2.x,cm2.y,cm2.z,inters.x,inters.y,inters.z);
*ndist = sqrt(DOT(inters,inters));
if (*ndist < interact->param->pdis) {
atrenergy = -interact->param->epsilon;
}
else {
atrenergy= cos(PIH*(*ndist-interact->param->pdis)/interact->param->pswitch);
atrenergy *= -atrenergy*interact->param->epsilon;
}
/* scaling function1: dependence on the length of intersetions plus*/
f0=(length1 + length2)*0.5;
/*scaling with angle*/
f1 = fabs(interact->part1->patchdir[numpatch].z);
atrenergy *= f0*f1;
//printf(" %f %f %f %f %f %f %f \n",conf->particle[target].pos.z*conf->box.z,atrenergy, area, length1, length2,f0,ndist);
//printf("%f %f %f %f\n",pbeg.x,pbeg.y,pend.x,pend.y);
} else {
atrenergy = 0.0;
}
} else {
if (*ndist < interact->param->pdis)
atrenergy = -interact->param->epsilon;
else {
atrenergy= cos(PIH*(*ndist-interact->param->pdis)/interact->param->pswitch);
atrenergy *= -atrenergy*interact->param->epsilon;
}
/*add wall scaling wall area/ particle arear.. to reflect that we have a wall not sphere */
atrenergy *= (interact->param->rcut*interact->param->rcut - (*ndist)*(*ndist))/(interact->param->sigma*interact->param->sigma) ;
}
return atrenergy;
}
/*
Determine, for the interaction with the external wall, on which side of the
wall the particle sits and which spherocylinder end is the closer one.
Sets interact->dist (closest-approach distance), *positive (TRUE when the
centre of mass has z >= 0), *interendz and project->z (sign conventions used
by the projection routines) and, for spherocylinder geotypes, *orientin /
*orient (whether the stored direction points towards the wall, as a flag and
as a +-1.0 factor).
*/
void exter2_closestdist(struct interacts * interact, BOOL *positive, BOOL *orientin, double *orient, double *rcmz,double *interendz, struct vector *project)
{
    /* the side of the wall fixes all sign conventions */
    if (*rcmz < 0) {
        interact->dist = -(*rcmz);
        *positive = FALSE;
        *interendz = -1.0;
        project->z = 1.0;
    } else {
        interact->dist = (*rcmz);
        *positive = TRUE;
        *interendz = 1.0;
        project->z = -1.0;
    }
    /* for a spherocylinder the closest point is always the end nearer to
       the wall - replace the centre-of-mass distance by that end distance */
    if (interact->param->geotype[0] < SP ) {
        if (*positive) {
            if (interact->part1->dir.z > 0) {
                *orientin = FALSE;
                *orient = -1.0;
                interact->dist = *rcmz - interact->part1->dir.z * interact->param->half_len[0];
            } else {
                *orientin = TRUE;
                *orient = 1.0;
                interact->dist = *rcmz + interact->part1->dir.z * interact->param->half_len[0];
            }
        } else {
            if (interact->part1->dir.z > 0) {
                *orientin = TRUE;
                *orient = 1.0;
                interact->dist = -( *rcmz + interact->part1->dir.z * interact->param->half_len[0]);
            } else {
                *orientin = FALSE;
                *orient = -1.0;
                interact->dist = -( *rcmz - interact->part1->dir.z * interact->param->half_len[0]);
            }
        }
    }
}
/*...........................................................................*/
/*Calculates interaction of target particle and external field
calculate projection of patch of spherocylinder on wall
evaluate intersection area and calculate interaction from that
*/
/*
Attractive part of the particle-wall interaction for patch number numpatch.
For patchy spherocylinder geotypes (SCA < geotype < SP) the patch is
projected onto the wall, the projected patch polygon / circle-segment area
is assembled, and the attraction is scaled by that area and the length of
the interacting axis segment.  For all other geotypes a purely
distance-based attraction scaled by the wall/particle area ratio is used.
Parameters (all pointers are in/out state shared with the caller):
  interact  - pair descriptor (particle, parameters, current distance)
  orientin  - flag: stored direction points towards the wall (set by
              exter2_closestdist)
  rcmz      - z-distance of the centre of mass from the wall
              (NOTE(review): inferred from usage - confirm)
  interendz - sign convention / end-point z of the interacting segment
  positive  - particle is on the z >= 0 side of the wall
  orient    - +-1.0 counterpart of orientin
  project   - projection direction (unused here for patchy types; the patch
              direction is used instead)
  ndist     - in/out: distance used for the attraction switch
  numpatch  - which patch of the particle to evaluate
  halfl     - half length of the spherocylinder
Returns the (negative) attraction energy contribution.
*/
double exter_atre(struct interacts * interact,int *orientin, double *rcmz, double *interendz, BOOL *positive, double orient,struct vector *project, double *ndist,int numpatch,double halfl)
{
double area,a,b,c,r2;
double atrenergy=0.0; /* energy*/
BOOL countend;
struct vector cm1,cm2; /* centrar of interacting segments */
struct vector pbeg,pend; /* projected spherocylinder begining and end*/
struct vector inters,newdir;
struct vector pbeg1,pend1,pbeg2,pend2,pextr1,pextr2,pextr3,pextr4; /*additinal point of projected patch for calculation of area */
double length1, cuttoproject, f0;
int line, line1, line2,extra;
struct vector partbeg,partend; /*closest and furthest point of particle*/
double erepulsive(struct interacts *);
struct vector vec_perpproject(struct vector*, struct vector*);
void normalise(struct vector *);
double fanglscale(double, struct ia_param *, int which);
struct vector vec_create(double, double, double);
double areaeightpoints(struct vector*,struct vector*,struct vector*,struct vector*,struct vector*,struct vector*,struct vector*,struct vector*);
int cpsc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir,
struct vector* partdir, double* halfl,BOOL* orientin,BOOL* positive, double* rcmz,
double * cut, struct vector* partbeg, struct vector* partend);
int psc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir,
struct vector* partdir, BOOL* positive, double * cut, struct vector* partbeg, struct vector* partend);
int cutprojectatwall(struct vector* pextr1, struct vector* pextr2, struct vector* pextr3, struct vector* pextr4,
struct vector* projectdir, struct vector* partdir, double * cutdist, struct vector *partbeg,
struct vector *partend, struct vector *pend, double *cuttoproject, BOOL* orientin);
void exter2_closestdist(struct interacts * interact, BOOL *positive, BOOL *orientin, double *orient, double *rcmz,double *interendz, struct vector *project);
/*interaction with PATCHY SPHEROCYLINDERS*/
if ((interact->param->geotype[0] < SP)&&(interact->param->geotype[0] > SCA)) {
//printf("partdir: %f %f %f \n ",interact->part1->dir.x,interact->part1->dir.y,interact->part1->dir.z);
//printf("patchdir: %f %f %f \n ",interact->part1->patchdir[numpatch].x,interact->part1->patchdir[numpatch].y,interact->part1->patchdir[numpatch].z);
/* calculate position of closest and furthest point (begining and end of spherocylinder)*/
a = (*orientin-0.5)*2; /*if orientin a =1 else a=-1 */
partbeg.x = a * interact->part1->dir.x * halfl;
partbeg.y = a * interact->part1->dir.y * halfl;
partbeg.z = *rcmz + a * interact->part1->dir.z * halfl;
partend.x = - a * interact->part1->dir.x * halfl;
partend.y = - a * interact->part1->dir.y * halfl;
partend.z = *rcmz - a * interact->part1->dir.z * halfl;
//printf("partbeg %f %f %f partend %f %f %f \n",partbeg.x,partbeg.y,partbeg.z,partend.x,partend.y,partend.z);
/*calculate interacting line segment and its cm of spherocylinder*/
/*calculate end point z*/
if ( (interact->param->rcut - interact->dist)/fabs(interact->part1->dir.z) < halfl*2.0 ){
/*if cutoff goes through spherocylinder the end point is at cutoff*/
*interendz *= interact->param->rcut;
} else {
/*endpoint is at the end of spherocylinders*/
*interendz = partend.z;
}
/*calculate CM of interacting line segment of spherocylinder*/
if (*positive) {
cm1.z = AVER(*interendz,interact->dist);
} else {
cm1.z = AVER(*interendz,-interact->dist);
}
/* a = axial parameter of the segment midpoint; length1 = segment length.
   A particle lying in the wall plane (dir.z == 0) interacts along its
   whole length. */
if (interact->part1->dir.z != 0.0 ) {
a = (*interendz - cm1.z ) / interact->part1->dir.z;
length1= -orient*2.0*a;
a = a + orient*halfl;
} else {
a = 0.0;
length1 = 2.0*halfl;
}
//printf("len1: %f rcm %f interz %f cutoff %f \n",length1,rcmz, interendz,interact->dist);
cm1.x = interact->part1->dir.x * a;
cm1.y = interact->part1->dir.y * a;
/*calculate projection on wall as infinite line and make it interacting segment*/
if (interact->part1->patchdir[numpatch].z != 0) {
cuttoproject = -interact->param->rcut*interact->part1->patchdir[numpatch].z; /*z coordinate of point where projection is in cut distance*/
if ( ((partend.z < cuttoproject)&&(*positive)) || ((cuttoproject < partend.z)&&(!(*positive))) ){
cuttoproject = partend.z;
}
} else {
cuttoproject = partbeg.z;
}
//printf("cutproject %f \n",cuttoproject);
//printf("cm1 %f %f %f \n",cm1.x, cm1.y,cm1.z );
/* we have interacting segment*/
if ((interact->param->geotype[0] == CPSC)||(interact->param->geotype[0] == CHCPSC)) {
/*CPSC type*/
if ( ((cuttoproject >= interact->dist)&&(*positive)) || ((cuttoproject <= -interact->dist)&&(!(*positive))) ){
/*test if projection is not all out of interaction*/
line = cpsc_wall(&pbeg,&pend,&interact->part1->patchdir[numpatch],&interact->part1->dir, \
&interact->param->half_len[0],orientin,positive,rcmz,&interact->param->rcut,&partbeg,&partend);
//printf("line: %d beg %f %f end %f %f \n",line,pbeg.x,pbeg.y,pend.x,pend.y);
} else {
line = 0;
}
} else {
/*PSC and CHPSC interaction with wall */
line = psc_wall(&pbeg,&pend,&interact->part1->patchdir[numpatch],&interact->part1->dir, \
positive,&interact->param->rcut,&partbeg,&partend);
//printf("line: %d beg %f %f end %f %f \n",line,pbeg.x,pbeg.y,pend.x,pend.y);
}
/* line > 0: a projected patch segment exists on the wall; assemble the
   projected patch area from cutoff boundaries (pextr1..4), the two patch
   side projections (pbeg1/pend1, pbeg2/pend2) and circle segments. */
if (line > 0) {
area = 0.0;
/*project cutoff boudaries*/
if (line == 2 ) {
/*if projection end is on sphere of begining don't care about cylinder cutoff*/
extra = 0;
} else {
extra = cutprojectatwall(&pextr1, &pextr2, &pextr3, &pextr4, &interact->part1->patchdir[numpatch], \
&interact->part1->dir, &interact->param->rcut, &partbeg, &partend,&pend,&cuttoproject,orientin);
}
//printf("extr1: %d %f %f extr2 %f %f extr3 %f %f extr4 %f %f \n",extra,pextr1.x,pextr1.y,pextr2.x,pextr2.y,pextr3.x,pextr3.y,pextr4.x,pextr4.y);
/*project patch boundaries on the first side*/
newdir=interact->part1->patchsides[0+2*numpatch];
line1 = cpsc_wall(&pbeg1,&pend1,&newdir,&interact->part1->dir, \
&interact->param->half_len[0],orientin,positive,rcmz,&interact->param->rcut,&partbeg,&partend);
if ( ((interact->param->geotype[0] == PSC)||(interact->param->geotype[0] == CHPSC)) ) {
line1 = psc_wall(&pbeg1,&pend1,&newdir,&interact->part1->dir, \
positive,&interact->param->rcut,&partbeg,&partend);
}
//printf("line1: %d beg1 %f %f end1 %f %f \n",line1,pbeg1.x,pbeg1.y,pend1.x,pend1.y);
/*project patch boundaries on the second side*/
newdir=interact->part1->patchsides[1+2*numpatch];
line2 = cpsc_wall(&pbeg2,&pend2,&newdir,&interact->part1->dir, \
&interact->param->half_len[0],orientin,positive,rcmz,&interact->param->rcut,&partbeg,&partend);
if ( ((interact->param->geotype[0] == PSC)||(interact->param->geotype[0] == CHPSC)) ) {
line2 = psc_wall(&pbeg2,&pend2,&newdir,&interact->part1->dir, \
positive,&interact->param->rcut,&partbeg,&partend);
}
//printf("line2: %d beg2 %f %f end2 %f %f \n",line2,pbeg2.x,pbeg2.y,pend2.x,pend2.y);
/*calculate area*/
if (extra == 0) {
/*thish should only happen when there is PSC interacting only with end*/
if (line1 == 0) {
if (line2==0) {
/*circle around middle-pbeg*/
area = PI*( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y));
}
else{
/* circle around middle-pbeg minus circle segment*/
a = AVER(pbeg2.x,pend2.x);
b = AVER(pbeg2.y,pend2.y);
c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/
r2 = ( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y)); /*radius squared*/
area = r2*(PI-acos(sqrt(c/r2))) + sqrt(r2*c-c*c);
}
} else {
if (line2==0) {
/* circle around middle-pbeg minus circle segment*/
a = AVER(pbeg1.x,pend1.x);
b = AVER(pbeg1.y,pend1.y);
c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/
r2 = ( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y)); /*radius squared*/
area = r2*(PI-acos(sqrt(c/r2))) + sqrt(r2*c-c*c);
} else {
//area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend,&pend1,&pbeg1,NULL,NULL); /* B B2 E2 E E1 B1 */
/*circle minus two circle segments*/
r2 = ( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y)); /*radius squared*/
area = r2*PI;
a = AVER(pbeg1.x,pend1.x);
b = AVER(pbeg1.y,pend1.y);
c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/
area += -r2*acos(sqrt(c/r2)) + sqrt(r2*c-c*c);
a = AVER(pbeg2.x,pend2.x);
b = AVER(pbeg2.y,pend2.y);
c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/
area += -r2*acos(sqrt(c/r2)) + sqrt(r2*c-c*c);
}
}
} else {
/* extra != 0: polygon case.  Pick the correct vertex ordering for
   areaeightpoints depending on which cutoff edge (13 / 42) each
   projected patch end lies on; countend tells whether the segment end
   point E is a distinct polygon vertex (ZEROTOL = collinearity test). */
b = fabs((pextr4.x-pextr2.x)*(pextr2.y-pend.y)- (pextr2.x-pend.x)*(pextr4.y-pextr2.y));/*pend on 42*/
c = fabs((pextr1.x-pextr3.x)*(pextr3.y-pend.y)- (pextr3.x-pend.x)*(pextr1.y-pextr3.y));/*pend on 13*/
if ( ( b< ZEROTOL) || ( c< ZEROTOL) )
countend = FALSE;
else
countend = TRUE;
if (line1 == 0) {
if (line2 == 0) {
if ( countend ) {
area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pend,&pextr3,&pextr1,NULL,NULL);/* B 2 4 E 3 1 */
} else
area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pextr3,&pextr1,NULL,NULL,NULL);/* B 2 4 3 1 */
} else {
a = fabs((pextr4.x-pextr2.x)*(pextr2.y-pend2.y)- (pextr2.x-pend2.x)*(pextr4.y-pextr2.y));
if ( a< ZEROTOL) /*pend2 on 42*/ {
if ( countend ) {
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pextr3,&pextr1,NULL); /* B B2 E2 4 E 3 1 */
} else {
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pextr3,&pextr1,NULL,NULL); /* B B2 E2 4 3 1 */
}
} else {
a = fabs((pextr1.x-pextr3.x)*(pextr3.y-pend2.y)- (pextr3.x-pend2.x)*(pextr1.y-pextr3.y));
if ( a< ZEROTOL) /*pend2 on 13*/ {
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr1,NULL,NULL,NULL,NULL); /* B B2 E2 1 */
} else { /*pend2 on 34 or on begining sphere of psc*/
if (line2 == 2) {
if ( countend ) {
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pextr3,&pextr1,NULL); /* B B2 E2 4 E 3 1 */
} else {
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pextr3,&pextr1,NULL,NULL); /* B B2 E2 4 3 1 */
}
} else {
if ( countend ) {
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend,&pextr3,&pextr1,NULL,NULL); /* B B2 E2 E 3 1 */
} else {
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr3,&pextr1,NULL,NULL,NULL); /* B B2 E2 3 1 */
}
}
}
}
}
} else {
a = fabs((pextr4.x-pextr2.x)*(pextr2.y-pend1.y)- (pextr2.x-pend1.x)*(pextr4.y-pextr2.y));
if ( a< ZEROTOL) /*pend1 on 42*/ {
if (line2 == 0) {
area = areaeightpoints(&pbeg,&pextr2,&pend1,&pbeg1,NULL,NULL,NULL,NULL); /* B 2 E1 B1 */
} else {
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend1,&pbeg1,NULL,NULL,NULL); /* B B2 E2 E1 B1 */
}
} else {
a = fabs((pextr1.x-pextr3.x)*(pextr3.y-pend1.y)- (pextr3.x-pend1.x)*(pextr1.y-pextr3.y));
if ( a< ZEROTOL) /*pend1 on 13*/ {
if (line2 == 0) {
if (countend) {
area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pend,&pextr3,&pend1,&pbeg1,NULL); /* B 2 4 E 3 E1 B1 */
} else {
area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pextr3,&pend1,&pbeg1,NULL,NULL); /* B 2 4 3 E1 B1 */
}
} else {
a = fabs((pextr4.x-pextr2.x)*(pextr2.y-pend2.y)- (pextr2.x-pend2.x)*(pextr4.y-pextr2.y));
if ( a< ZEROTOL) /*pend2 on 42*/ {
if (countend)
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pextr3,&pend1,&pbeg1); /* B B2 E2 4 E 3 E1 B1 */
else
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pextr3,&pend1,&pbeg1,NULL); /* B B2 E2 4 3 E1 B1 */
} else {
a = fabs((pextr3.x-pextr1.x)*(pextr1.y-pend2.y)- (pextr1.x-pend2.x)*(pextr3.y-pextr1.y));
if ( a< ZEROTOL) /*pend2 on 31*/ {
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend1,&pbeg1,NULL,NULL,NULL); /* B B2 E2 E1 B1 */
} else { /*pend2 close to 34 or on begining sphere of psc*/
if (line2 == 2) {
if (countend)
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pextr3,&pend1,&pbeg1); /* B B2 E2 4 E 3 E1 B1 */
else
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pextr3,&pend1,&pbeg1,NULL); /* B B2 E2 4 3 E1 B1 */
} else {
if (countend)
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend,&pextr3,&pend1,&pbeg1,NULL); /* B B2 E2 E 3 E1 B1 */
else
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr3,&pend1,&pbeg1,NULL,NULL); /* B B2 E2 3 E1 B1 */
}
}
}
}
} else {/*pend1 close to 34 or on beging sphere for psc*/
if (line2 == 0) {
if (line1 ==2) {
if (countend)
area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pend,&pextr3,&pend1,&pbeg1,NULL); /* B 2 4 E 3 E1 B1*/
else {
area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pextr3,&pend1,&pbeg1,NULL,NULL); /* B 2 4 3 E1 B1*/
}
} else {
if (countend)
area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pend,&pend1,&pbeg1,NULL,NULL); /* B 2 4 E E1 B1*/
else {
area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pend1,&pbeg1,NULL,NULL,NULL); /* B 2 4 E1 B1*/
}
}
} else {
a = fabs((pextr4.x-pextr2.x)*(pextr2.y-pend2.y)- (pextr2.x-pend2.x)*(pextr4.y-pextr2.y));
if ( a< ZEROTOL) /* pend2 on 42 */ {
if (countend)
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pend1,&pbeg1,NULL); /* B B2 E2 4 E E1 B1 */
else
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend1,&pbeg1,NULL,NULL); /* B B2 E2 4 E1 B1 */
} else { /*pend1 and pend2 close to 34 or on beging sphere for psc*/
if (line2 == 2) {
if (line1 == 2) {
if (countend)
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pextr3,&pend1,&pbeg1); /* B B2 E2 4 E 3 E1 B1 */
else
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pextr3,&pend1,&pbeg1,NULL); /* B B2 E2 4 3 E1 B1 */
} else {
if (countend)
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pend1,&pbeg1,NULL); /* B B2 E2 4 E E1 B1 */
else
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend1,&pbeg1,NULL,NULL); /* B B2 E2 4 E1 B1 */
}
} else {
if (line1 == 2) {
if (countend)
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend,&pextr3,&pend1,&pbeg1,NULL); /* B B2 E2 E 3 E1 B1 */
else
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr3,&pend1,&pbeg1,NULL,NULL); /* B B2 E2 3 E1 B1 */
} else {
if (countend)
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend,&pend1,&pbeg1,NULL,NULL); /* B B2 E2 E E1 B1 */
else
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend1,&pbeg1,NULL,NULL,NULL); /* B B2 E2 E1 B1 */
}
}
}
}
}
}
} /*extra != 0*/
/* PSC-like types: the spherical cap contributes extra circle segments
   (each "add circle segment" block: chord midpoint (a,b), squared
   chord-midpoint distance c, squared radius r2). */
if ((interact->param->geotype[0] == PSC)||(interact->param->geotype[0] == CHPSC)) {
if (line1==2) {
/* add circle segment*/
a = AVER(pextr1.x,pend1.x); /*end to cutoff - pextr1 ,pend1 */
b = AVER(pextr1.y,pend1.y);
c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/
r2 = ( (partbeg.x-pend1.x)*(partbeg.x-pend1.x) + (partbeg.y-pend1.y)*(partbeg.y-pend1.y)); /*radius squared*/
area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c);
a = AVER(pbeg.x,pbeg1.x); /* between beginings - pbeg ,pbeg1 */
b = AVER(pbeg.y,pbeg1.y);
c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/
area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c);
} else {
if (line1==0) {
/* add circle segment*/
a = AVER(pextr1.x,pbeg.x); /* begining to cutoff*/
b = AVER(pextr1.y,pbeg.y);
c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/
r2 = ( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y)); /*radius squared*/
area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c);
}
}
if (line2==2) {
/* add circle segment*/
a = AVER(pextr3.x,pend2.x); /*end to cutoff - pextr3 ,pend2 */
b = AVER(pextr3.y,pend2.y);
c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/
r2 = ( (partbeg.x-pend2.x)*(partbeg.x-pend2.x) + (partbeg.y-pend2.y)*(partbeg.y-pend2.y)); /*radius squared*/
area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c);
a = AVER(pbeg.x,pbeg2.x); /* between beginings - pbeg ,pbeg2 */
b = AVER(pbeg.y,pbeg2.y);
c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/
area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c);
} else {
if (line2==0) {
/* add circle segment*/
a = AVER(pextr3.x,pbeg.x); /* begining to cutoff*/
b = AVER(pextr3.y,pbeg.y);
c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/
r2 = ( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y)); /*radius squared*/
area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c);
}
}
}
}
/*area finished*/
/*cm2 by average begining and end*/
cm2.x = AVER(pbeg.x,pend.x);
cm2.y = AVER(pbeg.y,pend.y);
cm2.z = 0.0;
/*length by size of end-benining*/
//length2 = sqrt( (pend.x-pbeg.x)*(pend.x-pbeg.x)+(pend.y-pbeg.y)*(pend.y-pbeg.y) );
inters.x = cm2.x - cm1.x;
inters.y = cm2.y - cm1.y;
inters.z = cm2.z - cm1.z;
//printf("cm2 %f %f %f inters %f %f %f \n",cm2.x,cm2.y,cm2.z,inters.x,inters.y,inters.z);
/* distance between segment centres drives the attraction: constant well
   inside pdis, cosine-squared switch over pswitch beyond it */
*ndist = sqrt(DOT(inters,inters));
if (*ndist < interact->param->pdis) {
atrenergy = -interact->param->epsilon;
}
else {
atrenergy= cos(PIH*(*ndist-interact->param->pdis)/interact->param->pswitch);
atrenergy *= -atrenergy*interact->param->epsilon;
}
/* scaling function1: dependence on the length of intersetions plus SCALING WITH AREA*/
f0=(length1 + area / interact->param->sigma)*0.5;
atrenergy *= f0;
//printf(" %f %f %f %f %f %f %f %d %d %d \n",conf->particle[target].pos.z*conf->box.z,atrenergy, area, length1, length2,f0,ndist,extra,line1,line2);
//printf("%f %f %f %f\n",pbeg.x,pbeg.y,pend.x,pend.y);
//printf("%f %f %f %f %f %f\n",pbeg2.x,pend2.y,pextr2.x,pextr2.y,pextr1.x,pextr1.y);
} else {
atrenergy = 0.0;
}
} else {
/* non-patchy (spherical) types: distance-only attraction */
if (*ndist < interact->param->pdis)
atrenergy = -interact->param->epsilon;
else {
atrenergy= cos(PIH*(*ndist-interact->param->pdis)/interact->param->pswitch);
atrenergy *= -atrenergy*interact->param->epsilon;
}
/*add wall scaling wall area/ particle arear.. to reflect that we have a wall not sphere */
atrenergy *= (interact->param->rcut*interact->param->rcut - (*ndist)*(*ndist))/(interact->param->sigma*interact->param->sigma) ;
}
//printf("%f %f \n",conf->particle[target].pos.z*conf->box.z,atrenergy);
return atrenergy;
}
/*..............................................................................*/
/* Initializes the array with the pointers to the energy function
*/
/*
Fill the intfce[t1][t2] table with the pair-energy routine that matches the
two geometry types declared for that type pair in the topology.  Pairs with
no matching rule keep the error stub enoexist.  Later matches overwrite
earlier ones, preserving the original assignment order.
*/
void init_intfce(double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo){
    long gt1, gt2;
    int t1, t2;

    for (t1 = 0; t1 < MAXT; t1++) {
        for (t2 = 0; t2 < MAXT; t2++) {
            /* default: this pair of types has no defined interaction */
            intfce[t1][t2] = &enoexist;
            gt1 = topo->ia_params[t1][t2].geotype[0];
            gt2 = topo->ia_params[t1][t2].geotype[1];

            /* one CPSC-family particle paired with one PSC-family particle */
            if ( ( (gt1 == CHCPSC || gt1 == CPSC || gt1 == TCHCPSC || gt1 == TCPSC) &&
                   (gt2 == CHPSC || gt2 == PSC || gt2 == TCHPSC || gt2 == TPSC) ) ||
                 ( (gt1 == CHPSC || gt1 == PSC || gt1 == TCHPSC || gt1 == TPSC) &&
                   (gt2 == CHCPSC || gt2 == CPSC || gt2 == TCHCPSC || gt2 == TCPSC) ) ) {
                intfce[t1][t2] = &e_psc_cpsc;
            }
            /* both particles from the CPSC family */
            if ( (gt1 == CHCPSC || gt1 == CPSC || gt1 == TCHCPSC || gt1 == TCPSC) &&
                 (gt2 == CHCPSC || gt2 == CPSC || gt2 == TCHCPSC || gt2 == TCPSC) ) {
                intfce[t1][t2] = &e_cpsc_cpsc;
            }
            /* both particles from the PSC family */
            if ( (gt1 == CHPSC || gt1 == PSC || gt1 == TCHPSC || gt1 == TPSC) &&
                 (gt2 == CHPSC || gt2 == PSC || gt2 == TCHPSC || gt2 == TPSC) ) {
                intfce[t1][t2] = &e_psc_psc;
            }
            /* SCN or SPN on either side */
            if (gt1 == SCN || gt1 == SPN || gt2 == SCN || gt2 == SPN) {
                intfce[t1][t2] = &e_spn_or_scn;
            }
            /* matched SCA/SCA or SPA/SPA pairs */
            if ((gt1 == SCA && gt2 == SCA) || (gt1 == SPA && gt2 == SPA)) {
                intfce[t1][t2] = &e_2sca_or_2spa;
            }
            /* mixed SCA/SPA pair */
            if ((gt1 == SCA && gt2 == SPA) || (gt1 == SPA && gt2 == SCA)) {
                intfce[t1][t2] = &e_spa_sca;
            }
            /* PSC family against SPA */
            if (( (gt1 == PSC || gt1 == CHPSC || gt1 == TCHPSC || gt1 == TPSC) && gt2 == SPA)
                || (gt1 == SPA && (gt2 == PSC || gt2 == CHPSC || gt2 == TCHPSC || gt2 == TPSC) )) {
                intfce[t1][t2] = &e_psc_spa;
            }
            /* CPSC family against SPA */
            if (( (gt1 == CPSC || gt1 == CHCPSC || gt1 == TCHCPSC || gt1 == TCPSC) && gt2 == SPA)
                || (gt1 == SPA && (gt2 == CPSC || gt2 == CHCPSC || gt2 == TCHCPSC || gt2 == TCPSC) )) {
                intfce[t1][t2] = &e_cpsc_spa;
            }
        }
    }
}
/*..............................................................................*/
/*
Compare energy change to temperature and based on Boltzmann probability
return either 0 to accept or 1 to reject the move
*/
/*
Metropolis acceptance test.  A move that does not raise the energy is
always accepted (returns 0).  An uphill move is accepted with Boltzmann
probability exp(-dE/temperature), compared against a uniform random number
from ran2(&seed); otherwise it is rejected (returns 1).
*/
int movetry(double energyold, double energynew, double temperature)
{
    double ran2(long *);

    if (energynew <= energyold)
        return 0;
    return ( exp(-1.0*(energynew-energyold)/temperature) > ran2(&seed) ) ? 0 : 1;
}
/*..............................................................................*/
/*
* Calculate the different energy contributions. This is a merge of the different
* energy calculation functions (energyone, -chain, -all)
* 0: all
* 1: one
* 2: chain
*/
/*
Dispatcher for the three energy sums:
  mode 0 - total energy of the whole configuration (all particle pairs),
  mode 1 - energy of particle "target" against every other particle,
  mode 2 - energy of particle "target" against all particles that are NOT
           members of chain "chainnum" (the chain list must be sorted by
           particle index - the running index j relies on it).
An external-field term (extere2) is added whenever topo->exter.exist.
Returns the summed energy, or 0.0 (after a message on stderr) for an
unknown mode.
*/
double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *),
int mode, struct topo * topo, struct conf * conf, struct sim * sim, int chainnum)
{
long i=0,j=0;
double paire(long, long, double (* intfce[MAXT][MAXT])(struct interacts *),
struct topo * topo, struct conf * conf);
// double extere(long, struct topo * topo, struct conf * conf);
double extere2(long, struct topo * topo, struct conf * conf);
//DEBUG_SIM("Calculate the energy with mode %d", mode)
double energy = 0;
/* Calculates energy between particle "target" and the rest. Returns energy */
if(mode == 1){
/* with a verlet/pair list only the precomputed neighbours are visited */
if (sim->pairlist_update) {
#ifdef OMP
#pragma omp parallel for private(i) reduction (+:energy) schedule (dynamic)
#endif
for (i = 0; i < sim->pairlist[target].num_pairs; i++){
energy+= paire(target, sim->pairlist[target].pairs[i], intfce, topo, conf);
}
}
else{
/* no pair list: sum over all particles, skipping target itself by
   splitting the loop at target */
#ifdef OMP
#pragma omp parallel for private(i) reduction (+:energy) schedule (dynamic)
#endif
for (i = 0; i < target; i++) {
energy+= paire(target, i, intfce, topo, conf);
}
#ifdef OMP
#pragma omp parallel for private(i) reduction (+:energy) schedule (dynamic)
#endif
for (i = target + 1; i < topo->npart; i++) {
energy+= paire(target, i, intfce, topo, conf);
}
}
/*add interaction with external potential*/
if (topo->exter.exist)
energy+= extere2(target,topo,conf);
}
/*
* Calculates energy between particle "target" and the rest. skipping
* particles from the given chain -particles has to be sorted in chain!!
* so similar to energy one but with chain exception
*/
else if(mode == 2){
/* j walks chainlist[chainnum] in lockstep with i to skip chain members;
   the OMP pragmas are disabled here because the j++ bookkeeping is
   inherently sequential */
//#ifdef OMP
//#pragma omp parallel for private(i) reduction (+:energy) schedule (dynamic)
//#endif
for (i = 0; i < target; i++) {
if (i != topo->chainlist[chainnum][j]) {
energy+= paire(target, i, intfce, topo, conf);
}
else {
j++;
}
}
/* step over target's own entry in the chain list */
/* NOTE(review): assumes target itself is the chain member at position j
   and that the list is long enough - confirm no overrun for the last
   chain member */
j++;
//#ifdef OMP
//#pragma omp parallel for private(i) reduction (+:energy) schedule (dynamic)
//#endif
for (i = target + 1; i < topo->npart; i++) {
if (i != topo->chainlist[chainnum][j]) {
energy+= paire(target, i, intfce, topo, conf);
}
else {
j++;
}
}
/*add interaction with external potential*/
if (topo->exter.exist)
energy+= extere2(target,topo,conf);
}
/* Calculates energy between all pairs. Returns energy */
else if(mode == 0){
#ifdef OMP
#pragma omp parallel for private(i,j) reduction (+:energy) schedule (dynamic)
#endif
for (i = 0; i < topo->npart - 1; i++) {
for (j = i + 1; j < topo->npart; j++) {
energy+= paire(i, j, intfce, topo, conf);
}
/*for every particle add interaction with external potential*/
if (topo->exter.exist)
energy+= extere2(i,topo,conf);
}
/*add interaction of last particle with external potential*/
/* (the outer loop stops at npart-2, so its field term is added here) */
if (topo->exter.exist)
energy+= extere2(topo->npart-1,topo,conf);
}
else {
fprintf(stderr, "ERROR: Wrong mode (%d) was given to calc_energy!", mode);
return 0.0;
}
// DEBUG_SIM("Will return energy from calc_energy")
//printf("energymove %f\n",energy);
return energy;
}
/*..............................................................................*/
/*
Checks for overlaps between particle "target" and the rest. Returns 1 if overlap
detected, 0 otherwise.
*/
/*
Overlap test for a single particle: scan every other particle and return 1
on the first overlap with "target", 0 when no overlap exists.
*/
int forbidden(long npart, struct particles *particle,
long target, struct vector box, struct ia_param ia_params[MAXT][MAXT])
{
    long other;
    int overlap(struct particles, struct particles, struct vector,struct ia_param [MAXT][MAXT]);

    for (other = 0; other < npart; other++) {
        if (other == target)
            continue;   /* a particle never overlaps itself */
        if (overlap(particle[target], particle[other], box, ia_params))
            return 1;
    }
    return 0;
}
/*..............................................................................*/
/*
Checks for overlaps between all pairs of particles. Returns 1 if overlap
detected, 0 otherwise.
*/
/*
Overlap test for the whole configuration: visit each unordered pair once
and return 1 as soon as any two particles overlap, 0 otherwise.
*/
int checkall(long npart, struct particles *particle,
struct vector box, struct ia_param ia_params[MAXT][MAXT])
{
    long first, second;
    int overlap(struct particles, struct particles, struct vector,
    struct ia_param [MAXT][MAXT]);

    for (first = 0; first < npart - 1; first++)
        for (second = first + 1; second < npart; second++)
            if (overlap(particle[first], particle[second], box, ia_params))
                return 1;

    return 0;
}
/*..............................................................................*/
/*
Optimize the maximum displacement within the specified limits and resets the
acceptance counters to zero.
*/
/*
Adapt the maximum displacement x->mx from the acceptance statistics:
keep pushing mx in whichever direction raised the accepted rmsd last time,
reverse otherwise.  The result is clamped to [lo,hi] and the acceptance
counters are zeroed for the next measuring interval.
*/
void optimizestep(struct disp *x, double hi, double lo)
{
    double newrmsd;

    newrmsd = x->mx * RATIO(*x);
    if (x->oldrmsd > 0) {
        /* shrink when the rmsd moved against the previous adjustment
           direction (x->oldmx > 1 means the last change was a growth),
           otherwise grow - equivalent to the original four-branch logic */
        if ((newrmsd < x->oldrmsd) == (x->oldmx > 1)) {
            x->mx /= 1.05;
            x->oldmx = 0.95;
        } else {
            x->mx *= 1.05;
            x->oldmx = 1.05;
        }
    }
    if (newrmsd > 0) {
        x->oldrmsd = newrmsd;
    } else {
        /* nothing was accepted: forget history and shrink the step */
        x->oldrmsd = 0.0;
        x->mx /= 1.05;
        x->oldmx = 0.95;
    }
    /* clamp to the user limits and restart the counters */
    if (x->mx > hi) x->mx = hi;
    if (x->mx < lo) x->mx = lo;
    x->acc = x->rej = 0;
}
/*..............................................................................*/
/*
Optimize the maximum rotation within the specified limits and resets the
acceptance counters to zero. Rotation is given by cos of angle
larger rotation = smaller cos
*/
/*
Adapt the maximum rotation from the acceptance statistics.  The rotation is
stored as the cosine of the angle, so a LARGER rotation corresponds to a
SMALLER x->mx - the adaptation direction is therefore inverted with respect
to optimizestep().  Clamps mx to [lo,hi] and zeroes the counters.
*/
void optimizerot(struct disp *x, double hi, double lo)
{
    double newrmsd;

    newrmsd = x->mx * RATIO(*x);
    if (x->oldrmsd > 0) {
        /* shrink (i.e. allow larger rotations) when the rmsd moved with the
           previous adjustment direction, otherwise grow - equivalent to the
           original four-branch logic */
        if ((newrmsd > x->oldrmsd) == (x->oldmx > 1)) {
            x->mx *= 0.99;
            x->oldmx *= 0.99;
        } else {
            x->mx *= 1.01;
            x->oldmx *= 1.01;
        }
    }
    if (newrmsd > 0) {
        x->oldrmsd = newrmsd;
    } else {
        /* nothing was accepted: forget history and nudge mx upwards */
        x->oldrmsd = 0.0;
        x->mx *= 1.01;
        x->oldmx = 1.01;
    }
    /* clamp to the user limits and restart the counters */
    if (x->mx > hi) x->mx = hi;
    if (x->mx < lo) x->mx = lo;
    x->acc = x->rej = 0;
}
/*................................................................................*/
/*
Accumulate a value into the statistics and update the mean and rms values.
*/
/*
Fold one sample x into the running statistics *q and refresh the derived
mean and rms (the fabs() guards the sqrt against a tiny negative variance
produced by floating-point rounding).
*/
void accumulate(struct stat *q, double x)
{
    q->sum += x;
    q->sum2 += x*x;
    q->samples++;

    q->mean = q->sum / q->samples;
    q->rms = sqrt(fabs(q->sum2 / q->samples -
        q->sum * q->sum / q->samples / q->samples));
}
/*
Print, for each of the first "length" particle types, the maximum step
(scaled by "scale") together with its acceptance ratio.  Types with a
non-positive ratio are skipped.
*/
void printeqstat(struct disp *dat, double scale, int length)
{
    int type;

    for (type = 0; type < length; type++) {
        if (RATIO(dat[type]) > 0)
            printf (" TYPE %d %.6lf / %.6lf\n", type, dat[type].mx/scale,RATIO(dat[type]));
    }
}
/*
Allocate the fixed-capacity particle array (MAXN entries) for the
configuration.  Returns 0 on success, 1 when malloc fails.
*/
int memoryalloc(struct conf * conf)
{
    printf ("Allocating memory...\n");

    conf->particle = malloc( sizeof(struct particles)*MAXN);

    return (conf->particle == NULL) ? 1 : 0;
}
/*
Release all dynamically allocated simulation state.  Every freed pointer is
reset to NULL so the function is safe to call twice and later NULL checks
stay meaningful (previously only conf->particle was reset, leaving
clusterlist, clustersenergy and switchlist dangling).  Returns 0 on
success, 1 when the pair-list deallocation fails.
*/
int memorydealloc(struct conf * conf, struct topo * topo, struct sim * sim)
{
    int dealloc_pairlist(struct topo * topo, struct sim * sim);

    printf ("Deallocating memory...\n");

    if (conf->particle != NULL)
        free(conf->particle);
    conf->particle = NULL;

    if (sim->clusterlist != NULL) {
        free(sim->clusterlist);
        sim->clusterlist = NULL;
    }
    if (sim->clustersenergy != NULL) {
        free(sim->clustersenergy);
        sim->clustersenergy = NULL;
    }
    if (topo->switchlist) {
        free(topo->switchlist);
        topo->switchlist = NULL;
    }
    /* pair lists have their own teardown routine */
    if (sim->pairlist_update) {
        if (dealloc_pairlist(topo, sim)) {
            return 1;
        }
    }
    return 0;
}
/*............................................................................*/
/**
* nice malloc, which does the error checking for us
*/
/**
 * malloc wrapper that aborts the run on allocation failure, so callers
 * never have to check for NULL.
 *
 * The local variable was previously named "new", which is a reserved
 * keyword in C++ and broke any C++ compilation of this translation unit;
 * it is renamed here without changing behavior.
 *
 * @param num  number of bytes to allocate
 * @return     pointer to the allocated block (never NULL)
 */
void * xmalloc (size_t num){
    void *mem = malloc (num);
    if (!mem){
        fprintf(stderr, "Couldn't allocate any memory!\n");
        exit(1);
    }
    return mem;
}
/*............................................................................*/
/* *********************** GEOMETRICAL FUNCTIONS **************************** */
/*.........................PATCHY SPHEROCYLINDERS INTERACTION....................*/
/*................................................................................*/
/*
Calculate intersections of sc2 with a patch of sc1 and return them in
the intersections array.
*/
int psc_intersect(struct particles * part1, struct particles * part2,
double halfl1, double halfl2, struct vector r_cm, double intersections[5], double rcut,
struct ia_param * param, int which, int patchnum)
{
int intrs;
double a, b, c, d, e, x1, x2, rcut2;
struct vector cm21, vec1, vec2, vec3, vec4;
struct vector vec_crossproduct(struct vector, struct vector);
struct vector vec_sub(struct vector, struct vector);
struct vector vec_create(double, double, double);
struct vector vec_scale(struct vector, double);
struct vector vec_perpproject(struct vector*, struct vector*);
struct quat quat_create(struct vector, double, double);
void vec_rotate(struct vector *, struct quat);
void normalise(struct vector *);
int find_intersect_plane(struct particles *, struct particles *, double,
struct vector, struct vector, double, double, double *);
int test_intrpatch(struct particles *, struct vector, double, double, double *,int);
intrs=0;
rcut2=rcut*rcut;
/*1- do intersections of spherocylinder2 with patch of spherocylinder1 at
cut distance C*/
/*1a- test intersection with half planes of patch and look how far they are
from spherocylinder. If closer then C we got itersection*/
/* plane1 */
/* find intersections of part2 with plane by par1 and patchsides[0] */
intrs+=find_intersect_plane(part1,part2,halfl2,r_cm,part1->patchsides[0+2*patchnum],rcut,param->pcanglsw[which+2*patchnum],intersections);
// printf("plane1 %d\n", intrs);
/* plane2 */
/* find intersections of part2 with plane by par1 and patchsides[1] */
intrs+=find_intersect_plane(part1,part2,halfl2,r_cm,part1->patchsides[1+2*patchnum],rcut,param->pcanglsw[which+2*patchnum],intersections);
if ( (intrs == 2 ) && (param->pcanglsw[which+2*patchnum] <0) ) {
fprintf (stderr, "ERROR: Patch is larger than 180 degrees and we are getting two segments - this hasnot been programed yet.\n\n");
exit (1);
}
// printf("plane2 %d\n", intrs);
/*1b- test intersection with cylinder - it is at distance C*/
if (intrs < 2 ) {
cm21=vec_scale(r_cm,-1.0);
vec1=vec_crossproduct(cm21,part1->dir);
vec2=vec_crossproduct(part2->dir,part1->dir);
a = DOT(vec2,vec2);
b = 2*DOT(vec1,vec2);
c = -rcut*rcut + DOT(vec1,vec1);
d = b*b - 4*a*c;
if ( d >= 0) { /*there is intersection with infinite cylinder */
x1 = (-b+sqrt(d))*0.5/a;/*parameter on line of SC2 determining intersection*/
if ((x1 >=halfl2) || (x1 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/
else {
/* vectors from center os sc1 to intersection with infinite cylinder*/
vec1.x=part2->dir.x*x1-r_cm.x;
vec1.y=part2->dir.y*x1-r_cm.y;
vec1.z=part2->dir.z*x1-r_cm.z;
e = DOT(part1->dir,vec1);
if ((e >=halfl1) || (e <= -halfl1)) intrs+=0; /*intersection is outside sc1*/
else {
intrs+=test_intrpatch(part1,vec1,param->pcanglsw[which+2*patchnum],x1,intersections,patchnum);
}
}
if ( d > 0 ){
x2 = (-b-sqrt(d))*0.5/a;/*parameter on line of SC2 determining intersection*/
if ((x2 >=halfl2) || (x2 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/
else {
vec2.x = part2->dir.x*x2-r_cm.x;
vec2.y = part2->dir.y*x2-r_cm.y;
vec2.z = part2->dir.z*x2-r_cm.z;
e = DOT(part1->dir,vec2);
if ((e >=halfl1) || (e <= -halfl1)) intrs+=0; /*intersection is outside sc1*/
else {
intrs+=test_intrpatch(part1,vec2,param->pcanglsw[which+2*patchnum],x2,intersections,patchnum);
}
}
}
}
}
// printf ("cylinder %d x1 %f x2 %f e %f\n", intrs, x1, x2, e);
/*1c- test intersection with spheres at the end - it is at distace C*/
if (intrs < 2 ) {
/*centers of spheres*/
/*relative to the CM of sc2*/
vec1.x = part1->dir.x*halfl1 - r_cm.x;
vec1.y = part1->dir.y*halfl1 - r_cm.y;
vec1.z = part1->dir.z*halfl1 - r_cm.z;
vec2.x = -part1->dir.x*halfl1 - r_cm.x;
vec2.y = -part1->dir.y*halfl1 - r_cm.y;
vec2.z = -part1->dir.z*halfl1 - r_cm.z;
/*sphere1*/
a = DOT(part2->dir,part2->dir);
b = 2.0*DOT(vec1,part2->dir);
c = DOT(vec1,vec1)-rcut*rcut;
d = b*b-4*a*c;
if (d >= 0) { /*if d<0 there are no intersections*/
x1= (-b + sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/
if ((x1 >=halfl2) || (x1 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/
else {
vec3.x = part2->dir.x*x1-r_cm.x;
vec3.y = part2->dir.y*x1-r_cm.y;
vec3.z = part2->dir.z*x1-r_cm.z;
e = DOT(part1->dir,vec3);
if ((e >= halfl1) || (e <= -halfl1)) { /*if not intersection is inside sc1*/
intrs+=test_intrpatch(part1,vec3,param->pcanglsw[which+2*patchnum],x1,intersections,patchnum);
}
}
if ( d > 0) {
x2= (-b - sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/
if ((x2 >=halfl2) || (x2 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/
else {
vec4.x = part2->dir.x*x2 - r_cm.x;
vec4.y = part2->dir.y*x2 - r_cm.y;
vec4.z = part2->dir.z*x2 - r_cm.z;
e = DOT(part1->dir,vec4);
if ((e >=halfl1) || (e <= -halfl1)) { /*if not intersection is inside sc1*/
intrs+=test_intrpatch(part1,vec4,param->pcanglsw[which+2*patchnum],x2,intersections,patchnum);
}
}
}
}
// printf ("sphere1 %d x1 %f x2 %f e %f\n", intrs, x1, x2, e);
/*sphere2*/
a = DOT(part2->dir,part2->dir);
b = 2.0*DOT(vec2,part2->dir);
c = DOT(vec2,vec2)-rcut*rcut;
d = b*b-4*a*c;
if (d >= 0) { /*if d<0 there are no intersections*/
x1= (-b + sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/
if ((x1 >=halfl2) || (x1 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/
else {
vec3.x = part2->dir.x*x1 - r_cm.x;
vec3.y = part2->dir.y*x1 - r_cm.y;
vec3.z = part2->dir.z*x1 - r_cm.z;
e = DOT(part1->dir,vec3);
if ((e >=halfl1) || (e <= -halfl1)) { /*if not intersection is inside sc1*/
intrs+=test_intrpatch(part1,vec3,param->pcanglsw[which+2*patchnum],x1,intersections,patchnum);
}
}
if ( d > 0 ) {
x2= (-b - sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/
if ((x2 >=halfl2) || (x2 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/
else {
vec4.x = part2->dir.x*x2 - r_cm.x;
vec4.y = part2->dir.y*x2 - r_cm.y;
vec4.z = part2->dir.z*x2 - r_cm.z;
e = DOT(part1->dir,vec4);
if ((e >=halfl1) || (e <= -halfl1)) { /*if not intersection is inside sc1*/
intrs+=test_intrpatch(part1,vec4,param->pcanglsw[which+2*patchnum],x2,intersections,patchnum);
}
}
}
}
// printf ("sphere2 %d\n", intrs);
}
/*1d- if there is only one itersection shperocylinder ends within patch wedge
set as second intersection end inside patch*/
if (intrs < 2 ) {
/*whole spherocylinder is in or all out if intrs ==0*/
vec1.x = part2->dir.x*halfl2 - r_cm.x;
vec1.y = part2->dir.y*halfl2 - r_cm.y;
vec1.z = part2->dir.z*halfl2 - r_cm.z;
/*vector from CM of sc1 to end of sc2*/
/*check is is inside sc1*/
a=DOT(vec1,part1->dir);
vec3.x = vec1.x - part1->dir.x*a;
vec3.y = vec1.y - part1->dir.y*a;
vec3.z = vec1.z - part1->dir.z*a;
b=DOT(vec3,vec3);
d = fabs(a)-halfl1;
if ( d <= 0)
c = b; /*is inside cylindrical part*/
else
c = d*d + b; /*is inside caps*/
/*c is distance squared from line or end to test if is inside sc*/
if (c < rcut2)
intrs+=test_intrpatch(part1,vec1,param->pcanglsw[which+2*patchnum],halfl2,intersections,patchnum);
if (intrs < 2 ) {
vec2.x = -part2->dir.x*halfl2 - r_cm.x;
vec2.y = -part2->dir.y*halfl2 - r_cm.y;
vec2.z = -part2->dir.z*halfl2 - r_cm.z;
/*check is is inside sc1*/
a=DOT(vec2,part1->dir);
vec4.x = vec2.x - part1->dir.x*a;
vec4.y = vec2.y - part1->dir.y*a;
vec4.z = vec2.z - part1->dir.z*a;
b=DOT(vec4,vec4);
d = fabs(a) -halfl1;
if (d <= 0)
c = b; /*is inside cylindrical part*/
else
c = d*d + b; /*is inside caps*/
/*c is distance squared from line or end to test if is inside sc*/
if (c < rcut2)
intrs+=test_intrpatch(part1,vec2,param->pcanglsw[which+2*patchnum],-1.0*halfl2,intersections,patchnum);
}
// printf ("ends %d\n", intrs);
}
return intrs;
}
/*................................................................................*/
/*
 Test whether vector vec lies inside the angular patch `patchnum` of part1.
 If it does and the line parameter ti is not already recorded, append ti
 to intersections[] (a zero entry marks the first free slot).
 Returns 1 when a new intersection was stored, 0 otherwise.
*/
int test_intrpatch(struct particles * part1, struct vector vec, double cospatch,
double ti, double intersections[5],int patchnum)
{
    double cosang;
    int slot, stored;
    struct vector vec_perpproject(struct vector*, struct vector*);
    void normalise(struct vector *);

    /* project vec into the plane perpendicular to the particle axis and
       normalise it, so the dot product below is a plain cosine */
    vec = vec_perpproject(&vec, &part1->dir);
    normalise(&vec);

    cosang = DOT(part1->patchdir[patchnum], vec);
    if (cosang < cospatch)
        return 0;                        /* outside the patch wedge */

    /* scan for a duplicate parameter (intersection at a boundary) */
    stored = 1;
    slot = 0;
    while (intersections[slot] != 0) {
        if (ti == intersections[slot])
            stored = 0;                  /* already recorded */
        slot++;
    }
    if (stored > 0)
        intersections[slot] = ti;
    return stored;
}
/*................................................................................*/
/*
 Find the intersection of spherocylinder part2 with the patch-boundary
 plane of part1 defined by w_vec.  A valid intersection — within cutoff
 rcut, on the patch side of the plane, inside the length of part2, and
 not already recorded — is appended to intersections[] as a line
 parameter along part2's axis.  Returns 1 when a new intersection was
 stored, 0 otherwise.
*/
int find_intersect_plane(struct particles * part1, struct particles * part2, double halfl2,
struct vector r_cm, struct vector w_vec, double rcut, double cospatch, double intersections[5])
{
    int slot;
    double denom, side, dd, ti, dist2;
    struct vector nplane, d_vec;
    void normalise(struct vector *);
    struct vector vec_crossproduct(struct vector, struct vector);

    nplane = vec_crossproduct(part1->dir, w_vec);
    normalise(&nplane);
    normalise(&w_vec);

    denom = DOT(nplane, part2->dir);
    if (denom == 0.0)
        return 0;                   /* plane and sc are parallel */

    ti = DOT(nplane, r_cm) / denom;
    if ((ti > halfl2) || (ti < -halfl2))
        return 0;                   /* sc is too short */

    /* vector from the intersection point to the CM */
    d_vec.x = ti * part2->dir.x - r_cm.x;
    d_vec.y = ti * part2->dir.y - r_cm.y;
    d_vec.z = ti * part2->dir.z - r_cm.z;

    side = DOT (d_vec, w_vec);
    if (side * cospatch < 0)
        return 0;                   /* on the other side of the patch */

    dd = fabs(DOT (d_vec, part1->dir)) - halfl2;
    if (dd <= 0)
        dist2 = side * side;        /* inside the cylindrical part */
    else
        dist2 = dd * dd + side * side;  /* inside the caps */
    if (dist2 > rcut * rcut)
        return 0;                   /* intersection is outside the sc */

    /* reject a parameter we already recorded (boundary case) */
    for (slot = 0; intersections[slot] != 0; slot++)
        if (ti == intersections[slot])
            return 0;

    intersections[slot] = ti;
    return 1;
}
/*CPSC................................................................................*/
/*
 Calculate intersections of spherocylinder 2 with one angular patch of
 cylindrical spherocylinder 1 (CPSC: flat end plates instead of spherical
 caps) and append them, as line parameters along sc2's axis, to
 intersections[].  Returns the number of intersections added; at most two
 are kept, and a patch wider than 180 degrees producing two segments is
 rejected.  Mirrors psc_intersect() except steps 1c/1d use the flat end
 plates rather than end spheres.
*/
int cpsc_intersect(struct particles * part1, struct particles * part2,
double halfl1, double halfl2, struct vector r_cm, double intersections[5], double rcut,
struct ia_param * param, int which, int patchnum)
{
int intrs;
double a, b, c, d, e, x1, x2, rcut2;
struct vector cm21, vec1, vec2, vec3, vec4;
struct vector vec_crossproduct(struct vector, struct vector);
struct vector vec_sub(struct vector, struct vector);
struct vector vec_create(double, double, double);
struct vector vec_scale(struct vector, double);
struct vector vec_perpproject(struct vector*, struct vector*);
struct quat quat_create(struct vector, double, double);
void vec_rotate(struct vector *, struct quat);
void normalise(struct vector *);
int find_intersect_planec(struct particles *, struct particles *, double,
struct vector, struct vector, double, double, double *);
int test_intrpatch(struct particles *, struct vector, double, double, double *, int);
intrs=0;
rcut2=rcut*rcut;
/*1- do intersections of spherocylinder2 with patch of spherocylinder1 at
cut distance C*/
/*1a- test intersection with half planes of patch and look how far they are
from spherocylinder. If closer then C we got itersection*/
/* plane1 */
/* find intersections of part2 with plane by par1 and part1->patchsides[0] */
intrs+=find_intersect_planec(part1,part2,halfl2,r_cm,part1->patchsides[0+2*patchnum],rcut,param->pcanglsw[which+2*patchnum],intersections);
// printf("plane1 %d\n", intrs);
/* plane2 */
/* find intersections of part2 with plane by par1 and part1->patchsides[1] */
intrs+=find_intersect_planec(part1,part2,halfl2,r_cm,part1->patchsides[1+2*patchnum],rcut,param->pcanglsw[which+2*patchnum],intersections);
if ( (intrs == 2 ) && (param->pcanglsw[which+2*patchnum] < 0) ) {
fprintf (stderr, "ERROR: Patch is larger than 180 degrees and we are getting two segments - this hasnot been programed yet.\n\n");
exit (1);
}
// printf("plane2 %d\n", intrs);
/*1b- test intersection with cylinder - it is at distance C*/
/* quadratic for the line of sc2 against the infinite cylinder of radius
   rcut around the axis of sc1 */
if (intrs < 2 ) {
cm21=vec_scale(r_cm,-1.0);
vec1=vec_crossproduct(cm21,part1->dir);
vec2=vec_crossproduct(part2->dir,part1->dir);
a = DOT(vec2,vec2);
b = 2*DOT(vec1,vec2);
c = -rcut*rcut + DOT(vec1,vec1);
d = b*b - 4*a*c;
if ( d >= 0) { /*there is intersection with infinite cylinder */
x1 = (-b+sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/
if ((x1 >=halfl2) || (x1 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/
else {
/* vectors from center os sc1 to intersection with infinite cylinder*/
vec1.x=part2->dir.x*x1-r_cm.x;
vec1.y=part2->dir.y*x1-r_cm.y;
vec1.z=part2->dir.z*x1-r_cm.z;
e = DOT(part1->dir,vec1);
if ((e >=halfl1) || (e <= -halfl1)) intrs+=0; /*intersection is outside sc1*/
else {
intrs+=test_intrpatch(part1,vec1,param->pcanglsw[which+2*patchnum],x1,intersections,patchnum);
}
}
if ( d > 0 ){
/* second root: the other crossing of the cylinder */
x2 = (-b-sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/
if ((x2 >=halfl2) || (x2 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/
else {
vec2.x = part2->dir.x*x2-r_cm.x;
vec2.y = part2->dir.y*x2-r_cm.y;
vec2.z = part2->dir.z*x2-r_cm.z;
e = DOT(part1->dir,vec2);
if ((e >=halfl1) || (e <= -halfl1)) intrs+=0; /*intersection is outside sc1*/
else {
intrs+=test_intrpatch(part1,vec2,param->pcanglsw[which+2*patchnum],x2,intersections,patchnum);
}
}
}
}
}
// printf ("cylinder %d x1 %f x2 %f e %f\n", intrs, x1, x2, e);
/*1c- test intersection with plates at the end - it is at distace C and in wedge*/
if (intrs < 2 ) {
a = DOT(part1->dir, part2->dir);
if (a == 0.0) intrs=0; /* there is no intersection plane and sc are paralel*/
else {
/*plane cap1*/
/* intersect the line of sc2 with the flat end plate at +halfl1 */
vec1.x= r_cm.x + halfl1*part1->dir.x;
vec1.y= r_cm.y + halfl1*part1->dir.y;
vec1.z= r_cm.z + halfl1*part1->dir.z;
x1 = DOT(part1->dir,vec1)/a; /*parameter on line of SC2 determining intersection*/
if ((x1 > halfl2 ) || (x1 < -halfl2)) intrs+=0; /* there is no intersection plane sc is too short*/
else {
vec2.x = x1*part2->dir.x - vec1.x; /*vector from ENDPOINT to intersection point */
vec2.y = x1*part2->dir.y - vec1.y;
vec2.z = x1*part2->dir.z - vec1.z;
b = DOT (vec2, vec2);
if (b > rcut*rcut) intrs+=0; /* the intersection is outside sc */
else {
intrs+=test_intrpatch(part1,vec2,param->pcanglsw[which+2*patchnum],x1,intersections,patchnum);
}
}
// printf ("plane cap1 %d %f\n", intrs, x1);
/*plane cap2*/
/* same test for the flat end plate at -halfl1 */
vec1.x= r_cm.x - halfl1*part1->dir.x;
vec1.y= r_cm.y - halfl1*part1->dir.y;
vec1.z= r_cm.z - halfl1*part1->dir.z;
x2 = DOT(part1->dir,vec1)/a; /*parameter on line of SC2 determining intersection*/
if ((x2 > halfl2 ) || (x2 < -halfl2)) intrs+=0; /* there is no intersection plane sc is too short*/
else {
vec2.x = x2*part2->dir.x - vec1.x; /*vector from ENDPOINT to intersection point */
vec2.y = x2*part2->dir.y - vec1.y;
vec2.z = x2*part2->dir.z - vec1.z;
b = DOT (vec2, vec2);
if (b > rcut*rcut) intrs+=0; /* the intersection is outside sc */
else {
intrs+=test_intrpatch(part1,vec2,param->pcanglsw[which+2*patchnum],x2,intersections,patchnum);
}
}
// printf ("plane cap2 %d %f\n", intrs,x2);
}
}
/*1d- if there is only one itersection shperocylinder ends within patch wedge
set as second intersection end inside patch*/
if (intrs < 2 ) {
/*whole spherocylinder is in or all out if intrs ==0*/
vec1.x = part2->dir.x*halfl2 - r_cm.x;
vec1.y = part2->dir.y*halfl2 - r_cm.y;
vec1.z = part2->dir.z*halfl2 - r_cm.z;
/*vector from CM of sc1 to end of sc2*/
/*check is is inside sc1*/
a=DOT(vec1,part1->dir);
vec3.x = vec1.x - part1->dir.x*a;
vec3.y = vec1.y - part1->dir.y*a;
vec3.z = vec1.z - part1->dir.z*a;
b=DOT(vec3,vec3);
d = fabs(a)-halfl1;
if ( d <= 0) { /*is in cylindrical part*/
/*c is distance squared from line or end to test if is inside sc*/
if (b < rcut2) intrs+=test_intrpatch(part1,vec1,param->pcanglsw[which+2*patchnum],halfl2,intersections,patchnum);
}
if (intrs < 2 ) {
/* repeat for the other end of sc2 (line parameter -halfl2) */
vec2.x = -part2->dir.x*halfl2 - r_cm.x;
vec2.y = -part2->dir.y*halfl2 - r_cm.y;
vec2.z = -part2->dir.z*halfl2 - r_cm.z;
/*check is is inside sc1*/
a=DOT(vec2,part1->dir);
vec4.x = vec2.x - part1->dir.x*a;
vec4.y = vec2.y - part1->dir.y*a;
vec4.z = vec2.z - part1->dir.z*a;
b=DOT(vec4,vec4);
d = fabs(a) -halfl1;
if (d <= 0) {
/*c is distance squared from line or end to test if is inside sc*/
if (b < rcut2) intrs+=test_intrpatch(part1,vec2,param->pcanglsw[which+2*patchnum],-1.0*halfl2,intersections,patchnum);
}
}
// printf ("ends %d\n", intrs);
}
return intrs;
}
/*CPSC................................................................................*/
/*
 CPSC variant of find_intersect_plane(): find the intersection of the
 cylindrical spherocylinder part2 with the patch-boundary plane of part1
 defined by w_vec.  There are no spherical caps, so an intersection
 beyond the flat end plate (axial overhang > 0) is discarded.  A valid
 new intersection is appended to intersections[] as a line parameter on
 part2; returns 1 when stored, 0 otherwise.
*/
int find_intersect_planec(struct particles * part1, struct particles * part2, double halfl,
struct vector r_cm, struct vector w_vec, double rcut, double cospatch, double intersections[5])
{
    int slot;
    double denom, side, dd, ti;
    struct vector nplane, d_vec;
    void normalise(struct vector *);
    struct vector vec_crossproduct(struct vector, struct vector);

    nplane = vec_crossproduct(part1->dir, w_vec);
    normalise(&nplane);
    normalise(&w_vec);

    denom = DOT(nplane, part2->dir);
    if (denom == 0.0)
        return 0;                   /* plane and sc are parallel */

    ti = DOT(nplane, r_cm) / denom;
    if ((ti > halfl) || (ti < -halfl))
        return 0;                   /* sc is too short */

    /* vector from the intersection point to the CM */
    d_vec.x = ti * part2->dir.x - r_cm.x;
    d_vec.y = ti * part2->dir.y - r_cm.y;
    d_vec.z = ti * part2->dir.z - r_cm.z;

    side = DOT (d_vec, w_vec);
    if (side * cospatch < 0)
        return 0;                   /* on the other side of the patch */

    dd = fabs(DOT (d_vec, part1->dir)) - halfl;
    if (dd > 0)
        return 0;                   /* beyond the flat end plate */

    if (side * side > rcut * rcut)
        return 0;                   /* intersection is outside the sc */

    /* reject a parameter we already recorded (boundary case) */
    for (slot = 0; intersections[slot] != 0; slot++)
        if (ti == intersections[slot])
            return 0;

    intersections[slot] = ti;
    return 1;
}
/*..................................................................................*/
/*
 Find the projection of a PSC (spherical caps) onto the wall plane z = 0
 along projectdir, restricted to the interaction cutoff, and return the
 projected segment endpoints in pbeg/pend.

 Return value: 0 = no interacting segment, 1 = segment found,
 2 = projected end lies on the begin-cap sphere (no cylindrical cutoff).
 `positive`, presumably, flags on which side of the wall the particle
 sits — TODO confirm against the caller.  partbeg/partend are the cap
 centers, cutdist the cutoff distance.
*/
int psc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir,
struct vector* partdir, BOOL* positive, double * cutdist,
struct vector *partbeg, struct vector *partend)
{
struct vector vec1;
double k,x1,x2,y1,y2,a,b,c,e,d;
void projectinz( struct vector* vec1, struct vector * projectdir, struct vector* result);
void normalise(struct vector*);
/* projection direction pointing away from the wall: no interaction */
if (( (*positive)&& (projectdir->z > 0) ) || ( (!(*positive))&& (projectdir->z < 0) ))
return 0;
if ( fabs(partbeg->z) > (*cutdist) )
return 0;
/* we might have interacting segment*/
x2 = 0.0;
y2 = 0.0;
/*begining point*/
/*if begining projected along particle direction is within cutoff */
if (fabs(partdir->z) > ZEROTOL2) {
projectinz(partbeg,partdir,pbeg);
a=0;
}
else {
/*we need some starting point*/
vec1.x = 2.0*partbeg->x - partend->x;
vec1.y = 2.0*partbeg->y - partend->y;
vec1.z = 2.0*partbeg->z - partend->z;
projectinz(&vec1,projectdir,pbeg);
a=1;
}
if (partdir->z != 0) {
b = fabs(partbeg->z / partdir->z);
} else {
b = (*cutdist)+1.0;
}
if ( (b > (*cutdist)) || (a==1)) {
/*else beginig is at sphere, find intersections with sphere of cutoff radius*/
if ( fabs(projectdir->z) > ZEROTOL2) {
projectinz(partbeg,projectdir,pend);
} else {
pend->x = pbeg->x + projectdir->x;
pend->y = pbeg->y + projectdir->y;
}
if (pend->y == pbeg->y) {
/* projection line is parallel to x: solve the sphere equation in x */
y1=pbeg->y;
y2=pbeg->y;
a=sqrt( (*cutdist)*(*cutdist) - partbeg->z*partbeg->z - (pbeg->y-partbeg->y)*(pbeg->y-partbeg->y) );
x1 = partbeg->x + a;
x2 = partbeg->x - a;
if (pend->x > pbeg->x) {/*select the right intersection*/
pbeg->x = x2;
x2 = x1;
} else {
pbeg->x = x1;
}
pbeg->y = y1;
} else {
/* general case: line x = k*(y - pbeg->y) + pbeg->x against the
   cutoff sphere, quadratic in y with discriminant e */
k = (pend->x - pbeg->x)/ (pend->y - pbeg->y);
a = k*k +1;
b = partbeg->y + k*k*pbeg->y - k*pbeg->x + k*partbeg->x;
c = partbeg->y*partbeg->y + partbeg->z*partbeg->z - (*cutdist)*(*cutdist) + (k*pbeg->y - pbeg->x + partbeg->x)*(k*pbeg->y - pbeg->x + partbeg->x);
e = b*b-a*c;
if (e < 0) {
return 0; /*tehre might be no intersection with sphere*/
}
d = sqrt(e);
if (pend->y > pbeg->y) {/*select the right intersection*/
y1 = (b - d ) /a;
y2 = (b + d ) /a;
}
else {
y1 = (b + d ) /a;
y2 = (b - d ) /a;
}
x1 = k * (y1 - pbeg->y) + pbeg->x;
x2 = k * (y2 - pbeg->y) + pbeg->x;
pbeg->x = x1;
pbeg->y = y1;
pbeg->z = 0.0;
}
}
//printf("pscwall beg %f %f \n",pbeg->x,pbeg->y);
/*end point*/
a = -(*cutdist) * projectdir->z; /*z coordinate of point where projection is in cut distance*/
//printf("sphere end %f %f ",a,partend->z);
if ( ((partend->z < a)&&(*positive)) || ((a < partend->z)&&(!(*positive))) ){
/*end is within cut off - second sphere*/
/*if this is the case vec1 is end of pherocylinder and pend is its projection*/
if (projectdir->z != 0) {
projectinz(partend,projectdir,pend);
} else {
pend->x = pbeg->x + projectdir->x;
pend->y = pbeg->y + projectdir->y;
}
if (pend->y == pbeg->y) {
/* projection line parallel to x: solve sphere equation in x */
y1=pend->y;
y2=pend->y;
a=sqrt( (*cutdist)*(*cutdist) - partend->z*partend->z - (pend->y-partend->y)*(pend->y-partend->y) );
x1 = partend->x + a;
x2 = partend->x - a;
if (pbeg->x > pend->x) {/*select the right intersection*/
pend->x = x2;
} else {
pend->x = x1;
}
pend->y = y1;
} else {
/* quadratic in y for the end-cap sphere, as above for the begin cap */
k = (pbeg->x - pend->x)/ (pbeg->y - pend->y);
a = k*k +1;
b = partend->y + k*k*pend->y - k*pend->x + k*partend->x;
c = partend->y*partend->y + partend->z*partend->z - (*cutdist)*(*cutdist) + (k*pend->y - pend->x + partend->x)*(k*pend->y - pend->x + partend->x);
e = b*b-a*c;
if (e < 0) {
return 0; /*there might be no intersection with sphere*/
}
d = sqrt(e);
if (pbeg->y > pend->y) {/*select the right intersection*/
y1 = (b - d ) /a;
y2 = (b + d ) /a;
}
else {
y1 = (b + d ) /a;
y2 = (b - d ) /a;
}
x1 = k * (y1 - pend->y) + pend->x;
x2 = k * (y2 - pend->y) + pend->x;
pend->x = x1;
pend->y = y1;
pend->z = 0.0;
}
} else {
if ( ((partbeg->z < a)&&(*positive)) || ((a < partbeg->z)&&(!(*positive))) ) {
/*end is at cutoff going through cylindrical part*/
//printf("cylinder ");
b = (a - partbeg->z)/ partdir->z;
vec1.x = partbeg->x + b * partdir->x;
vec1.y = partbeg->y + b * partdir->y;
vec1.z = a;
projectinz(&vec1,projectdir,pend);
} else {
/* also projected end is within the same sphere as begining- no contribution from cylinder*/
if (x2 == 0.0 ) {
//printf("sphere beg ");
/* x2 still 0 means the begin-cap intersection was never computed
   above, so redo the begin-sphere intersection for the end point */
if (projectdir->z != 0) {
projectinz(partbeg,projectdir,pend);
} else {
pend->x = pbeg->x + projectdir->x;
pend->y = pbeg->y + projectdir->y;
}
if (pend->y == pbeg->y) {
y1=pbeg->y;
y2=pbeg->y;
a=sqrt( (*cutdist)*(*cutdist) - partbeg->z*partbeg->z - (pbeg->y-partbeg->y)*(pbeg->y-partbeg->y) );
x1 = partbeg->x + a;
x2 = partbeg->x - a;
if (pend->x > pbeg->x) {/*select the right intersection*/
pend->x = x1;
} else {
pend->x = x2;
}
pend->y = y1;
} else {
k = (pend->x - pbeg->x)/ (pend->y - pbeg->y);
a = k*k +1;
b = partbeg->y + k*k*pbeg->y - k*pbeg->x + k*partbeg->x;
c = partbeg->y*partbeg->y + partbeg->z*partbeg->z - (*cutdist)*(*cutdist) + (k*pbeg->y - pbeg->x + partbeg->x)*(k*pbeg->y - pbeg->x + partbeg->x);
e = b*b-a*c;
if (e < 0) {
return 0; /*tehre might be no intersection with sphere*/
}
d = sqrt(e);
if (pend->y > pbeg->y) {/*select the right intersection*/
y1 = (b - d ) /a;
y2 = (b + d ) /a;
}
else {
y1 = (b + d ) /a;
y2 = (b - d ) /a;
}
x1 = k * (y1 - pbeg->y) + pbeg->x;
x2 = k * (y2 - pbeg->y) + pbeg->x;
pend->x = x1;
pend->y = y1;
pend->z = 0.0;
}
} else {
/* reuse the second begin-sphere intersection computed earlier */
pend->x = x2;
pend->y = y2;
pend->z = 0.0;
}
return 2; /*line end is on sphere of particle begining = no cylindrical cutoff*/
}
}
return 1;
}
/*
 Find the projection of a CPSC (flat ends) onto the wall plane z = 0 along
 projectdir, restricted to the interaction cutoff, returning the projected
 segment endpoints in pbeg/pend.  Returns 1 when an interacting segment
 exists, 0 otherwise.  `positive`, presumably, flags on which side of the
 wall the particle sits and `orientin` which particle end points toward
 the wall — TODO confirm against the caller.
*/
int cpsc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir,
struct vector* partdir, double* halfl, BOOL* orientin,
BOOL* positive, double* rcmz, double * cutdist,
struct vector *partbeg, struct vector *partend)
{
struct vector vec1;
double a;
void projectinz( struct vector* vec1, struct vector * projectdir, struct vector* result);
/* projection direction pointing away from (or parallel to) the wall */
if (( (*positive)&& (projectdir->z >= 0) ) || ( (!(*positive))&& (projectdir->z <= 0) ))
return 0;
/*if projected closer point beoynd cutoff no interaction*/
/*project begining of spherocylinder*/
vec1.x = partbeg->x;
vec1.y = partbeg->y;
vec1.z = partbeg->z;
if (-vec1.z/projectdir->z < (*cutdist) ) {
projectinz(&vec1,projectdir,pbeg);
} else {
return 0;
}
/* we have interacting segment*/
if (-partend->z/projectdir->z < (*cutdist) ) {
/*whole segment interacts*/
vec1.z = partend->z;
} else {
/* clip the segment at the z where the projection reaches the cutoff */
vec1.z = -(*cutdist)*projectdir->z;
}
/* line parameter along the particle axis for the clipped end point */
if (partdir->z != 0.0)
a = (vec1.z - (*rcmz)) / partdir->z;
else {
/* axis parallel to the wall: take the whole half length */
if (*orientin)
a = -(*halfl);
else
a = (*halfl);
}
vec1.x = partdir->x * a;
vec1.y = partdir->y * a;
projectinz(&vec1,projectdir,pend);
return 1;
}
/*
 Compute the extreme points (pextr1..pextr4) of the cutoff region that a
 CPSC projects onto the wall plane z = 0: two intersections of the cutoff
 cylinder with the wall at the particle beginning (pextr1/pextr2) and two
 at the end (pextr3/pextr4).

 Returns 0 when there is no cutoff contribution from the cylindrical part,
 2 when the end intersections degenerate to a single projected point, and
 1 otherwise.  `orientin`, presumably, selects the ordering of the two
 intersections — TODO confirm against the caller.
*/
int cutprojectatwall(struct vector* pextr1, struct vector* pextr2, struct vector* pextr3, struct vector* pextr4,
struct vector* projectdir, struct vector* partdir, double * cutdist,
struct vector *partbeg, struct vector *partend, struct vector *pend,
double *cuttoproject, BOOL* orientin)
{
double y1,y2,O2z,det,a,b,dirydirz,dir2x,dir2y,dir2z,dirzldiry;
void projectinz( struct vector* vec1, struct vector * projectdir, struct vector* result);
/* precompute products of the axis components used throughout */
dirydirz = partdir->y * partdir->z;
dir2x = partdir->x * partdir->x;
dir2y = partdir->y * partdir->y;
dir2z = partdir->z * partdir->z;
a = 1/(dir2x+dir2y);
if (partdir->x != 0) {
/* general case: solve the quadratic in y for the cutoff circle at the
   particle beginning; b is its discriminant-like quantity */
O2z = partbeg->z * partbeg->z;
b=dir2y*dir2z*O2z - (dir2x+dir2y) * (O2z*(dir2x+dir2z)- (*cutdist)*(*cutdist)*dir2x);
if (b < 0 ) {
/*no cutoff from cylindrical part*/
return 0;
}
det = sqrt(b);
y1 = partbeg->y + (dirydirz*partbeg->z + det )*a;
y2 = partbeg->y + (dirydirz*partbeg->z - det )*a;
if (( (partdir->x > 0)&&(!(*orientin)) ) || ( (partdir->x < 0)&&(*orientin) )) {
pextr1->y = y1;
pextr2->y = y2;
} else {
pextr1->y = y2;
pextr2->y = y1;
}
pextr1->x = partbeg->x + (partbeg->z*partdir->z - (pextr1->y - partbeg->y)*partdir->y) / partdir->x;
pextr2->x = partbeg->x + (partbeg->z*partdir->z - (pextr2->y - partbeg->y)*partdir->y) / partdir->x;
/* same quadratic at the particle end */
O2z = partend->z * partend->z;
b= dir2y*dir2z*O2z - (dir2x+dir2y) * (O2z*(dir2x+dir2z)- (*cutdist)*(*cutdist)*dir2x);
if (b >= 0) { /*we have intersections from end*/
det = sqrt(b);
y1 = partend->y + (dirydirz * partend->z + det )*a;
y2 = partend->y + (dirydirz * partend->z - det )*a;
//printf("det %f y1 %f y2 %f \n", det,y1,y2);
if (( (partdir->x > 0)&&(!(*orientin)) ) || ( (partdir->x < 0)&&(*orientin) )) {
pextr3->y = y1;
pextr4->y = y2;
} else {
pextr3->y = y2;
pextr4->y = y1;
}
pextr3->x = partend->x + (partend->z*partdir->z - (pextr3->y - partend->y)*partdir->y) / partdir->x;
pextr4->x = partend->x + (partend->z*partdir->z - (pextr4->y - partend->y)*partdir->y) / partdir->x;
} else {
/*no intersection at the end the cutoff intersects the plane
in the perpendicular projection of line segemnt, so we have to use that point */
if (partdir->z == 0) {
fprintf (stderr, "\nERROR: Something went wrong in calculation of projection.\n\n");
exit (1);
} else {
a = ((*cuttoproject) - partbeg->z)/ partdir->z;
//if ( projectdir->y * partdir->x < 0 )
pextr3->x = partbeg->x + a * partdir->x;
pextr3->y = partbeg->y + a * partdir->y;
pextr3->z = (*cuttoproject);
//printf("before proj %f %f dir %f %f %f ",pextr3->x,pextr3->y,projectdir->x,projectdir->y,projectdir->z);
projectinz(pextr3,projectdir,pextr4);
pextr3->x = pextr4->x;
pextr3->y = pextr4->y;
pextr3->z = 0.0;
//printf("after proj %f %f \n",pextr3->x,pextr3->y);
return 2;
}
}
} else {
if (partdir->y != 0) {
/* axis has no x component: the intersections are symmetric in x */
dirzldiry = partdir->z/partdir->y;
y1 = partbeg->y + partbeg->z * dirzldiry;
det = sqrt( (*cutdist)*(*cutdist) - partbeg->z * partbeg->z * (1+dirzldiry*dirzldiry) );
if (( (partdir->y > 0)&&(!(*orientin)) ) || ( (partdir->y < 0)&&(*orientin) )) {
pextr1->x = partbeg->x + det;
pextr2->x = partbeg->x - det;
} else {
pextr1->x = partbeg->x - det;
pextr2->x = partbeg->x + det;
}
pextr1->y = y1;
pextr2->y = y1;
y1 = partend->y + partend->z * dirzldiry;
b = (*cutdist)*(*cutdist) - partend->z * partend->z * (1+dirzldiry*dirzldiry);
if (b >= 0) { /*we have intersections from end*/
det = sqrt(b);
if (( (partdir->y > 0)&&(!(*orientin)) ) || ( (partdir->y < 0)&&(*orientin) )) {
pextr3->x = partend->x + det;
pextr4->x = partend->x - det;
} else {
pextr3->x = partend->x - det;
pextr4->x = partend->x + det;
}
pextr3->y = y1;
pextr4->y = y1;
} else {
/*no intersection at the end the cutoff intersects the plane
in the perpendicular projection of line segemnt, so we have to use that point */
if (partdir->z == 0) {
fprintf (stderr, "\nERROR: Something went wrong in calculation of projection.\n\n");
exit (1);
} else {
a = ((*cutdist) - partbeg->z)/ partdir->z;
y1 = a * partdir->y + partbeg->y;
if ( projectdir->x * partdir->y > 0 ) {
pextr3->x = a * partdir->x + partbeg->x;
pextr3->y = y1;
pextr4->x = pend->x;
pextr4->y = pend->y;
}else {
pextr3->x = pend->x;
pextr3->y = pend->y;
pextr4->x = a * partdir->x + partbeg->x;
pextr4->y = y1;
}
}
}
} else {
return 0; /* if perpendicular to plane we don't have any intersections*/
}
}
return 1;
}
/*
 Project the point *pt along direction *dir onto the plane z = 0 and
 store the result in *out.  dir->z must be nonzero.
*/
void projectinz(struct vector* pt, struct vector* dir,struct vector * out)
{
    out->x = pt->x - pt->z * dir->x/dir->z;
    out->y = pt->y - pt->z * dir->y/dir->z;
    out->z = 0;
}
/*
 Area of the quadrilateral (pbeg, pend, pend1, pbeg1) in the z = 0 plane,
 computed as the sum of two triangle areas via 2-D cross products.
*/
double areafourpoints(struct vector * pbeg, struct vector * pend, struct vector * pbeg1, struct vector * pend1 )
{
    double ax, ay, bx, by, total;

    /* triangle 1: |(pbeg1 - pbeg) x (pend - pbeg)| / 2 */
    ax = pbeg1->x - pbeg->x;
    ay = pbeg1->y - pbeg->y;
    bx = pend->x - pbeg->x;
    by = pend->y - pbeg->y;
    total = fabs(ax*by - ay*bx)*0.5;

    /* triangle 2: |(pend - pend1) x (pbeg1 - pend1)| / 2 */
    ax = pend->x - pend1->x;
    ay = pend->y - pend1->y;
    bx = pbeg1->x - pend1->x;
    by = pbeg1->y - pend1->y;
    total += fabs(ax*by - ay*bx)*0.5;

    return total;
}
/*
 Area of a polygon with three to eight vertices (p1..p8) in the z = 0
 plane, accumulated as a fan of triangles anchored at p1.  p4..p8 may be
 NULL; vertices after the first NULL are ignored.  (The previous header
 comment incorrectly said "six points"; the five-deep nesting is replaced
 by a loop with identical accumulation order.)
*/
double areaeightpoints(struct vector * p1, struct vector * p2, struct vector * p3, struct vector * p4,
struct vector * p5, struct vector * p6,struct vector * p7, struct vector * p8)
{
    double area = 0.0;
    struct vector vec1, vec2;
    struct vector *rest[5];
    struct vector *prev, *curr;
    int i;

    /* first triangle (p1, p2, p3): |(p2-p1) x (p3-p2)| / 2 */
    vec1.x = p2->x - p1->x;
    vec1.y = p2->y - p1->y;
    vec2.x = p3->x - p2->x;
    vec2.y = p3->y - p2->y;
    area += fabs(vec1.x*vec2.y -vec1.y*vec2.x)*0.5;

    /* fan over the optional vertices, stopping at the first NULL:
       triangle (p1, prev, curr) via |(prev-p1) x (curr-prev)| / 2 */
    rest[0] = p4; rest[1] = p5; rest[2] = p6; rest[3] = p7; rest[4] = p8;
    prev = p3;
    for (i = 0; i < 5 && rest[i] != NULL; i++) {
        curr = rest[i];
        vec1.x = prev->x - p1->x;
        vec1.y = prev->y - p1->y;
        vec2.x = curr->x - prev->x;
        vec2.y = curr->y - prev->y;
        area += fabs(vec1.x*vec2.y -vec1.y*vec2.x)*0.5;
        prev = curr;
    }
    return area;
}
/*..............................................................................*/
/*........................INPUT OUTPUT..........................................*/
/*..............................................................................*/
/*..............................................................................*/
/**
 * Parse up to two whitespace-separated integers from string num into
 * value[0] and value[1].  A missing second number yields value[1] = 0;
 * trailing garbage after the second number aborts the program.
 */
void readii(char * num, int value[2]){
    char *rest, *tail;
    void trim (char *);

    value[0] = strtol(num, &rest, 10);
    trim(rest);
    if ((int)strlen(rest) == 0) {
        value[1] =0;
        return;
    }
    value[1] = strtol(rest, &tail, 10);
    if (*tail) {
        fprintf(stderr, "Could not convert %s into two integers\n", num);
        exit(1);
    }
    return;
}
/**
 * Parse string num as a decimal integer; aborts the program if anything
 * other than digits (and an optional sign) is present.
 */
int readi(char * num){
    char *stop;
    long parsed = strtol(num, &stop, 10);

    if (*stop != '\0') {
        fprintf(stderr, "Could not convert %s into integer\n", num);
        exit(1);
    }
    return (int) parsed;
}
/**
 * Parse string num as a decimal long; aborts the program on trailing
 * garbage.
 */
long readl(char * num){
    char *stop;
    long parsed = strtol(num, &stop, 10);

    if (*stop != '\0') {
        fprintf(stderr, "Could not convert %s into long\n", num);
        exit(1);
    }
    return parsed;
}
/**
 * Parse string num as a double; aborts the program on trailing garbage.
 */
double readd(char * num){
    char *stop;
    double parsed = strtod(num, &stop);

    if (*stop != '\0') {
        fprintf(stderr, "Could not convert %s into double\n", num);
        exit(1);
    }
    return parsed;
}
/*
   Reads the run parameters from the external file "options". See the end of the
   code for a template. All comments starting with '#' are stripped out. The
   options are summarised on standard output and checked for validity of range.
   Aborts the program on a malformed line, on an unconvertible value, or if a
   required option is missing.
*/
void read_options(struct sim* sim,char filename[30])
{
    int i;
    int num_options = -1;
    double transmx, rotmx, chainmmx, chainrmx;
    double angle, chain_angle;
    char *id, *value, *tokLine, *line;
    FILE *infile;
    void strip_comment (char *);
    void trim (char *);
    void readii(char * num, int value[2]);
    double readd(char *);
    long readl(char *);
    int readi(char *);

    /* for new options add before the last line; the sentinel entry with
       var == NULL terminates the table */
    Option options[] = {
        {"write_cluster", Long, FALSE, &sim->write_cluster},
        {"adjust", Long, FALSE, &sim->adjust},
        {"movie", Long, FALSE, &sim->movie},
        {"nequil", Long, FALSE, &sim->nequil},
        {"nsweeps", Long, FALSE, &sim->nsweeps},
        {"nrepchange", Long, FALSE, &sim->nrepchange},
        {"paramfrq", Long, FALSE, &sim->paramfrq},
        {"report", Long, FALSE, &sim->report},
        {"seed", Long, FALSE, &seed},
        {"pairlist_update", Int, FALSE, &sim->pairlist_update},
        {"ptype", Int, FALSE, &sim->ptype},
        {"wlm", Int2, FALSE, &sim->wlm},
        {"wlmtype", Int, FALSE, &sim->wl.wlmtype},
        {"press", Double, FALSE, &sim->press},
        {"paralpress", Double, FALSE, &sim->paralpress},
        {"edge_mx", Double, FALSE, &sim->edge.mx},
        {"shave", Double, FALSE, &sim->shave},
        {"chainprob", Double, FALSE, &sim->chainprob},
        {"switchprob", Double, FALSE, &sim->switchprob},
        {"temper", Double, FALSE, &sim->temper},
        {"paraltemper", Double, FALSE, &sim->paraltemper},
        {"transmx", Double, FALSE, &transmx},
        {"rotmx", Double, FALSE, &rotmx},
        {"chainmmx", Double, FALSE, &chainmmx},
        {"chainrmx", Double, FALSE, &chainrmx},
        {"last", Int, FALSE, NULL}
    };
    /* count real options (excludes the sentinel) */
    while(options[++num_options].var != NULL)
        ;

    /*--- 1. Read in values ---*/
    size_t line_size = (STRLEN + 1) * sizeof(char);
    line = (char *) malloc(line_size);
    infile = fopen(filename, "r");
    if (infile == NULL) {
        fprintf (stderr, "\nERROR: Could not open options file.\n\n");
        exit (1);
    }
    while(getline(&line, &line_size, infile) != -1){
        // strip comments
        strip_comment(line);
        trim(line);
        if(strlen(line) == 0){
            continue;
        }
        // tokenize "identifier = value"
        tokLine = line;
        id = strtok(tokLine, "=");
        if(id == NULL){
            fprintf(stderr, "error parsing Configuration line (%s)", line);
            free(line);
            exit(1);
        }
        trim(id);
        tokLine = NULL;
        value = strtok(tokLine, "=");
        /* BUGFIX: the original called trim(value) BEFORE this NULL check,
           dereferencing NULL on any line that contains no '=' */
        if(value == NULL){
            fprintf(stderr, "error parsing Configuration line (%s)", line);
            free(line);
            exit(1);
        }
        trim(value);
        //printf("id: %s; value: %s\n", id, value);
        /* dispatch the value to the matching option by its declared type */
        for(i = 0; i < num_options; i++){
            if(strcmp(id, options[i].id) == 0){
                if(options[i].type == Int2){
                    readii(value,*((int (*)[2]) options[i].var));
                    options[i].set = TRUE;
                    break;
                }
                if(options[i].type == Int){
                    *((int *) options[i].var) = readi(value);
                    options[i].set = TRUE;
                    break;
                }
                else if(options[i].type == Long){
                    *((long *) options[i].var) = readl(value);
                    options[i].set = TRUE;
                    break;
                }
                else if(options[i].type == Double){
                    *((double *) options[i].var) = readd(value);
                    options[i].set = TRUE;
                    break;
                }
                else {
                    fprintf(stderr, "Could not determine type of %s!\n", id);
                    free(line);
                    exit(1);
                }
            }
        }
        if(i == num_options){
            fprintf(stderr, "Unknown identifier %s!\nWill procede.\n", id);
        }
    }
    fclose (infile);
    free(line);

    /* Check whether all options have been read in */
    for(i = 0; i < num_options; i++){
        if(!options[i].set){
            fprintf(stderr, "option '%s' is not set!\n", options[i].id);
            exit(1);
        }
    }

    /*--- 2. Summarize results on standard output ---*/
    /* Density of close-packed spherocylinders */
    // rho_cp = 2.0/(sqrt(2.0) + *length * sqrt(3.0));
    printf (" Pressure coupling type:                             %d\n", sim->ptype);
    printf (" Pressure:                                           %.8lf\n", sim->press);
    printf (" Replica exchange pressure:                          %.8lf\n", sim->paralpress);
    printf (" Average volume change attempts per sweep:           %.8lf\n", sim->shave);
    printf (" Equilibration sweeps:                               %ld\n", sim->nequil);
    printf (" Sweeps between step size adjustments:               %ld\n", sim->adjust);
    printf (" Production sweeps:                                  %ld\n", sim->nsweeps);
    printf (" Sweeps between statistics samples:                  %ld\n", sim->paramfrq);
    printf (" Sweeps between statistics reports:                  %ld\n", sim->report);
    printf (" Average chain move attempts per sweep:              %.8lf\n", sim->chainprob);
    printf (" Initial maximum displacement:                       %.8lf\n", transmx);
    printf (" Inititial maximum angular change (degrees):         %.8lf\n", rotmx);
    printf (" Inititial maximum box edge change:                  %.8lf\n", sim->edge.mx);
    printf (" Initial maximum chain displacement:                 %.8lf\n", chainmmx);
    printf (" Inititial maximum chain angular change (degrees):   %.8lf\n", chainrmx);
    printf (" Temperature in kT/e:                                %.8lf\n", sim->temper);
    printf (" Parallel tempering temperature in kT/e:             %.8lf\n", sim->paraltemper);
    printf (" Sweeps between replica exchange:                    %ld\n", sim->nrepchange);
    printf (" Wang-Landau method:                                 %d %d\n", sim->wlm[0],sim->wlm[1]);
    printf (" Calculate the Wang-Landau method for atom type:     %d\n", sim->wl.wlmtype);
    printf (" Average type switch attempts per sweep:             %.8lf\n", sim->switchprob);
    printf (" Number of Sweeps per pairlist update:               %d\n", sim->pairlist_update);
    printf (" Random number seed:                                 %ld\n", seed);
    printf (" Number of sweeps per writing out cluster info:      %ld\n", sim->write_cluster);
    if (sim->movie > 0) {
        printf (" Sweeps between movie frames:                      %ld\n", sim->movie);
    } else {
        printf (" No movie\n");
    }
    printf ("\n");
    if(sim->pairlist_update){
        printf(" A pairlist will be generated every %d steps. This is a greedy"
               " algorithm; make sure you don't have big chains etc.!\n",
               sim->pairlist_update);
    }

    /*--- 3. Validity checks ---*/
    if (rotmx < 0.0 || rotmx > 180) {
        fprintf (stderr, "ERROR: Maximum orientation change must be in range 0 to 180.\n\n");
        exit (1);
    }
    if (chainrmx < 0.0 || chainrmx > 180) {
        fprintf (stderr, "ERROR: Maximum orientation change for chains must be in range 0 to 180.\n\n");
        exit (1);
    }
    if ( (sim->ptype <0) || (sim->ptype>3) ) {
        fprintf (stderr, "ERROR: Unknown pressure coupling %d. Program only knows: 0 - anisotropic coupling, \
1 - isotropic coupling, 2 - isotropic in xy z=const, 3 - isotropic xy V=const.\n\n",sim->ptype);
        exit (1);
    }
    if ( (sim->wlm[0] <0) || (sim->wlm[0] > 7) || (sim->wlm[1] <0) || (sim->wlm[1] > 7) ) {
        fprintf (stderr, "ERROR: Unknown Wang-Landau method %d %d. Program only knows: 0 - none, \
1 - z-direction od 1st particle, 2 - pore in membrane, 3 - zorientation of 0th particle,\
4 - distance of fist two particles, 5 - pore around z-axis above CM,\
6 - pore around z-axis above 0th particle, 7 - number of particles in contact \n\n",sim->wlm[0],sim->wlm[1]);
        exit (1);
    }
    if ( (sim->wlm[0] == 0) && (sim->wlm[1] > 0) ) {
        fprintf (stderr, "ERROR: Wang-Landau method has to be set for first order parameter and then for second order parameter\n\n");
        exit (1);
    }
    if ( (sim->wlm[0] == 2) || (sim->wlm[0] == 5) || (sim->wlm[0] == 6) ) {
        if(sim->wl.wlmtype < 1){
            fprintf (stderr, "ERROR: Atom type for the Wang-Landau Method (%d) was false defined.\n\n",sim->wl.wlmtype);
            exit (1);
        }
        if ( (sim->wlm[1] == 2) || (sim->wlm[1] == 5) || (sim->wlm[1] == 6) ) {
            fprintf (stderr, "ERROR: Simulaneous use of two pore order parameters has not been implemented yet.\n\n");
            exit (1);
        }
    }
    /* we store maximum rotation as half angle - useful for quaterions */
    angle = rotmx / 180.0 * PIH *0.5;
    rotmx = cos((rotmx)/180.0*PIH);
    chain_angle = chainrmx / 180.0 * PIH;
    chainrmx = cos((chainrmx)/180.0*PIH);
    sim->edge.mx *= 2.0;   /* The full range is -maxl to +maxl, i.e. spanning 2*maxl */
    transmx *= 2.0;        /* The full range is -maxr to +maxr, i.e. spanning 2*maxr */
    chainmmx *= 2.0;       /* The full range is -maxr to +maxr, i.e. spanning 2*maxr */
    /* seed every per-type step size with the shared initial value */
    for (i=0;i<MAXT;i++) {
        sim->trans[i].mx = transmx;
        sim->rot[i].mx = rotmx;
        sim->rot[i].angle = angle;
    }
    for (i=0;i<MAXMT;i++) {
        sim->chainm[i].mx = chainmmx;
        sim->chainr[i].mx = chainrmx;
        sim->chainr[i].angle = chain_angle;
    }
    //parallel tempering
#ifdef MPI
    if ( (sim->temper != sim->paraltemper) && (sim->mpinprocs <2) ) {
        printf("ERROR: Paralllel tempering at single core does not work.\n\n");
        exit(1);
    }
    /* spread temperature/pressure linearly across the MPI ranks */
    sim->dtemp = (sim->paraltemper - sim->temper )/(sim->mpinprocs-1);
    sim->temper += sim->dtemp * sim->mpirank;
    if ( (sim->press != sim->paralpress) && (sim->mpinprocs <2) ) {
        printf("ERROR: Pressure replica exchange at single core does not work.\n\n");
        exit(1);
    }
    sim->dpress = (sim->paralpress - sim->press )/(sim->mpinprocs-1);
    sim->press += sim->dpress * sim->mpirank;
    seed += sim->mpirank;
    sim->mpiexch.mx = sim->dtemp;
    sim->mpiexch.angle = sim->dpress;
#endif
}
/*..............................................................................*/
/*
   Used by read_options to read a long integer with error checking.
   NOT USED ANYMORE
*/
long read_long(FILE *infile, char *error) {
    char line[500];
    long value;

    /* BUGFIX: the original called sscanf() on the buffer even when fgets()
       failed, reading uninitialized memory; short-circuit avoids that. */
    if (fgets(line, sizeof(line), infile) == NULL
        || sscanf(line, "%ld", &value) != 1) {
        fprintf (stdout, "\nERROR reading %s from options file.\n\n", error);
        exit (1);
    }
    return value;
}
/*
   Used by read_options to read an integer with error checking.
   NOT USED ANYMORE
*/
int read_int(FILE *infile, char *error) {
    char line[500];
    int value;

    /* BUGFIX: the original called sscanf() on the buffer even when fgets()
       failed, reading uninitialized memory; short-circuit avoids that. */
    if (fgets(line, sizeof(line), infile) == NULL
        || sscanf(line, "%d", &value) != 1) {
        fprintf (stdout, "\nERROR reading %s from options file.\n\n", error);
        exit (1);
    }
    return value;
}
/*..............................................................................*/
/*
   Used by read_options to read a double precision with error checking.
   NOT USED ANYMORE
*/
double read_double(FILE *infile, char *error) {
    char line[500];
    double value;

    /* BUGFIX: the original called sscanf() on the buffer even when fgets()
       failed, reading uninitialized memory; short-circuit avoids that. */
    if (fgets(line, sizeof(line), infile) == NULL
        || sscanf(line, "%le", &value) != 1) {
        fprintf (stdout, "\nERROR reading %s from options file.\n\n", error);
        exit (1);
    }
    return value;
}
/*..............................................................................*/
/****************************************************************************
* CONFIG INITIALIZATION
*****************************************************************************/
/*
Reads in the initial configuration from the file "config.init". The first
line (possibly preceded by a BOXSEP command line) holds the box dimensions.
Each following line contains the three components of the position vector,
three components of the direction vector and three components of the patch
direction for a spherocylinder, optionally followed by an integer "switched"
flag. The direction vector is normalised after being read in and the patch
direction is orthogonalised to it. Chains are then made whole across
periodic boundaries. (NOTE: the pairwise overlap check is currently
commented out, so overlapping initial configurations are NOT rejected.)
*/
void init_config(struct topo * topo, struct conf * conf, struct sim * sim, char filename[30])
{
int err,fields,tmp_type;
long i,j,current,first;
FILE * infile;
char * line, line2[STRLEN];
size_t line_size = (STRLEN + 1) * sizeof(char);
line = (char *) malloc(line_size);
struct particles chorig[MAXCHL];
/* forward declarations of helpers defined elsewhere in this file */
int overlap(struct particles, struct particles, struct vector,
struct ia_param [MAXT][MAXT]);
void normalise(struct vector *);
void ortogonalise(struct vector *, struct vector);
void usepbc(struct vector *, struct vector);
double anint(double);
void strip_comment (char *);
void trim (char *);
void aftercommand(char *, char *, char);
/* longest spherocylinder self-interaction length; used only for the
   box-size sanity warnings below */
double maxlength = 0;
for(i = 0; i < MAXT; i++){
if(maxlength < topo->ia_params[i][i].len[0])
maxlength = topo->ia_params[i][i].len[0];
}
infile = fopen(filename, "r");
if (infile == NULL) {
fprintf (stderr, "\nERROR: Could not open config.init file.\n\n");
exit (1);
}
/* Box size: either plain "x y z" on the first line, or - if that parse
   fails - taken from the text after BOXSEP on the NEXT line.
   NOTE(review): the fallback reads a second line before applying
   aftercommand(), so a box spec on the first line after a BOXSEP marker
   would be skipped - confirm the intended file format. */
if(getline(&line, &line_size, infile) == -1){
fprintf (stderr, "ERROR: Could not read box size.\n\n");
exit (1);
}
strip_comment(line);
trim(line);
if (sscanf(line, "%le %le %le", &(conf->box.x), &(conf->box.y), &(conf->box.z)) != 3) {
if(getline(&line, &line_size, infile) == -1){
fprintf (stderr, "ERROR: Could not read box size.\n\n");
exit (1);
}
aftercommand(line2,line,BOXSEP);
strip_comment(line2);
trim(line2);
if (sscanf(line2, "%le %le %le", &(conf->box.x), &(conf->box.y), &(conf->box.z)) != 3) {
fprintf (stderr, "ERROR: Could not read box size.\n\n");
exit (1);
}
}
/* warn (but do not abort) if any box edge is shorter than two of the
   longest spherocylinders plus their diameters */
if (conf->box.x < maxlength * 2.0 + 2.0) {
printf ("WARNING: x box length is less than two spherocylinders long.\n\n");
}
if (conf->box.y < maxlength * 2.0 + 2.0) {
printf ("WARNING: y box length is less than two spherocylinders long.\n\n");
}
if (conf->box.z < maxlength * 2.0 + 2.0) {
printf ("WARNING: z box length is less than two spherocylinders long.\n\n");
}
DEBUG_INIT("Position of the particle");
/* one particle per line: position, direction, patch direction,
   optional switched flag (9 or 10 fields) */
for (i=0; i < topo->npart; i++) {
if(getline(&line, &line_size, infile) == -1){
break;
}
strip_comment(line);
trim(line);
fields = sscanf(line, "%le %le %le %le %le %le %le %le %le %d",
&conf->particle[i].pos.x, &conf->particle[i].pos.y, &conf->particle[i].pos.z,
&conf->particle[i].dir.x, &conf->particle[i].dir.y, &conf->particle[i].dir.z,
&conf->particle[i].patchdir[0].x, &conf->particle[i].patchdir[0].y, &conf->particle[i].patchdir[0].z,
&conf->particle[i].switched);
/* secondary patch and chiral directions are derived later
   (see int_partvec), so zero them here */
conf->particle[i].patchdir[1].x = conf->particle[i].patchdir[1].y = conf->particle[i].patchdir[1].z =0;
conf->particle[i].chdir[0].x = conf->particle[i].chdir[0].y = conf->particle[i].chdir[0].z =0;
conf->particle[i].chdir[1].x = conf->particle[i].chdir[1].y = conf->particle[i].chdir[1].z =0;
DEBUG_INIT("Line: %s\nNumber of Fields: %d", line, fields);
/* 9 fields means the switched flag was omitted; default it to 0 */
if (fields == 9){
conf->particle[i].switched = 0;
fprintf(stdout, "WARNING: Particle %ld is assumed to be not switched!\n", i+1);
fields++;
}
if (fields != 10) {
fprintf (stderr, "ERROR: Could not read coordinates for particle %ld.\n \
Did you specify box size at the begining?\n\n", i+1);
free(line);
exit (1);
}
/* Scale position vector to the unit cube */
usepbc(&conf->particle[i].pos, conf->box );
conf->particle[i].pos.x /= conf->box.x;
conf->particle[i].pos.y /= conf->box.y;
conf->particle[i].pos.z /= conf->box.z;
/* anisotropic particles (geotype < SP) must have a nonzero direction */
if ((topo->ia_params[conf->particle[i].type][conf->particle[i].type].geotype[0]<SP)&&( DOT(conf->particle[i].dir, conf->particle[i].dir) < ZEROTOL )) {
//DEBUG_INIT("Geotype = %d < %d", conf->particle[i].geotype,SP);
fprintf (stderr,
"ERROR: Null direction vector supplied for particle %ld.\n\n", i+1);
free(line);
exit (1);
} else {
normalise(&conf->particle[i].dir);
}
/* ...and a nonzero patch direction, made orthogonal to dir */
if ((topo->ia_params[conf->particle[i].type][conf->particle[i].type].geotype[0]<SP)&&( DOT(conf->particle[i].patchdir[0], conf->particle[i].patchdir[0]) < ZEROTOL )) {
fprintf (stderr,
"ERROR: Null patch vector supplied for particle %ld.\n\n", i+1);
free(line);
exit (1);
} else {
ortogonalise(&conf->particle[i].patchdir[0],conf->particle[i].dir);
normalise(&conf->particle[i].patchdir[0]);
}
// Switch the type: a switched particle stores its alternate type in
// 'type' and its original type in 'switchtype', so swap them here
if(conf->particle[i].switched){
if(conf->particle[i].switchtype == 0){
fprintf(stderr, "ERROR: Particle %ld switched even though it has no switchtype", i);
free(line);
exit(1);
}
tmp_type = conf->particle[i].type;
conf->particle[i].type = conf->particle[i].switchtype;
conf->particle[i].switchtype = tmp_type;
}
DEBUG_INIT("%ld:\t%lf\t%lf\t%lf", i, conf->particle[i].pos.x, conf->particle[i].pos.y, conf->particle[i].pos.z);
}
free(line);
/*Make chains WHOLE: shift each chain so its first particle is the origin,
  wrap every member into the primary image, then shift back - this keeps
  chain members on the same side of the periodic boundary */
for (i=0;i<topo->chainnum;i++){
j=0;
current = topo->chainlist[i][0];
first = current;
chorig[0].pos = conf->particle[first].pos;
while (current >=0 ) {
/*shift the chain particle by first one*/
conf->particle[current].pos.x -= chorig[0].pos.x;
conf->particle[current].pos.y -= chorig[0].pos.y;
conf->particle[current].pos.z -= chorig[0].pos.z;
/*put it in orig box*/
conf->particle[current].pos.x -= anint(conf->particle[current].pos.x);
conf->particle[current].pos.y -= anint(conf->particle[current].pos.y);
conf->particle[current].pos.z -= anint(conf->particle[current].pos.z);
//printf("ant: %f %f %f\n",conf->particle[current].pos.x,conf->particle[current].pos.y,conf->particle[current].pos.z);
/*shot it back*/
conf->particle[current].pos.x += chorig[0].pos.x;
conf->particle[current].pos.y += chorig[0].pos.y;
conf->particle[current].pos.z += chorig[0].pos.z;
//printf("posstart: %f %f %f\n",conf->particle[current].pos.x,conf->particle[current].pos.y,conf->particle[current].pos.z);
j++;
current = topo->chainlist[i][j];
}
}
/* the overlap check below is disabled, so err always stays 0 and the
   exit branch is currently dead code */
err = 0;
//for (i=0; i < topo->npart-1; i++) {
//    for (j=i+1; j < topo->npart; j++) {
//        if ( overlap(conf->particle[i], conf->particle[j], conf->box, topo->ia_params) ) {
//            fprintf (stderr,
//                    "ERROR: Overlap in initial coniguration between particles %ld and %ld.\n",
//                    i+1, j+1);
//            err = 1;
//        }
//    }
//}
if (err) {
printf ("\n");
exit (1);
}
fclose (infile);
fflush (stdout);
}
/*..............................................................................*/
/****************************************************************************
* TOPOLOGY INITIALIZATION
*****************************************************************************/
/*
   Create lists for chain operations: Connectivity list where it is written for each sc
   with which sc it is connected. The order is important because spherocylinders have direction
   First is interacting tail then head. Chain list where particles are assigned to chains to
   which they belong.
   Parses the topology file section by section ([TYPES], [MOLECULES],
   [SYSTEM], [EXTER], [EXCLUDE]), builds the pair interaction table, the
   chain and connectivity lists, and allocates the per-particle arrays.
   Aborts the program on any parse error or capacity overflow.
*/
void init_top(struct topo * topo, struct conf * conf, struct sim * sim,char filename[30])
{
    long i,j,k,mol,maxch,maxpart;
    FILE *infile;
    char *pline=NULL, *dummy=NULL, *joined=NULL, *sysnames[MAXN];
    char line[STRLEN], keystr[STRLEN], molname[STRLEN];
    unsigned size;
    long *sysmoln /*[MAXN]*/;
    BOOL exclusions[MAXT][MAXT];
    /* forward declarations of helpers defined elsewhere in this file */
    char *fgets2(char *, int , FILE *);
    void strip_comment (char *);
    void trim(char *);
    int continuing(char *);
    void upstring (char *);
    int filltypes(char **, struct topo * topo);
    int fillexter(char **, struct topo * topo);
    int fillexclusions(char **, BOOL (*exclusions)[MAXT][MAXT]);
    void beforecommand(char *, char *, char);
    int fillmol(char *, char *, struct molecule * molecules, struct topo * topo);
    int fillsystem(char *, char *[MAXN], long **);
    void initparams(struct topo * topo);
    void genparampairs(struct topo * topo, BOOL (*exclusions)[MAXT][MAXT]);
    int topdealoc(char **, char *[MAXN], long **, struct molecule *);
    struct molecule molecules[MAXMT];

    if ((infile = fopen(filename, "r")) == NULL) {
        fprintf (stderr, "\nTOPOLOGY ERROR: Could not open top.init file.\n\n");
        exit (1);
    }
    fprintf (stdout, "Initialize chainlist...\n");
    fflush(stdout);
    sysmoln = malloc( sizeof(long)*MAXN);
    if(sysmoln == NULL){
        fprintf(stderr, "\nTOPOLOGY ERROR: Could not allocate memory for sysmoln");
        exit(1);
    }
    struct particles tmp_particles[MAXN];
    /* preset chain parameters, molecule tables, chain lists and system names */
    for (i=0;i<MAXN;i++) {
        if (i < MAXMT) {
            topo->chainparam[i].bond1eq = -1;
            topo->chainparam[i].bond1c = -1;
            topo->chainparam[i].bond2eq = -1;
            topo->chainparam[i].bond2c = -1;
            topo->chainparam[i].bonddc = -1;
            topo->chainparam[i].angle1eq = -1;
            topo->chainparam[i].angle1c = -1;
            topo->chainparam[i].angle2eq = -1;
            topo->chainparam[i].angle2c = -1;
            molecules[i].name = NULL;
            molecules[i].type = malloc(sizeof(long)*MAXN);
            molecules[i].switchtype = malloc(sizeof(long)*MAXN);
            molecules[i].delta_mu = malloc(sizeof(double)*MAXN);
            for (j=0;j<MAXN;j++) {
                molecules[i].type[j] = -1;
            }
        }
        for (j = 0; j < MAXCHL; j++){
            topo->chainlist[i][j] = -1;
        }
        sysnames[i]=NULL;
    }
    for (i=0;i<MAXT;i++) {
        for (j=0;j<MAXT;j++) {
            exclusions[i][j]=FALSE;
        }
    }
    /* default: no external potential */
    topo->exter.exist = FALSE;
    topo->exter.thickness = 0.0;
    topo->exter.epsilon = 0.0;
    topo->exter.attraction = 0.0;
    topo->exter.sqmaxcut = 0.0;
    for(i = 0; i < MAXT; i++){
        for(j = 0; j < MAXT; j++){
            for(k = 0; k < 2; k++){
                topo->ia_params[i][j].geotype[k] = 0;
            }
        }
    }
    fprintf (stdout, "Reading topology...\n");
    fflush(stdout);
    molname[0] = ' ';
    initparams(topo);
    pline=malloc((size_t)STRLEN);
    if (pline == NULL) {
        fprintf (stderr, "\nTOPOLOGY ERROR: Could not allocate memory for line buffer.\n\n");
        exit (1);
    }
    while (fgets2(line,STRLEN-2,infile) != NULL) {
        /* BUGFIX: size the working buffer to the line just read; the original
           strcpy-ed into whatever capacity a previous continuation join left
           behind (possibly smaller than STRLEN) and checked the pointer only
           AFTER using it. */
        free(pline);
        pline=malloc(strlen(line)+1);
        if (pline == NULL) {
            fprintf (stderr, "\nTOPOLOGY ERROR: Could not allocate memory for line buffer.\n\n");
            exit (1);
        }
        strcpy(pline,line);
        /* build one long line from several fragments */
        while (continuing(line) && (fgets2(line,STRLEN-1,infile) != NULL)) {
            size=strlen(pline)+strlen(line)+1;
            /* BUGFIX: the original freed pline and strcat-ed the new fragment
               onto a fresh, uninitialized buffer; copy the accumulated text
               into the new buffer first. */
            joined=malloc((size_t)size);
            if (joined == NULL) {
                fprintf (stderr, "\nTOPOLOGY ERROR: Could not allocate memory for line buffer.\n\n");
                exit (1);
            }
            strcpy(joined,pline);
            strcat(joined,line);
            free(pline);
            pline=joined;
        }
        /* skip trailing and leading spaces and comment text */
        strip_comment (pline);
        trim (pline);
        /* if there is something left... */
        if ((int)strlen(pline) > 0) {
            // get the [COMMAND] key
            if (pline[0] == OPENKEY) {
                pline[0] = ' ';
                beforecommand(keystr,pline,CLOSEKEY);
                upstring (keystr);
            } else {
                //DEBUG fprintf (stdout, "Topology read type:%s, %s \n",keystr,pline);
                if (!strcmp(keystr,"TYPES")) {
                    fflush(stdout);
                    if (!filltypes(&pline, topo)) {
                        DEBUG_INIT("Something went wrong with filltypes");
                        fprintf (stderr, "\nTOPOLOGY ERROR: in reading types\n\n");
                        topdealoc(&pline,sysnames,&sysmoln, molecules);
                        exit (1);
                    }
                    DEBUG_INIT("back in init_top");
                } else{
                    if (!strcmp(keystr,"MOLECULES")){
                        DEBUG_INIT("Let's go to the molecules");
                        /* a blank molname means we are starting a new molecule */
                        if (molname[0] == ' ') {
                            beforecommand(molname,pline,SEPARATOR);
                            i=0;
                            while (molecules[i].name != NULL)
                                i++;
                            DEBUG_INIT("in the middle of getting to fillmol");
                            molecules[i].name = malloc(strlen(molname)+1);
                            strcpy(molecules[i].name, molname);
                            fprintf (stdout, "Topology read for molecule: %s \n",molname);
                        }
                        if (!fillmol(molname, pline, molecules, topo)) {
                            fprintf (stderr, "\nTOPOLOGY ERROR: in reading molecules\n\n");
                            topdealoc(&pline,sysnames,&sysmoln, molecules);
                            exit (1);
                        }
                        /* a CLOSEMOL on this line ends the current molecule */
                        if ((dummy = strchr (pline,CLOSEMOL)) != NULL)
                            molname[0] = ' ';
                    } else {
                        if (!strcmp(keystr,"SYSTEM")) {
                            if (!fillsystem(pline,sysnames,&sysmoln)) {
                                fprintf (stderr, "\nTOPOLOGY ERROR: in reading system\n\n");
                                topdealoc(&pline,sysnames,&sysmoln, molecules);
                                exit (1);
                            }
                        } else {
                            if (!strcmp(keystr,"EXTER")) {
                                fflush(stdout);
                                if (!fillexter(&pline, topo)) {
                                    DEBUG_INIT("Something went wrong with external potential");
                                    fprintf (stderr, "\nTOPOLOGY ERROR: in reading external potential\n\n");
                                    topdealoc(&pline,sysnames,&sysmoln, molecules);
                                    exit (1);
                                }
                            } else {
                                if (!strcmp(keystr,"EXCLUDE")) {
                                    fflush(stdout);
                                    if (!fillexclusions(&pline,&exclusions)) {
                                        DEBUG_INIT("Something went wrong with exclusions potential");
                                        fprintf (stderr, "\nTOPOLOGY ERROR: in reading exclusions\n\n");
                                        topdealoc(&pline,sysnames,&sysmoln, molecules);
                                        exit (1);
                                    }
                                } else {
                                    fprintf (stderr, "\nTOPOLOGY ERROR: invalid keyword:%s.\n\n", keystr);
                                    topdealoc(&pline,sysnames,&sysmoln, molecules);
                                    exit (1);
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    /*we have sucessfully read topology*/
    if (pline !=NULL) free(pline);
    pline=NULL;
    fclose (infile);
    fflush (stdout);
    /*fill ia_params combinations*/
    fprintf (stdout, "\nTopology succesfully read. Generating pair interactions...\n");
    genparampairs(topo,&exclusions);
    /* pad the global cutoff by the longest spherocylinder plus its diameter */
    double maxlength = 0;
    for(i = 0; i < MAXT; i++){
        if(maxlength < topo->ia_params[i][i].len[0])
            maxlength = topo->ia_params[i][i].len[0];
    }
    topo->sqmaxcut += maxlength+2;
    topo->sqmaxcut *= 1.1;
    topo->maxcut = topo->sqmaxcut;
    topo->sqmaxcut = topo->sqmaxcut*topo->sqmaxcut;
    topo->exter.sqmaxcut += maxlength;
    topo->exter.sqmaxcut *= topo->exter.sqmaxcut*1.1;
    /*TODO fill chain list and maxch, park particle type*/
    fprintf (stdout, "Generating chainlist...\n");
    maxch=0;
    maxpart=0;
    i=0;
    /* expand the [SYSTEM] section: sysmoln[i] copies of molecule sysnames[i] */
    while (sysnames[i]!=NULL) {
        mol=0;
        while (strcmp(molecules[mol].name,sysnames[i])) {
            mol++;
            if (molecules[mol].name == NULL) {
                fprintf (stderr, "TOPOLOGY ERROR: molecules %s is not defined.\n\n",sysnames[i]);
                topdealoc(&pline,sysnames,&sysmoln, molecules);
                exit(1);
            }
        }
        for (j=0;j<sysmoln[i];j++) {
            //DEBUG fprintf (stdout, "molnames %s sysname %s sysnum %ld \n",molnames[mol],sysnames[i],sysmoln[i]);
            k=0;
            while (molecules[mol].type[k] != -1) {
                /* BUGFIX: bounds checks must run BEFORE indexing; the original
                   tested maxpart/k only after the out-of-bounds write had
                   already happened. */
                if (maxpart >= MAXN) {
                    fprintf (stderr, "TOPOLOGY ERROR: more particles(%ld) than allowed(%d).\n",maxpart,MAXN);
                    fprintf (stderr, "Change MAXN in source and recompile the program. \n\n");
                    topdealoc(&pline,sysnames,&sysmoln, molecules);
                    exit(1);
                }
                if (k >= MAXCHL) {
                    fprintf (stderr, "TOPOLOGY ERROR: more particles in chan (%ld) than allowed(%d).\n",k,MAXCHL);
                    fprintf (stderr, "Change MAXCHL in source and recompile the program. \n\n");
                    topdealoc(&pline,sysnames,&sysmoln, molecules);
                    exit(1);
                }
                tmp_particles[maxpart].type = molecules[mol].type[k];
                tmp_particles[maxpart].switchtype = molecules[mol].switchtype[k];
                tmp_particles[maxpart].delta_mu = molecules[mol].delta_mu[k];
                tmp_particles[maxpart].chaint = mol;
                tmp_particles[maxpart].chainn = maxch;
                /* more than one particle in the molecule => it is a chain */
                if (molecules[mol].type[1] != -1) {
                    topo->chainlist[maxch][k] = maxpart;
                }
                k++;
                maxpart++;
            }
            if (molecules[mol].type[1] != -1) {
                maxch++;
            }
        }
        i++;
    }
    topo->npart = maxpart;
    /* write the particles from the temporary to the "permanent" conf */
    conf->particle = malloc(sizeof(struct particles) * topo->npart);
    if(conf->particle == NULL){
        fprintf(stderr, "\nTOPOLOGY ERROR: Could not allocate memory for conf->particle");
        exit(1);
    }
    for(i = 0; i < topo->npart; i++){
        conf->particle[i].type = tmp_particles[i].type;
        conf->particle[i].switchtype = tmp_particles[i].switchtype;
        conf->particle[i].delta_mu = tmp_particles[i].delta_mu;
        conf->particle[i].chaint = tmp_particles[i].chaint;
        conf->particle[i].chainn = tmp_particles[i].chainn;
    }
    /* Initialize the clusterlist */
    sim->clusterlist = malloc(sizeof(long) * topo->npart);
    if(sim->clusterlist == NULL){
        fprintf(stderr, "\nTOPOLOGY ERROR: Could not allocate memory for sim->clusterlist!");
        exit(1);
    }
    sim->clustersenergy = malloc(sizeof(double) * topo->npart);
    if(sim->clustersenergy== NULL){
        fprintf(stderr, "\nTOPOLOGY ERROR: Could not allocate memory for sim->clustersenergy!");
        exit(1);
    }
    sim->clusters = NULL;
    /* get all the particles with switch type */
    long switchlist[topo->npart];
    long n_switch_part = 0;
    for(i = 0; i < topo->npart; i++){
        if(conf->particle[i].type != conf->particle[i].switchtype){
            switchlist[n_switch_part] = i;
            n_switch_part++;
        }
    }
    topo->n_switch_part = n_switch_part;
    if (n_switch_part == 0 && sim->switchprob > 0){
        fprintf(stderr, "TOPOLOGY WARNING: No switchable particles found, but probability for a switch is not zero!\n");
        sim->switchprob = 0;
        fprintf(stderr, "TOPOLOGY WARNING: We changed Switch Probability to zero in this run!\n");
    }
    topo->switchlist=NULL;
    if (n_switch_part > 0){
        topo->switchlist = malloc(sizeof(long) * n_switch_part);
        for(i = 0; i < n_switch_part; i++){
            topo->switchlist[i] = switchlist[i];
            //DEBUG
            //printf("%ld is in switchlist\n", switchlist[i]);
        }
    }
    /* count chains and cross-check against maxch */
    j = 0;
    while (topo->chainlist[j][0] >= 0) {
        j++;
    }
    topo->chainnum = j;
    if (topo->chainnum != maxch) {
        fprintf (stderr, "TOPOLOGY ERROR: Maximum number of chains(%ld) does not agree with number of chains (%ld)\n\n",maxch,topo->chainnum);
        topdealoc(&pline,sysnames,&sysmoln, molecules);
        exit (1);
    }
    k=0;
    /*clear connectivity and then fill it from chain list*/
    fprintf (stdout, "Generating connectivity...\n");
    for (i=0; i<MAXN; i++) {
        topo->conlist[i][0] = -1;
        topo->conlist[i][1] = -1;
        topo->conlist[i][2] = -1;
        topo->conlist[i][3] = -1;
    }
    conf->sysvolume = 0;
    for (i=0; i<maxpart; i++) {
        for (j=0; j<MAXCHL; j++) {
            if (topo->chainlist[i][j] >= 0) {
                k = topo->chainlist[i][j];
                if ((j+1 < MAXCHL)&&(topo->chainlist[i][j+1] >= 0))
                    topo->conlist[k][1] = topo->chainlist[i][j+1]; /*if there is a next particle fill it to head bond*/
                if (j > 0)
                    topo->conlist[k][0] = topo->chainlist[i][j-1]; /*if this is not first particle fill tail bond*/
                if ((j+2 < MAXCHL)&& (topo->chainlist[i][j+2] >= 0))
                    topo->conlist[k][3] = topo->chainlist[i][j+2]; /*if there is a second next particle fill it second neighbour*/
                if (j > 1)
                    topo->conlist[k][2] = topo->chainlist[i][j-2]; /*if this is not second or first particle fill second tail bond*/
            }
        }
        conf->sysvolume += topo->ia_params[conf->particle[i].type][conf->particle[i].type].volume;
    }
    /*DEBUG
    for (i=0; i<MAXN; i++) {
        for (j=0; j<MAXCHL; j++) {
            fprintf (stderr, " %d",chainlist[i][j]);
        }
        fprintf (stderr, " \n");
    }
    for (i=0; i<MAXN; i++) {
        printf (" %ld %ld %ld %ld\n",conlist[i][0],conlist[i][1],conlist[i][2],conlist[i][3]);
    }
    */
    // Mark particles as not switched
    for(i = 0; i < maxpart; i++){
        conf->particle[i].switched = 0;
    }
    topdealoc(&pline,sysnames,&sysmoln, molecules);
    DEBUG_INIT("Finished with reading the topology");
    /* Parallel tempering check */
#ifdef MPI
    // probability to switch replicas = exp ( -0.5 * dT*dT * N / (1 + dT) )
    printf("Probability to switch replicas is roughly: %f\n",exp(-0.5 * maxpart * sim->dtemp * sim->dtemp / (1.0 + sim->dtemp)) );
#endif
}
/*..........................................................................*/
/* Release all heap memory acquired while reading the topology (init_top):
   the working line buffer, the system molecule counts, the per-molecule
   tables and the system name strings.  Freed pointers are reset to NULL
   where the caller can still see them.  Always returns 0. */
int topdealoc(char **pline,char *sysnames[MAXN], long **sysmoln, struct molecule * molecules)
{
    long i;

    if (*pline != NULL) {
        free(*pline);
        *pline = NULL;
    }
    if (*sysmoln != NULL) {
        free(*sysmoln);
        *sysmoln = NULL;
    }
    for (i = 0; i < MAXN; i++) {
        if (i < MAXMT) {
            /* molecules[i].name may be NULL; free(NULL) is a no-op */
            free(molecules[i].name);
            free(molecules[i].type);
            free(molecules[i].switchtype);
            free(molecules[i].delta_mu);
        }
        if (sysnames[i] != NULL) {
            free(sysnames[i]);
            sysnames[i] = NULL;
        }
    }
    return 0;
}
/* Initiate the cached orientation vectors of a single particle: patch side
vectors, the optional second patch direction and the chiral axis vectors.
target    - index of the particle in conf->particle
ia_parami - self-interaction parameters of the particle's type
conf      - configuration holding the particle
Isotropic geotypes (SCA, SCN) have nothing to precompute and return early. */
void int_partvec(long target, struct ia_param * ia_parami, struct conf * conf )
{
struct quat quatrot;
/* helpers defined elsewhere in this file */
struct quat quat_create(struct vector, double, double);
void vec_rotate(struct vector *, struct quat);
void normalise(struct vector *);
void ortogonalise(struct vector *,struct vector);
if ( (ia_parami->geotype[0] == SCA) || (ia_parami->geotype[0] == SCN) ){
/*SCA and SCN are isotropic... nothing to initialize*/
return;
}
/* ensure dir is a unit vector and the patch direction is perpendicular to it */
normalise (&conf->particle[target].dir);
ortogonalise(&conf->particle[target].patchdir[0],conf->particle[target].dir);
/*calculate patch sides*/
if ( (ia_parami->geotype[0] == PSC) || (ia_parami->geotype[0] == CPSC)
|| (ia_parami->geotype[0] == TPSC) || (ia_parami->geotype[0] == TCPSC) ){
/* rotate patch vector by half size of patch*/
conf->particle[target].patchsides[0] = conf->particle[target].patchdir[0];
quatrot=quat_create(conf->particle[target].dir, ia_parami->pcoshalfi[0], ia_parami->psinhalfi[0]);
vec_rotate(&(conf->particle[target].patchsides[0]),quatrot);
/*second side*/
conf->particle[target].patchsides[1] = conf->particle[target].patchdir[0];
quatrot=quat_create(conf->particle[target].dir, ia_parami->pcoshalfi[0], -1.0*ia_parami->psinhalfi[0]);
vec_rotate(&(conf->particle[target].patchsides[1]),quatrot);
}
/*calculate second patchdir: rotate the first patch around dir by the
second-patch angle, then re-orthogonalise against dir*/
if ( (ia_parami->geotype[0] == TPSC) || (ia_parami->geotype[0] == TCPSC) ||
(ia_parami->geotype[0] == TCHPSC) || (ia_parami->geotype[0] == TCHCPSC)){
conf->particle[target].patchdir[1] = conf->particle[target].patchdir[0];
quatrot=quat_create(conf->particle[target].dir, ia_parami->csecpatchrot[0], ia_parami->ssecpatchrot[0]);
vec_rotate(&(conf->particle[target].patchdir[1]),quatrot);
ortogonalise(&conf->particle[target].patchdir[1],conf->particle[target].dir);
}
/*calculate second patch sides (half-angle parameters at index [2])*/
if ( (ia_parami->geotype[0] == TPSC) || (ia_parami->geotype[0] == TCPSC) ){
/* rotate patch vector by half size of patch*/
conf->particle[target].patchsides[2] = conf->particle[target].patchdir[1];
quatrot=quat_create(conf->particle[target].dir, ia_parami->pcoshalfi[2], ia_parami->psinhalfi[2]);
vec_rotate(&(conf->particle[target].patchsides[2]),quatrot);
/*second side*/
conf->particle[target].patchsides[3] = conf->particle[target].patchdir[1];
quatrot=quat_create(conf->particle[target].dir, ia_parami->pcoshalfi[2], -1.0*ia_parami->psinhalfi[2]);
vec_rotate(&(conf->particle[target].patchsides[3]),quatrot);
}
/*calculate chdir vector; for chiral types the patch sides are rotated
around the chiral axis instead of dir, so they are recomputed here*/
if ( (ia_parami->geotype[0] == CHPSC) || (ia_parami->geotype[0] == CHCPSC)
|| (ia_parami->geotype[0] == TCHPSC) || (ia_parami->geotype[0] == TCHCPSC)){
conf->particle[target].chdir[0] = conf->particle[target].dir;
quatrot = quat_create(conf->particle[target].patchdir[0], ia_parami->chiral_cos[0], ia_parami->chiral_sin[0]);
vec_rotate(&(conf->particle[target].chdir[0]), quatrot);
/* rotate patch vector by half size of patch*/
conf->particle[target].patchsides[0] = conf->particle[target].patchdir[0];
quatrot=quat_create(conf->particle[target].chdir[0], ia_parami->pcoshalfi[0], ia_parami->psinhalfi[0]);
vec_rotate(&(conf->particle[target].patchsides[0]),quatrot);
/*second side*/
conf->particle[target].patchsides[1] = conf->particle[target].patchdir[0];
quatrot=quat_create(conf->particle[target].chdir[0], ia_parami->pcoshalfi[0], -1.0*ia_parami->psinhalfi[0]);
vec_rotate(&(conf->particle[target].patchsides[1]),quatrot);
}
/*calculate chdir vector for seond patch
NOTE(review): chiral_cos[0]/chiral_sin[0] are reused here - presumably
both patches share one chirality angle; confirm against the parameter
definition*/
if ( (ia_parami->geotype[0] == TCHPSC) || (ia_parami->geotype[0] == TCHCPSC) ){
conf->particle[target].chdir[1] = conf->particle[target].dir;
quatrot = quat_create(conf->particle[target].patchdir[1], ia_parami->chiral_cos[0], ia_parami->chiral_sin[0]);
vec_rotate(&(conf->particle[target].chdir[1]), quatrot);
/* rotate patch vector by half size of patch to get sides*/
conf->particle[target].patchsides[2] = conf->particle[target].patchdir[1];
quatrot=quat_create(conf->particle[target].chdir[1], ia_parami->pcoshalfi[2], ia_parami->psinhalfi[2]);
vec_rotate(&(conf->particle[target].patchsides[2]),quatrot);
/*second side*/
conf->particle[target].patchsides[3] = conf->particle[target].patchdir[1];
quatrot=quat_create(conf->particle[target].chdir[1], ia_parami->pcoshalfi[2], -1.0*ia_parami->psinhalfi[2]);
vec_rotate(&(conf->particle[target].patchsides[3]),quatrot);
}
}
/* calculate vectors on particles for speedup*/
void partvecinit(struct topo * topo, struct sim * sim, struct conf * conf )
{
long i;
void int_partvec(long target, struct ia_param *, struct conf * conf );
for(i = 0; i < topo->npart; i++){
if ( topo->ia_params[conf->particle[i].type][conf->particle[i].type].geotype[0] < SP)
int_partvec(i,&(topo->ia_params[conf->particle[i].type][conf->particle[i].type]),conf);
}
}
/* Generate interaction pairs: for every ordered pair of defined particle
 * types (i,j) combine the single-type parameters ia_params[i][i] and
 * ia_params[j][j] — arithmetic mean for sizes/distances, geometric mean for
 * epsilon. Also fills each type's interaction with the external potential
 * and finally zeroes epsilon for all excluded pairs. */
void genparampairs(struct topo * topo, BOOL (*exclusions)[MAXT][MAXT])
{
    int i,j,k;
    int a[2];
    int len;
    double length = 0; // The length of a PSC; currently only one value is allowed, i.e. implemented
    for (i=0;i<MAXT;i++) {
        for (j=0;j<MAXT;j++) {
            if (i!=j) {
                /* both types must actually be defined (geotype 0 = unused slot) */
                if((topo->ia_params[j][j].geotype[0] != 0) && (topo->ia_params[i][i].geotype[0] != 0)){
                    a[0] = i;
                    a[1] = j;
                    /* slot k of the pair entry is taken from type a[k]'s self entry */
                    for(k = 0; k < 2; k++){
                        topo->ia_params[i][j].geotype[k] = topo->ia_params[a[k]][a[k]].geotype[0];
                        topo->ia_params[i][j].len[k] = topo->ia_params[a[k]][a[k]].len[0];
                        /* enforce a single global spherocylinder length: the first
                           nonzero length seen is remembered and all others must match */
                        if (topo->ia_params[a[k]][a[k]].len[0] > 0){
                            if (length == 0){
                                length = topo->ia_params[a[k]][a[k]].len[0];
                            }
                            else if (length > 0){
                                if (length != topo->ia_params[a[k]][a[k]].len[0]){
                                    fprintf(stderr, "Error: ");
                                    fprintf(stderr, "Different lengths for spherocylinders have not been implemented yet!\n");
                                    fprintf(stderr, "\tCheck the length of type %d!\n", a[k]);
                                    exit(1);
                                }
                            }
                        }
                        topo->ia_params[i][j].half_len[k] = topo->ia_params[a[k]][a[k]].half_len[0];
                        /* Handle angles only when geotype is a patchy spherocylinder */
                        if(topo->ia_params[i][j].geotype[k] >= PSC && topo->ia_params[i][j].geotype[k] < SP){
                            topo->ia_params[i][j].pangl[k] = topo->ia_params[a[k]][a[k]].pangl[0];
                            topo->ia_params[i][j].panglsw[k] = topo->ia_params[a[k]][a[k]].panglsw[0];
                            /* precomputed cosines: angles are stored in degrees, hence /180*PI */
                            topo->ia_params[i][j].pcangl[k] = cos(topo->ia_params[i][j].pangl[k]/2.0/180*PI);
                            topo->ia_params[i][j].pcanglsw[k] = cos((topo->ia_params[i][j].pangl[k]/2.0+topo->ia_params[i][j].panglsw[k])/180*PI);
                            topo->ia_params[i][j].pcoshalfi[k] = cos((topo->ia_params[i][j].pangl[k]/2.0+topo->ia_params[i][j].panglsw[k])/2.0/180*PI);
                            topo->ia_params[i][j].psinhalfi[k] = sqrt(1.0 - topo->ia_params[i][j].pcoshalfi[k] * topo->ia_params[i][j].pcoshalfi[k]);
                        }
                        /* Only when the PSC is chiral */
                        if( (topo->ia_params[i][j].geotype[k] == CHCPSC) || (topo->ia_params[i][j].geotype[k] == CHPSC) \
                           || (topo->ia_params[i][j].geotype[k] == TCHCPSC) || (topo->ia_params[i][j].geotype[k] == TCHPSC) ){
                            topo->ia_params[i][j].chiral_cos[k] = topo->ia_params[a[k]][a[k]].chiral_cos[0];
                            topo->ia_params[i][j].chiral_sin[k] = topo->ia_params[a[k]][a[k]].chiral_sin[0];
                        }
                        /* Information of two patches: second-patch data live in slots k+2 */
                        if( (topo->ia_params[i][j].geotype[k] == TCPSC) || (topo->ia_params[i][j].geotype[k] == TPSC) \
                           || (topo->ia_params[i][j].geotype[k] == TCHCPSC) || (topo->ia_params[i][j].geotype[k] == TCHPSC) ){
                            topo->ia_params[i][j].csecpatchrot[k] = topo->ia_params[a[k]][a[k]].csecpatchrot[0];
                            topo->ia_params[i][j].ssecpatchrot[k] = topo->ia_params[a[k]][a[k]].ssecpatchrot[0];
                            topo->ia_params[i][j].pangl[k+2] = topo->ia_params[a[k]][a[k]].pangl[2];
                            topo->ia_params[i][j].panglsw[k+2] = topo->ia_params[a[k]][a[k]].panglsw[2];
                            topo->ia_params[i][j].pcangl[k+2] = cos(topo->ia_params[i][j].pangl[k+2]/2.0/180*PI);
                            topo->ia_params[i][j].pcanglsw[k+2] = cos((topo->ia_params[i][j].pangl[k+2]/2.0+topo->ia_params[i][j].panglsw[k+2])/180*PI);
                            topo->ia_params[i][j].pcoshalfi[k+2] = cos((topo->ia_params[i][j].pangl[k+2]/2.0+topo->ia_params[i][j].panglsw[k+2])/2.0/180*PI);
                            topo->ia_params[i][j].psinhalfi[k+2] = sqrt(1.0 - topo->ia_params[i][j].pcoshalfi[k+2] * topo->ia_params[i][j].pcoshalfi[k+2]);
                        }
                    }
                    /* len+1 copies the terminating NUL as well.
                       NOTE(review): other_name is copied from [i][i].other_name —
                       presumably it should describe the partner type [j][j]; confirm. */
                    len = strlen(topo->ia_params[i][i].name);
                    strncpy(topo->ia_params[i][j].name, topo->ia_params[i][i].name, len + 1);
                    len = strlen(topo->ia_params[i][i].other_name);
                    strncpy(topo->ia_params[i][j].other_name, topo->ia_params[i][i].other_name, len + 1);
                    /* Lorentz-Berthelot style mixing */
                    topo->ia_params[i][j].sigma = AVER(topo->ia_params[i][i].sigma,topo->ia_params[j][j].sigma);
                    topo->ia_params[i][j].epsilon = sqrt(topo->ia_params[i][i].epsilon * topo->ia_params[j][j].epsilon);
                    topo->ia_params[i][j].pswitch = AVER(topo->ia_params[i][i].pswitch,topo->ia_params[j][j].pswitch);
                    topo->ia_params[i][j].rcutwca = (topo->ia_params[i][j].sigma)*pow(2.0,1.0/6.0);
                    // Averaging of the flat part of attraction
                    topo->ia_params[i][j].pdis = AVER(topo->ia_params[i][i].pdis - topo->ia_params[i][i].rcutwca, \
                                                      topo->ia_params[j][j].pdis - topo->ia_params[j][j].rcutwca) + topo->ia_params[i][j].rcutwca;
                    topo->ia_params[i][j].rcut = topo->ia_params[i][j].pswitch+topo->ia_params[i][j].pdis;
                    // if not non-attractive == if attractive (geotypes divisible by 10 are the purely repulsive variants)
                    if (!((topo->ia_params[i][j].geotype[0] % 10 == 0) || (topo->ia_params[i][j].geotype[1] % 10 == 0))){
                        if (topo->ia_params[i][j].rcutwca > topo->ia_params[i][j].rcut){
                            fprintf(stderr, "Error: Repulsive cutoff is larger than the attractive cutoff!\n");
                            fprintf(stderr, "       between %d and %d: %lf > %lf\n", i, j, topo->ia_params[i][j].rcutwca, topo->ia_params[i][j].rcut);
                        }
                    }
                    /* track the global maximal cutoff */
                    if ( topo->ia_params[i][j].rcutwca > topo->sqmaxcut )
                        topo->sqmaxcut = topo->ia_params[i][j].rcutwca;
                    if ( topo->ia_params[i][j].rcut > topo->sqmaxcut )
                        topo->sqmaxcut = topo->ia_params[i][j].rcut;
                }
            }
        }
        /* filling interaction with external potential */
        if( (topo->exter.exist) && (topo->ia_params[i][i].geotype[0] != 0)){
            /* use everything like for given particles except distance and attraction,
               which is generated as for other interactions */
            topo->exter.interactions[i] = topo->ia_params[i][i];
            topo->exter.interactions[i].sigma = AVER(topo->ia_params[i][i].sigma, topo->exter.thickness);
            topo->exter.interactions[i].rcutwca = (topo->exter.interactions[i].sigma)*pow(2.0,1.0/6.0);
            topo->exter.interactions[i].epsilon = sqrt(topo->ia_params[i][i].epsilon * topo->exter.epsilon);
            topo->exter.interactions[i].pswitch = AVER(topo->ia_params[i][i].pswitch, topo->exter.attraction);
            topo->exter.interactions[i].pdis = AVER(topo->ia_params[i][i].pdis - topo->ia_params[i][i].rcutwca, 0.0) + topo->exter.interactions[i].rcutwca;
            topo->exter.interactions[i].rcut = topo->exter.interactions[i].pswitch + topo->exter.interactions[i].pdis;
            if (topo->exter.interactions[i].rcut > topo->exter.sqmaxcut ) topo->exter.sqmaxcut = topo->exter.interactions[i].rcut;
        }
    }
    /* excluded pairs simply lose their attraction */
    for (i=0;i<MAXT;i++) {
        for (j=0;j<MAXT;j++) {
            if ( (*exclusions)[i][j] )
                topo->ia_params[i][j].epsilon = 0.0;
        }
    }
}
/* Initialize every entry of the pair interaction table to a well-defined
 * zero state before the topology file is parsed. */
void initparams(struct topo * topo)
{
    int i,j,k;
    for (i=0;i<MAXT;i++) {
        for (j=0;j<MAXT;j++) {
            /* per-slot arrays sized 2 (one slot per partner type) */
            for(k = 0; k < 2; k++){
                topo->ia_params[i][j].geotype[k] = 0;
                topo->ia_params[i][j].len[k] = 0.0;
                topo->ia_params[i][j].half_len[k] = 0.0;
                topo->ia_params[i][j].chiral_cos[k] = 0.0;
                topo->ia_params[i][j].chiral_sin[k] = 0.0;
                topo->ia_params[i][j].csecpatchrot[k] = 0.0;
                topo->ia_params[i][j].ssecpatchrot[k] = 0.0;
            }
            /* BUG FIX: the patch-angle arrays hold four slots (two per patch);
             * previously only slots 2..3 were zeroed, leaving slots 0..1
             * uninitialized for geotypes that never assign them later. */
            for(k = 0; k < 4; k++){
                topo->ia_params[i][j].pangl[k] = 0.0;
                topo->ia_params[i][j].panglsw[k] = 0.0;
                topo->ia_params[i][j].pcangl[k] = 0.0;
                topo->ia_params[i][j].pcanglsw[k] = 0.0;
                topo->ia_params[i][j].pcoshalfi[k] = 0.0;
                topo->ia_params[i][j].psinhalfi[k] = 0.0;
            }
            topo->ia_params[i][j].sigma = 0.0;
            topo->ia_params[i][j].epsilon = 0.0;
            topo->ia_params[i][j].rcutwca = 0.0;
            topo->ia_params[i][j].pdis = 0.0;
            topo->ia_params[i][j].pswitch = 0.0;
            topo->ia_params[i][j].rcut = 0.0;
            topo->ia_params[i][j].volume = 0.0;
            topo->ia_params[i][j].pvolscale = 0.0;
        }
    }
    topo->sqmaxcut = 0;
}
/*...........................................................................*/
/*filling the system parameters*/
int fillsystem(char *pline, char *sysnames[MAXN], long **sysmoln)
{
int i,fields;
char zz[STRLEN];
void trim (char *);
trim(pline);
if (!pline) {
fprintf (stderr, "TOPOLOGY ERROR: obtained empty line in fil system.\n\n");
return 0;
}
i=0;
while (sysnames[i]!=NULL) i++;
fields = sscanf(pline, "%s %ld", zz, &(*sysmoln)[i]);
sysnames[i]=malloc(strlen(zz)+1);
strcpy(sysnames[i],zz);
if (fields != 2) {
fprintf (stderr, "TOPOLOGY ERROR: failed reading system from (%s).\n\n", pline);
return 0;
}
if ((*sysmoln)[i] < 1) {
fprintf (stderr, "TOPOLOGY ERROR: cannot have %ld number of molecules.\n\n", (*sysmoln)[i]);
return 0;
}
fprintf (stdout, "system: %s %ld\n",sysnames[i],(*sysmoln)[i]);
return 1;
}
/*filling the parameters for molecules*/
int fillmol(char *molname, char *pline, struct molecule * molecules, struct topo * topo)
{
DEBUG_INIT("fillmol just has been called!");
char str[STRLEN],str2[STRLEN],molcommand[STRLEN],molparams[STRLEN];
int i,j,fields;
double bondk,bonddist;
void trim (char *);
void upstring(char *);
void beforecommand(char *, char *, char);
void aftercommand(char *, char *, char);
beforecommand(str2, pline, CLOSEMOL);
aftercommand(str, str2, OPENMOL);
trim(str);
if (strlen(str) == 0) return 1;
beforecommand(molcommand,str,SEPARATOR);
aftercommand(molparams,str,SEPARATOR);
trim(molcommand);
trim(molparams);
upstring (molcommand);
DEBUG_INIT("molcommand: %s", molcommand);
DEBUG_INIT("molparams: %s", molparams);
i=0;
while (strcmp(molecules[i].name, molname))
i++;
j=0;
while (molecules[i].type[j] != -1)
j++;
if (!strcmp(molcommand,"PARTICLES")) {
fprintf (stdout, "particle %d: \t", j + 1);
fields = sscanf(molparams,"%ld %ld %lf",molecules[i].type + j, molecules[i].switchtype + j, molecules[i].delta_mu + j);
fprintf (stdout, "%ld ",molecules[i].type[j]);
if (fields == 1){
(molecules[i].switchtype[j]) = (molecules[i].type[j]);
(molecules[i].delta_mu[j]) = 0;
fields = 3;
} else{
fprintf(stdout, "(with switchtype: %ld and delta_mu: %lf)", molecules[i].switchtype[j], molecules[i].delta_mu[j]);
}
if (fields != 3) {
fprintf (stderr, "TOPOLOGY ERROR: could not read a pacticle.\n\n");
return 0;
}
fflush(stdout);
if (molecules[i].type[j] < 0) {
fprintf (stderr, "TOPOLOGY ERROR: pacticles include negative type.\n\n");
return 0;
}
if (molecules[i].type[j] > MAXT) {
fprintf (stderr, "TOPOLOGY ERROR: pacticles include type out of range 0-%ld.\n\n",(long)MAXT);
return 0;
}
fprintf (stdout, "\n");
return 1;
}
if (!strcmp(molcommand,"BOND1")) {
fields = sscanf(molparams, "%le %le ", &bondk, &bonddist);
if (fields < 2) {
fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for bond1, should be 2.\n\n");
return 0;
}
if (bonddist < 0) {
fprintf (stderr, "TOPOLOGY ERROR: bonddist cannot be negative: %f \n\n",bonddist);
return 0;
}
topo->chainparam[i].bond1c = bondk;
topo->chainparam[i].bond1eq = bonddist;
fprintf (stdout, "bond1: %f %f \n",topo->chainparam[i].bond1c,topo->chainparam[i].bond1eq);
return 1;
}
if (!strcmp(molcommand,"BOND2")) {
fields = sscanf(molparams, "%le %le ", &bondk, &bonddist);
if (fields < 2) {
fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for bond2, should be 2.\n\n");
return 0;
}
if (bonddist < 0) {
fprintf (stderr, "TOPOLOGY ERROR: bonddist cannot be negative: %f \n\n",bonddist);
return 0;
}
topo->chainparam[i].bond2c = bondk;
topo->chainparam[i].bond2eq = bonddist;
fprintf (stdout, "bond2: %f %f \n",topo->chainparam[i].bond2c,topo->chainparam[i].bond2eq);
return 1;
}
if (!strcmp(molcommand,"BONDD")) {
fields = sscanf(molparams, "%le %le ", &bondk, &bonddist);
if (fields < 2) {
fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for bondd, should be 2.\n\n");
return 0;
}
if (bonddist < 0) {
fprintf (stderr, "TOPOLOGY ERROR: bonddist cannot be negative: %f \n\n",bonddist);
return 0;
}
topo->chainparam[i].bonddc = bondk;
topo->chainparam[i].bonddeq = bonddist;
fprintf (stdout, "bondd: %f %f \n",topo->chainparam[i].bonddc,topo->chainparam[i].bonddeq);
return 1;
}
if (!strcmp(molcommand,"ANGLE1")) {
fields = sscanf(molparams, "%le %le ", &bondk, &bonddist);
if (fields < 2) {
fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for angle1, should be 2.\n\n");
return 0;
}
if (bonddist < 0) {
fprintf (stderr, "TOPOLOGY ERROR: equilibrium angle cannot be negative: %f \n\n",bonddist);
return 0;
}
topo->chainparam[i].angle1c = bondk;
topo->chainparam[i].angle1eq = bonddist/180.0*PI;
fprintf (stdout, "angle1: %f %f \n",topo->chainparam[i].angle1c,topo->chainparam[i].angle1eq);
return 1;
}
if (!strcmp(molcommand,"ANGLE2")) {
fields = sscanf(molparams, "%le %le ", &bondk, &bonddist);
if (fields < 2) {
fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for angle2, should be 2.\n\n");
return 0;
}
if (bonddist < 0) {
fprintf (stderr, "TOPOLOGY ERROR: equilibrium angle cannot be negative: %f \n\n",bonddist);
return 0;
}
topo->chainparam[i].angle2c = bondk;
topo->chainparam[i].angle2eq = bonddist/180.0*PI;
fprintf (stdout, "angle2: %f %f \n",topo->chainparam[i].angle2c,topo->chainparam[i].angle2eq);
return 1;
}
fprintf (stderr, "TOPOLOGY ERROR: unknown parameter: %s.\n\n",molcommand);
return 0;
}
/* Converts the geometrical type string into a number */
int convert_geotype(char * geotype){
if (strcmp(geotype, "CPSC") == 0)
return CPSC;
if (strcmp(geotype, "CHCPSC") == 0)
return CHCPSC;
if (strcmp(geotype, "SCA") == 0)
return SCA;
if (strcmp(geotype, "PSC") == 0)
return PSC;
if (strcmp(geotype, "CHPSC") == 0)
return CHPSC;
if (strcmp(geotype, "TCPSC") == 0)
return TCPSC;
if (strcmp(geotype, "TCHCPSC") == 0)
return TCHCPSC;
if (strcmp(geotype, "TPSC") == 0)
return TPSC;
if (strcmp(geotype, "TCHPSC") == 0)
return TCHPSC;
if (strcmp(geotype, "SPN") == 0)
return SPN;
if (strcmp(geotype, "SPA") == 0)
return SPA;
return 0;
}
/*filling the parameters of external potentail - wall. Returns 1 on succes.*/
int fillexter(char **pline, struct topo * topo)
{
int fields;
double param[3];
/* 0: thickness
* 1: epsilon
* 2: attraction
*/
char typestr[STRLEN], paramstr[STRLEN];
void trim (char *);
void beforecommand(char *, char *, char);
void aftercommand(char *, char *, char);
beforecommand(typestr, *pline, SEPARATOR);
aftercommand(paramstr, *pline, SEPARATOR);
fields = sscanf(paramstr, "%le %le %le", ¶m[0], ¶m[1], ¶m[2]);
if (fields >3) {
fprintf (stderr, "TOPOLOGY ERROR: too many parameters for external potential. We have \
thickness, epsilon, and attraction distance so far.\n\n");
return 0;
}
if (fields >0) {
topo->exter.exist = TRUE;
topo->exter.thickness = param[0];
fprintf(stdout, "External potential with thickness: %le ",topo->exter.thickness);
if (fields >1) {
topo->exter.epsilon = param[1];
fprintf(stdout, "epsilon: %le ",topo->exter.epsilon);
if (fields >2) {
topo->exter.attraction = param[2];
fprintf(stdout, "and range of attraction: %le ",topo->exter.attraction);
}
}
} else{
topo->exter.exist = FALSE;
fprintf(stdout, "No external potential ");
}
fprintf(stdout, " \n");
DEBUG_INIT("Finished filling external potential");
return 1;
}
/* Fill pairs of types for which we exclude the attraction interaction.
 * pline points at a whitespace-separated list of type-index pairs
 * "t1 t2 t3 t4 ...". Each pair is recorded symmetrically in *exlusions.
 * Returns 1 on success, 0 on an odd number of indices or an index outside
 * the table. */
int fillexclusions(char **pline, BOOL (*exlusions)[MAXT][MAXT])
{
    long num1, num2;
    char *rest, *next;
    void trim (char *);

    rest = *pline;
    do {
        num1 = strtol(rest, &next, 10);
        trim(next);
        if ((int)strlen(next) == 0) {
            fprintf(stderr, "Error in reading Topology exclusions, probably there is not even number of types \n");
            return 0;
        }
        num2 = strtol(next, &rest, 10);
        trim(rest);
        /* BUG FIX: reject indices outside 0..MAXT-1 before indexing the
           table — previously a bad topology caused an out-of-bounds write */
        if (num1 < 0 || num1 >= MAXT || num2 < 0 || num2 >= MAXT) {
            fprintf(stderr, "Error in reading Topology exclusions, type out of range 0-%ld \n", (long)MAXT - 1);
            return 0;
        }
        (*exlusions)[num1][num2] = TRUE;
        (*exlusions)[num2][num1] = TRUE;
        fprintf(stderr, "Exclusions %ld %ld \n", num1, num2);
    } while ((int)strlen(rest) > 0);
    return 1;
}
/*filing the parameters for types from given strings. Returns 1 on succes.*/
int filltypes(char **pline, struct topo * topo)
{
int type;
int geotype_i;
int fields;
char name[SMSTR];
char geotype[SMSTR];
double param[11];
/* 0: epsilon
* 1: sigma
* 2: attraction dist
* 3: sttraction switch
* 4: patch angle
* 5: patch switch
* 6: length
* 7(optional): second patche rotation
* 8(optional): second patch angle
* 9(optional): second patch angle switch
* +1: chirality
*/
char typestr[STRLEN], paramstr[STRLEN];
void trim (char *);
void beforecommand(char *, char *, char);
void aftercommand(char *, char *, char);
beforecommand(typestr, *pline, SEPARATOR);
aftercommand(paramstr, *pline, SEPARATOR);
fields = sscanf(paramstr, "%s %d %s %le %le %le %le %le %le %le %le %le %le %le", name, &type, geotype, ¶m[0], ¶m[1], ¶m[2], ¶m[3], ¶m[4], ¶m[5], ¶m[6], ¶m[7], ¶m[8], ¶m[9], ¶m[10]);
fields -= 5; // number of parameter fields => I am too lazy to adjust everywhere below the numbers
//DEBUG fprintf (stdout, "Topology read geotype: %ld with parameters fields %d, str:%s and %s in pline %s\n",geotype,fields,geotypestr,paramstr,pline);
geotype_i = convert_geotype(geotype);
if(!geotype_i){
fprintf(stderr, "TOPOLOGY ERROR: Unknown GEOTYPE: %s!", geotype);
return 0;
}
DEBUG_INIT("geotype_i: %d; fields = %d", geotype_i, fields);
if (( (geotype_i == SCN) || (geotype_i == SPN) ) && (fields != 0)) {
fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for %s geotype, should be 1.\n\n", geotype);
return 0;
}
if (( (geotype_i == SCA) || (geotype_i == SPA)) && (fields != 2)) {
fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for %s geotype, should be 3.\n\n", geotype);
return 0;
}
if (( (geotype_i == PSC) || (geotype_i == CPSC) ) && (fields != 5)) {
fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for %s geotype, should be 5.\n\n", geotype);
return 0;
}
if (( (geotype_i == CHCPSC) || (geotype_i == CHCPSC) )&& ( fields != 6)) {
fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for %s geotype, should be 6.\n\n", geotype);
return 0;
}
if (( (geotype_i == TPSC) || (geotype_i == TCPSC) ) && (fields != 8)) {
fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for %s geotype, should be 8.\n\n", geotype);
return 0;
}
if (( (geotype_i == TCHCPSC) || (geotype_i == TCHCPSC) )&& ( fields != 9)) {
fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for %s geotype, should be 9.\n\n", geotype);
return 0;
}
if ((geotype_i < 0) || (geotype_i > (MAXT + 10))) {
fprintf (stderr, "TOPOLOGY ERROR: geotype (%s) is out of range: 0 - %d.\n\n", geotype, MAXT + 10);
return 0;
}
strcpy(topo->ia_params[type][type].name, name);
strcpy(topo->ia_params[type][type].other_name, name);
topo->ia_params[type][type].geotype[0] = geotype_i;
topo->ia_params[type][type].geotype[1] = geotype_i;
topo->ia_params[type][type].epsilon = param[0];
topo->ia_params[type][type].sigma = param[1];
topo->ia_params[type][type].rcutwca = (topo->ia_params[type][type].sigma)*pow(2.0,1.0/6.0);
fprintf(stdout, "Topology read of %d: %s (geotype: %s, %d) with parameters %lf %lf", type, name, geotype, geotype_i, topo->ia_params[type][type].epsilon, topo->ia_params[type][type].sigma);
if (fields > 0) {
topo->ia_params[type][type].pdis = param[2];
topo->ia_params[type][type].pswitch = param[3];
topo->ia_params[type][type].rcut = topo->ia_params[type][type].pswitch+topo->ia_params[type][type].pdis;
fprintf(stdout, " %f %f",topo->ia_params[type][type].pdis,topo->ia_params[type][type].pswitch);
}
if (fields > 2) {
int i;
for(i = 0; i < 2; i++){
topo->ia_params[type][type].len[i] = param[6];
topo->ia_params[type][type].half_len[i] = param[6] / 2;
topo->ia_params[type][type].pangl[i] = param[4];
topo->ia_params[type][type].panglsw[i] = param[5];
topo->ia_params[type][type].pcangl[i] = cos(param[4]/2.0/180*PI); // C1
topo->ia_params[type][type].pcanglsw[i] = cos((param[4]/2.0+param[5])/180*PI); // C2
//topo->ia_params[type][type].pcangl[i] = topo->ia_params[type][type].pcangl[i];
//topo->ia_params[type][type].pcanglsw[i] = topo->ia_params[type][type].pcanglsw[i];
topo->ia_params[type][type].pcoshalfi[i] = cos((param[4]/2.0+param[5])/2.0/180*PI);
topo->ia_params[type][type].psinhalfi[i] = sqrt(1.0 - topo->ia_params[type][type].pcoshalfi[i] * topo->ia_params[type][type].pcoshalfi[i]);
}
fprintf(stdout, " %f %f", topo->ia_params[type][type].pangl[0], topo->ia_params[type][type].panglsw[0]);
}
if(fields == 6){
int i;
for(i = 0; i < 2; i++){
topo->ia_params[type][type].chiral_cos[i] = cos(param[7] / 360 * PI);
topo->ia_params[type][type].chiral_sin[i] = sqrt(1 - topo->ia_params[type][type].chiral_cos[i] * topo->ia_params[type][type].chiral_cos[i]);
fprintf(stdout, " %f ", param[7]);
}
}
if ((fields == 8)||(fields == 9)) {
int i;
for(i = 0; i < 2; i++){
topo->ia_params[type][type].csecpatchrot[i] = cos(param[7] / 360 * PI);
topo->ia_params[type][type].ssecpatchrot[i] = sqrt(1 - topo->ia_params[type][type].csecpatchrot[i] * topo->ia_params[type][type].csecpatchrot[i]);
//fprintf(stdout, " %f %f", topo->ia_params[type][type].csecpatchrot[0], topo->ia_params[type][type].ssecpatchrot[0]);
topo->ia_params[type][type].pangl[i+2] = param[8];
topo->ia_params[type][type].panglsw[i+2] = param[9];
topo->ia_params[type][type].pcangl[i+2] = cos(param[8]/2.0/180*PI); // C1
topo->ia_params[type][type].pcanglsw[i+2] = cos((param[8]/2.0+param[9])/180*PI); // C2
//topo->ia_params[type][type].pcangl[i] = topo->ia_params[type][type].pcangl[i];
//topo->ia_params[type][type].pcanglsw[i] = topo->ia_params[type][type].pcanglsw[i];
topo->ia_params[type][type].pcoshalfi[i+2] = cos((param[8]/2.0+param[9])/2.0/180*PI);
topo->ia_params[type][type].psinhalfi[i+2] = sqrt(1.0 - topo->ia_params[type][type].pcoshalfi[i+2] * topo->ia_params[type][type].pcoshalfi[i+2]);
}
fprintf(stdout, " %f %f %f", param[7], topo->ia_params[type][type].pangl[2], topo->ia_params[type][type].panglsw[2]);
}
if(fields == 9){
int i;
for(i = 0; i < 2; i++){
topo->ia_params[type][type].chiral_cos[i] = cos(param[10] / 360 * PI);
topo->ia_params[type][type].chiral_sin[i] = sqrt(1 - topo->ia_params[type][type].chiral_cos[i] * topo->ia_params[type][type].chiral_cos[i]);
fprintf(stdout, " %f ", param[9]);
}
}
// Volume
if (geotype_i < SP)
topo->ia_params[type][type].volume = 4.0/3.0*PI*pow((topo->ia_params[type][type].sigma)/2.0,3.0) + PI/2.0*topo->ia_params[type][type].len[0]*pow((topo->ia_params[type][type].sigma)/2.0,2.0) ;
else
topo->ia_params[type][type].volume = 4.0/3.0*PI*pow((topo->ia_params[type][type].sigma)/2.0,3.0);
if ( topo->ia_params[type][type].rcutwca > topo->sqmaxcut )
topo->sqmaxcut = topo->ia_params[type][type].rcutwca;
if ( topo->ia_params[type][type].rcut > topo->sqmaxcut )
topo->sqmaxcut = topo->ia_params[type][type].rcut;
fprintf(stdout, " \n");
DEBUG_INIT("Finished filltypes");
return 1;
}
/************************************************
 * String Manipulation stuff for parsing files
 ************************************************/
/* Copy into str the part of pline that precedes the command character
   (or all of pline when the character is absent), trimmed. */
void beforecommand(char *str,char *pline,char commandc)
{
    char *hit;
    void trim(char *);

    strcpy(str, pline);
    hit = strchr(str, commandc);
    if (hit != NULL)
        *hit = '\0';
    trim(str);
}
/* return string that goes after command character */
void aftercommand(char *str, char *pline,char commandc)
{
char *dummy;
int i;
void trim(char *);
strcpy(str,pline);
if ((dummy = strchr (str,commandc)) != NULL) {
i=0;
while( (*dummy) != str[i]) {
str[i] = ' ';
i++;
}
str[i] = ' ';
}
trim (str);
}
/* Read one line (at most n-1 chars) from stream and strip the trailing
   newline, if any. Returns line, or NULL on EOF/error. */
char *fgets2(char *line, int n, FILE *stream)
{
    char *newline;

    if (!fgets(line, n, stream))
        return NULL;
    newline = strchr(line, '\n');
    if (newline)
        *newline = '\0';
    return line;
}
/* Remove a trailing comment: truncate line at the first comment sign. */
void strip_comment (char *line)
{
    char *mark;

    if (line == NULL)
        return;
    /* replace the comment mark by a terminating zero */
    mark = strchr(line, COMMENTSIGN);
    if (mark != NULL)
        *mark = '\0';
}
/* Test whether the (right-trimmed) line ends with the continuation
   character; if so remove it and return 1 (true), else return 0 (false). */
int continuing(char *s)
{
    int len;
    void rtrim (char *str);

    rtrim(s);
    len = strlen(s);
    if (len == 0 || s[len-1] != CONTINUE)
        return 0; /*false*/
    s[len-1] = 0; /* drop the continuation marker */
    return 1; /*true*/
}
/* Make a NUL-terminated string uppercase in place. */
void upstring (char *str)
{
    int i;
    /* Cast to unsigned char: passing a (possibly negative) plain char to
       toupper() is undefined behavior. Also stop on the terminator instead
       of calling strlen() on every iteration. */
    for (i = 0; str[i] != '\0'; i++)
        str[i] = toupper((unsigned char)str[i]);
}
/* Trim leading spaces, newlines and tabs from str in place. */
void ltrim (char *str)
{
    size_t c = 0;
    if (!str) return;
    while ((str[c] == ' ') || (str[c] == '\n') || (str[c] == '\t')) c++;
    /* Shift in place with memmove (regions overlap). The old version made a
       strdup() copy whose result was never checked — a NULL on allocation
       failure would have been dereferenced — and the heap round-trip was
       unnecessary anyway. */
    if (c > 0) memmove(str, str + c, strlen(str + c) + 1);
}
/* Trim trailing spaces, tabs and newlines from str in place. */
void rtrim (char *str)
{
    int nul;
    if (!str) return;
    nul = strlen(str)-1;
    /* BUG FIX: loop down to index 0 (nul >= 0). The old condition (nul > 0)
       never examined the first character, so a string consisting of a single
       whitespace character was left untouched. */
    while ((nul >= 0) && ((str[nul] == ' ') || (str[nul] == '\t') || (str[nul] == '\n')) ) {
        str[nul] = '\0';
        nul--;
    }
}
/* Trim whitespace from both ends of a string in place (delegates to ltrim
   and rtrim). */
void trim (char *str)
{
    void ltrim (char *str);
    void rtrim (char *str);
    ltrim (str);
    rtrim (str);
}
/**
 * Dumps a configuration to the supplied file handle.
 * One line per particle: position folded into the primary box (via anint())
 * and scaled by the box size, followed by the direction vector, the first
 * patch direction, and the switched flag.
 */
void draw(FILE *outfile, /*struct vector box, long npart,
   struct particles *particle,*/ struct conf * conf, struct topo * topo)
{
    long i;
    double anint(double);
    //fprintf (outfile, "%15.8le %15.8le %15.8le\n", box.x, box.y, box.z);
    for (i = 0; i < topo->npart; i++) {
        /* pos is presumably stored in fractional (box) coordinates — the
           subtraction of anint(pos) wraps it to [-0.5, 0.5) before scaling */
        fprintf (outfile, "%15.8le %15.8le %15.8le %15.8le %15.8le %15.8le %15.8le %15.8le %15.8le %d\n",
                 conf->box.x * ((conf->particle[i].pos.x) - anint(conf->particle[i].pos.x)),
                 conf->box.y * ((conf->particle[i].pos.y) - anint(conf->particle[i].pos.y)),
                 conf->box.z * ((conf->particle[i].pos.z) - anint(conf->particle[i].pos.z)),
                 conf->particle[i].dir.x, conf->particle[i].dir.y, conf->particle[i].dir.z,
                 conf->particle[i].patchdir[0].x, conf->particle[i].patchdir[0].y, conf->particle[i].patchdir[0].z,
                 conf->particle[i].switched);
    }
}
/*............................................................................*/
/****************************************************************************/
/*                            Pairlist stuf                                 */
/****************************************************************************/
/**
 * Initializes the pairlist and allocates memory.
 * Each particle gets room for topo->npart partners (the worst case:
 * everybody interacts with everybody).
 */
void init_pairlist(struct topo * topo, struct sim * sim){
    long i;
    printf("\nAllocating memory for pairlist...\n");
    sim->pairlist = xmalloc(sizeof(struct pairs) * topo->npart);
    // Highest guess: Every particle interacts with the others
    // TODO: Make it more sophisticated
    for(i = 0; i < topo->npart; i++){
        /* BUG FIX: use the checked allocator like the pairlist itself —
           the plain malloc() here was unchecked, and a NULL return would
           have crashed later in gen_simple_pairlist() */
        sim->pairlist[i].pairs = xmalloc(sizeof(long) * topo->npart);
        sim->pairlist[i].num_pairs = 0;
    }
}
/*............................................................................*/
/**
 * Cleans up: deallocates the memory for the pairlist.
 * Always returns 0. Safe to call when the pairlist was never allocated.
 */
int dealloc_pairlist(struct topo * topo, struct sim * sim){
    long i;
    if(sim->pairlist != NULL){
        for(i = 0; i < topo->npart; i++){
            if(sim->pairlist[i].pairs != NULL){
                free(sim->pairlist[i].pairs);
                sim->pairlist[i].pairs = NULL; /* guard against double free */
            }
        }
        free(sim->pairlist);
        sim->pairlist = NULL; /* mark as deallocated so a second call is a no-op */
    }
    return 0;
}
/*............................................................................*/
/**
 * Generates a pairlist with a very basic algorithm: an O(N^2) scan over all
 * particle pairs. A pair is recorded (for both particles) when the
 * minimum-image center-to-center distance is within a skin radius built from
 * the maximal trial displacements, the pairlist update interval and the
 * global interaction cutoff.
 */
void gen_simple_pairlist(struct topo * topo, struct sim * sim, struct conf * conf){
    struct vector r_cm;
    double r_cm2;
    double max_dist;
    // Set the pairlist to zero
    //DEBUG_INIT("Gen Pairlist")
    long i, j;
    for(i = 0; i < topo->npart; i++){
        //DEBUG_INIT("%ld", i);
        sim->pairlist[i].num_pairs = 0;
    }
    long nj = topo->npart;
    long ni = nj - 1;
    for(i = 0; i < ni; i++){
        for(j = i + 1; j < nj; j++){
            /* separation in fractional (box-scaled) coordinates */
            r_cm.x = conf->particle[i].pos.x - conf->particle[j].pos.x;
            r_cm.y = conf->particle[i].pos.y - conf->particle[j].pos.y;
            r_cm.z = conf->particle[i].pos.z - conf->particle[j].pos.z;
            /* minimum image: the (long) cast truncates toward zero, so the
               sign-dependent +/-0.5 shift rounds to the nearest integer
               before the difference is scaled by the box length */
            if ( r_cm.x < 0 )
                r_cm.x = conf->box.x * (r_cm.x - (double)( (long)(r_cm.x-0.5) ) );
            else
                r_cm.x = conf->box.x * (r_cm.x - (double)( (long)(r_cm.x+0.5) ) );
            if ( r_cm.y < 0 )
                r_cm.y = conf->box.y * (r_cm.y - (double)( (long)(r_cm.y-0.5) ) );
            else
                r_cm.y = conf->box.y * (r_cm.y - (double)( (long)(r_cm.y+0.5) ) );
            if ( r_cm.z < 0 )
                r_cm.z = conf->box.z * (r_cm.z - (double)( (long)(r_cm.z-0.5) ) );
            else
                r_cm.z = conf->box.z * (r_cm.z - (double)( (long)(r_cm.z+0.5) ) );
            r_cm2 = DOT(r_cm,r_cm);
            /* skin: mean of the two particles' maximal displacements, scaled
               by the number of sweeps between updates, plus the interaction
               cutoff; compared squared to avoid a sqrt */
            max_dist = AVER(sim->trans[conf->particle[i].type].mx, \
                            sim->trans[conf->particle[j].type].mx);
            max_dist *= (1 + sim->pairlist_update) * 2;
            max_dist += topo->maxcut;
            max_dist *= max_dist; /* squared */
            if (r_cm2 <= max_dist){
                sim->pairlist[i].pairs[sim->pairlist[i].num_pairs++] = j;
                sim->pairlist[j].pairs[sim->pairlist[j].num_pairs++] = i;
            }
        }
    }
    ////Check for too many pairs
    //for(i = 0; i < topo->npart; i++){
    //    //if (sim->pairlist.list[i].num_pairs >= topo->npart)
    //    if (sim->pairlist[i].num_pairs >= topo->npart){
    //        fprintf(stderr, "ERROR: Too many pairs for particle %ld!!!\n", i);
    //        exit(1);
    //    }
    //}
}
/*.............................................................................*/
/**
 * Interface for the generation of the pairlist. Define other pairlist
 * algorithms above and dispatch to them here; currently only the simple
 * O(N^2) all-pairs scan is implemented.
 */
void gen_pairlist(struct topo * topo, struct sim * sim, struct conf * conf){
    gen_simple_pairlist(topo, sim, conf);
}
/*.............................................................................*/
/**
 * Print out the pairlist: one line per particle in the form
 * "index (count): partner partner ...".
 */
void print_pairlist(FILE * stream, struct sim * sim, struct topo * topo){
    long part, k;

    for (part = 0; part < topo->npart; part++){
        fprintf(stream, "%ld (%ld):", part, sim->pairlist[part].num_pairs);
        for (k = 0; k < sim->pairlist[part].num_pairs; k++)
            fprintf(stream, " %ld", sim->pairlist[part].pairs[k]);
        fprintf(stream, "\n");
    }
}
/*..........................................................................*/
/****************************************************************************/
/*                      Cluster statistics stuf                             */
/****************************************************************************/
/**
 * Determines whether two particles belong to the same cluster: either they
 * are direct bond neighbours in a chain, or their pair interaction energy is
 * attractive enough (below -0.10). Returns TRUE/FALSE.
 */
int same_cluster(struct topo * topo, struct conf * conf,
                 long fst, long snd, double (* intfce[MAXT][MAXT])(struct interacts *) ){
    /* if two particles are bonded they belong to the same cluster.
       NOTE(review): chainparam is indexed by particle[].chaint even for
       particles outside chains — presumably chaint then points at a valid
       sentinel entry; confirm. conlist[fst][0..1] are presumably the bonded
       neighbours along the chain. */
    if ( ((topo->chainparam[conf->particle[fst].chaint]).bond1c >= 0) ||
         ((topo->chainparam[conf->particle[fst].chaint]).bonddc >= 0) ){
        if ( (snd == topo->conlist[fst][1]) || (snd == topo->conlist[fst][0]) ) {
            return TRUE;
        }
    }
    if ( ((topo->chainparam[conf->particle[snd].chaint]).bond1c >= 0) ||
         ((topo->chainparam[conf->particle[snd].chaint]).bonddc >= 0) ){
        if ( (fst == topo->conlist[snd][1]) || (fst == topo->conlist[snd][0]) ) {
            return TRUE;
        }
    }
    /* alternative criterion (disabled): cluster made of particles closer
       than some distance */
    /* struct vector image(struct vector r1, struct vector r2, struct vector box);
       struct vector r_cm = image(conf->particle[fst].pos,
                                  conf->particle[snd].pos,
                                  conf->box);
       double dist2 = DOT(r_cm, r_cm);
     * TODO: Make it much more efficient => define cluster_dist!!! *
       if(dist2 > topo->ia_params[conf->particle[fst].type][conf->particle[snd].type].sigma * topo->ia_params[conf->particle[fst].type][conf->particle[snd].type].sigma*4.0){
           return FALSE;
       }
       else {
           return TRUE;
       }*/
    /* cluster is made of attractively interacting particles: pair energy
       below the -0.10 threshold counts as "in the same cluster" */
    double paire(long, long, double (* intfce[MAXT][MAXT])(struct interacts *),
                 struct topo * topo, struct conf * conf);
    if(paire(fst, snd, intfce, topo, conf) > -0.10 ){
        return FALSE;
    }
    else {
        return TRUE;
    }
}
/*............................................................................*/
/**
 * Generate the clusterlist: label propagation to a fixed point. Every
 * particle starts in its own cluster (label = own index); whenever two
 * connected particles carry different labels the larger label is replaced by
 * the smaller one and the scan restarts, until nothing changes. Afterwards
 * each cluster is labelled by the smallest particle index it contains.
 * Only spherocylinder particles (geotype < SP) take part. Returns 0.
 */
int gen_clusterlist(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)){
    int change = TRUE; /* does it still change? */
    //long neighbour;
    long i, j, fst, snd, tmp, minnumber, maxnumber;
    int same_cluster(struct topo * topo, struct conf * conf,
                     long fst, long snd, double (* intfce[MAXT][MAXT])(struct interacts *));
    // Set clusterindex to the corresponding index
    for( i = 0; i < topo->npart; i++){
        sim->clusterlist[i] = i;
    }
    // Start determining the cluster
    while(change){
        change = FALSE;
        for(i = 0; i < topo->npart; i++){
            /* Without a pairlist go over all pairs; with one, only over the
               recorded partners of particle i */
            maxnumber = topo->npart;
            minnumber = i ;
            if (sim->pairlist_update) {
                maxnumber = sim->pairlist[i].num_pairs;
                minnumber=0;
            }
            /* Go over pairs to see if they are in the cluster */
            for(j = minnumber; j < maxnumber; j++){
                fst = i;
                snd = j;
                if (sim->pairlist_update) {
                    snd = sim->pairlist[i].pairs[j];
                }
                /*do cluster analysis only for spherocylinders*/
                if ( (topo->ia_params[conf->particle[fst].type][conf->particle[snd].type].geotype[0] < SP) && \
                     (topo->ia_params[conf->particle[fst].type][conf->particle[snd].type].geotype[1] < SP) ) {
                    /* if they are close to each other */
                    if(same_cluster(topo, conf, fst, snd, intfce)){
                        /* order the pair so fst < snd before comparing labels */
                        if(fst > snd){
                            tmp = snd;
                            snd = fst;
                            fst = tmp;
                        }
                        if(sim->clusterlist[fst] < sim->clusterlist[snd]){
                            sim->clusterlist[snd] = sim->clusterlist[fst];
                            change = TRUE;
                            break;
                            /* => will eventually start the i loop from new */
                        }
                        if(sim->clusterlist[snd] < sim->clusterlist[fst]){
                            sim->clusterlist[fst] = sim->clusterlist[snd];
                            change = TRUE;
                            break;
                            /* => will eventually start the i loop from new */
                        }
                    }
                }
            }
            if(change){
                break;
            }
        }
    }
    return 0;
}
/*............................................................................*/
/**
* sort the clusterlist
*/
/**
 * Sort the particles into clusters from sim->clusterlist (as produced by
 * gen_clusterlist): build sim->clusters with the member lists, set
 * sim->num_cluster and sim->max_clust, and fill the cluster-size histogram
 * sim->clusterstat (clusterstat[k] = number of clusters of size k+1).
 * Returns 0.
 */
int sort_clusterlist(struct topo * topo, struct sim * sim){
    long cluster_indices[topo->npart];   /* distinct cluster labels
                                            (npart is a generous upper bound) */
    long num_cluster = 0;                /* number of clusters found */
    long i, j;
    /* How many clusters are there? gen_clusterlist labels each cluster with
       its smallest particle index, so distinct labels appear in strictly
       increasing order during this linear scan. */
    long max_index = -1;
    for(i = 0; i < topo->npart; i++){
        if(max_index < sim->clusterlist[i]){
            max_index = sim->clusterlist[i];
            cluster_indices[num_cluster++] = max_index;
        }
    }
    /* free the memory from the old clusters */
    if(sim->clusters){
        for(i = 0; i < sim->num_cluster; i++){
            if(sim->clusters[i].particles){
                free(sim->clusters[i].particles);
            }
        }
        free(sim->clusters);
    }
    /* Allocate memory for the clusters */
    sim->clusters = xmalloc(sizeof(struct cluster) * num_cluster);
    for(i = 0; i < num_cluster; i++){
        /* allocate maximal space for all the clusters */
        sim->clusters[i].particles = xmalloc(sizeof(long) * topo->npart);
        sim->clusters[i].npart = 0;
    }
    /* fill in the particles belonging to one cluster */
    for(i = 0; i < num_cluster; i++){
        for(j = 0; j < topo->npart; j++){
            if(sim->clusterlist[j] == cluster_indices[i]){
                sim->clusters[i].particles[sim->clusters[i].npart++] = j;
            }
        }
    }
    sim->num_cluster = num_cluster;
    /* Find the biggest size */
    sim->max_clust = 0;
    for(i = 0; i < num_cluster; i++){
        if(sim->clusters[i].npart > sim->max_clust){
            sim->max_clust = sim->clusters[i].npart;
        }
    }
    /* BUG FIX: the previous statistics array was never freed, leaking memory
       on every call (write_cluster calls this once per output sweep).
       Assumes sim->clusterstat starts out NULL, like sim->clusters above. */
    if(sim->clusterstat){
        free(sim->clusterstat);
    }
    sim->clusterstat = xmalloc(sizeof(long) * sim->max_clust);
    for(i = 0; i < sim->max_clust; i++){
        sim->clusterstat[i] = 0;
    }
    /* Do the statistics */
    for(i = 0; i < num_cluster; i++){
        sim->clusterstat[sim->clusters[i].npart - 1]++;
    }
    return 0;
}
/*............................................................................*/
/**
* calculate energies of clusters
* */
/**
 * Compute the pair interaction energy of every cluster: all unique particle
 * pairs (a,b) inside each cluster are evaluated with paire() and summed into
 * sim->clustersenergy[]. Returns 0.
 */
int calc_clusterenergies(struct topo * topo, struct sim * sim, struct conf * conf,
        double (* intfce[MAXT][MAXT])(struct interacts *)){
    double paire(long, long, double (* intfce[MAXT][MAXT])(struct interacts *),
            struct topo * topo, struct conf * conf);
    long ci, a, b;

    for (ci = 0; ci < sim->num_cluster; ci++) {
        sim->clustersenergy[ci] = 0.0;
        /* sum over unique pairs within cluster ci */
        for (a = 0; a < sim->clusters[ci].npart; a++)
            for (b = a + 1; b < sim->clusters[ci].npart; b++)
                sim->clustersenergy[ci] += paire(sim->clusters[ci].particles[a],
                        sim->clusters[ci].particles[b], intfce, topo, conf);
    }
    return 0;
}
/*............................................................................*/
/**
* print the clusterlist
* */
/**
 * Print, for every particle, its 1-based index, its 1-based cluster index
 * and its position. With decor set, a banner frames the output. Returns 0.
 */
int print_clusterlist(FILE * stream, BOOL decor,
        struct topo * topo, struct sim * sim, struct conf * conf){
    long p;

    if (decor) {
        fprintf(stream, "\n"
                "-----------------------------------------------------\n"
                " The Cluster List\n"
                " (Index starts with 1)\n"
                "-----------------------------------------------------\n");
    }
    for (p = 0; p < topo->npart; p++) {
        /* particle index, cluster label, then x/y/z */
        fprintf(stream, "%3ld %3ld %8.4lf %8.4lf %8.4lf", p + 1,
                sim->clusterlist[p] + 1,
                conf->particle[p].pos.x,
                conf->particle[p].pos.y,
                conf->particle[p].pos.z);
        fprintf(stream, "\n");
    }
    if (decor)
        fprintf(stream, "-----------------------------------------------------\n");
    fflush(stream);
    return 0;
}
/*............................................................................*/
/**
* print the clusters
* */
/**
 * Print every cluster: its 1-based index, its energy, and the 1-based
 * indices of its member particles. With decor set, a banner frames the
 * output. Returns 0.
 */
int print_clusters(FILE * stream, BOOL decor, struct sim * sim){
    long ci, m;

    if (decor) {
        fprintf(stream, "\n"
                "-----------------------------------------------------\n"
                " The Clusters\n"
                " (Index starts with 1)\n"
                "-----------------------------------------------------\n");
    }
    for (ci = 0; ci < sim->num_cluster; ci++) {
        fprintf(stream, "%3ld(%f):", ci + 1, sim->clustersenergy[ci]);
        for (m = 0; m < sim->clusters[ci].npart; m++)
            fprintf(stream, "%5ld", sim->clusters[ci].particles[m] + 1);
        fprintf(stream, "\n");
    }
    if (decor)
        fprintf(stream, "---------------------------------------------------\n");
    fflush(stream);
    return 0;
}
/*............................................................................*/
/**
* print a statistics for the clusters
*/
/**
 * Print the cluster-size distribution: one line per size (1-based) with
 * the number of clusters of that size. With decor set, a banner frames
 * the output. Returns 0.
 */
int print_clusterstat(FILE * stream, BOOL decor, struct sim * sim){
    long size;

    if (decor) {
        fprintf(stream, "\n"
                "-----------------------------------------------------\n"
                " Cluster Distribution\n"
                "-----------------------------------------------------\n");
    }
    for (size = 0; size < sim->max_clust; size++)
        fprintf(stream, "%5ld\t%5ld\n", size + 1, sim->clusterstat[size]);
    if (decor)
        fprintf(stream, "--------------------------------------------------\n");
    fflush(stream);
    return 0;
}
/*............................................................................*/
/**
* Alternative way of printing the cluster statistics: everything is on
* one line. First monomers, then dimers etc.
*/
/**
 * Alternative cluster-statistics output: one line per sweep, tab separated,
 * starting with monomers, then dimers, etc. Returns 0.
 */
int print_clstat_oneline(FILE * stream, long sweep, struct sim * sim){
    long size;

    fprintf(stream, "%ld: ", sweep);
    for (size = 0; size < sim->max_clust; size++)
        fprintf(stream, "%5ld\t", sim->clusterstat[size]);
    fprintf(stream, "\n");
    fflush(stream);
    return 0;
}
/**
* write out all the cluster stat in files, if file name is given
*/
/**
 * Regenerate the cluster decomposition (list, sort, energies) and write it
 * to the given files. Any of cl_stat / cl / cl_list may be NULL to skip
 * that output. When decor is FALSE a per-frame header line with the sweep
 * number is emitted before each section. Returns 0.
 */
int write_cluster(FILE * cl_stat, FILE * cl, FILE * cl_list, BOOL decor, long sweep,
        struct sim * sim, struct topo * topo, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)){
    int gen_clusterlist(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *));
    int sort_clusterlist(struct topo * topo, struct sim * sim);
    int print_clusters(FILE * stream, BOOL decor, struct sim * sim);
    int print_clusterstat(FILE * stream, BOOL decor, struct sim * sim);
    int print_clusterlist(FILE * stream, BOOL decor,
            struct topo * topo, struct sim * sim, struct conf * conf);
    int calc_clusterenergies(struct topo * topo, struct sim * sim, struct conf * conf,
            double (* intfce[MAXT][MAXT])(struct interacts *));
    gen_clusterlist(topo, sim, conf, intfce);
    sort_clusterlist(topo, sim);
    calc_clusterenergies(topo, sim, conf, intfce);
    if(cl_stat){
        if(decor == FALSE){
            // if no decor, this means usually into a file. Hence print info
            // about number of line per frame
            fprintf(cl_stat, "Sweep: %ld | Maximal size: %ld\n",
                    sweep, sim->max_clust);
        }
        print_clusterstat(cl_stat, decor, sim);
        /*
        print_clstat_oneline(cl_stat, sweep, sim);
        */
    }
    if(cl){
        if(decor == FALSE){
            fprintf(cl, "Sweep: %ld | Number of clusters: %ld\n",
                    sweep, sim->num_cluster);
        }
        print_clusters(cl, decor, sim);
    }
    if(cl_list){
        if(decor == FALSE){
            fprintf(cl_list, "Sweep: %ld | Number of particles: %ld\n",
                    sweep, topo->npart);
        }
        /* BUG FIX: the cluster list was previously written to 'cl'
           instead of 'cl_list' */
        print_clusterlist(cl_list, decor, topo, sim, conf);
    }
    return 0;
}
/*............................................................................*/
/****************************************************************************/
/* Wang-Landau stuff */
/****************************************************************************/
/*
Initiate Wang-Landau calculation.
*/
/**
 * Initiate a Wang-Landau calculation from 'filename'. The file holds alpha
 * on the first non-comment line, then one row per bin: either 3 columns
 * (order parameter, weight, histogram) for a 1D scheme or 4 columns
 * (two order parameters, weight, histogram) for a 2D scheme.
 * Fills wl->alpha, wl->minorder[], wl->dorder[], wl->length[], wl->weights
 * and wl->hist. Returns 0 on success, 1 on error (after wlend cleanup).
 */
int wlinit(struct wls *wl, char filename[30])
{
    long i,length,fields=0;
    double field[5];
    FILE *infile;
    char line[STRLEN];
    int wlend(struct wls *);
    void trim(char *);
    void strip_comment(char *);
    infile = fopen(filename, "r");
    if (infile == NULL) {
        fprintf (stderr, "\nERROR: Could not open %s file.\n\n",filename);
        return 1;
    }
    /* first pass: count non-empty, non-comment lines */
    length=0;
    while (fgets2(line,STRLEN-2,infile) != NULL) {
        strip_comment (line);
        trim (line);
        /* if there is something left... */
        if ((int)strlen(line) > 0) {
            length++;
        }
    }
    length--; /*there is alpha at the first line*/
    (*wl).weights = malloc( sizeof(double) * length );
    (*wl).hist = malloc( sizeof(long) * length );
    /* BUG FIX: length[0] is counted up below, so it must start from zero
       (length[1] already was reset); otherwise a re-initialisation would
       double-count the first dimension */
    (*wl).length[0] = 0;
    (*wl).length[1] = 0;
    (*wl).dorder[1] = 0;
    /* second pass: parse alpha, then the table rows */
    fseek(infile,0,SEEK_SET);
    i=0;
    while (fgets2(line,STRLEN-2,infile) != NULL) {
        strip_comment (line);
        trim (line);
        /* if there is something left... */
        if ((int)strlen(line) > 0) {
            if (i == 0) {
                /* the very first data line carries alpha */
                if (sscanf(line, "%le",&(*wl).alpha)!= 1) {
                    fprintf (stderr, "ERROR: Could not read alpha at the begining.\n\n");
                    wlend(wl);
                    return 1;
                } else i++;
            } else {
                fields = sscanf(line, "%le %le %le %le",&field[0],&field[1],&field[2],&field[3]);
                if ( fields == 3 ) {
                    /* 1D row: order parameter, weight, histogram */
                    if (i==1)
                        (*wl).minorder[0] = field[0];
                    (*wl).weights[i-1] = field[1];
                    (*wl).hist[i-1] = field[2];
                    (*wl).length[0]++;
                    i++;
                } else if (fields == 4 ) {
                    /* 2D row: two order parameters, weight, histogram.
                       length[0] counts rows sharing the first second-dim value */
                    if (i==1) {
                        (*wl).minorder[0] = field[0];
                        (*wl).minorder[1] = field[1];
                    }
                    if ( (*wl).minorder[1] == field[1] )
                        (*wl).length[0]++;
                    (*wl).weights[i-1] = field[2];
                    (*wl).hist[i-1] = field[3];
                    i++;
                } else {
                    fprintf (stderr, "ERROR: Could not read order parameter at line %ld.\n\n", i);
                    wlend(wl);
                    return 1;
                }
            }
        }
    }
    if (fields == 4 ) {
        /* 2D scheme: derive second dimension length and bin width */
        (*wl).length[1] = length / (*wl).length[0];
        (*wl).dorder[1] = (field[1] - (*wl).minorder[1])/((*wl).length[1]-1);
    }
    (*wl).dorder[0] = (field[0] - (*wl).minorder[0])/((*wl).length[0]-1);
    /* sanity checks: the row count must match the detected grid size */
    if ( ( (i-1) != (*wl).length[0] ) && (fields==3) ) {
        fprintf (stderr, "ERROR: In reading order parameters length %ld does not fit number of lines %ld.\n\n", (*wl).length[0],i-1);
        wlend(wl);
        return 1;
    }
    if ( ( (i-1) != (*wl).length[0]*(*wl).length[1] ) && (fields==4) ) {
        fprintf (stderr, "ERROR: In reading order parameters lengths %ld %ld does not fit number of lines %ld.\n\n", (*wl).length[0],(*wl).length[1],i-1);
        wlend(wl);
        return 1;
    }
    /*DEBUG*/
    printf("Wang-Landau method init:\n");
    printf("alpha: %f\n",(*wl).alpha);
    /* (a full dump of the weight/histogram grid used to live here;
       see wlwrite() for the equivalent output) */
    fclose(infile);
    fflush(stdout);
    /**/
    return 0;
}
/**
 * Write the current Wang-Landau state to 'filename': alpha on the first
 * line, then one row per bin (order parameter(s), weight, histogram).
 * Returns 0 on success, 1 if the file cannot be opened.
 */
int wlwrite(struct wls *wl, char filename[30])
{
    FILE *outfile;
    long ix, iy;

    outfile = fopen(filename, "w");
    if (outfile == NULL) {
        fprintf (stderr, "\nERROR: Could not open %s file.\n\n",filename);
        return 1;
    }
    fprintf (outfile, "%15.8le \n", wl->alpha);
    if (wl->length[1] == 0) {
        /* one-dimensional scheme */
        for (ix = 0; ix < wl->length[0]; ix++)
            fprintf (outfile, "%15.8le %15.8le %ld \n",
                    wl->minorder[0] + ix * wl->dorder[0],
                    wl->weights[ix], wl->hist[ix]);
    } else {
        /* two-dimensional scheme; first dimension varies fastest */
        for (iy = 0; iy < wl->length[1]; iy++) {
            for (ix = 0; ix < wl->length[0]; ix++)
                fprintf (outfile, "%15.8le %15.8le %15.8le %ld \n",
                        wl->minorder[0] + ix * wl->dorder[0],
                        wl->minorder[1] + iy * wl->dorder[1],
                        wl->weights[ix + wl->length[0]*iy],
                        wl->hist[ix + wl->length[0]*iy]);
            fprintf (outfile, " \n");
        }
    }
    fflush(outfile);
    fclose(outfile);
    return 0;
}
/**
 * Release the Wang-Landau weight and histogram arrays.
 * The pointers are reset to NULL so that a repeated call (wlinit's error
 * paths call wlend, and the caller may call it again at shutdown) cannot
 * double-free. Returns 0.
 */
int wlend(struct wls *wl)
{
    free(wl->weights);
    free(wl->hist);
    wl->weights = NULL;
    wl->hist = NULL;
    return 0;
}
/**
 * Bookkeeping for a rejected Monte Carlo move under Wang-Landau sampling:
 * penalize the retained (current) state's weight, bump its histogram, and
 * restore the order-parameter caches (mesh, radius-hole array, contact
 * count) that the trial move modified. 'oldlength' is the radius-hole
 * array length from before the trial move.
 */
void wlreject(struct sim *sim, long oldlength)
{
    int mesh_cpy(struct meshs *, struct meshs *);
    int longarray_cpy (long **target, long **source,long,long);
    if ( sim->wlm[0] > 0 ) {
        /* we stay in the current state: decrease its weight, count the visit */
        sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]] -= sim->wl.alpha;
        sim->wl.hist[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]++;
        /* wlm code 2 uses the mesh order parameter: restore the pre-move mesh */
        if ( (sim->wlm[0] == 2) || (sim->wlm[1] == 2) )
            mesh_cpy(&sim->wl.mesh,&sim->wl.origmesh);
        /* wlm codes 5/6 use the radius-hole histogram: restore the old copy */
        if ( (sim->wlm[0] == 5) || (sim->wlm[1] == 5)||(sim->wlm[0] == 6) || (sim->wlm[1] == 6) ) {
            longarray_cpy(&sim->wl.radiushole,&sim->wl.radiusholeold,sim->wl.radiusholemax,oldlength);
            sim->wl.radiusholemax = oldlength;
        }
        sim->wl.partincontact = sim->wl.partincontactold;
    }
}
/**
 * Bookkeeping for an accepted Monte Carlo move under Wang-Landau sampling:
 * adopt the new order-parameter bin as current, then penalize its weight
 * and bump its histogram.
 */
void wlaccept(int wlm, struct wls *wl)
{
    int k;
    long bin;

    if (wlm > 0) {
        for (k = 0; k < 2; k++)
            wl->currorder[k] = wl->neworder[k];
        bin = wl->currorder[0] + wl->currorder[1] * wl->length[0];
        wl->weights[bin] -= wl->alpha;
        wl->hist[bin]++;
    }
}
/*..............................................................................*/
/*........................NEMATIC ORDER.........................................*/
/*..............................................................................*/
/*
Calculates the instantaneous value of the nematic order parameter for the
specified configuration. The nematic director is determined by diagonalisation
of the tensor order parameter Q (see Allen & Tildesley p305). The order
parameter is the corresponding eigenvalue. However, it is equivalent to take
minus two times the middle eigenvalue (see Eppenga & Frenkel, Mol Phys vol.
52, p.1303-1334 [1984]), and this is more reliable for comparing the isotropic
phase. This is the approach taken in this implementation.
Routines from Numerical Recipes are used to perform the diagonalisation. Note
that these routines expect an n*n matrix to be stored in elements [1...n][1...n],
rather than [0...n-1][0...n-1], so the arrays must be declared with one more
element in each dimension.
*/
/**
 * Instantaneous nematic order parameter: build the tensor order parameter
 * Q from the particle direction vectors, diagonalise it with the Numerical
 * Recipes routines tred2/tqli (which use 1-based [1..3] indexing -- hence
 * the 4x4/4-element arrays), and return minus two times the middle
 * eigenvalue (Eppenga & Frenkel), which equals the usual order parameter
 * but behaves better in the isotropic phase.
 */
double nematic(long npart, struct particles *p)
{
    double q[4][4] = {{0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0},
            {0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0}};
    double d[4], e[4];   /* d: eigenvalues; e: off-diagonal scratch for tred2/tqli */
    long i;
    void tred2(double [4][4], double [4], double [4]);
    void tqli(double [4], double [4]);
    /* accumulate the outer products dir (x) dir */
    for (i=0; i<npart; i++) {
        q[1][1] += p[i].dir.x * p[i].dir.x;
        q[1][2] += p[i].dir.x * p[i].dir.y;
        q[1][3] += p[i].dir.x * p[i].dir.z;
        q[2][1] += p[i].dir.y * p[i].dir.x;
        q[2][2] += p[i].dir.y * p[i].dir.y;
        q[2][3] += p[i].dir.y * p[i].dir.z;
        q[3][1] += p[i].dir.z * p[i].dir.x;
        q[3][2] += p[i].dir.z * p[i].dir.y;
        q[3][3] += p[i].dir.z * p[i].dir.z;
    }
    /* Q = (3 <dir dir> - I) / 2, averaged over the particles */
    q[1][1] = (q[1][1] * 3.0 / npart - 1.0) / 2.0;
    q[1][2] = (q[1][2] * 3.0 / npart ) / 2.0;
    q[1][3] = (q[1][3] * 3.0 / npart ) / 2.0;
    q[2][1] = (q[2][1] * 3.0 / npart ) / 2.0;
    q[2][2] = (q[2][2] * 3.0 / npart - 1.0) / 2.0;
    q[2][3] = (q[2][3] * 3.0 / npart ) / 2.0;
    q[3][1] = (q[3][1] * 3.0 / npart ) / 2.0;
    q[3][2] = (q[3][2] * 3.0 / npart ) / 2.0;
    q[3][3] = (q[3][3] * 3.0 / npart - 1.0) / 2.0;
    tred2 (q, d, e);
    tqli (d, e);
    /* Sort eigenvalues d[1..3] ascending; d[0] serves as swap scratch */
    if (d[1] > d[2]) { d[0]=d[1]; d[1]=d[2]; d[2]=d[0]; }
    if (d[2] > d[3]) { d[0]=d[2]; d[2]=d[3]; d[3]=d[0]; }
    if (d[1] > d[2]) { d[0]=d[1]; d[1]=d[2]; d[2]=d[0]; }
    /* middle eigenvalue carries the order parameter */
    return -2.0*d[2];
}
/*..............................................................................*/
/*
Returns the coefficient of the Fourier series term with period boxlength/n
in the z direction. The coefficients of the sine and cosine terms are added
in quadrature and returned, making the result independent of phase shifts in
the z direction. A significantly non-zero value indicates layering of the
particles in the z direction with periodicity boxlength/n.
*/
/**
 * Smectic order parameter: coefficient of the Fourier mode with period
 * boxlength/n in z. The sine and cosine coefficients are added in
 * quadrature, making the result independent of phase shifts in z; a
 * significantly non-zero value indicates layering with that periodicity.
 */
double smectic(long npart, struct particles *p, long n)
{
    double cos_sum = 0.0, sin_sum = 0.0;
    double omega = 8.0 * n * atan(1.0);   /* = 2*pi*n */
    long k;

    for (k = 0; k < npart; k++) {
        cos_sum += cos(omega * p[k].pos.z);
        sin_sum += sin(omega * p[k].pos.z);
    }
    cos_sum /= (double)npart;
    sin_sum /= (double)npart;
    return sqrt(cos_sum*cos_sum + sin_sum*sin_sum);
}
/*..............................................................................*/
/*........................Z ORDER PARAMETER.....................................*/
/**
 * Wang-Landau bin of the first particle's z position, measured relative to
 * the system centre of mass and scaled by the box height.
 * ceil() is used instead of lround() for compatibility with older C
 * compilers.
 */
long z_order(struct wls *wl, struct conf * conf, int wli)
{
    double zrel = (conf->particle[0].pos.z - conf->syscm.z) * conf->box.z;
    return (long) ceil((zrel - wl->minorder[wli]) / wl->dorder[wli]);
}
/*..............................................................................*/
/*........................2 particles distance.....................................*/
/**
 * Wang-Landau bin of the in-plane (xy) distance between the first two
 * particles, using the nearest periodic image and box-scaled coordinates.
 */
long twopartdist(struct wls *wl, struct conf * conf, int wli)
{
    struct vector d;

    d.x = conf->particle[0].pos.x - conf->particle[1].pos.x;
    d.y = conf->particle[0].pos.y - conf->particle[1].pos.y;
    d.z = conf->particle[0].pos.z - conf->particle[1].pos.z;
    /* wrap each component to the nearest image (round-half-away-from-zero
       via the +/-0.5 truncation) and scale by the box */
    d.x = conf->box.x * (d.x - (double)((long)(d.x + ((d.x < 0) ? -0.5 : 0.5))));
    d.y = conf->box.y * (d.y - (double)((long)(d.y + ((d.y < 0) ? -0.5 : 0.5))));
    d.z = conf->box.z * (d.z - (double)((long)(d.z + ((d.z < 0) ? -0.5 : 0.5))));
    return (long) ceil((sqrt(d.x*d.x + d.y*d.y) - wl->minorder[wli]) / wl->dorder[wli]);
}
/*..............................................................................*/
/*........................alignment ORDER PARAMETER.....................................*/
double alignment_order(struct conf * conf, struct topo * topo)
{
double sumdot=0;
long i,j;
struct vector r_cm;
struct vector image(struct vector, struct vector, struct vector);
for (i = 0; i < topo->npart - 1; i++) {
for (j = i + 1; j < topo->npart; j++) {
r_cm = image(conf->particle[i].pos, conf->particle[j].pos, conf->box);
if ( DOT(r_cm,r_cm) < 1.5*1.5 ) {
sumdot+= DOT(conf->particle[i].dir,conf->particle[j].dir);
}
}
}
return sumdot;
}
/*..............................................................................*/
/*........................HOLE IN MESH-MEMBRANE ORDER PARAM.....................*/
/* return change in order parameter when one particle moves*/
/**
 * Hole order parameter after a single-particle move. If the move provably
 * cannot change the hole structure (wrong particle type, same mesh bin, or
 * the add/remove updates show no cell changed occupation state) the current
 * order sim->wl.currorder[wli] is returned unchanged; otherwise the mesh is
 * rebuilt and the largest hole recounted (shifted by minorder).
 */
long meshorder_moveone(struct vector oldpos, struct vector newpos, struct meshs *mesh,
        long npart, long target, struct conf * conf, struct sim * sim, int wli)
{
    int change;
    int nx,ny,ox,oy; /* position in mesh */
    double resid;
    void mesh_fill(struct meshs *, long , struct particles *, struct sim * sim);
    int mesh_findholes(struct meshs *);
    int mesh_addpart(double, double, int **, int [2]);
    int mesh_removepart(double, double, int **, int [2]);
    /* only particles of the tracked type occupy the mesh */
    if ( conf->particle[target].type != sim->wl.wlmtype )
        return sim->wl.currorder[wli];
    /* mesh bins of the new and old positions (INBOX presumably maps the
       coordinate into [0,1), storing the residue in resid -- confirm
       against the macro definition) */
    nx = (int) (INBOX(newpos.x,resid) * (*mesh).dim[0]);
    ny = (int) (INBOX(newpos.y,resid) * (*mesh).dim[1]);
    ox = (int) (INBOX(oldpos.x,resid) * (*mesh).dim[0]);
    oy = (int) (INBOX(oldpos.y,resid) * (*mesh).dim[1]);
    if ( (nx == ox) && (ny == oy) ) return sim->wl.currorder[wli]; /* particle stayed in the same mesh bin*/
    /* incrementally add at the new spot, then remove at the old one; a zero
       return from either means some cell changed free/occupied state */
    change = mesh_addpart(newpos.x,newpos.y,&(*mesh).data,(*mesh).dim);
    if (change) {
        change = mesh_removepart(oldpos.x,oldpos.y,&(*mesh).data,(*mesh).dim);
    }
    if ( !change ) {
        /* hole structure may have changed: fill the mesh with particles
           from scratch and recount the largest hole */
        mesh_fill(mesh,npart,conf->particle, sim);
        return (long) (mesh_findholes(mesh) - sim->wl.minorder[wli]);
    }
    return sim->wl.currorder[wli];
}
/* return change in order parameter when chain moves*/
/**
 * Hole order parameter after a whole-chain move. The chain is an array of
 * particle indices terminated by a negative value; chorig holds the
 * pre-move particle records. All new positions are added to the mesh and
 * all old ones removed; if any update reports a changed occupation state
 * (zero return) the mesh is rebuilt and the largest hole recounted,
 * otherwise the current order is returned unchanged.
 */
long meshorder_movechain(long chain[MAXN], struct meshs *mesh,
        long npart, struct conf * conf, struct sim * sim,struct particles chorig[MAXCHL], int wli)
{
    long i,current;
    int change;
    void mesh_fill(struct meshs *, long , struct particles *, struct sim * sim);
    int mesh_findholes(struct meshs *);
    int mesh_addpart(double, double, int **, int [2]);
    int mesh_removepart(double, double, int **, int [2]);
    change= 1;
    i = 0;
    current = chain[0];
    /* add all moved chain members at their new positions; stop early once
       any addition reports a changed occupation state */
    while ( (current >=0 ) && (change) ) {
        if ( conf->particle[current].type == sim->wl.wlmtype )
            change = mesh_addpart(conf->particle[current].pos.x, conf->particle[current].pos.y, &(*mesh).data, (*mesh).dim);
        i++;
        current = chain[i];
    }
    i = 0;
    current = chain[0];
    /* remove the same members at their original (pre-move) positions */
    while ( (current >=0 ) && (change) ) {
        if ( conf->particle[current].type == sim->wl.wlmtype )
            change = mesh_removepart(chorig[i].pos.x, chorig[i].pos.y, &(*mesh).data, (*mesh).dim);
        i++;
        current = chain[i];
    }
    if ( !change ) {
        /* fill the mesh with particles from scratch and recount */
        mesh_fill(mesh,npart,conf->particle, sim);
        return (long) (mesh_findholes(mesh) - sim->wl.minorder[wli]);
    }
    return sim->wl.currorder[wli];
}
/* filling the mesh */
void mesh_fill(struct meshs *mesh, long npart, struct particles *particle, struct sim * sim)
{
long i;
int mesh_addpart(double posx, double posy, int **mesh, int dim[2]);
for ( i=0; i<((*mesh).dim[0] * (*mesh).dim[1]); i++) {
(*mesh).data[i] = 0;
}
for (i=0; i<npart; i++) {
/*calculate position of particle on mesh and add it to all where it belongs */
if (particle[i].type == sim->wl.wlmtype)
mesh_addpart(particle[i].pos.x,particle[i].pos.y, &(*mesh).data, (*mesh).dim);
}
}
/* add particle on coordinates posx posy to mesh return 0 if it was placed on empty spot*/
/**
 * Add a particle at (posx, posy) to the mesh by decrementing the counter
 * of its cell and the 8 surrounding cells (periodic wrap). Cells with a
 * count >= 0 are free; occupied cells go negative.
 * Returns 0 if any touched cell was free before the update (the particle
 * was placed on an empty spot), 1 if the whole neighbourhood was already
 * occupied.
 * NOTE(review): 'resid' is handed to INBOX uninitialized -- presumably the
 * macro writes it before reading; confirm against its definition.
 */
int mesh_addpart(double posx, double posy, int **mesh, int dim[2])
{
    int i, square[9], onhole;
    double resid;
    void mesh_square(int , int , int [2], int (*)[9]);
    onhole = 1;
    /* indices of the 3x3 neighbourhood around the particle's cell */
    mesh_square( (int) (INBOX(posx,resid) * dim[0]), (int) (INBOX(posy,resid) * dim[1]) , dim, &square);
    for(i=0;i<9;i++) {
        /* diagnostic only: the out-of-range write below is NOT suppressed */
        if ( (square[i] >= dim[0]*dim[1])||(square[i] <0) ) {
            printf ("Error: trying to write to %d\n",square[i]);
            printf ("%d %d and %d\n", (int) (INBOX(posx,resid) * dim[0]), (int) (INBOX(posy,resid) * dim[1]),i );
            fflush(stdout);
        }
        if ( ((*mesh)[ square[i] ]) >= 0 ) onhole = 0;
        (*mesh)[ square[i] ]--;
    }
    return onhole;
}
/* remove particle on coordinates posx posy from mesh and return 0 if there is a empty spot now*/
/**
 * Remove a particle at (posx, posy) from the mesh by incrementing the
 * counter of its cell and the 8 surrounding cells (periodic wrap).
 * Returns 0 as soon as some cell becomes free (count reaches 0),
 * 1 if every touched cell stays occupied.
 */
int mesh_removepart(double posx, double posy, int **mesh, int dim[2])
{
    void mesh_square(int , int , int [2], int (*)[9]);
    int cell[9];
    int k;
    double resid;

    mesh_square((int) (INBOX(posx,resid) * dim[0]), (int) (INBOX(posy,resid) * dim[1]), dim, &cell);
    for (k = 0; k < 9; k++) {
        ++(*mesh)[cell[k]];
        if ((*mesh)[cell[k]] == 0)
            return 0;
    }
    return 1;
}
/**
 * Fill square[0..8] with the mesh indices of the 3x3 neighbourhood of cell
 * (x, y), wrapping periodically at the mesh edges.
 * Order: centre row (centre, left, right), then the row above (y-1) and the
 * row below (y+1), each again as (centre, left, right).
 */
void mesh_square(int x, int y, int dim[2], int (*square)[9])
{
    int left  = (x - 1 < 0)       ? dim[0] - 1 : x - 1;
    int right = (x + 1 == dim[0]) ? 0          : x + 1;
    int above = (y - 1 < 0)       ? dim[1] - 1 : y - 1;
    int below = (y + 1 == dim[1]) ? 0          : y + 1;

    (*square)[0] = x     + dim[0] * y;
    (*square)[1] = left  + dim[0] * y;
    (*square)[2] = right + dim[0] * y;
    (*square)[3] = x     + dim[0] * above;
    (*square)[4] = left  + dim[0] * above;
    (*square)[5] = right + dim[0] * above;
    (*square)[6] = x     + dim[0] * below;
    (*square)[7] = left  + dim[0] * below;
    (*square)[8] = right + dim[0] * below;
}
/**
 * Fill neighbors[0..3] with the mesh indices of the four von Neumann
 * neighbours (left, right, above, below) of linear cell index 'pos',
 * wrapping periodically at the mesh edges.
 */
void mesh_neighbors(int pos, int dim[2], int neighbors[4])
{
    int x = pos % dim[0];
    int y = pos / dim[0];
    int left  = (x - 1 < 0)       ? dim[0] - 1 : x - 1;
    int right = (x + 1 == dim[0]) ? 0          : x + 1;
    int above = (y - 1 < 0)       ? dim[1] - 1 : y - 1;
    int below = (y + 1 == dim[1]) ? 0          : y + 1;

    neighbors[0] = left  + dim[0] * y;
    neighbors[1] = right + dim[0] * y;
    neighbors[2] = x + dim[0] * above;
    neighbors[3] = x + dim[0] * below;
}
/* returns the number of holes and a list of mesh points belonging to each of them */
/**
 * Flood-fill over free mesh cells: label each connected region ("hole") of
 * free cells -- occupied cells carry negative counts, free cells are reset
 * to 0 and then receive a positive cluster id -- and return the size of the
 * largest one. The scratch list (*mesh).tmp is used as a breadth-first
 * work queue.
 */
int mesh_findholes(struct meshs *mesh)
{
    int i,j, k, n, size, li, maxsize;
    int neighbors[4];
    void mesh_neighbors(int, int [2], int [4]);
    n=0;
    maxsize = 0;
    /* reset scratch; positive labels from a previous run count as free again */
    for (i=0;i<((*mesh).dim[0] * (*mesh).dim[1]);i++) {
        (*mesh).tmp[i] = 0;
        if ( (*mesh).data[i] > 0 ) (*mesh).data[i] = 0;
    }
    i=0;
    // go through all mesh points
    while ( i < ((*mesh).dim[0] * (*mesh).dim[1]) ) {
        // test if mesh point is occupied
        if ( (*mesh).data[i] != 0 ) { i++; }
        else {
            // mesh point is free, create a new cluster
            n++;
            (*mesh).data[i] = n;
            // start new cluster, put mesh point as first element, and set list pointer on first element
            //DEBUG if (n >= mesh.dim[0]*mesh.dim[1]) printf ("Error: trying to write to sizes position %d\n",n);
            size = 1;
            (*mesh).tmp[0] = i;
            li = 0;
            // go through all elements of the cluster (breadth-first expansion)
            while ( li < size ) {
                //go through all neighbors
                j = (*mesh).tmp[li];
                mesh_neighbors(j, (*mesh).dim, neighbors);
                for ( k=0; k<4; k++ ) {
                    // test if status is free and append it to the cluster
                    if ( (*mesh).data[ neighbors[k] ] == 0 ) {
                        (*mesh).data[ neighbors[k] ] = n;
                        // append mesh point as element in the list
                        (*mesh).tmp[size] = neighbors[k];
                        size++;
                    }
                    /* a neighbour labelled by an *earlier* cluster should be
                       impossible -- it would mean two labels for one region */
                    if ( (*mesh).data[ neighbors[k] ] > 0 && (*mesh).data[ neighbors[k] ]<n ) {
                        fprintf(stderr,"Error: Mesh cluster out of range, propably going infinite through pbc.");
                        fflush(stderr);
                    }
                }
                li++;
            }
            if (size > maxsize) maxsize = size;
        }
    }
    return maxsize;
}
/**
 * (Re)initialise the mesh for a cell size of 'meshsize': size the grid to
 * the box, reallocate the data and scratch buffers, fill the mesh with the
 * current particle positions and return the largest hole size.
 */
int mesh_init(struct meshs *mesh, double meshsize, long npart, struct conf * conf, struct sim * sim)
{
    void mesh_fill(struct meshs *, long , struct particles *, struct sim * sim);
    int mesh_findholes(struct meshs *);
    int cells;

    /* grid resolution derived from the box dimensions */
    mesh->dim[0] = (int)(conf->box.x/meshsize);
    mesh->dim[1] = (int)(conf->box.y/meshsize);
    if (mesh->data != NULL) free(mesh->data);
    if (mesh->tmp != NULL) free(mesh->tmp);
    cells = mesh->dim[0] * mesh->dim[1];
    mesh->data = malloc( sizeof(int)* (cells));
    mesh->tmp = malloc( sizeof(int)* (cells+1));
    /* fill the mesh with particles, then run the hole-cluster search */
    mesh_fill(mesh, npart, conf->particle, sim);
    return mesh_findholes(mesh);
}
/**
 * Debug dump of the mesh occupation to stdout, one grid row per line,
 * followed by the current largest hole size.
 */
void mesh_print (struct meshs *mesh)
{
    int mesh_findholes(struct meshs *);
    int cell;

    printf("mesh:\n");
    for (cell = 0; cell < mesh->dim[0] * mesh->dim[1]; cell++) {
        printf("%d ", mesh->data[cell]);
        if (((cell + 1) % mesh->dim[0]) == 0)
            printf("\n");
    }
    printf("hole %d:\n", mesh_findholes(mesh));
    printf("\n");
}
/**
 * Copy mesh 'source' into 'target', reusing target's buffers when the
 * dimensions match and reallocating otherwise; the scratch buffer 'tmp'
 * is grown when the source mesh is larger. Returns 0.
 * NOTE(review): if target->data is NULL while target->tmp is non-NULL but
 * smaller than the source needs, tmp is not regrown here -- presumably
 * that state cannot occur; confirm against the allocation sites.
 */
int mesh_cpy (struct meshs *target, struct meshs *source)
{
    if ( (*target).data != NULL) {
        if ( ((*target).dim[0] == (*source).dim[0]) && ((*target).dim[1] == (*source).dim[1]) ) {
            /* same geometry: plain copy into the existing buffer */
            memcpy((*target).data,(*source).data, sizeof(int)* ((*target).dim[0] * (*target).dim[1]) );
            return 0;
        } else {
            /* geometry changed: drop the data buffer, and grow the scratch
               buffer if the source grid has more cells */
            free ((*target).data);
            if ( (*source).dim[0] * (*source).dim[1] > (*target).dim[0] * (*target).dim[1] ) {
                if ((*target).tmp != NULL ) free ((*target).tmp);
                (*target).tmp = malloc( sizeof(int)* ((*source).dim[0] * (*source).dim[1] + 1));
            }
        }
    }
    /* (re)allocate to the source geometry and copy */
    (*target).dim[0] = (*source).dim[0];
    (*target).dim[1] = (*source).dim[1];
    (*target).data = malloc( sizeof(int)* ((*target).dim[0] * (*target).dim[1]));
    if ((*target).tmp == NULL ) (*target).tmp = malloc( sizeof(int)* ((*source).dim[0] * (*source).dim[1] + 1));
    memcpy((*target).data,(*source).data, sizeof(int)* ((*target).dim[0] * (*target).dim[1]) );
    return 0;
}
/**
 * Free the mesh buffers and reset the pointers to NULL.
 * mesh_init() and mesh_cpy() test these pointers against NULL before
 * freeing, so leaving dangling values here would cause a double free if
 * the mesh were reused after mesh_end. Returns 0.
 */
int mesh_end(struct meshs *mesh)
{
    /* free allocated memory */
    if (mesh->data != NULL) {
        free(mesh->data);
        mesh->data = NULL;
    }
    if (mesh->tmp != NULL) {
        free(mesh->tmp);
        mesh->tmp = NULL;
    }
    return 0;
}
/*..............................................................................*/
/*........................RADIUS HOLE IN CENTER MEMBRANE ORDER PARAM............*/
/*return current bin of free radius*/
/**
 * Current bin of the free radius: scan the radial occupation histogram for
 * the first bin that starts a run of four occupied bins and return the bin
 * just before it; -100 signals that no such run exists.
 */
long radiushole_order(struct sim * sim)
{
    long bin;

    for (bin = 0; bin < sim->wl.radiusholemax - 3; bin++) {
        if ((sim->wl.radiushole[bin] > 0) && (sim->wl.radiushole[bin+1] > 0)
                && (sim->wl.radiushole[bin+2] > 0) && (sim->wl.radiushole[bin+3] > 0))
            return bin - 1;
    }
    return -100;   /* sentinel: no occupied run found */
}
/*return order of given radius */
/**
 * Map a radial distance onto its Wang-Landau bin index for order
 * parameter wli.
 */
long radiushole_position(double radius, struct sim * sim, int wli)
{
    double offset = radius - sim->wl.minorder[wli];
    return (long) ceil(offset / sim->wl.dorder[wli]);
}
/* return change in order parameter when one particle moves*/
/**
 * Radius-hole order parameter after a single-particle move: update the
 * radial occupation histogram incrementally (add the new position if it is
 * above the reference plane, remove the old one likewise) and recompute the
 * order only when a bin's emptiness could have changed. Returns the current
 * order when nothing relevant changed, -100 when the new radial bin is
 * invalid, otherwise the recomputed order.
 */
long radiusholeorder_moveone(struct vector *oldpos, struct conf *conf, struct sim * sim, long target,int wli, struct vector *position)
{
    long nr,or; /* position in radiushole */
    double rx,ry,z;
    BOOL oz,nz;
    long radiushole_position(double radius, struct sim * sim,int);
    long radiushole_order(struct sim *sim);
    double anint(double);
    void radiushole_print (long *radiushole, long length);
    /* only particles of the tracked type contribute */
    if ( conf->particle[target].type != sim->wl.wlmtype )
        return sim->wl.currorder[wli];
    z=conf->particle[target].pos.z - position->z; /*if above position*/
    if (z-anint(z) < 0) nz = FALSE;
    else nz=TRUE;
    z=oldpos->z - position->z; /*if above position*/
    if (z-anint(z) < 0) oz = FALSE;
    else oz=TRUE;
    /* neither old nor new position is above the plane: histogram untouched */
    if ( !(nz) && !(oz) )
        return sim->wl.currorder[wli];
    /* radial bin of the new position (distance from the z axis, in box units) */
    rx = conf->box.x * (conf->particle[target].pos.x - anint(conf->particle[target].pos.x));
    ry = conf->box.y * (conf->particle[target].pos.y - anint(conf->particle[target].pos.y));
    nr = radiushole_position(sqrt(rx*rx+ry*ry),sim,wli);
    if (nr < 0)
        return -100;
    /*particle move over radius bins*/
    if (nz) {
        sim->wl.radiushole[nr]++;
    }
    if (oz) {
        rx = conf->box.x * (oldpos->x - anint(oldpos->x));
        ry = conf->box.y * (oldpos->y - anint(oldpos->y));
        or = radiushole_position(sqrt(rx*rx+ry*ry),sim,wli);
        sim->wl.radiushole[or]--;
        if ( sim->wl.radiushole[or] < 0 ) {
            printf ("Error(single particle move): trying to make number of beads in radiuspore smaller than 0 at position %ld\n",or);
            radiushole_print(sim->wl.radiushole,sim->wl.radiusholemax);
            fflush(stdout);
        }
        /* a bin became empty: the hole radius may have grown */
        if (sim->wl.radiushole[or] ==0)
            return radiushole_order(sim);
    }
    /* a bin became newly occupied: the hole radius may have shrunk */
    if ( (nz) && (sim->wl.radiushole[nr] ==1) ) {
        return radiushole_order(sim);
    }
    return sim->wl.currorder[wli];
}
/* return change in order parameter when chain moves*/
/**
 * Radius-hole order parameter after a whole-chain move: add every moved
 * chain member's new position to the radial histogram (if above the
 * reference plane), then remove the original positions from chorig.
 * Returns -100 when a new radial bin is invalid, the recomputed order when
 * any bin's emptiness changed, else the current order unchanged.
 * NOTE(review): unlike the insertion loop, the removal loop does not guard
 * against a negative bin from radiushole_position -- presumably the
 * original positions were validated when first inserted; confirm.
 */
long radiusholeorder_movechain(long chain[MAXN], struct conf * conf, struct sim * sim,struct particles chorig[MAXCHL],int wli, struct vector *position)
{
    long i,current,nr;
    double rx,ry,z;
    BOOL change=FALSE;
    long radiushole_position(double radius, struct sim * sim,int);
    long radiushole_order(struct sim *sim);
    double anint(double);
    void radiushole_print (long *radiushole, long length);
    i = 0;
    rx=0;
    current = chain[0];
    /* insert the new positions (chain is terminated by a negative index) */
    while (current >=0 ) {
        if ( conf->particle[current].type == sim->wl.wlmtype ) {
            z=conf->particle[current].pos.z - position->z; /*if above system CM*/
            if (z-anint(z) > 0) {
                rx = conf->box.x * (conf->particle[current].pos.x - anint(conf->particle[current].pos.x));
                ry = conf->box.y * (conf->particle[current].pos.y - anint(conf->particle[current].pos.y));
                nr = radiushole_position(sqrt(rx*rx+ry*ry),sim,wli);
                if (nr < 0)
                    return -100;
                sim->wl.radiushole[nr]++;
                /* a bin became newly occupied: order must be recomputed */
                if ( sim->wl.radiushole[nr] == 1 ) change = TRUE;
            }
        }
        i++;
        current = chain[i];
    }
    i = 0;
    current = chain[0];
    /* remove the original (pre-move) positions */
    while (current >=0 ) {
        if ( conf->particle[current].type == sim->wl.wlmtype ) {
            z=chorig[i].pos.z - position->z; /*if above system CM*/
            if (z-anint(z) > 0) {
                rx = conf->box.x * (chorig[i].pos.x - anint(chorig[i].pos.x));
                ry = conf->box.y * (chorig[i].pos.y - anint(chorig[i].pos.y));
                nr = radiushole_position(sqrt(rx*rx+ry*ry),sim,wli);
                sim->wl.radiushole[nr]--;
                if ( sim->wl.radiushole[nr] < 0 ) {
                    printf ("Error (chainmove): trying to make number of beads in radiuspore smaller than 0 at position %ld\n",nr);
                    radiushole_print(sim->wl.radiushole,sim->wl.radiusholemax);
                    fflush(stdout);
                }
                /* a bin became empty: order must be recomputed */
                if ( sim->wl.radiushole[nr] == 0 ) change = TRUE;
            }
        }
        i++;
        current = chain[i];
    }
    if ( change ) {
        return radiushole_order(sim);
    }
    return sim->wl.currorder[wli];
}
/* filling the radiushole above vec*/
/**
 * Rebuild the radial occupation histogram from scratch: grow the array if
 * the box diagonal now needs more bins, zero it, then bin every particle
 * of the tracked type that lies above the reference plane by its distance
 * from the z axis. Returns the resulting radius-hole order, or -100 when a
 * particle falls into a negative bin.
 */
long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli, struct vector *position)
{
    long radiushole_position(double radius, struct sim * sim,int);
    long radiushole_order(struct sim *sim);
    double anint(double);
    long i,nr,radiusholemax;
    double rx,ry,z;

    /* largest bin any particle can occupy in the current box */
    radiusholemax = radiushole_position(sqrt(conf->box.x*conf->box.x+conf->box.y*conf->box.y),sim,wli);
    if ( radiusholemax > sim->wl.radiusholemax ) {
        if (sim->wl.radiushole != NULL)
            free(sim->wl.radiushole);
        sim->wl.radiushole = malloc( sizeof(long)* (radiusholemax));
        sim->wl.radiusholemax = radiusholemax;
    }
    /* BUG FIX: zero the whole allocated histogram, not just the first
       'radiusholemax' bins -- sim->wl.radiusholemax can exceed the current
       radiusholemax (e.g. after the box shrank), and radiushole_order()
       scans the full allocated range, so stale counts in the upper bins
       would corrupt the order parameter */
    for (i=0;i<sim->wl.radiusholemax;i++) {
        sim->wl.radiushole[i] = 0;
    }
    for (i=0; i< topo->npart; i++) {
        /* bin each above-plane particle of the tracked type by its radial
           distance from the z axis */
        if ( conf->particle[i].type == sim->wl.wlmtype ) {
            z=conf->particle[i].pos.z - position->z; /*if above position*/
            if (z-anint(z) > 0) {
                rx = conf->box.x * (conf->particle[i].pos.x - anint(conf->particle[i].pos.x));
                ry = conf->box.y * (conf->particle[i].pos.y - anint(conf->particle[i].pos.y));
                nr = radiushole_position(sqrt(rx*rx+ry*ry),sim,wli);
                if (nr < 0)
                    return -100;
                sim->wl.radiushole[nr]++;
            }
        }
    }
    return radiushole_order(sim);
}
/**
 * Debug dump of the radial occupation histogram to stdout, space separated
 * on a single line.
 */
void radiushole_print (long *radiushole, long length)
{
    long bin;

    printf("radiushole:\n");
    for (bin = 0; bin < length; bin++)
        printf("%ld ", radiushole[bin]);
    printf("\n");
}
/**
 * Copy a long array into *target, (re)allocating it to hold sourcelength
 * elements. 'targetlength' (the previous allocation size) is kept in the
 * signature for the callers but is not needed by the realloc-based copy.
 * Returns 0.
 */
int longarray_cpy (long **target, long **source, long targetlength, long sourcelength)
{
    size_t bytes = sizeof(long) * (size_t)sourcelength;
    /* FIX: cast the malloc result like the realloc branch already did --
       keeps the two branches consistent and the file compilable as C++ */
    if ( (*target) != NULL)
        (*target) = (long*) realloc((*target), bytes);
    else
        (*target) = (long*) malloc( bytes );
    memcpy((*target),(*source), bytes);
    return 0;
}
/*..............................................................................*/
/* ............................... particles in contact ..................... */
/*return order for particles in contact */
/**
 * Wang-Landau bin of the current number of particles in contact.
 */
long contparticles_order(struct sim * sim, int wli)
{
    double offset = sim->wl.partincontact - sim->wl.minorder[wli];
    return (long) ceil(offset / sim->wl.dorder[wli]);
}
/*returns if particle is in contact*/
/**
 * Whether the given position is "in contact" with particle 0: TRUE when
 * the squared nearest-image distance is below WL_CONTACTS.
 */
BOOL particleinncontact (struct vector *vec, struct conf *conf)
{
    double anint(double);
    double dx, dy, dz;

    dx = vec->x - conf->particle[0].pos.x;
    dy = vec->y - conf->particle[0].pos.y;
    dz = vec->z - conf->particle[0].pos.z;
    /* wrap to the nearest periodic image and scale by the box */
    dx = conf->box.x * (dx - anint(dx));
    dy = conf->box.y * (dy - anint(dy));
    dz = conf->box.z * (dz - anint(dz));
    return (dx*dx + dy*dy + dz*dz < WL_CONTACTS) ? TRUE : FALSE;
}
/* return change in number of particles in contact when one particle moves*/
/* Update the particles-in-contact count after a single-particle move
   (from *oldpos to its current position) and return the new WL order. */
long contparticles_moveone(struct vector *oldpos, struct conf *conf, struct sim * sim, long target,int wli)
{
    long delta = 0;
    long contparticles_order(struct sim * sim, int wli);
    BOOL particleinncontact (struct vector *vec, struct conf *conf);

    /* moves of other particle types cannot change the order parameter */
    if ( conf->particle[target].type != sim->wl.wlmtype )
        return sim->wl.currorder[wli];
    if ( particleinncontact (&(conf->particle[target].pos),conf) )
        delta++;   /* contact gained at the new position */
    if ( particleinncontact (oldpos,conf) )
        delta--;   /* contact lost at the old position */
    sim->wl.partincontact += delta;
    return contparticles_order(sim,wli);
}
/* return change in order parameter when chain moves*/
/* Update the particles-in-contact count after a whole chain moved:
   add contacts at the new positions, remove those held at the original
   positions (chorig), then return the new WL order.  The chain array is
   terminated by a negative index. */
long contparticles_movechain(long chain[MAXN], struct conf * conf, struct sim * sim,struct particles chorig[MAXCHL],int wli)
{
    long idx, current;
    long contparticles_order(struct sim * sim, int wli);
    BOOL particleinncontact (struct vector *vec, struct conf *conf);

    /* contacts gained at the new chain positions */
    for (idx = 0, current = chain[0]; current >= 0; current = chain[++idx]) {
        if ( conf->particle[current].type == sim->wl.wlmtype
             && particleinncontact (&(conf->particle[current].pos),conf) )
            sim->wl.partincontact++;
    }
    /* contacts lost at the original positions */
    for (idx = 0, current = chain[0]; current >= 0; current = chain[++idx]) {
        if ( conf->particle[current].type == sim->wl.wlmtype
             && particleinncontact (&(chorig[idx].pos),conf) )
            sim->wl.partincontact--;
    }
    return contparticles_order(sim,wli);
}
/* filling all particles in the contact */
/* Recount from scratch how many particles of the WL type are in contact
   with particle 0, and return the resulting WL order.  Particle 0 itself
   is the reference, hence the loop starts at index 1. */
long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli)
{
    long idx;
    long contparticles_order(struct sim * sim, int wli);
    BOOL particleinncontact (struct vector *vec, struct conf *conf);

    sim->wl.partincontact = 0;
    for (idx = 1; idx < topo->npart; idx++) {
        if ( conf->particle[idx].type != sim->wl.wlmtype )
            continue;
        if ( particleinncontact (&(conf->particle[idx].pos),conf) )
            sim->wl.partincontact++;
    }
    return contparticles_order(sim,wli);
}
/*..............................................................................*/
/*........................GEOMETRIC STUFF.......................................*/
/*..............................................................................*/
/*..............................................................................*/
/*
Find closest distance between line segments and return its vector
gets orientations and lengths of line segments and the vector connecting
their centers of mass (from vec1 to vec2)
*/
// Copyright 2001, softSurfer (www.softsurfer.com)
// This code may be freely used and modified for any purpose
// providing that this copyright notice is included with it.
// SoftSurfer makes no warranty for this code, and cannot be held
// liable for any real or imagined damage resulting from its use.
// Users of this code must verify correctness for their application.
/*
 * Closest-approach vector between two line segments (softSurfer's
 * dist3D_Segment_to_Segment, adapted).  dir1/dir2 are the segment axes,
 * halfl1/halfl2 the half-lengths, r_cm the vector between the segments'
 * centres of mass (from segment 1 to segment 2).
 * NOTE(review): assumes dir1 and dir2 are unit vectors -- confirm at callers.
 * Internally the segments are parameterized on [0,1]; sc and tc are the
 * clamped parameters of the closest points, computed as sN/sD and tN/tD
 * to defer the division until the denominators are known to be safe.
 */
struct vector mindist_segments(struct vector dir1, double halfl1,
struct vector dir2, double halfl2, struct vector r_cm)
{
    struct vector u,v,w,vec;
    double a,b,c,d,e,D,sc,sN,sD,tc,tN,tD;
    struct vector vec_scale(struct vector, double);
    u = vec_scale(dir1,2.0*halfl1); //S1.P1 - S1.P0;
    v = vec_scale(dir2,2.0*halfl2); //S2.P1 - S2.P0;
    /* w = vector from segment 2's start point to segment 1's start point */
    w.x = dir2.x*halfl2 - dir1.x*halfl1 - r_cm.x;
    w.y = dir2.y*halfl2 - dir1.y*halfl1 - r_cm.y;
    w.z = dir2.z*halfl2 - dir1.z*halfl1 - r_cm.z; //S1.P0 - S2.P0;
    a = DOT(u,u); // always >= 0
    b = DOT(u,v);
    c = DOT(v,v); // always >= 0
    d = DOT(u,w);
    e = DOT(v,w);
    D = a*c - b*b; // always >= 0
    sc = D;
    sN = D;
    sD = D; // sc = sN / sD, default sD = D >= 0
    tc = D;
    tN = D;
    tD = D; // tc = tN / tD, default tD = D >= 0
    // compute the line parameters of the two closest points
    if (D < 0.00000001) { // the lines are almost parallel
        sN = 0.0; // force using point P0 on segment S1
        sD = 1.0; // to prevent possible division by 0.0 later
        tN = e;
        tD = c;
    }
    else { // get the closest points on the infinite lines
        sN = (b*e - c*d);
        tN = (a*e - b*d);
        if (sN < 0.0) { // sc < 0 => the s=0 edge is visible
            sN = 0.0;
            tN = e;
            tD = c;
        }
        else if (sN > sD) { // sc > 1 => the s=1 edge is visible
            sN = sD;
            tN = e + b;
            tD = c;
        }
    }
    if (tN < 0.0) { // tc < 0 => the t=0 edge is visible
        tN = 0.0;
        // recompute sc for this edge
        if (-d < 0.0)
            sN = 0.0;
        else if (-d > a)
            sN = sD;
        else {
            sN = -d;
            sD = a;
        }
    }
    else if (tN > tD) { // tc > 1 => the t=1 edge is visible
        tN = tD;
        // recompute sc for this edge
        if ((-d + b) < 0.0)
            sN = 0;
        else if ((-d + b) > a)
            sN = sD;
        else {
            sN = (-d + b);
            sD = a;
        }
    }
    // finally do the division to get sc and tc
    if (fabs(sN) < 0.00000001) sc = 0.0 ;
    else sc = sN / sD;
    if (fabs(tN) < 0.00000001) tc = 0.0 ;
    else tc = tN / tD;
    // get the difference of the two closest points
    //Vector = w + (sc * u) - (tc * v); // = S1(sc) - S2(tc)
    vec.x = u.x*sc + w.x - v.x*tc;
    vec.y = u.y*sc + w.y - v.y*tc;
    vec.z = u.z*sc + w.z - v.z*tc;
    return vec;
}
/*..............................................................................*/
/*
Find closest distance between line segment and point and return it as vector
(from point to closest segment point)
Function gets orientation and length of line segments and the vector connecting
their centers of mass (from segment to point)
*/
struct vector mindist_segmentpoint(struct vector dir1, double length, struct vector r_cm)
{
struct vector vec;
double c,d,halfl;
halfl=length*0.5;
c = DOT(dir1,r_cm);
if (c >= halfl) d = halfl;
else {
if (c > -halfl) d = c;
else d = -halfl;
}
vec.x = - r_cm.x + dir1.x * d;
vec.y = - r_cm.y + dir1.y * d;
vec.z = - r_cm.z + dir1.z * d;
return vec;
}
/*..............................................................................*/
/*
Determines whether two particles overlap.
Returns 1 if there is an overlap, 0 if not.
*/
/*
 * Determines whether two particles overlap (closest approach less than
 * half of sigma for the pair).  Handles sphere-sphere, spherocylinder-
 * spherocylinder (segment-segment closest approach via the quadratic
 * region decomposition), and the two mixed cases.
 * Returns 1 if there is an overlap, 0 if not.
 *
 * BUGFIX: the spherocylinder-spherocylinder branch read
 *     halfl = half_len[0] = half_len[1];
 * i.e. it ASSIGNED half_len[1] into half_len[0] (mutating ia_params as a
 * side effect) instead of summing.  The comment "Just take the mean" and
 * the following "halfl /= 2" show a mean was intended; fixed to a sum.
 */
int overlap(struct particles part1, struct particles part2,
struct vector box, struct ia_param ia_params[MAXT][MAXT])
{
    double b, c, d, e, f;   /* Coefficients in distance quadratic */
    double boundary;        /* Half length of central boundary zone of quadratic */
    double det;
    double halfl;           /* Half length of cylinder */
    double s0, t0;          /* det times location of min separation of infinite lines */
    double ss, tt;          /* Location of min separation of line segments */
    struct vector r_cm;     /* Vector between centres of mass */
    double dist;            /* Distance between particles */
    struct vector distvec;  /* Distance vector between particles */
    double linemin(double, double);
    struct vector image(struct vector, struct vector, struct vector);

    r_cm = image(part1.pos, part2.pos, box);
    if ((part1.type >= SP) && (part2.type >= SP)) { /*we have two spheres - most common, do nothing*/
        dist=sqrt(DOT(r_cm,r_cm));
    } else {
        if ((ia_params[part1.type][part2.type].geotype[0] < SP) && (ia_params[part1.type][part2.type].geotype[1] < SP)) { /*we have two spherocylinders*/
            /*finding closest contact between them*/
            b = -DOT(part1.dir, part2.dir);
            d = DOT(part1.dir, r_cm);
            e = -DOT(part2.dir, r_cm);
            f = DOT(r_cm, r_cm);
            det = 1.0 - b*b;
            /* Just take the mean of the two half-lengths (see BUGFIX above) */
            halfl = ia_params[part1.type][part2.type].half_len[0]
                  + ia_params[part1.type][part2.type].half_len[1];
            halfl /= 2;
            boundary = det * halfl;
            /* Location of smallest separation of the infinite lines */
            s0 = b*e - d;
            t0 = b*d - e;
            /* Location of smallest separation of line segments */
            if (s0 >= boundary) {
                if (t0 >= boundary) {
                    /* Region 2 */
                    if ( d + halfl + halfl*b < 0.0 ) {
                        ss = halfl;
                        tt = linemin( -ss*b - e, halfl );
                    } else {
                        tt = halfl;
                        ss = linemin( -tt*b - d, halfl );
                    }
                } else if (t0 >= -boundary) {
                    /* Region 1 */
                    ss = halfl;
                    tt = linemin( -ss*b - e, halfl );
                } else {
                    /* Region 8 */
                    if ( d + halfl - halfl*b < 0.0 ) {
                        ss = halfl;
                        tt = linemin( -ss*b - e, halfl );
                    } else {
                        tt = -halfl;
                        ss = linemin( -tt*b - d, halfl );
                    }
                }
            } else if (s0 >= -boundary) {
                if (t0 >= boundary) {
                    /* Region 3 */
                    tt = halfl;
                    ss = linemin( -tt*b - d, halfl );
                } else if (t0 >= -boundary) {
                    /* Region 0 */
                    ss = s0/det;
                    tt = t0/det;
                } else {
                    /* Region 7 */
                    tt = -halfl;
                    ss = linemin( -tt*b - d, halfl );
                }
            } else {
                if (t0 >= boundary) {
                    /* Region 4 */
                    if ( d - halfl + halfl*b > 0.0 ) {
                        ss = -halfl;
                        tt = linemin( -ss*b - e, halfl );
                    } else {
                        tt = halfl;
                        ss = linemin( -tt*b - d, halfl );
                    }
                } else if (t0 >= -boundary) {
                    /* Region 5 */
                    ss = -halfl;
                    tt = linemin( -ss*b - e, halfl );
                } else {
                    /* Region 6 */
                    if ( d - halfl - halfl*b > 0.0 ) {
                        ss = -halfl;
                        tt = linemin( -ss*b - e, halfl );
                    } else {
                        tt = -halfl;
                        ss = linemin( -tt*b - d, halfl );
                    }
                }
            }
            /*ss and tt are location of min separation of line segments */
            dist=sqrt(f + ss*ss + tt*tt + 2.0*(ss*d + tt*e + ss*tt*b));
        } else {
            if (ia_params[part1.type][part2.type].geotype[0] < SP) { /*We have one spherocylinder - it is first one*/
                /*finding closest vector from spherocylinder to sphere*/
                halfl=ia_params[part1.type][part2.type].half_len[0];
                c = DOT(part1.dir,r_cm);
                /* clamp projection onto the cylinder axis to the segment */
                if (c >= halfl) d = halfl;
                else {
                    if (c > -halfl) d = c;
                    else d = -halfl;
                }
                distvec.x = - r_cm.x + part1.dir.x * d;
                distvec.y = - r_cm.y + part1.dir.y * d;
                distvec.z = - r_cm.z + part1.dir.z * d;
                dist=sqrt(DOT(distvec,distvec));
            } else { /*last option: first one is sphere, second one spherocylinder*/
                /*finding closest vector from spherocylinder to sphere*/
                halfl=ia_params[part1.type][part2.type].half_len[1];
                c = DOT(part2.dir,r_cm);
                if (c >= halfl) d = halfl;
                else {
                    if (c > -halfl) d = c;
                    else d = -halfl;
                }
                distvec.x = r_cm.x - part2.dir.x * d;
                distvec.y = r_cm.y - part2.dir.y * d;
                distvec.z = r_cm.z - part2.dir.z * d;
                dist=sqrt(DOT(distvec,distvec));
            }
        }
    }
    /* Overlap exists if smallest separation is below the threshold.
       NOTE(review): threshold is sigma*0.5 although the original comment
       said "diameter of cylinder" -- confirm intended criterion. */
    if (dist < ia_params[part1.type][part2.type].sigma*0.5 ) {
        return 1;
    } else {
        return 0;
    }
}
/*..............................................................................*/
/* Clamp 'criterion' to the closed interval [-halfl, +halfl]. */
double linemin(double criterion, double halfl)
{
    return (criterion >= halfl)  ? halfl
         : (criterion >= -halfl) ? criterion
         :                         -halfl;
}
/*..............................................................................*/
/*........................SOME USEFUL MATH......................................*/
/*..............................................................................*/
/*
ran2 from Numerical Recipes.
*/
#define IM1 2147483563
#define IM2 2147483399
#define AM (1.0/IM1)
#define IMM1 (IM1-1)
#define IA1 40014
#define IA2 40692
#define IQ1 53668
#define IQ2 52774
#define IR1 12211
#define IR2 3791
#define NTAB 32
#define NDIV (1+IMM1/NTAB)
#define EPS 1.2e-7
#define RNMX (1.0-EPS)
/*
 * ran2 from Numerical Recipes: long-period random number generator of
 * L'Ecuyer (two multiplicative congruential generators combined) with a
 * Bays-Durham shuffle.  Returns a uniform deviate in (0,1), capped at RNMX
 * so the endpoint 1.0 is never returned.
 *
 * idum: pointer to the seed.  Initialize by calling with *idum negative;
 *       do not alter *idum between successive calls thereafter.
 * NOTE: keeps static state (idum2, iy, iv) -- not thread-safe.
 */
double ran2(long *idum)
{
    int j;
    long k;
    static long idum2=123456789;
    static long iy=0;
    static long iv[NTAB];
    double temp;
    if (*idum <= 0) {                      /* initialization requested */
        if (-(*idum) < 1) *idum=1;         /* guard against *idum == 0 */
        else *idum = -(*idum);
        idum2=(*idum);
        /* warm up the generator and fill the shuffle table */
        for (j=NTAB+7;j>=0;j--) {
            k=(*idum)/IQ1;
            *idum=IA1*(*idum-k*IQ1)-k*IR1; /* Schrage's trick: no overflow */
            if (*idum < 0) *idum += IM1;
            if (j < NTAB) iv[j] = *idum;
        }
        iy=iv[0];
    }
    k=(*idum)/IQ1;                         /* advance first generator */
    *idum=IA1*(*idum-k*IQ1)-k*IR1;
    if (*idum < 0) *idum += IM1;
    k=idum2/IQ2;                           /* advance second generator */
    idum2=IA2*(idum2-k*IQ2)-k*IR2;
    if (idum2 < 0) idum2 += IM2;
    j=iy/NDIV;                             /* shuffle and combine outputs */
    iy=iv[j]-idum2;
    iv[j] = *idum;
    if (iy < 1) iy += IMM1;
    if ((temp=AM*iy) > RNMX) return RNMX;  /* avoid returning exactly 1.0 */
    else return temp;
}
#undef IM1
#undef IM2
#undef AM
#undef IMM1
#undef IA1
#undef IA2
#undef IQ1
#undef IQ2
#undef IR1
#undef IR2
#undef NTAB
#undef NDIV
#undef EPS
#undef RNMX
/*..............................................................................*/
/*
From Numerical Recipes. Simplified to deal specifically with 3*3 matrices
(stored as elements [1...3][1...3] or a 4*4 array).
*/
/*
 * Householder reduction of a real symmetric 3x3 matrix to tridiagonal form
 * (Numerical Recipes tred2, specialized: elements [1..3][1..3] of a 4x4
 * array; eigenvector accumulation stripped out -- see commented code).
 * On output d[1..3] holds the diagonal and e[1..3] the off-diagonal
 * elements (e[1] = 0); a is destroyed.
 */
void tred2(double a[4][4], double d[4], double e[4])
{
    int l, k, j, i;
    double scale, hh, h, g, f;
    for (i=3; i>=2; i--) {
        l=i-1;
        h=scale=0.0;
        if (l > 1) {
            /* scale the row to avoid under/overflow in the norm */
            for (k=1;k<=l;k++) scale += fabs(a[i][k]);
            if (scale == 0.0) e[i]=a[i][l];  /* row already reduced: skip transform */
            else {
                for (k=1;k<=l;k++) {
                    a[i][k] /= scale;
                    h += a[i][k]*a[i][k];    /* h = |row|^2 after scaling */
                }
                f=a[i][l];
                /* choose the sign that avoids cancellation */
                g=(f >= 0.0 ? -sqrt(h) : sqrt(h));
                e[i]=scale*g;
                h -= f*g;
                a[i][l]=f-g;                 /* store the Householder vector u in a */
                f=0.0;
                for (j=1;j<=l;j++) {
                    /* a[j][i]=a[i][j]/h; */
                    g=0.0;
                    /* form an element of A.u, using symmetric storage */
                    for (k=1;k<=j;k++) g += a[j][k]*a[i][k];
                    for (k=j+1;k<=l;k++) g += a[k][j]*a[i][k];
                    e[j]=g/h;                /* element of p = A.u / h */
                    f += e[j]*a[i][j];
                }
                hh=f/(h+h);                  /* K = u.p / 2h */
                for (j=1;j<=l;j++) {
                    /* reduce A: A <- A - q.u' - u.q' with q = p - K.u */
                    f=a[i][j];
                    e[j]=g=e[j]-hh*f;
                    for (k=1;k<=j;k++) a[j][k] -= (f*e[k]+g*a[i][k]);
                }
            }
        } else e[i]=a[i][l];
        d[i]=h;
    }
    /* d[1]=0.0; */
    e[1]=0.0;
    /* eigenvector accumulation omitted: only eigenvalues are needed here */
    for (i=1; i<=3; i++) {
        /* l=i-1;
        if (d[i]) {
        for (j=1;j<=l;j++) {
        g=0.0;
        for (k=1;k<=l;k++) g += a[i][k]*a[k][j];
        for (k=1;k<=l;k++) a[k][j] -= g*a[k][i];
        }
        } */
        d[i]=a[i][i];
        /* a[i][i]=1.0;
        for (j=1;j<=l;j++) a[j][i]=a[i][j]=0.0; */
    }
}
/*..............................................................................*/
/*
From Numerical Recipes. Simplified to deal specifically with 3*3 matrices
(stored as elements [1...3][1...3] or a 4*4 array).
*/
#define NRANSI
#define SIGN(a,b) ((b) >= 0.0 ? fabs(a) : -fabs(a))
/*
 * QL algorithm with implicit shifts (Numerical Recipes tqli, specialized
 * for 3x3, eigenvector accumulation stripped).  Determines the eigenvalues
 * of a real symmetric tridiagonal matrix as produced by tred2: on input
 * d[1..3] is the diagonal and e[1..3] the subdiagonal (e[1] ignored);
 * on output d contains the eigenvalues and e is destroyed.
 * Exits the program if 30 iterations are exceeded for any eigenvalue.
 */
void tqli(double d[4], double e[4])
{
    double pythag(double a, double b);
    int m, l, iter, i;
    /* int k; */
    double s, r, p, g, f, dd, c, b;
    /* renumber e so e[i] couples d[i] and d[i+1] */
    for (i=2; i<=3; i++) e[i-1] = e[i];
    e[3] = 0.0;
    for (l=1; l<=3; l++) {
        iter = 0;
        do {
            /* look for a single small subdiagonal element to split the matrix */
            for (m=l; m<=3-1; m++) {
                dd = fabs(d[m]) + fabs(d[m+1]);
                /* e[m] negligible at machine precision? */
                if ((double)(fabs(e[m])+dd) == dd) break;
            }
            if (m != l) {
                if (iter++ == 30) {
                    fprintf(stderr, "Too many iterations in tqli\n");
                    exit (2);
                }
                g = (d[l+1] - d[l]) / (2.0*e[l]);   /* form implicit shift */
                r = pythag(g, 1.0);
                g = d[m] - d[l] + e[l] / (g + SIGN(r,g));
                s = c = 1.0;
                p = 0.0;
                /* plane rotations to restore tridiagonal form (QL sweep) */
                for (i=m-1; i>=l; i--) {
                    f = s * e[i];
                    b = c * e[i];
                    e[i+1] = (r=pythag(f,g));
                    if (r == 0.0) {
                        /* recover from underflow */
                        d[i+1] -= p;
                        e[m] = 0.0;
                        break;
                    }
                    s = f/r;
                    c = g/r;
                    g = d[i+1] - p;
                    r = (d[i] - g)*s + 2.0*c*b;
                    d[i+1] = g+(p=s*r);
                    g = c*r - b;
                    /* for (k=1; k<=3; k++) {
                    f = z[k][i+1];
                    z[k][i+1] = s*z[k][i]+c*f;
                    z[k][i] = c*z[k][i]i - s*f;
                    } */
                }
                if (r == 0.0 && i >= l) continue;
                d[l] -= p;
                e[l] = g;
                e[m] = 0.0;
            }
        } while (m != l);
    }
}
#undef NRANSI
/*..............................................................................*/
/*
From Numerical Recipes. Used by tqli.
*/
#define NRANSI
static double sqrarg;
#define SQR(a) ((sqrarg=(a)) == 0.0 ? 0.0 : sqrarg*sqrarg)
double pythag(double a, double b)
{
double absa, absb;
absa = fabs(a);
absb = fabs(b);
if (absa > absb) return absa*sqrt(1.0+SQR(absb/absa));
else return (absb == 0.0 ? 0.0 : absb*sqrt(1.0+SQR(absa/absb)));
}
#undef NRANSI
/*..............................................................................*/
/*
Normalise a vector to have unit length. For speed during heavy use, it is
not checked that the supplied vector has non-zero length.
*/
/* Scale *u in place to unit length.  A zero vector is left unchanged
   (guarded, so no division by zero). */
void normalise(struct vector *u)
{
    double invlen;

    invlen = sqrt( DOT(*u,*u) );
    if (invlen != 0.0) {
        invlen = 1/invlen;
        u->x *= invlen;
        u->y *= invlen;
        u->z *= invlen;
    }
}
/*
Returns the vector pointing from the centre of mass of particle 2 to the
centre of mass of the closest image of particle 1.
*/
struct vector image(struct vector r1, struct vector r2, struct vector box)
{
struct vector r12;
double anint(double);
r12.x = r1.x - r2.x;
r12.y = r1.y - r2.y;
r12.z = r1.z - r2.z;
r12.x = box.x * (r12.x - anint(r12.x));
r12.y = box.y * (r12.y - anint(r12.y));
r12.z = box.z * (r12.z - anint(r12.z));
return r12;
}
/*
Returns the nearest integer to its argument as a double precision number. e.g.
anint(-0.49) = 0.0 and anint(-0.51) = -1.0. Equivalent to the Fortran intrinsic
ANINT.
*/
/* Round to the nearest integer with halves rounded away from zero, and
   return the result as a double (Fortran ANINT).  E.g. anint(-0.49) = 0.0
   and anint(-0.51) = -1.0. */
double anint(double arg)
{
    long rounded;

    rounded = (long)(arg < 0 ? arg - 0.5 : arg + 0.5);
    return (double) rounded;
}
/*..............................................................................*/
/*
Returns an evenly distributed random unit vector of unit length. See Allen &
Tildesley p349 or Frenkel & Smit p410.
RANDOM VECTOR ON UNIT SPHERE
*/
/* Uniformly distributed random unit vector on the sphere, via Marsaglia's
   rejection method (see Allen & Tildesley p349 / Frenkel & Smit p410). */
struct vector ranvec(void)
{
    double len2, scale, u1, u2;
    struct vector unit;
    double ran2(long *);

    do {
        u1 = 1.0 - 2.0*ran2(&seed);
        u2 = 1.0 - 2.0*ran2(&seed);
        len2 = u1*u1 + u2*u2;
    } while (len2 > 1.0);   /* reject points outside the unit disc */
    scale = 2.0 * sqrt(1.0 - len2);
    unit.x = u1 * scale;
    unit.y = u2 * scale;
    unit.z = 1.0 - 2.0*len2;
    return unit;
}
/**
* returns a point randomly and evenly distributed inside of a unit sphere
*/
struct vector ranvecsph(void)
{
struct vector ranvec;
double ran2(long *);
do{
ranvec.x = 2 * ran2(&seed) - 1.0;
ranvec.y = 2 * ran2(&seed) - 1.0;
ranvec.z = 2 * ran2(&seed) - 1.0;
} while(ranvec.x*ranvec.x +
ranvec.y*ranvec.y +
ranvec.z*ranvec.z >= 1);
//printf("%lf\t%lf\t%lf\n", ranvec.x,ranvec.y,ranvec.z);
return ranvec;
}
/**** some useful math *******/
struct vector vec_create(double x, double y, double z)
{
struct vector newvec;
newvec.x=x;
newvec.y=y;
newvec.z=z;
return newvec;
}
struct vector vec_createarr(double a[3])
{
struct vector newvec;
newvec.x=a[0];
newvec.y=a[1];
newvec.z=a[2];
return newvec;
}
/* Dot product of two vectors. */
double vec_dotproduct(struct vector A,struct vector B)
{
    return A.x*B.x + A.y*B.y + A.z*B.z;
}
/* vector projection of vector A to direction of B*/
struct vector vec_project(struct vector* A,struct vector* B)
{
double dp;
struct vector pr;
dp = A->x*B->x + A->y*B->y + A->z*B->z;
pr.x=B->x*dp;
pr.y=B->y*dp;
pr.z=B->z*dp;
return pr;
}
/* Remove from *A its component along B, in place (Gram-Schmidt step). */
void ortogonalise(struct vector *A, struct vector B)
{
    double comp;
    double vec_dotproduct(struct vector A,struct vector B);

    comp = vec_dotproduct(*A,B);
    A->x -= B.x * comp;
    A->y -= B.y * comp;
    A->z -= B.z * comp;
}
/* vector projection of vector A perpendicular to direction of B*/
struct vector vec_perpproject(struct vector *A,struct vector *B)
{
struct vector pp;
double dp;
struct vector vec_project(struct vector *, struct vector*);
dp=DOT((*A),(*B));
pp.x = A->x - B->x*dp;
pp.y = A->y - B->y*dp;
pp.z = A->z - B->z*dp;
// fprintf (stderr, "pp x: %.8f y: %.8f z: %.8f \n",pp.x,pp.y,pp.z);
return pp;
}
/* returns a vector perpendicular to A
nothing special about the vector except that it's one of the perpendicular options and is normalized
*/
struct vector vec_perp(struct vector A)
{
double ratio,x,y;
struct vector somevector;
struct vector vec_create(double, double, double);
struct vector vec_normalize(struct vector);
void normalise(struct vector *);
struct vector vec_crossproduct(struct vector, struct vector);
x=A.x;
y=A.y;
if (x == 0) x=1;
else {
if (y == 0) y=1;
else {
ratio=y/x;
y=x*ratio*2;
}
}
somevector= vec_create(x, y, A.z);
normalise(&somevector);
return vec_crossproduct(A,somevector);
}
/* Perform the multiplication of a matrix A and a vector B where A is the
first argument and B is the second argument. The routine will
return AxB*/
struct vector matrix_vec_multiply(double A[3][3],struct vector B)
{
int i;
double vecarr[3];
struct vector AB,RA;
struct vector vec_createarr(double[3]);
double vec_dotproduct(struct vector,struct vector);
for (i=0;i<3;i++) {
/* index the row vector from A*/
RA=vec_createarr(A[i]);
/* Now find the dot product of this row with B*/
vecarr[i]=vec_dotproduct(RA,B);
}
AB=vec_createarr(vecarr);
return AB;
}
/* Distance between two vectors*/
/* Euclidean distance between two vectors.
   Uses sqrt() instead of the original pow(sum, 0.5): sqrt is the idiomatic
   call, faster, and correctly rounded where pow need not be. */
double vec_distance(struct vector vec1,struct vector vec2)
{
    double sum;
    sum = (vec1.x-vec2.x)*(vec1.x-vec2.x)
        + (vec1.y-vec2.y)*(vec1.y-vec2.y)
        + (vec1.z-vec2.z)*(vec1.z-vec2.z);
    return sqrt(sum);
}
/* Vector size */
/* Euclidean length of a vector. */
double vec_size(struct vector vec)
{
    return sqrt(vec.x*vec.x + vec.y*vec.y + vec.z*vec.z);
}
/* Normalize a vector*/
/* Return a unit-length copy of vec.
   The original divided by zero for a zero-length input; now, consistent
   with the in-place normalise(), a zero vector is returned unchanged. */
struct vector vec_normalize(struct vector vec)
{
    double mag;
    double vec_size(struct vector);

    mag = vec_size(vec);
    if (mag != 0.0) {
        mag = 1/mag;
        vec.x *= mag;
        vec.y *= mag;
        vec.z *= mag;
    }
    return vec;
}
/* Scale a vector */
struct vector vec_scale(struct vector vec, double scale)
{
vec.x=vec.x*scale;
vec.y=vec.y*scale;
vec.z=vec.z*scale;
return vec;
}
/* cross_product*/
struct vector vec_crossproduct(struct vector A,struct vector B)
{
struct vector cp;
cp.x=( A.y*B.z - A.z*B.y);
cp.y=( -A.x*B.z + A.z*B.x);
cp.z=( A.x*B.y - A.y*B.x);
return cp;
}
/* addition of vectors*/
inline
struct vector vec_sum(struct vector A,struct vector B)
{
struct vector C;
C.x=(A.x + B.x);
C.y=(A.y + B.y);
C.z=(A.z + B.z);
return C;
}
/* subtraction of vectors*/
inline
struct vector vec_sub(struct vector A,struct vector B)
{
struct vector C;
C.x=(A.x - B.x);
C.y=(A.y - B.y);
C.z=(A.z - B.z);
return C;
}
/* assign values of vector A from the values in vector B */
/* Copy the components of B into *A. */
inline
void vec_asign(struct vector *A, struct vector B)
{
    A->x = B.x;
    A->y = B.y;
    A->z = B.z;
}
/* generate random unit vector*/
/* Generate a random unit vector (thin wrapper around ranvec()). */
struct vector vec_random(void)
{
    struct vector ranvec(void);
    return ranvec();
}
/*generate random unit quaternion*/
struct quat quat_random(void)
{
double cosv, sinv;
struct quat newquat;
struct vector newaxis;
struct vector ranvec(void);
/* generate quaternion for rotation*/
newaxis = ranvec(); /*random axes for rotation*/
cosv = cos(PIH * ran2(&seed) );
if (ran2(&seed) <0.5) sinv = sqrt(1.0 - cosv*cosv);
else sinv = -sqrt(1.0 - cosv*cosv);
newquat.w=cosv;
newquat.x=newaxis.x*sinv;
newquat.y=newaxis.y*sinv;
newquat.z=newaxis.z*sinv;
return newquat;
}
/* Create quaternion for rotation around vector "vec" of angle in degrees "angle"
function need cos of half angle and its sin*/
struct quat quat_create(struct vector vec, double vc, double vs)
{
struct quat newquat;
newquat.w=vc;
newquat.x=vec.x*vs;
newquat.y=vec.y*vs;
newquat.z=vec.z*vs;
return newquat;
}
/*rotate vector with quaternion*/
/*
 * Rotate *vec in place by the quaternion 'quat', using the expanded
 * quaternion rotation formula (products of quaternion components combined
 * directly) rather than building the 3x3 rotation matrix.
 * NOTE(review): assumes quat is a unit quaternion -- confirm at callers.
 */
void vec_rotate(struct vector *vec, struct quat quat)
{
    double t2,t3,t4,t5,t6,t7,t8,t9,t10,newx,newy,newz;
    /* pairwise products of quaternion components (t1 = w*w not needed) */
    /* t1 = quat.w * quat.w; */
    t2 = quat.w * quat.x;
    t3 = quat.w * quat.y;
    t4 = quat.w * quat.z;
    t5 = -quat.x * quat.x;
    t6 = quat.x * quat.y;
    t7 = quat.x * quat.z;
    t8 = -quat.y * quat.y;
    t9 = quat.y * quat.z;
    t10 = -quat.z * quat.z;
    /* apply the rotation matrix implied by the t's */
    newx = 2.0 * ( (t8+t10)*(*vec).x + (t6-t4)*(*vec).y + (t3+t7)*(*vec).z ) + (*vec).x;
    newy = 2.0 * ( (t4+t6)*(*vec).x + (t5+t10)*(*vec).y + (t9-t2)*(*vec).z ) + (*vec).y;
    newz = 2.0 * ( (t7-t3)*(*vec).x + (t2+t9)*(*vec).y + (t5+t8)*(*vec).z ) + (*vec).z;
    (*vec).x = newx;
    (*vec).y = newy;
    (*vec).z = newz;
}
/* rotate spherocylinder by quaternion of random axis and angle smaller than
maxcos(cosine of angle half), we do everything on site for speed */
/*
 * Rotate a (patchy) spherocylinder by a quaternion built from a random
 * axis and a random angle whose half-angle cosine is drawn via max_angle.
 * The quaternion rotation is expanded inline (d1..d9 form the rotation
 * matrix entries) and applied to the direction vector and, depending on
 * geotype, to the patch direction(s), patch side vectors, and chiral
 * direction(s).  Everything is done on site for speed.
 */
void psc_rotate(struct particles *psc, double max_angle,int geotype)
{
    double vc, vs, t2, t3, t4, t5, t6, t7, t8, t9, t10;
    double d1, d2, d3, d4, d5, d6, d7, d8, d9 , newx, newy, newz;
    int k,m;
    struct quat newquat;
    struct vector newaxis;
    struct vector ranvec(void);
    /* generate quaternion for rotation*/
    newaxis = ranvec(); /*random axes for rotation*/
    // maxcos = cos(maxorient/2/180*PI);
    // vc = maxcos + ran2(&seed)*(1-maxcos); /*cos of angle must be bigger than maxcos and smaller than one*/
    vc = cos(max_angle * ran2(&seed) );
    if (ran2(&seed) <0.5) vs = sqrt(1.0 - vc*vc);
    else vs = -sqrt(1.0 - vc*vc); /*randomly choose orientation of direction of rotation clockwise or counterclockwise*/
    newquat.w=vc;
    newquat.x=newaxis.x*vs;
    newquat.y=newaxis.y*vs;
    newquat.z=newaxis.z*vs;
    /* do quaternion rotation: precompute component products ... */
    t2 = newquat.w * newquat.x;
    t3 = newquat.w * newquat.y;
    t4 = newquat.w * newquat.z;
    t5 = -newquat.x * newquat.x;
    t6 = newquat.x * newquat.y;
    t7 = newquat.x * newquat.z;
    t8 = -newquat.y * newquat.y;
    t9 = newquat.y * newquat.z;
    t10 = -newquat.z * newquat.z;
    /* ... and the rotation matrix entries, reused for every vector below */
    d1 = t8 + t10;
    d2 = t6 - t4;
    d3 = t3 + t7;
    d4 = t4 + t6;
    d5 = t5 + t10;
    d6 = t9 - t2;
    d7 = t7 - t3;
    d8 = t2 + t9;
    d9 = t5 + t8;
    /*rotate spherocylinder direction vector*/
    newx = 2.0 * ( d1*psc->dir.x + d2*psc->dir.y + d3*psc->dir.z ) + psc->dir.x;
    newy = 2.0 * ( d4*psc->dir.x + d5*psc->dir.y + d6*psc->dir.z ) + psc->dir.y;
    newz = 2.0 * ( d7*psc->dir.x + d8*psc->dir.y + d9*psc->dir.z ) + psc->dir.z;
    psc->dir.x = newx;
    psc->dir.y = newy;
    psc->dir.z = newz;
    /* patchy geotypes: also rotate patch vectors (two patches for T* types) */
    m=1;
    if ( (geotype != SCN) && (geotype != SCA) ) {
        if ( (geotype == TPSC) || (geotype == TCPSC) || (geotype == TCHPSC) || (geotype == TCHCPSC) )
            m=2;
        for (k=0;k<m;k++) {
            /*rotate patch direction vector*/
            newx = 2.0 * ( d1*psc->patchdir[k].x + d2*psc->patchdir[k].y + d3*psc->patchdir[k].z ) + psc->patchdir[k].x;
            newy = 2.0 * ( d4*psc->patchdir[k].x + d5*psc->patchdir[k].y + d6*psc->patchdir[k].z ) + psc->patchdir[k].y;
            newz = 2.0 * ( d7*psc->patchdir[k].x + d8*psc->patchdir[k].y + d9*psc->patchdir[k].z ) + psc->patchdir[k].z;
            psc->patchdir[k].x = newx;
            psc->patchdir[k].y = newy;
            psc->patchdir[k].z = newz;
            /*rotate patch sides vectors (two per patch)*/
            newx = 2.0 * ( d1*psc->patchsides[0+2*k].x + d2*psc->patchsides[0+2*k].y + d3*psc->patchsides[0+2*k].z ) + psc->patchsides[0+2*k].x;
            newy = 2.0 * ( d4*psc->patchsides[0+2*k].x + d5*psc->patchsides[0+2*k].y + d6*psc->patchsides[0+2*k].z ) + psc->patchsides[0+2*k].y;
            newz = 2.0 * ( d7*psc->patchsides[0+2*k].x + d8*psc->patchsides[0+2*k].y + d9*psc->patchsides[0+2*k].z ) + psc->patchsides[0+2*k].z;
            psc->patchsides[0+2*k].x = newx;
            psc->patchsides[0+2*k].y = newy;
            psc->patchsides[0+2*k].z = newz;
            newx = 2.0 * ( d1*psc->patchsides[1+2*k].x + d2*psc->patchsides[1+2*k].y + d3*psc->patchsides[1+2*k].z ) + psc->patchsides[1+2*k].x;
            newy = 2.0 * ( d4*psc->patchsides[1+2*k].x + d5*psc->patchsides[1+2*k].y + d6*psc->patchsides[1+2*k].z ) + psc->patchsides[1+2*k].y;
            newz = 2.0 * ( d7*psc->patchsides[1+2*k].x + d8*psc->patchsides[1+2*k].y + d9*psc->patchsides[1+2*k].z ) + psc->patchsides[1+2*k].z;
            psc->patchsides[1+2*k].x = newx;
            psc->patchsides[1+2*k].y = newy;
            psc->patchsides[1+2*k].z = newz;
        }
    }
    /* chiral geotypes: also rotate chiral direction vector(s) */
    m=1;
    if ( (geotype == CHPSC) || (geotype == CHCPSC) || (geotype == TCHPSC) || (geotype == TCHCPSC) ) {
        if ( (geotype == TCHPSC) || (geotype == TCHCPSC) )
            m=2;
        for (k=0;k<m;k++) {
            /*rotate chiral direction vector*/
            newx = 2.0 * ( d1*psc->chdir[k].x + d2*psc->chdir[k].y + d3*psc->chdir[k].z ) + psc->chdir[k].x;
            newy = 2.0 * ( d4*psc->chdir[k].x + d5*psc->chdir[k].y + d6*psc->chdir[k].z ) + psc->chdir[k].y;
            newz = 2.0 * ( d7*psc->chdir[k].x + d8*psc->chdir[k].y + d9*psc->chdir[k].z ) + psc->chdir[k].z;
            psc->chdir[k].x = newx;
            psc->chdir[k].y = newy;
            psc->chdir[k].z = newz;
        }
    }
}
/*returns a position of center of mass of system*/
/* Compute the volume-weighted centre of mass of the system and store it
   in conf->syscm.  Positions are folded into the primary periodic image
   before weighting; the sums are normalized by the total system volume. */
void masscenter(long npart, struct ia_param ia_params[MAXT][MAXT], struct conf * conf)
{
    long idx;
    double weight;
    double anint(double);

    conf->syscm.x = 0;
    conf->syscm.y = 0;
    conf->syscm.z = 0;
    for (idx = 0; idx < npart; idx++) {
        /* weight by the particle's own volume; use periodic image of pos */
        weight = ia_params[conf->particle[idx].type][conf->particle[idx].type].volume;
        conf->syscm.x += (conf->particle[idx].pos.x - anint(conf->particle[idx].pos.x) ) * weight;
        conf->syscm.y += (conf->particle[idx].pos.y - anint(conf->particle[idx].pos.y) ) * weight;
        conf->syscm.z += (conf->particle[idx].pos.z - anint(conf->particle[idx].pos.z) ) * weight;
    }
    conf->syscm.x /= conf->sysvolume;
    conf->syscm.y /= conf->sysvolume;
    conf->syscm.z /= conf->sysvolume;
}
/* rotate cluster of particles by quaternion of random axis and angle smaller than
maxcos(cosine of angle half), we do everything on site for speed */
/*
 * Rotate the whole chain/cluster 'target' rigidly about its geometric
 * centre gc by a quaternion with a random axis and a random angle bounded
 * via max_angle.  Because positions are stored scaled by the box, each
 * particle is shifted to the centre, unscaled to real coordinates,
 * rotated (position plus all orientation vectors), rescaled, and shifted
 * back.  The chain list is terminated by a negative index.
 */
void cluster_rotate(long target, struct vector gc, double max_angle, struct topo * topo, struct conf * conf)
{
    long current,i;
    double vc,vs;
    //double quatsize;
    struct quat newquat;
    struct vector newaxis;
    struct vector ranvec(void);
    void vec_rotate(struct vector *, struct quat);
    // create rotation quaternion
    newaxis = ranvec(); /*random axes for rotation*/
    // maxcos = cos(maxorient/2/180*PI);
    //vc = maxcos + ran2(&seed)*(1-maxcos); /*cos of angle must be bigger than maxcos and smaller than one*/
    vc = cos(max_angle * ran2(&seed) );
    if (ran2(&seed) <0.5) vs = sqrt(1.0 - vc*vc);
    else vs = -sqrt(1.0 - vc*vc); /*randomly choose orientation of direction of rotation clockwise or counterclockwise*/
    newquat.w=vc;
    newquat.x=newaxis.x*vs;
    newquat.y=newaxis.y*vs;
    newquat.z=newaxis.z*vs;
    //quatsize=sqrt(newquat.w*newquat.w+newquat.x*newquat.x+newquat.y*newquat.y+newquat.z*newquat.z);
    //shift position to geometrical center
    i=0;
    current = topo->chainlist[target][0];
    while (current >=0 ) {
        //shift position to geometrical center
        conf->particle[current].pos.x -= gc.x;
        conf->particle[current].pos.y -= gc.y;
        conf->particle[current].pos.z -= gc.z;
        //scale things by box not to have them distorted
        conf->particle[current].pos.x *= conf->box.x;
        conf->particle[current].pos.y *= conf->box.y;
        conf->particle[current].pos.z *= conf->box.z;
        //do rotation: position and every orientation vector of the particle
        vec_rotate(&conf->particle[current].pos, newquat);
        vec_rotate(&conf->particle[current].dir, newquat);
        vec_rotate(&conf->particle[current].patchdir[0], newquat);
        vec_rotate(&conf->particle[current].patchdir[1], newquat);
        vec_rotate(&conf->particle[current].chdir[0], newquat);
        vec_rotate(&conf->particle[current].chdir[1], newquat);
        vec_rotate(&conf->particle[current].patchsides[0], newquat);
        vec_rotate(&conf->particle[current].patchsides[1], newquat);
        vec_rotate(&conf->particle[current].patchsides[2], newquat);
        vec_rotate(&conf->particle[current].patchsides[3], newquat);
        //scale back
        conf->particle[current].pos.x /= conf->box.x;
        conf->particle[current].pos.y /= conf->box.y;
        conf->particle[current].pos.z /= conf->box.z;
        //shift positions back
        conf->particle[current].pos.x += gc.x;
        conf->particle[current].pos.y += gc.y;
        conf->particle[current].pos.z += gc.z;
        i++;
        current = topo->chainlist[target][i];
    }
}
/* put the particle in the original box using periodic boundary conditions;
in our system the particle positions are scaled by the box size, so to get them
into the original box we bring them between 0 and 1 and then scale back
by the size of the box */
/* Fold *pos (a box-scaled position) into the original box via periodic
   boundary conditions, and unscale by the box lengths. */
void origbox(struct vector *pos,struct vector box)
{
    double anint(double);

    pos->x = box.x * (pos->x - anint(pos->x));
    pos->y = box.y * (pos->y - anint(pos->y));
    pos->z = box.z * (pos->z - anint(pos->z));
}
/* use of periodic boundary conditions*/
/* Wrap *pos into the primary box [0, pbc] component-wise, using periodic
   boundary conditions.
   The original used do-while loops that unconditionally added AND
   subtracted one full box length even for coordinates already inside the
   box, introducing needless floating-point round-off on every call; the
   guard-first while loops below only shift when an adjustment is actually
   needed and leave in-box coordinates (including the exact boundaries)
   untouched. */
void usepbc(struct vector *pos,struct vector pbc)
{
    while (pos->x < 0.0)   pos->x += pbc.x;
    while (pos->x > pbc.x) pos->x -= pbc.x;
    while (pos->y < 0.0)   pos->y += pbc.y;
    while (pos->y > pbc.y) pos->y -= pbc.y;
    while (pos->z < 0.0)   pos->z += pbc.z;
    while (pos->z > pbc.z) pos->z -= pbc.z;
}
/*..............................................................................*/
/*.......................TEMPLATE FILES.........................................*/
/*..............................................................................*/
/*
# Template for the "options" file. Options start with an '#'.
# Pressure couplings:
# 0 = anisotropic coupling, 1 = isotropic coupling, 2 = isotropic in xy z=const, 3 = isotropic
# xy and keep Volume constant
# Wang-Landau method: (with constant decrease of bias addition by factor of 2, until less than WL_ALPHATOL)
# 0 = none, 1 = z-direction of 1st particle, 2 = hole in xy plane, 3 = z-orientation of 0th particle
# 4 = distance of first two particles, 5 = pore around z axis and above CM, 6 = pore around z axis and above 0th particle
# 7 = number of particles in contact (within distance sqrt(WL_CONTACTS))
ptype = 1 # Pressure coupling type (0-anisotropic xyz, 1-isotropic xyz, 2 - isotropic in xy z=const, 3 - isotropic in xy and V=const)
press = 1 # Pressure
paralpress = 1 # Parallel pressure for replica exchange
shave = 0 # Average number of volume change attempts per sweep (usually 1)
nequil = 0 # Number of equilibration sweeps
adjust = 0 # Number of equilibration sweeps between step size adjustments
nsweeps = 1000000 # Number of production sweeps
paramfrq = 1000000 # Number of sweeps between order parameter samples
report = 1000000 # Number of sweeps between statistics reports
nrepchange = 1000 # Number of sweeps between replica exchanges
movie = 100000 # Number of sweeps between movie frames (0 = no movie)
chainprob = 0.0 # Probability of chain move attempts per sweep ( 0.25/number of particles in chain)
transmx = 0.212 # Initial maximum displacement
rotmx = 7.5 # Initial maximum orientation change (degrees)
edge_mx = 0.0 # Initial maximum box length change
chainmmx = 0.0 # Initial maximum chain displacement
chainrmx = 0.0 # Initial maximum chain rotation change (degrees)
temper = 1.0 # Temperature in units kT/e
paraltemper = 1.5 # Temperature for parallel tempering in kT/e
wlm = 0 # Wang-Landau method
wlmtype = 0 # For which atomic type (from top.init) should the Wang-Landau method be calculated?
switchprob = 0.0016 # Probability of type switch attempts per sweep
pairlist_update = 8 # Number of sweeps after which the pairlist should be updated
seed = 1 # Random number seed
write_cluster = 10000 # Number of sweeps per writing out cluster info
# End of the file
*/
/*
Example of 'Config.init' file, but you must delete comments... there are only number in configuration file
#box
10.0 10.0 10.0
#particles (x,y,z) (direction_x,direction_y, direction_z) (patchdirection_x,patchdirection_y,patchdirection_z) (switched)
*/
/*
Template for the topology file 'top.init'. ( "\\" is symbol for line continue,
"#" is symbol for comment, "[" is starting sign for keyword, "]" is ending sign
for keyword ) There are three keywords, types, molecules, and system. They
should be given in this order.
TYPES:
spherocylinders
SC - purely repulsive spherocylinder with WCA potential on closest distance
SCA - isotropic cos^2 potential acting isotropically, dependent only on the
closest distance between spherocylinders.
PSC - Attractive potential is limited to an angular wedge on the spherocylinder. Patch
goes all the way through, making also hemispherical caps on end attractive
CPSC - Attractive potential is limited to an angular wedge on the cylindrical part
of spherocylinders. The hemispherical caps on ends are repulsive
spheres
(T)(CH)PSC - T adds second patch, CH - adds chirality
SP - purely repulsive sphere with WCA potential on closest distance
SPA - isotropic cos^2 potential acting isotropically, dependent only on the
closest distance between objects
[Types]
# NAME NUMBER GEOTYPE EPSILON SIGMA ATTRACTION_DIST ATTRACTION_SWITCH PATCH_ANGLE PATCH_SWITCH SC_LENGTH (Optional second patch: PATCH_ROTATION PATCH_ANGLE PATCH_SWITCH )CHIRAL_ANGLE
Prot1 1 PSC 1 1.2 1.346954458 1.0 80.0 5.0 3
Prot2 2 PSC 1 1.2 1.346954458 1.0 170.0 5.0 3
Prot3 3 CHCPSC 1 1.2 1.346954458 1.0 170.0 5.0 3 10
Prot4 4 TCHCPSC 1 1.2 1.346954458 1.0 170.0 5.0 3 90.0 90.0 5.0 10
[Molecules]
# Molecules letter
# bond1 - harmonic bond between nearest neighbours (end points for spherocylinders) (first constant then eq distance)
# bond2 - harmonic bond between second nearest neighbours (their center of mass) (first constant then eq distance)
# bondd - directional harmonic bond between nearest neighbours (end point of the second spherocylinder is attached to the point of bondlength extension of the first spherocylinder) (first constant then eq distance)
# angle1 - angle between two spherocylinders -nearest neighbours (first constant then eq degrees 0-180.0)
# angle2 - angle between two spherocylinder patches -nearest neighbours (first constant then eq degrees 0-180.0)
# particles - types as they go in chain in molecule
A: {
#what: TYPE SWITCHTYPE DELTA_MU
particles: 1 2 0.5
particles: 2
}
B: {
particles: 1
particles: 2 1 0.3
}
[System]
A 2
B 2
[EXTER]
# wall interaction
# THICKNESS EPSILON ATTRACTION_SWITCH
5.0 1.0 1.0
[EXCLUDE]
#set pair types for which attraction will be excluded (the reverse pair is automatically added)
1 2
1 3
*/
|
pi-v8.c | /*
* Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x)
* between 0 and 1.
*
* parallel version using OpenMP
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h> /* OpenMP */
#if _DEBUG_
#define _DEBUG_ 1
#else
#define _DEBUG_ 0
#endif
int main(int argc, char *argv[]) {
double x, sum=0.0, pi=0.0;
#if !_DEBUG_
double start,end;
#endif
int i;
const char Usage[] = "Usage: pi <num_steps> (try 1000000000)\n";
if (argc < 2) {
fprintf(stderr, Usage);
exit(1);
}
int num_steps = atoi(argv[1]);
double step = 1.0/(double) num_steps;
#if !_DEBUG_
start= omp_get_wtime();
#endif
/* do computation -- using all available threads */
// WARNING : correct code
#pragma omp parallel private(i,x) reduction(+:sum)
{
#if _DEBUG_
int id = omp_get_thread_num();
#endif
#pragma omp for schedule(static,1)
for (i=0; i < num_steps; i++) {
x = (i+0.5)*step;
sum += 4.0/(1.0+x*x);
#if _DEBUG_
printf("thread id:%d it:%d\n",id,i);
#endif
}
}
pi = step * sum;
#if !_DEBUG_
end = omp_get_wtime();
printf("Wall clock execution time = %.9f seconds\n", end-start);
#endif
/* print results */
printf("Value of pi = %12.10f\n", pi);
return EXIT_SUCCESS;
}
|
constitute.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO N N SSSSS TTTTT IIIII TTTTT U U TTTTT EEEEE %
% C O O NN N SS T I T U U T E %
% C O O N N N ESSS T I T U U T EEE %
% C O O N NN SS T I T U U T E %
% CCCC OOO N N SSSSS T IIIII T UUU T EEEEE %
% %
% %
%                 MagickCore Methods to Constitute an Image                   %
% %
% Software Design %
% Cristy %
% October 1998 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/client.h"
#include "MagickCore/coder-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/constitute-private.h"
#include "MagickCore/delegate.h"
#include "MagickCore/geometry.h"
#include "MagickCore/identify.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/profile-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/statistic.h"
#include "MagickCore/stream.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n s t i t u t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConstituteImage() returns an image from the pixel data you supply.
% The pixel data must be in scanline order top-to-bottom. The data can be
% char, short int, int, float, or double. Float and double require the
% pixels to be normalized [0..1], otherwise [0..QuantumRange]. For example, to
% create a 640x480 image from unsigned red-green-blue character data, use:
%
% image = ConstituteImage(640,480,"RGB",CharPixel,pixels,&exception);
%
% The format of the ConstituteImage method is:
%
% Image *ConstituteImage(const size_t columns,const size_t rows,
% const char *map,const StorageType storage,const void *pixels,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o columns: width in pixels of the image.
%
% o rows: height in pixels of the image.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
% Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
% P = pad.
%
% o storage: Define the data type of the pixels. Float and double types are
% expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose
% from these types: CharPixel, DoublePixel, FloatPixel, IntegerPixel,
% LongPixel, QuantumPixel, or ShortPixel.
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ConstituteImage() builds a new image from caller-supplied pixel data laid
  out in scanline order (top-to-bottom), interpreted according to the channel
  map string (e.g. "RGB") and the storage type.  Returns NULL on failure with
  the reason recorded in `exception'.
*/
MagickExport Image *ConstituteImage(const size_t columns,const size_t rows,
  const char *map,const StorageType storage,const void *pixels,
  ExceptionInfo *exception)
{
  Image
    *image;
  MagickBooleanType
    status;
  size_t
    length;
  ssize_t
    i;
  /*
    Allocate image structure.
  */
  assert(map != (const char *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",map);
  assert(pixels != (void *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage((ImageInfo *) NULL,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Derive the alpha trait and colorspace from the channel map.  strlen() is
    hoisted out of the loop so the scan is O(n) rather than O(n^2).
  */
  length=strlen(map);
  for (i=0; i < (ssize_t) length; i++)
  {
    switch (map[i])
    {
      case 'a':
      case 'A':
      case 'O':
      case 'o':
      {
        image->alpha_trait=BlendPixelTrait;
        break;
      }
      case 'C':
      case 'c':
      case 'm':
      case 'M':
      case 'Y':
      case 'y':
      case 'K':
      case 'k':
      {
        image->colorspace=CMYKColorspace;
        break;
      }
      case 'I':
      case 'i':
      {
        image->colorspace=GRAYColorspace;
        break;
      }
      default:
      {
        /*
          A single unrecognized channel (e.g. "P") implies grayscale.
        */
        if (length == 1)
          image->colorspace=GRAYColorspace;
        break;
      }
    }
  }
  status=SetImageExtent(image,columns,rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ImportImagePixels(image,0,0,columns,rows,map,storage,pixels,exception);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i n g I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PingImage() returns all the properties of an image or image sequence
% except for the pixels. It is much faster and consumes far less memory
% than ReadImage(). On failure, a NULL image is returned and exception
% describes the reason for the failure.
%
% The format of the PingImage method is:
%
% Image *PingImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Ping the image defined by the file or filename members of
% this structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  PingStream() is a no-op stream handler passed to ReadStream(): it discards
  the pixel rows and just reports `columns' pixels consumed, so the coder can
  parse headers and properties without any pixel data being retained.
*/
static size_t PingStream(const Image *magick_unused(image),
  const void *magick_unused(pixels),const size_t columns)
{
  magick_unreferenced(image);
  magick_unreferenced(pixels);
  return(columns);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  PingImage() retrieves the properties of an image (or image sequence)
  without materializing pixel data, by reading through the discarding
  PingStream() sink.  Returns NULL on failure with the reason in `exception'.
*/
MagickExport Image *PingImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  Image
    *ping_image;
  ImageInfo
    *clone_info;
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  /*
    Clone the image info with ping mode enabled so coders skip pixel decode.
  */
  clone_info=CloneImageInfo(image_info);
  clone_info->ping=MagickTrue;
  ping_image=ReadStream(clone_info,&PingStream,exception);
  if (ping_image != (Image *) NULL)
    {
      ResetTimer(&ping_image->timer);
      if (clone_info->verbose != MagickFalse)
        (void) IdentifyImage(ping_image,stdout,MagickFalse,exception);
    }
  clone_info=DestroyImageInfo(clone_info);
  return(ping_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i n g I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PingImages() pings one or more images and returns them as an image list.
%
% The format of the PingImage method is:
%
% Image *PingImages(ImageInfo *image_info,const char *filename,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o filename: the image filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  PingImages() pings one or more images named by `filename' and returns them
  as an image list.  A filename containing a scene template (e.g.
  image-%d.png) is expanded over the configured scene range; otherwise a
  single PingImage() call is made.
*/
MagickExport Image *PingImages(ImageInfo *image_info,const char *filename,
  ExceptionInfo *exception)
{
  char
    ping_filename[MagickPathExtent];
  Image
    *image,
    *images;
  ImageInfo
    *read_info;
  /*
    Ping image list from a file.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  (void) SetImageOption(image_info,"filename",filename);
  (void) CopyMagickString(image_info->filename,filename,MagickPathExtent);
  /* Expand any %d-style scene template; if expansion changed the name, the
     filename encodes a scene sequence. */
  (void) InterpretImageFilename(image_info,(Image *) NULL,image_info->filename,
    (int) image_info->scene,ping_filename,exception);
  if (LocaleCompare(ping_filename,image_info->filename) != 0)
    {
      ExceptionInfo
        *sans;
      ssize_t
        extent,
        scene;
      /*
        Images of the form image-%d.png[1-5].
      */
      read_info=CloneImageInfo(image_info);
      /* Probe with a throwaway exception so probe errors are not reported. */
      sans=AcquireExceptionInfo();
      (void) SetImageInfo(read_info,0,sans);
      sans=DestroyExceptionInfo(sans);
      if (read_info->number_scenes == 0)
        {
          /* No explicit scene range: fall back to a single ping. */
          read_info=DestroyImageInfo(read_info);
          return(PingImage(image_info,exception));
        }
      (void) CopyMagickString(ping_filename,read_info->filename,
        MagickPathExtent);
      images=NewImageList();
      extent=(ssize_t) (read_info->scene+read_info->number_scenes);
      /* Ping each scene; unreadable scenes are skipped, not fatal. */
      for (scene=(ssize_t) read_info->scene; scene < (ssize_t) extent; scene++)
      {
        (void) InterpretImageFilename(image_info,(Image *) NULL,ping_filename,
          (int) scene,read_info->filename,exception);
        image=PingImage(read_info,exception);
        if (image == (Image *) NULL)
          continue;
        AppendImageToList(&images,image);
      }
      read_info=DestroyImageInfo(read_info);
      return(images);
    }
  return(PingImage(image_info,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadImage() reads an image or image sequence from a file or file handle.
% The method returns a NULL if there is a memory shortage or if the image
% cannot be read. On failure, a NULL image is returned and exception
% describes the reason for the failure.
%
% The format of the ReadImage method is:
%
% Image *ReadImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Read the image defined by the file or filename members of
% this structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  IsCoderAuthorized() consults the security policy and returns MagickTrue
  when `coder' is granted the requested rights; otherwise it sets errno to
  EPERM, records a PolicyError on `exception', and returns MagickFalse.
*/
static MagickBooleanType IsCoderAuthorized(const char *coder,
  const PolicyRights rights,ExceptionInfo *exception)
{
  if (IsRightsAuthorized(CoderPolicyDomain,rights,coder) != MagickFalse)
    return(MagickTrue);
  errno=EPERM;
  (void) ThrowMagickException(exception,GetMagickModule(),PolicyError,
    "NotAuthorized","`%s'",coder);
  return(MagickFalse);
}
/*
  ReadImage() reads an image or image sequence described by `image_info'.
  Phases: (1) identify the format from the filename/magick; (2) spool
  non-seekable blobs to a temporary file when the coder needs seeking;
  (3) decode via the coder's decoder or, failing that, via an external
  delegate program; (4) post-process every frame (EXIF orientation and
  resolution, caption/comment/label options, -extract crop/resize, blob
  timestamps, delay/dispose options).  Returns NULL on failure with the
  reason in `exception'.
*/
MagickExport Image *ReadImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent],
    magick[MagickPathExtent],
    magick_filename[MagickPathExtent];
  const char
    *value;
  const DelegateInfo
    *delegate_info;
  const MagickInfo
    *magick_info;
  DecodeImageHandler
    *decoder;
  ExceptionInfo
    *sans_exception;
  GeometryInfo
    geometry_info;
  Image
    *image,
    *next;
  ImageInfo
    *read_info;
  MagickBooleanType
    status;
  MagickStatusType
    flags;
  /*
    Determine image type from filename prefix or suffix (e.g. image.jpg).
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image_info->filename != (char *) NULL);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  read_info=CloneImageInfo(image_info);
  /* Preserve the caller's filename before SetImageInfo() rewrites it. */
  (void) CopyMagickString(magick_filename,read_info->filename,MagickPathExtent);
  (void) SetImageInfo(read_info,0,exception);
  (void) CopyMagickString(filename,read_info->filename,MagickPathExtent);
  (void) CopyMagickString(magick,read_info->magick,MagickPathExtent);
  /*
    Call appropriate image reader based on image type.
  */
  sans_exception=AcquireExceptionInfo();
  magick_info=GetMagickInfo(read_info->magick,sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (magick_info != (const MagickInfo *) NULL)
    {
      if (GetMagickEndianSupport(magick_info) == MagickFalse)
        read_info->endian=UndefinedEndian;
      else
        if ((image_info->endian == UndefinedEndian) &&
            (GetMagickRawSupport(magick_info) != MagickFalse))
          {
            unsigned long
              lsb_first;
            /* Probe host byte order for raw formats with no declared
               endianness. */
            lsb_first=1;
            read_info->endian=(*(char *) &lsb_first) == 1 ? LSBEndian :
              MSBEndian;
          }
    }
  if ((magick_info != (const MagickInfo *) NULL) &&
      (GetMagickDecoderSeekableStream(magick_info) != MagickFalse))
    {
      /* The coder needs random access: open the blob and, if it is not
         seekable (e.g. a pipe), spool it to a temporary file first. */
      image=AcquireImage(read_info,exception);
      (void) CopyMagickString(image->filename,read_info->filename,
        MagickPathExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          read_info=DestroyImageInfo(read_info);
          image=DestroyImage(image);
          return((Image *) NULL);
        }
      if (IsBlobSeekable(image) == MagickFalse)
        {
          /*
            Coder requires a seekable stream.
          */
          *read_info->filename='\0';
          status=ImageToFile(image,read_info->filename,exception);
          if (status == MagickFalse)
            {
              (void) CloseBlob(image);
              read_info=DestroyImageInfo(read_info);
              image=DestroyImage(image);
              return((Image *) NULL);
            }
          /* Remember to delete the spool file once decoding is done. */
          read_info->temporary=MagickTrue;
        }
      (void) CloseBlob(image);
      image=DestroyImage(image);
    }
  image=NewImageList();
  decoder=GetImageDecoder(magick_info);
  if (decoder == (DecodeImageHandler *) NULL)
    {
      /* No native decoder: if no delegate exists either, re-identify the
         format (e.g. by content) and retry the decoder lookup. */
      delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        {
          (void) SetImageInfo(read_info,0,exception);
          (void) CopyMagickString(read_info->filename,filename,
            MagickPathExtent);
          magick_info=GetMagickInfo(read_info->magick,exception);
          decoder=GetImageDecoder(magick_info);
        }
    }
  if (decoder != (DecodeImageHandler *) NULL)
    {
      /*
        Call appropriate image reader based on image type.
      */
      /* Serialize non-thread-safe coders via the coder's semaphore. */
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception);
      image=(Image *) NULL;
      if (status != MagickFalse)
        image=decoder(read_info,exception);
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  else
    {
      delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'",
            read_info->magick);
          if (read_info->temporary != MagickFalse)
            (void) RelinquishUniqueFileResource(read_info->filename);
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      /*
        Let our decoding delegate process the image.
      */
      image=AcquireImage(read_info,exception);
      if (image == (Image *) NULL)
        {
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      (void) CopyMagickString(image->filename,read_info->filename,
        MagickPathExtent);
      *read_info->filename='\0';
      if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
        LockSemaphoreInfo(delegate_info->semaphore);
      status=InvokeDelegate(read_info,image,read_info->magick,(char *) NULL,
        exception);
      if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
        UnlockSemaphoreInfo(delegate_info->semaphore);
      /* The placeholder image only carried filenames to the delegate. */
      image=DestroyImageList(image);
      read_info->temporary=MagickTrue;
      if (status != MagickFalse)
        (void) SetImageInfo(read_info,0,exception);
      /* Decode the delegate's intermediate output with a native coder. */
      magick_info=GetMagickInfo(read_info->magick,exception);
      decoder=GetImageDecoder(magick_info);
      if (decoder == (DecodeImageHandler *) NULL)
        {
          if (IsPathAccessible(read_info->filename) != MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
              MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'",
              read_info->magick);
          else
            ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
              read_info->filename);
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      /*
        Call appropriate image reader based on image type.
      */
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception);
      image=(Image *) NULL;
      if (status != MagickFalse)
        image=(decoder)(read_info,exception);
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  if (read_info->temporary != MagickFalse)
    {
      /* Remove the spool/delegate temporary and restore the original name. */
      (void) RelinquishUniqueFileResource(read_info->filename);
      read_info->temporary=MagickFalse;
      if (image != (Image *) NULL)
        (void) CopyMagickString(image->filename,filename,MagickPathExtent);
    }
  if (image == (Image *) NULL)
    {
      read_info=DestroyImageInfo(read_info);
      return(image);
    }
  if (exception->severity >= ErrorException)
    (void) LogMagickEvent(ExceptionEvent,GetMagickModule(),
      "Coder (%s) generated an image despite an error (%d), "
      "notify the developers",image->magick,exception->severity);
  if (IsBlobTemporary(image) != MagickFalse)
    (void) RelinquishUniqueFileResource(read_info->filename);
  if (IsSceneGeometry(read_info->scenes,MagickFalse) != MagickFalse)
    {
      /* Honor a [n-m] scene subrange by cloning just those frames. */
      Image
        *clones;
      clones=CloneImages(image,read_info->scenes,exception);
      if (clones == (Image *) NULL)
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "SubimageSpecificationReturnsNoImages","`%s'",read_info->filename);
      else
        {
          image=DestroyImageList(image);
          image=GetFirstImageInList(clones);
        }
    }
  /*
    Post-process each frame of the list.
  */
  for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    char
      magick_path[MagickPathExtent],
      *property,
      timestamp[MagickPathExtent];
    const char
      *option;
    const StringInfo
      *profile;
    ssize_t
      option_type;
    /* Freshly-read frames are pristine. */
    next->taint=MagickFalse;
    GetPathComponent(magick_filename,MagickPath,magick_path);
    if (*magick_path == '\0' && *next->magick == '\0')
      (void) CopyMagickString(next->magick,magick,MagickPathExtent);
    (void) CopyMagickString(next->magick_filename,magick_filename,
      MagickPathExtent);
    if (IsBlobTemporary(image) != MagickFalse)
      (void) CopyMagickString(next->filename,filename,MagickPathExtent);
    if (next->magick_columns == 0)
      next->magick_columns=next->columns;
    if (next->magick_rows == 0)
      next->magick_rows=next->rows;
    /* Fold EXIF/TIFF orientation into the image, then drop the properties
       so they are not applied twice. */
    value=GetImageProperty(next,"exif:Orientation",exception);
    if (value == (char *) NULL)
      value=GetImageProperty(next,"tiff:Orientation",exception);
    if (value != (char *) NULL)
      {
        next->orientation=(OrientationType) StringToLong(value);
        (void) DeleteImageProperty(next,"tiff:Orientation");
        (void) DeleteImageProperty(next,"exif:Orientation");
      }
    /* EXIF resolutions are rationals ("num/den"); a comma form is treated
       as "integer,thousandths". */
    value=GetImageProperty(next,"exif:XResolution",exception);
    if (value != (char *) NULL)
      {
        geometry_info.rho=next->resolution.x;
        geometry_info.sigma=1.0;
        flags=ParseGeometry(value,&geometry_info);
        if (geometry_info.sigma != 0)
          next->resolution.x=geometry_info.rho/geometry_info.sigma;
        if (strchr(value,',') != (char *) NULL)
          next->resolution.x=geometry_info.rho+geometry_info.sigma/1000.0;
        (void) DeleteImageProperty(next,"exif:XResolution");
      }
    value=GetImageProperty(next,"exif:YResolution",exception);
    if (value != (char *) NULL)
      {
        geometry_info.rho=next->resolution.y;
        geometry_info.sigma=1.0;
        flags=ParseGeometry(value,&geometry_info);
        if (geometry_info.sigma != 0)
          next->resolution.y=geometry_info.rho/geometry_info.sigma;
        if (strchr(value,',') != (char *) NULL)
          next->resolution.y=geometry_info.rho+geometry_info.sigma/1000.0;
        (void) DeleteImageProperty(next,"exif:YResolution");
      }
    value=GetImageProperty(next,"exif:ResolutionUnit",exception);
    if (value == (char *) NULL)
      value=GetImageProperty(next,"tiff:ResolutionUnit",exception);
    if (value != (char *) NULL)
      {
        option_type=ParseCommandOption(MagickResolutionOptions,MagickFalse,
          value);
        if (option_type >= 0)
          next->units=(ResolutionType) option_type;
        (void) DeleteImageProperty(next,"exif:ResolutionUnit");
        (void) DeleteImageProperty(next,"tiff:ResolutionUnit");
      }
    if (next->page.width == 0)
      next->page.width=next->columns;
    if (next->page.height == 0)
      next->page.height=next->rows;
    /* Apply caption/comment/label options, expanding %-escapes. */
    option=GetImageOption(read_info,"caption");
    if (option != (const char *) NULL)
      {
        property=InterpretImageProperties(read_info,next,option,exception);
        (void) SetImageProperty(next,"caption",property,exception);
        property=DestroyString(property);
      }
    option=GetImageOption(read_info,"comment");
    if (option != (const char *) NULL)
      {
        property=InterpretImageProperties(read_info,next,option,exception);
        (void) SetImageProperty(next,"comment",property,exception);
        property=DestroyString(property);
      }
    option=GetImageOption(read_info,"label");
    if (option != (const char *) NULL)
      {
        property=InterpretImageProperties(read_info,next,option,exception);
        (void) SetImageProperty(next,"label",property,exception);
        property=DestroyString(property);
      }
    if (LocaleCompare(next->magick,"TEXT") == 0)
      (void) ParseAbsoluteGeometry("0x0+0+0",&next->page);
    if ((read_info->extract != (char *) NULL) &&
        (read_info->stream == (StreamHandler) NULL))
      {
        /* -extract: offsets imply a crop, bare dimensions imply a resize. */
        RectangleInfo
          geometry;
        SetGeometry(next,&geometry);
        flags=ParseAbsoluteGeometry(read_info->extract,&geometry);
        if ((next->columns != geometry.width) ||
            (next->rows != geometry.height))
          {
            if (((flags & XValue) != 0) || ((flags & YValue) != 0))
              {
                Image
                  *crop_image;
                crop_image=CropImage(next,&geometry,exception);
                if (crop_image != (Image *) NULL)
                  ReplaceImageInList(&next,crop_image);
              }
            else
              if (((flags & WidthValue) != 0) || ((flags & HeightValue) != 0))
                {
                  Image
                    *size_image;
                  flags=ParseRegionGeometry(next,read_info->extract,&geometry,
                    exception);
                  size_image=ResizeImage(next,geometry.width,geometry.height,
                    next->filter,exception);
                  if (size_image != (Image *) NULL)
                    ReplaceImageInList(&next,size_image);
                }
          }
      }
    /* NOTE(review): these profile lookups appear unused -- the icc/icm
       result is overwritten by the iptc lookup and `profile' is never read
       afterward.  Possibly vestigial; confirm before relying on it. */
    profile=GetImageProfile(next,"icc");
    if (profile == (const StringInfo *) NULL)
      profile=GetImageProfile(next,"icm");
    profile=GetImageProfile(next,"iptc");
    if (profile == (const StringInfo *) NULL)
      profile=GetImageProfile(next,"8bim");
    /* Record the source blob's modify/create timestamps as properties. */
    (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_mtime,
      MagickPathExtent,timestamp);
    (void) SetImageProperty(next,"date:modify",timestamp,exception);
    (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_ctime,
      MagickPathExtent,timestamp);
    (void) SetImageProperty(next,"date:create",timestamp,exception);
    option=GetImageOption(image_info,"delay");
    if (option != (const char *) NULL)
      {
        /* "-delay N>" caps the delay, "N<" raises it, plain N sets it;
           an optional sigma sets ticks-per-second. */
        flags=ParseGeometry(option,&geometry_info);
        if ((flags & GreaterValue) != 0)
          {
            if (next->delay > (size_t) floor(geometry_info.rho+0.5))
              next->delay=(size_t) floor(geometry_info.rho+0.5);
          }
        else
          if ((flags & LessValue) != 0)
            {
              /* NOTE(review): this branch updates ticks_per_second rather
                 than delay -- looks inconsistent with the '>' branch;
                 confirm intended behavior upstream. */
              if (next->delay < (size_t) floor(geometry_info.rho+0.5))
                next->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
            }
          else
            next->delay=(size_t) floor(geometry_info.rho+0.5);
        if ((flags & SigmaValue) != 0)
          next->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
      }
    option=GetImageOption(image_info,"dispose");
    if (option != (const char *) NULL)
      {
        option_type=ParseCommandOption(MagickDisposeOptions,MagickFalse,
          option);
        if (option_type >= 0)
          next->dispose=(DisposeType) option_type;
      }
    if (read_info->verbose != MagickFalse)
      (void) IdentifyImage(next,stderr,MagickFalse,exception);
    /* Track the tail: ReplaceImageInList() may have changed `next'. */
    image=next;
  }
  read_info=DestroyImageInfo(read_info);
  return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadImages() reads one or more images and returns them as an image list.
%
% The format of the ReadImage method is:
%
% Image *ReadImages(ImageInfo *image_info,const char *filename,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o filename: the image filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ReadImages() reads one or more images named by `filename' and returns them
  as an image list.  A filename containing a scene template (e.g.
  image-%d.png) with an explicit scene range is expanded into one ReadImage()
  call per scene; otherwise a single ReadImage() call is made.
*/
MagickExport Image *ReadImages(ImageInfo *image_info,const char *filename,
  ExceptionInfo *exception)
{
  char
    read_filename[MagickPathExtent];
  Image
    *image,
    *images;
  ImageInfo
    *read_info;
  /*
    Read image list from a file.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  read_info=CloneImageInfo(image_info);
  /* Clear the magick so the format is re-identified from the filename. */
  *read_info->magick='\0';
  (void) SetImageOption(read_info,"filename",filename);
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  /* Expand any %d-style scene template; if expansion changed the name, the
     filename encodes a scene sequence. */
  (void) InterpretImageFilename(read_info,(Image *) NULL,filename,
    (int) read_info->scene,read_filename,exception);
  if (LocaleCompare(read_filename,read_info->filename) != 0)
    {
      ExceptionInfo
        *sans;
      ssize_t
        extent,
        scene;
      /*
        Images of the form image-%d.png[1-5].
      */
      /* Probe with a throwaway exception so probe errors are not reported. */
      sans=AcquireExceptionInfo();
      (void) SetImageInfo(read_info,0,sans);
      sans=DestroyExceptionInfo(sans);
      if (read_info->number_scenes != 0)
        {
          (void) CopyMagickString(read_filename,read_info->filename,
            MagickPathExtent);
          images=NewImageList();
          extent=(ssize_t) (read_info->scene+read_info->number_scenes);
          scene=(ssize_t) read_info->scene;
          /* Read each scene; unreadable scenes are skipped, not fatal. */
          for ( ; scene < (ssize_t) extent; scene++)
          {
            (void) InterpretImageFilename(image_info,(Image *) NULL,
              read_filename,(int) scene,read_info->filename,exception);
            image=ReadImage(read_info,exception);
            if (image == (Image *) NULL)
              continue;
            AppendImageToList(&images,image);
          }
          read_info=DestroyImageInfo(read_info);
          return(images);
        }
    }
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  image=ReadImage(read_info,exception);
  read_info=DestroyImageInfo(read_info);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d I n l i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadInlineImage() reads a Base64-encoded inline image or image sequence.
% The method returns a NULL if there is a memory shortage or if the image
% cannot be read. On failure, a NULL image is returned and exception
% describes the reason for the failure.
%
% The format of the ReadInlineImage method is:
%
% Image *ReadInlineImage(const ImageInfo *image_info,const char *content,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o content: the image encoded in Base64.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ReadInlineImage() reads a Base64-encoded inline image (data URI) or image
  sequence.  The payload after the first ',' is Base64-decoded and handed to
  BlobToImage().  Returns NULL on failure (corrupt/empty payload, decode
  failure) with the reason in `exception'.
*/
MagickExport Image *ReadInlineImage(const ImageInfo *image_info,
  const char *content,ExceptionInfo *exception)
{
  Image
    *image;
  ImageInfo
    *read_info;
  unsigned char
    *blob;
  size_t
    length;
  register const char
    *p;
  /*
    Skip over header (e.g. data:image/gif;base64,).
  */
  /* image must be NULL here so ThrowReaderException() can clean up safely. */
  image=NewImageList();
  for (p=content; (*p != ',') && (*p != '\0'); p++) ;
  if (*p == '\0')
    ThrowReaderException(CorruptImageError,"CorruptImage");
  p++;
  length=0;
  blob=Base64Decode(p,&length);
  if (length == 0)
    {
      /* Empty or invalid Base64 payload. */
      blob=(unsigned char *) RelinquishMagickMemory(blob);
      ThrowReaderException(CorruptImageError,"CorruptImage");
    }
  read_info=CloneImageInfo(image_info);
  /* Disable progress callbacks; blob decode has no meaningful filename. */
  (void) SetImageInfoProgressMonitor(read_info,(MagickProgressMonitor) NULL,
    (void *) NULL);
  *read_info->filename='\0';
  *read_info->magick='\0';
  image=BlobToImage(read_info,blob,length,exception);
  blob=(unsigned char *) RelinquishMagickMemory(blob);
  read_info=DestroyImageInfo(read_info);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteImage() writes an image or an image sequence to a file or file handle.
% If writing to a file is on disk, the name is defined by the filename member
% of the image structure. WriteImage() returns MagickFalse is there is a
% memory shortage or if the image cannot be written. Check the exception
% member of image to determine the cause for any failure.
%
% The format of the WriteImage method is:
%
% MagickBooleanType WriteImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WriteImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent];

  const char
    *option;

  const DelegateInfo
    *delegate_info;

  const MagickInfo
    *magick_info;

  EncodeImageHandler
    *encoder;

  ExceptionInfo
    *sans_exception;

  ImageInfo
    *write_info;

  MagickBooleanType
    status,
    temporary;

  /*
    Determine image type from filename prefix or suffix (e.g. image.jpg).
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  sans_exception=AcquireExceptionInfo();
  /* work on a private clone so the caller's image_info is never modified */
  write_info=CloneImageInfo(image_info);
  (void) CopyMagickString(write_info->filename,image->filename,
    MagickPathExtent);
  (void) SetImageInfo(write_info,1,sans_exception);
  if (*write_info->magick == '\0')
    (void) CopyMagickString(write_info->magick,image->magick,MagickPathExtent);
  /* remember the caller's filename so it can be restored on all exit paths */
  (void) CopyMagickString(filename,image->filename,MagickPathExtent);
  (void) CopyMagickString(image->filename,write_info->filename,
    MagickPathExtent);
  /*
    Call appropriate image writer based on image type.
  */
  magick_info=GetMagickInfo(write_info->magick,sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (magick_info != (const MagickInfo *) NULL)
    {
      if (GetMagickEndianSupport(magick_info) == MagickFalse)
        image->endian=UndefinedEndian;
      else
        if ((image_info->endian == UndefinedEndian) &&
            (GetMagickRawSupport(magick_info) != MagickFalse))
          {
            unsigned long
              lsb_first;

            /* probe host byte order at run time to pick a raw-format endian */
            lsb_first=1;
            image->endian=(*(char *) &lsb_first) == 1 ? LSBEndian : MSBEndian;
          }
    }
  (void) SyncImageProfiles(image);
  DisassociateImageStream(image);
  option=GetImageOption(image_info,"delegate:bimodal");
  /* bi-modal shortcut: single untainted frame whose original file is still
     readable can be handed straight to a format-to-format delegate */
  if ((IsStringTrue(option) != MagickFalse) &&
      (write_info->page == (char *) NULL) &&
      (GetPreviousImageInList(image) == (Image *) NULL) &&
      (GetNextImageInList(image) == (Image *) NULL) &&
      (IsTaintImage(image) == MagickFalse) )
    {
      delegate_info=GetDelegateInfo(image->magick,write_info->magick,exception);
      if ((delegate_info != (const DelegateInfo *) NULL) &&
          (GetDelegateMode(delegate_info) == 0) &&
          (IsPathAccessible(image->magick_filename) != MagickFalse))
        {
          /*
            Process image with bi-modal delegate.
          */
          (void) CopyMagickString(image->filename,image->magick_filename,
            MagickPathExtent);
          status=InvokeDelegate(write_info,image,image->magick,
            write_info->magick,exception);
          write_info=DestroyImageInfo(write_info);
          (void) CopyMagickString(image->filename,filename,MagickPathExtent);
          return(status);
        }
    }
  status=MagickFalse;
  temporary=MagickFalse;
  if ((magick_info != (const MagickInfo *) NULL) &&
      (GetMagickEncoderSeekableStream(magick_info) != MagickFalse))
    {
      char
        image_filename[MagickPathExtent];

      /* probe the destination: if it is not seekable, divert the encoder to
         a unique temporary file and copy it into place afterwards */
      (void) CopyMagickString(image_filename,image->filename,MagickPathExtent);
      status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
      (void) CopyMagickString(image->filename, image_filename,MagickPathExtent);
      if (status != MagickFalse)
        {
          if (IsBlobSeekable(image) == MagickFalse)
            {
              /*
                A seekable stream is required by the encoder.
              */
              write_info->adjoin=MagickTrue;
              (void) CopyMagickString(write_info->filename,image->filename,
                MagickPathExtent);
              (void) AcquireUniqueFilename(image->filename);
              temporary=MagickTrue;
            }
          (void) CloseBlob(image);
        }
    }
  encoder=GetImageEncoder(magick_info);
  if (encoder != (EncodeImageHandler *) NULL)
    {
      /*
        Call appropriate image writer based on image type.
      */
      /* serialize non-thread-safe coders on the coder's own semaphore */
      if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      status=IsCoderAuthorized(write_info->magick,WritePolicyRights,exception);
      if (status != MagickFalse)
        status=encoder(write_info,image,exception);
      if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  else
    {
      delegate_info=GetDelegateInfo((char *) NULL,write_info->magick,exception);
      if (delegate_info != (DelegateInfo *) NULL)
        {
          /*
            Process the image with delegate.
          */
          *write_info->filename='\0';
          if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
            LockSemaphoreInfo(delegate_info->semaphore);
          status=InvokeDelegate(write_info,image,(char *) NULL,
            write_info->magick,exception);
          if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
            UnlockSemaphoreInfo(delegate_info->semaphore);
          (void) CopyMagickString(image->filename,filename,MagickPathExtent);
        }
      else
        {
          /* no encoder and no delegate: try progressively weaker hints --
             the image's own magick, then the filename extension */
          sans_exception=AcquireExceptionInfo();
          magick_info=GetMagickInfo(write_info->magick,sans_exception);
          sans_exception=DestroyExceptionInfo(sans_exception);
          if ((write_info->affirm == MagickFalse) &&
              (magick_info == (const MagickInfo *) NULL))
            {
              (void) CopyMagickString(write_info->magick,image->magick,
                MagickPathExtent);
              magick_info=GetMagickInfo(write_info->magick,exception);
            }
          encoder=GetImageEncoder(magick_info);
          if (encoder == (EncodeImageHandler *) NULL)
            {
              char
                extension[MagickPathExtent];

              GetPathComponent(image->filename,ExtensionPath,extension);
              if (*extension != '\0')
                magick_info=GetMagickInfo(extension,exception);
              else
                magick_info=GetMagickInfo(image->magick,exception);
              (void) CopyMagickString(image->filename,filename,
                MagickPathExtent);
              encoder=GetImageEncoder(magick_info);
            }
          if (encoder == (EncodeImageHandler *) NULL)
            {
              magick_info=GetMagickInfo(image->magick,exception);
              encoder=GetImageEncoder(magick_info);
              if (encoder == (EncodeImageHandler *) NULL)
                (void) ThrowMagickException(exception,GetMagickModule(),
                  MissingDelegateError,"NoEncodeDelegateForThisImageFormat",
                  "`%s'",write_info->magick);
            }
          if (encoder != (EncodeImageHandler *) NULL)
            {
              /*
                Call appropriate image writer based on image type.
              */
              if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
                LockSemaphoreInfo(magick_info->semaphore);
              status=IsCoderAuthorized(write_info->magick,WritePolicyRights,
                exception);
              if (status != MagickFalse)
                status=encoder(write_info,image,exception);
              if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
                UnlockSemaphoreInfo(magick_info->semaphore);
            }
        }
    }
  if (temporary != MagickFalse)
    {
      /*
        Copy temporary image file to permanent.
      */
      status=OpenBlob(write_info,image,ReadBinaryBlobMode,exception);
      if (status != MagickFalse)
        {
          (void) RelinquishUniqueFileResource(write_info->filename);
          status=ImageToFile(image,write_info->filename,exception);
        }
      (void) CloseBlob(image);
      (void) RelinquishUniqueFileResource(image->filename);
      (void) CopyMagickString(image->filename,write_info->filename,
        MagickPathExtent);
    }
  /* honor -verbose, except for the pseudo-format "info" which already
     identifies the image */
  if ((LocaleCompare(write_info->magick,"info") != 0) &&
      (write_info->verbose != MagickFalse))
    (void) IdentifyImage(image,stdout,MagickFalse,exception);
  write_info=DestroyImageInfo(write_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteImages() writes an image sequence into one or more files. While
% WriteImage() can write an image sequence, it is limited to writing
% the sequence into a single file using a format which supports multiple
% frames. WriteImages(), however, does not have this limitation, instead it
% generates multiple output files if necessary (or when requested). When
% ImageInfo's adjoin flag is set to MagickFalse, the file name is expected
% to include a printf-style formatting string for the frame number (e.g.
% "image%02d.png").
%
% The format of the WriteImages method is:
%
% MagickBooleanType WriteImages(const ImageInfo *image_info,Image *images,
% const char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o images: the image list.
%
% o filename: the image filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WriteImages(const ImageInfo *image_info,
  Image *images,const char *filename,ExceptionInfo *exception)
{
#define WriteImageTag "Write/Image"

  ExceptionInfo
    *sans_exception;

  ImageInfo
    *write_info;

  MagickBooleanType
    proceed;

  MagickOffsetType
    progress;

  MagickProgressMonitor
    progress_monitor;

  MagickSizeType
    number_images;

  MagickStatusType
    status;

  register Image
    *p;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  write_info=CloneImageInfo(image_info);
  *write_info->magick='\0';
  images=GetFirstImageInList(images);
  /* an explicit filename argument overrides the name stored in each frame */
  if (filename != (const char *) NULL)
    for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
      (void) CopyMagickString(p->filename,filename,MagickPathExtent);
  (void) CopyMagickString(write_info->filename,images->filename,
    MagickPathExtent);
  sans_exception=AcquireExceptionInfo();
  (void) SetImageInfo(write_info,(unsigned int) GetImageListLength(images),
    sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (*write_info->magick == '\0')
    (void) CopyMagickString(write_info->magick,images->magick,MagickPathExtent);
  p=images;
  /* scan for a non-increasing scene number; if found, renumber the whole
     list sequentially starting from the first frame's scene */
  for ( ; GetNextImageInList(p) != (Image *) NULL; p=GetNextImageInList(p))
  {
    register Image
      *next;

    next=GetNextImageInList(p);
    if (next == (Image *) NULL)
      break;
    if (p->scene >= next->scene)
      {
        register ssize_t
          i;

        /*
          Generate consistent scene numbers.
        */
        i=(ssize_t) images->scene;
        for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
          p->scene=(size_t) i++;
        break;
      }
  }
  /*
    Write images.
  */
  status=MagickTrue;
  progress_monitor=(MagickProgressMonitor) NULL;
  progress=0;
  number_images=GetImageListLength(images);
  for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    /* suppress the per-frame monitor so only the sequence-level progress
       below is reported when writing multiple frames */
    if (number_images != 1)
      progress_monitor=SetImageProgressMonitor(p,(MagickProgressMonitor) NULL,
        p->client_data);
    /* accumulate success across frames; any failure clears the status bit */
    status&=WriteImage(write_info,p,exception);
    if (number_images != 1)
      (void) SetImageProgressMonitor(p,progress_monitor,p->client_data);
    /* adjoin: WriteImage() already wrote the entire list into one file */
    if (write_info->adjoin != MagickFalse)
      break;
    if (number_images != 1)
      {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(p,WriteImageTag,progress,number_images);
        if (proceed == MagickFalse)
          break;
      }
  }
  write_info=DestroyImageInfo(write_info);
  return(status != 0 ? MagickTrue : MagickFalse);
}
|
sp.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 3.0 structured OpenMP C versions - SP
This benchmark is an OpenMP C version of the NPB SP code.
The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran versions
in "NPB 2.3-serial" developed by NAS. 3.0 translation is performed by the UVSQ.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Author: R. Van der Wijngaart
W. Saphir
OpenMP C version: S. Satoh
3.0 structure translation: M. Popov
--------------------------------------------------------------------*/
#include "../common/npb-C.h"
/* global variables */
#include "header.h"
/* function declarations */
static void add(void);
static void adi(void);
static void error_norm(double rms[5]);
static void rhs_norm(double rms[5]);
static void exact_rhs(void);
static void exact_solution(double xi, double eta, double zeta,
double dtemp[5]);
static void initialize(void);
static void lhsinit(void);
static void lhsx(void);
static void lhsy(void);
static void lhsz(void);
static void ninvr(void);
static void pinvr(void);
static void compute_rhs(void);
static void set_constants(void);
static void txinvr(void);
static void tzetar(void);
static void verify(int no_time_steps, char *class, boolean *verified);
static void x_solve(void);
static void y_solve(void);
static void z_solve(void);
/*--------------------------------------------------------------------
program SP
c-------------------------------------------------------------------*/
int main(int argc, char **argv) {

/*--------------------------------------------------------------------
c SP benchmark driver: read the run parameters, run one warm-up time
c step, then time `niter' ADI steps and report/verify the results.
c-------------------------------------------------------------------*/

  int niter, step;
  double mflops, tmax;
  int nthreads = 1;
  boolean verified;
  char class;
  FILE *fp;

/*--------------------------------------------------------------------
c      Read input file (if it exists), else take
c      defaults from parameters
c-------------------------------------------------------------------*/
  printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version"
         " - SP Benchmark\n\n");

  fp = fopen("inputsp.data", "r");
  if (fp != NULL) {
    int ok, c;
    printf(" Reading from input file inputsp.data\n");
    /* Fix: the original ignored every fscanf() return value, so a
       malformed file left niter/dt/grid_points uninitialized (undefined
       behavior), and the newline-skipping loops spun forever on EOF
       because fgetc() never returns '\n' then. */
    ok = (fscanf(fp, "%d", &niter) == 1);
    while (ok && (c = fgetc(fp)) != '\n' && c != EOF);
    ok = ok && (fscanf(fp, "%lf", &dt) == 1);
    while (ok && (c = fgetc(fp)) != '\n' && c != EOF);
    ok = ok && (fscanf(fp, "%d%d%d",
           &grid_points[0], &grid_points[1], &grid_points[2]) == 3);
    fclose(fp);
    if (!ok) {
      /* fall back to compiled defaults, same as the no-file path */
      printf(" Malformed input file inputsp.data. Using compiled defaults");
      niter = NITER_DEFAULT;
      dt = DT_DEFAULT;
      grid_points[0] = PROBLEM_SIZE;
      grid_points[1] = PROBLEM_SIZE;
      grid_points[2] = PROBLEM_SIZE;
    }
  } else {
    printf(" No input file inputsp.data. Using compiled defaults");
    niter = NITER_DEFAULT;
    dt = DT_DEFAULT;
    grid_points[0] = PROBLEM_SIZE;
    grid_points[1] = PROBLEM_SIZE;
    grid_points[2] = PROBLEM_SIZE;
  }

  printf(" Size: %3dx%3dx%3d\n",
         grid_points[0], grid_points[1], grid_points[2]);
  printf(" Iterations: %3d dt: %10.6f\n", niter, dt);

  /* the statically allocated arrays bound the runtime problem size */
  if ( (grid_points[0] > IMAX) ||
       (grid_points[1] > JMAX) ||
       (grid_points[2] > KMAX) ) {
    printf("%d, %d, %d\n", grid_points[0], grid_points[1], grid_points[2]);
    printf(" Problem size too big for compiled array sizes\n");
    exit(1);
  }

  set_constants();
  initialize();
  lhsinit();
  exact_rhs();

/*--------------------------------------------------------------------
c      do one time step to touch all code, and reinitialize
c-------------------------------------------------------------------*/
  adi();
  initialize();

  timer_clear(1);
  timer_start(1);

  for (step = 1; step <= niter; step++) {
    if (step % 20 == 0 || step == 1) {
      printf(" Time step %4d\n", step);
    }
    adi();
  }

  /* record the actual thread count for the results report */
#pragma omp parallel
  {
#if defined(_OPENMP)
#pragma omp master
    nthreads = omp_get_num_threads();
#endif /* _OPENMP */
  } /* end parallel */

  timer_stop(1);
  tmax = timer_read(1);

  verify(niter, &class, &verified);

  if (tmax != 0) {
    /* MFLOPS from the NPB operation-count polynomial in the problem size */
    mflops = ( 881.174 * pow((double)PROBLEM_SIZE, 3.0)
               - 4683.91 * pow2((double)PROBLEM_SIZE)
               + 11484.5 * (double)PROBLEM_SIZE
               - 19272.4) * (double)niter / (tmax*1000000.0);
  } else {
    mflops = 0.0;
  }

  c_print_results("SP", class, grid_points[0],
                  grid_points[1], grid_points[2], niter, nthreads,
                  tmax, mflops, " floating point",
                  verified, NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5,
                  CS6, "(none)");

  /* Fix: explicit return; the original fell off the end of main */
  return 0;
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void add(void) {

/*--------------------------------------------------------------------
c accumulate the freshly computed right-hand-side increment into the
c solution vector u at every interior grid point
c-------------------------------------------------------------------*/

  int m, i, j, k;

#pragma omp for
  for (m = 0; m < 5; m++) {
    for (i = 1; i <= grid_points[0]-2; i++) {
      for (j = 1; j <= grid_points[1]-2; j++) {
        for (k = 1; k <= grid_points[2]-2; k++) {
          u[m][i][j][k] += rhs[m][i][j][k];
        }
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void adi(void) {

/*--------------------------------------------------------------------
c advance the solution by one complete time step: build the explicit
c right-hand side, apply the txinvr transformation, perform the three
c factored implicit sweeps (x, y, z), then fold the update into u
c-------------------------------------------------------------------*/
  compute_rhs();   /* explicit RHS from the current solution */
  txinvr();        /* pre-sweep block transformation of the RHS */
  x_solve();       /* implicit solve, xi direction */
  y_solve();       /* implicit solve, eta direction */
  z_solve();       /* implicit solve, zeta direction */
  add();           /* u += rhs */
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void error_norm(double rms[5]) {

/*--------------------------------------------------------------------
c compute, for each of the 5 solution components, the RMS norm of the
c difference between the computed solution u and the analytical
c solution over every point of the grid
c-------------------------------------------------------------------*/

  int i, j, k, m, d;
  double xi, eta, zeta, u_exact[5], diff;

  for (m = 0; m < 5; m++) rms[m] = 0.0;

  for (i = 0; i <= grid_points[0]-1; i++) {
    xi = (double)i * dnxm1;
    for (j = 0; j <= grid_points[1]-1; j++) {
      eta = (double)j * dnym1;
      for (k = 0; k <= grid_points[2]-1; k++) {
        zeta = (double)k * dnzm1;
        exact_solution(xi, eta, zeta, u_exact);
        for (m = 0; m < 5; m++) {
          diff = u[m][i][j][k] - u_exact[m];
          rms[m] += diff * diff;
        }
      }
    }
  }

  /* normalize by the interior extent along each direction, then take
     the square root to obtain the RMS value */
  for (m = 0; m < 5; m++) {
    for (d = 0; d < 3; d++) {
      rms[m] /= (double)(grid_points[d]-2);
    }
    rms[m] = sqrt(rms[m]);
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void rhs_norm(double rms[5]) {

/*--------------------------------------------------------------------
c compute, for each of the 5 components, an RMS norm of the
c right-hand-side vector over grid points 0..grid_points[d]-2
c-------------------------------------------------------------------*/

  int i, j, k, d, m;
  double term;

  for (m = 0; m < 5; m++) rms[m] = 0.0;

  for (i = 0; i <= grid_points[0]-2; i++) {
    for (j = 0; j <= grid_points[1]-2; j++) {
      for (k = 0; k <= grid_points[2]-2; k++) {
        for (m = 0; m < 5; m++) {
          term = rhs[m][i][j][k];
          rms[m] += term * term;
        }
      }
    }
  }

  /* normalize by the interior extent along each direction, then take
     the square root */
  for (m = 0; m < 5; m++) {
    for (d = 0; d < 3; d++) {
      rms[m] /= (double)(grid_points[d]-2);
    }
    rms[m] = sqrt(rms[m]);
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void exact_rhs(void) {

/*--------------------------------------------------------------------
c compute the right hand side based on exact solution
c-------------------------------------------------------------------*/

  /* ue/buf/cuf/q below are one-dimensional scratch lines (declared in
     header.h) reused for each pencil of the grid in turn */
  double dtemp[5], xi, eta, zeta, dtpp;
  int m, i, j, k, ip1, im1, jp1, jm1, km1, kp1;

/*--------------------------------------------------------------------
c initialize
c-------------------------------------------------------------------*/
  for (m = 0; m < 5; m++) {
    for (i = 0; i <= grid_points[0]-1; i++) {
      for (j = 0; j <= grid_points[1]-1; j++) {
        for (k = 0; k <= grid_points[2]-1; k++) {
          forcing[m][i][j][k] = 0.0;
        }
      }
    }
  }

/*--------------------------------------------------------------------
c xi-direction flux differences
c-------------------------------------------------------------------*/
  for (k = 1; k <= grid_points[2]-2; k++) {
    zeta = (double)k * dnzm1;
    for (j = 1; j <= grid_points[1]-2; j++) {
      eta = (double)j * dnym1;
      /* first pass: fill one pencil of exact-solution values (ue),
         velocity ratios (buf), and derived quantities (cuf, q) */
      for (i = 0; i <= grid_points[0]-1; i++) {
        xi = (double)i * dnxm1;
        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++) {
          ue[m][i] = dtemp[m];
        }
        dtpp = 1.0 / dtemp[0];
        for (m = 1; m < 5; m++) {
          buf[m][i] = dtpp * dtemp[m];
        }
        cuf[i] = buf[1][i] * buf[1][i];
        buf[0][i] = cuf[i] + buf[2][i] * buf[2][i] + buf[3][i] * buf[3][i];
        q[i] = 0.5 * (buf[1][i]*ue[1][i] + buf[2][i]*ue[2][i]
                      + buf[3][i]*ue[3][i]);
      }
      /* second pass: central flux differences at the interior points */
      for (i = 1; i <= grid_points[0]-2; i++) {
        im1 = i-1;
        ip1 = i+1;
        forcing[0][i][j][k] = forcing[0][i][j][k] -
          tx2*( ue[1][ip1]-ue[1][im1] )+
          dx1tx1*(ue[0][ip1]-2.0*ue[0][i]+ue[0][im1]);
        forcing[1][i][j][k] = forcing[1][i][j][k]
          - tx2 * ((ue[1][ip1]*buf[1][ip1]+c2*(ue[4][ip1]-q[ip1]))-
                   (ue[1][im1]*buf[1][im1]+c2*(ue[4][im1]-q[im1])))+
          xxcon1*(buf[1][ip1]-2.0*buf[1][i]+buf[1][im1])+
          dx2tx1*( ue[1][ip1]-2.0* ue[1][i]+ue[1][im1]);
        forcing[2][i][j][k] = forcing[2][i][j][k]
          - tx2 * (ue[2][ip1]*buf[1][ip1]-ue[2][im1]*buf[1][im1])+
          xxcon2*(buf[2][ip1]-2.0*buf[2][i]+buf[2][im1])+
          dx3tx1*( ue[2][ip1]-2.0*ue[2][i] +ue[2][im1]);
        forcing[3][i][j][k] = forcing[3][i][j][k]
          - tx2*(ue[3][ip1]*buf[1][ip1]-ue[3][im1]*buf[1][im1])+
          xxcon2*(buf[3][ip1]-2.0*buf[3][i]+buf[3][im1])+
          dx4tx1*( ue[3][ip1]-2.0* ue[3][i]+ ue[3][im1]);
        forcing[4][i][j][k] = forcing[4][i][j][k]
          - tx2*(buf[1][ip1]*(c1*ue[4][ip1]-c2*q[ip1])-
                 buf[1][im1]*(c1*ue[4][im1]-c2*q[im1]))+
          0.5*xxcon3*(buf[0][ip1]-2.0*buf[0][i]+
                      buf[0][im1])+
          xxcon4*(cuf[ip1]-2.0*cuf[i]+cuf[im1])+
          xxcon5*(buf[4][ip1]-2.0*buf[4][i]+buf[4][im1])+
          dx5tx1*( ue[4][ip1]-2.0* ue[4][i]+ ue[4][im1]);
      }

/*--------------------------------------------------------------------
c Fourth-order dissipation
c-------------------------------------------------------------------*/
      /* one-sided stencils at the two points nearest each boundary */
      for (m = 0; m < 5; m++) {
        i = 1;
        forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
          (5.0*ue[m][i] - 4.0*ue[m][i+1] +ue[m][i+2]);
        i = 2;
        forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
          (-4.0*ue[m][i-1] + 6.0*ue[m][i] -
           4.0*ue[m][i+1] + ue[m][i+2]);
      }
      /* full five-point stencil away from the boundaries */
      for (m = 0; m < 5; m++) {
        for (i = 3; i <= grid_points[0]-4; i++) {
          forcing[m][i][j][k] = forcing[m][i][j][k] - dssp*
            (ue[m][i-2] - 4.0*ue[m][i-1] +
             6.0*ue[m][i] - 4.0*ue[m][i+1] + ue[m][i+2]);
        }
      }
      for (m = 0; m < 5; m++) {
        i = grid_points[0]-3;
        forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
          (ue[m][i-2] - 4.0*ue[m][i-1] +
           6.0*ue[m][i] - 4.0*ue[m][i+1]);
        i = grid_points[0]-2;
        forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
          (ue[m][i-2] - 4.0*ue[m][i-1] + 5.0*ue[m][i]);
      }
    }
  }

/*--------------------------------------------------------------------
c eta-direction flux differences
c-------------------------------------------------------------------*/
  for (k = 1; k <= grid_points[2]-2; k++) {
    zeta = (double)k * dnzm1;
    for (i = 1; i <= grid_points[0]-2; i++) {
      xi = (double)i * dnxm1;
      /* same two-pass pencil scheme as the xi direction, indexed by j */
      for (j = 0; j <= grid_points[1]-1; j++) {
        eta = (double)j * dnym1;
        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++) {
          ue[m][j] = dtemp[m];
        }
        dtpp = 1.0/dtemp[0];
        for (m = 1; m < 5; m++) {
          buf[m][j] = dtpp * dtemp[m];
        }
        cuf[j] = buf[2][j] * buf[2][j];
        buf[0][j] = cuf[j] + buf[1][j] * buf[1][j] +
          buf[3][j] * buf[3][j];
        q[j] = 0.5*(buf[1][j]*ue[1][j] + buf[2][j]*ue[2][j] +
                    buf[3][j]*ue[3][j]);
      }
      for (j = 1; j <= grid_points[1]-2; j++) {
        jm1 = j-1;
        jp1 = j+1;
        forcing[0][i][j][k] = forcing[0][i][j][k] -
          ty2*( ue[2][jp1]-ue[2][jm1] )+
          dy1ty1*(ue[0][jp1]-2.0*ue[0][j]+ue[0][jm1]);
        forcing[1][i][j][k] = forcing[1][i][j][k]
          - ty2*(ue[1][jp1]*buf[2][jp1]-ue[1][jm1]*buf[2][jm1])+
          yycon2*(buf[1][jp1]-2.0*buf[1][j]+buf[1][jm1])+
          dy2ty1*( ue[1][jp1]-2.0* ue[1][j]+ ue[1][jm1]);
        forcing[2][i][j][k] = forcing[2][i][j][k]
          - ty2*((ue[2][jp1]*buf[2][jp1]+c2*(ue[4][jp1]-q[jp1]))-
                 (ue[2][jm1]*buf[2][jm1]+c2*(ue[4][jm1]-q[jm1])))+
          yycon1*(buf[2][jp1]-2.0*buf[2][j]+buf[2][jm1])+
          dy3ty1*( ue[2][jp1]-2.0*ue[2][j] +ue[2][jm1]);
        forcing[3][i][j][k] = forcing[3][i][j][k]
          - ty2*(ue[3][jp1]*buf[2][jp1]-ue[3][jm1]*buf[2][jm1])+
          yycon2*(buf[3][jp1]-2.0*buf[3][j]+buf[3][jm1])+
          dy4ty1*( ue[3][jp1]-2.0*ue[3][j]+ ue[3][jm1]);
        forcing[4][i][j][k] = forcing[4][i][j][k]
          - ty2*(buf[2][jp1]*(c1*ue[4][jp1]-c2*q[jp1])-
                 buf[2][jm1]*(c1*ue[4][jm1]-c2*q[jm1]))+
          0.5*yycon3*(buf[0][jp1]-2.0*buf[0][j]+
                      buf[0][jm1])+
          yycon4*(cuf[jp1]-2.0*cuf[j]+cuf[jm1])+
          yycon5*(buf[4][jp1]-2.0*buf[4][j]+buf[4][jm1])+
          dy5ty1*(ue[4][jp1]-2.0*ue[4][j]+ue[4][jm1]);
      }

/*--------------------------------------------------------------------
c Fourth-order dissipation
c-------------------------------------------------------------------*/
      for (m = 0; m < 5; m++) {
        j = 1;
        forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
          (5.0*ue[m][j] - 4.0*ue[m][j+1] +ue[m][j+2]);
        j = 2;
        forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
          (-4.0*ue[m][j-1] + 6.0*ue[m][j] -
           4.0*ue[m][j+1] + ue[m][j+2]);
      }
      for (m = 0; m < 5; m++) {
        for (j = 3; j <= grid_points[1]-4; j++) {
          forcing[m][i][j][k] = forcing[m][i][j][k] - dssp*
            (ue[m][j-2] - 4.0*ue[m][j-1] +
             6.0*ue[m][j] - 4.0*ue[m][j+1] + ue[m][j+2]);
        }
      }
      for (m = 0; m < 5; m++) {
        j = grid_points[1]-3;
        forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
          (ue[m][j-2] - 4.0*ue[m][j-1] +
           6.0*ue[m][j] - 4.0*ue[m][j+1]);
        j = grid_points[1]-2;
        forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
          (ue[m][j-2] - 4.0*ue[m][j-1] + 5.0*ue[m][j]);
      }
    }
  }

/*--------------------------------------------------------------------
c zeta-direction flux differences
c-------------------------------------------------------------------*/
  for (j = 1; j <= grid_points[1]-2; j++) {
    eta = (double)j * dnym1;
    for (i = 1; i <= grid_points[0]-2; i++) {
      xi = (double)i * dnxm1;
      /* same two-pass pencil scheme, indexed by k */
      for (k = 0; k <= grid_points[2]-1; k++) {
        zeta = (double)k * dnzm1;
        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++) {
          ue[m][k] = dtemp[m];
        }
        dtpp = 1.0/dtemp[0];
        for (m = 1; m < 5; m++) {
          buf[m][k] = dtpp * dtemp[m];
        }
        cuf[k] = buf[3][k] * buf[3][k];
        buf[0][k] = cuf[k] + buf[1][k] * buf[1][k] +
          buf[2][k] * buf[2][k];
        q[k] = 0.5*(buf[1][k]*ue[1][k] + buf[2][k]*ue[2][k] +
                    buf[3][k]*ue[3][k]);
      }
      for (k = 1; k <= grid_points[2]-2; k++) {
        km1 = k-1;
        kp1 = k+1;
        forcing[0][i][j][k] = forcing[0][i][j][k] -
          tz2*( ue[3][kp1]-ue[3][km1] )+
          dz1tz1*(ue[0][kp1]-2.0*ue[0][k]+ue[0][km1]);
        forcing[1][i][j][k] = forcing[1][i][j][k]
          - tz2 * (ue[1][kp1]*buf[3][kp1]-ue[1][km1]*buf[3][km1])+
          zzcon2*(buf[1][kp1]-2.0*buf[1][k]+buf[1][km1])+
          dz2tz1*( ue[1][kp1]-2.0* ue[1][k]+ ue[1][km1]);
        forcing[2][i][j][k] = forcing[2][i][j][k]
          - tz2 * (ue[2][kp1]*buf[3][kp1]-ue[2][km1]*buf[3][km1])+
          zzcon2*(buf[2][kp1]-2.0*buf[2][k]+buf[2][km1])+
          dz3tz1*(ue[2][kp1]-2.0*ue[2][k]+ue[2][km1]);
        forcing[3][i][j][k] = forcing[3][i][j][k]
          - tz2 * ((ue[3][kp1]*buf[3][kp1]+c2*(ue[4][kp1]-q[kp1]))-
                   (ue[3][km1]*buf[3][km1]+c2*(ue[4][km1]-q[km1])))+
          zzcon1*(buf[3][kp1]-2.0*buf[3][k]+buf[3][km1])+
          dz4tz1*( ue[3][kp1]-2.0*ue[3][k] +ue[3][km1]);
        forcing[4][i][j][k] = forcing[4][i][j][k]
          - tz2 * (buf[3][kp1]*(c1*ue[4][kp1]-c2*q[kp1])-
                   buf[3][km1]*(c1*ue[4][km1]-c2*q[km1]))+
          0.5*zzcon3*(buf[0][kp1]-2.0*buf[0][k]
                      +buf[0][km1])+
          zzcon4*(cuf[kp1]-2.0*cuf[k]+cuf[km1])+
          zzcon5*(buf[4][kp1]-2.0*buf[4][k]+buf[4][km1])+
          dz5tz1*( ue[4][kp1]-2.0*ue[4][k]+ ue[4][km1]);
      }

/*--------------------------------------------------------------------
c Fourth-order dissipation
c-------------------------------------------------------------------*/
      for (m = 0; m < 5; m++) {
        k = 1;
        forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
          (5.0*ue[m][k] - 4.0*ue[m][k+1] +ue[m][k+2]);
        k = 2;
        forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
          (-4.0*ue[m][k-1] + 6.0*ue[m][k] -
           4.0*ue[m][k+1] + ue[m][k+2]);
      }
      for (m = 0; m < 5; m++) {
        for (k = 3; k <= grid_points[2]-4; k++) {
          forcing[m][i][j][k] = forcing[m][i][j][k] - dssp*
            (ue[m][k-2] - 4.0*ue[m][k-1] +
             6.0*ue[m][k] - 4.0*ue[m][k+1] + ue[m][k+2]);
        }
      }
      for (m = 0; m < 5; m++) {
        k = grid_points[2]-3;
        forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
          (ue[m][k-2] - 4.0*ue[m][k-1] +
           6.0*ue[m][k] - 4.0*ue[m][k+1]);
        k = grid_points[2]-2;
        forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
          (ue[m][k-2] - 4.0*ue[m][k-1] + 5.0*ue[m][k]);
      }
    }
  }

/*--------------------------------------------------------------------
c now change the sign of the forcing function,
c-------------------------------------------------------------------*/
  for (m = 0; m < 5; m++) {
    for (i = 1; i <= grid_points[0]-2; i++) {
      for (j = 1; j <= grid_points[1]-2; j++) {
        for (k = 1; k <= grid_points[2]-2; k++) {
          forcing[m][i][j][k] = -1.0 * forcing[m][i][j][k];
        }
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void exact_solution(double xi, double eta, double zeta,
double dtemp[5]) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c this function returns the exact solution at point xi, eta, zeta
c-------------------------------------------------------------------*/
int m;
for (m = 0; m < 5; m++) {
dtemp[m] = ce[0][m] +
xi*(ce[1][m] + xi*(ce[4][m] +
xi*(ce[7][m] + xi*ce[10][m]))) +
eta*(ce[2][m] + eta*(ce[5][m] +
eta*(ce[8][m] + eta*ce[11][m])))+
zeta*(ce[3][m] + zeta*(ce[6][m] +
zeta*(ce[9][m] +
zeta*ce[12][m])));
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void initialize(void) {

/*--------------------------------------------------------------------
c This subroutine initializes the field variable u using
c tri-linear transfinite interpolation of the boundary values
c-------------------------------------------------------------------*/

  int i, j, k, m, ix, iy, iz;
  double xi, eta, zeta, Pface[2][3][5], Pxi, Peta, Pzeta, temp[5];

/*--------------------------------------------------------------------
c Later (in compute_rhs) we compute 1/u for every element. A few of
c the corner elements are not used, but it convenient (and faster)
c to compute the whole thing with a simple loop. Make sure those
c values are nonzero by initializing the whole thing here.
c-------------------------------------------------------------------*/
  /* NOTE(review): all three loops run to IMAX-1 (not JMAX/KMAX) --
     presumably the compiled extents are equal; confirm in header.h */
  for (i = 0; i <= IMAX-1; i++) {
    for (j = 0; j <= IMAX-1; j++) {
      for (k = 0; k <= IMAX-1; k++) {
        u[0][i][j][k] = 1.0;
        u[1][i][j][k] = 0.0;
        u[2][i][j][k] = 0.0;
        u[3][i][j][k] = 0.0;
        u[4][i][j][k] = 1.0;
      }
    }
  }

/*--------------------------------------------------------------------
c first store the "interpolated" values everywhere on the grid
c-------------------------------------------------------------------*/
  for (i = 0; i <= grid_points[0]-1; i++) {
    xi = (double)i * dnxm1;
    for (j = 0; j <= grid_points[1]-1; j++) {
      eta = (double)j * dnym1;
      for (k = 0; k <= grid_points[2]-1; k++) {
        zeta = (double)k * dnzm1;
        /* exact solution on the two opposing faces of each direction
           (coordinate fixed at 0 and at 1) */
        for (ix = 0; ix < 2; ix++) {
          exact_solution((double)ix, eta, zeta,
                         &Pface[ix][0][0]);
        }
        for (iy = 0; iy < 2; iy++) {
          exact_solution(xi, (double)iy , zeta,
                         &Pface[iy][1][0]);
        }
        for (iz = 0; iz < 2; iz++) {
          exact_solution(xi, eta, (double)iz,
                         &Pface[iz][2][0]);
        }
        /* tri-linear transfinite blend of the six face values */
        for (m = 0; m < 5; m++) {
          Pxi = xi * Pface[1][0][m] +
            (1.0-xi) * Pface[0][0][m];
          Peta = eta * Pface[1][1][m] +
            (1.0-eta) * Pface[0][1][m];
          Pzeta = zeta * Pface[1][2][m] +
            (1.0-zeta) * Pface[0][2][m];

          u[m][i][j][k] = Pxi + Peta + Pzeta -
            Pxi*Peta - Pxi*Pzeta - Peta*Pzeta +
            Pxi*Peta*Pzeta;
        }
      }
    }
  }

/*--------------------------------------------------------------------
c now store the exact values on the boundaries
c-------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c west face
c-------------------------------------------------------------------*/
  xi = 0.0;
  i = 0;
  for (j = 0; j < grid_points[1]; j++) {
    eta = (double)j * dnym1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[m][i][j][k] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c east face
c-------------------------------------------------------------------*/
  xi = 1.0;
  i = grid_points[0]-1;
  for (j = 0; j < grid_points[1]; j++) {
    eta = (double)j * dnym1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[m][i][j][k] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c south face
c-------------------------------------------------------------------*/
  eta = 0.0;
  j = 0;
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[m][i][j][k] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c north face
c-------------------------------------------------------------------*/
  eta = 1.0;
  j = grid_points[1]-1;
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[m][i][j][k] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c bottom face
c-------------------------------------------------------------------*/
  zeta = 0.0;
  k = 0;
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i *dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[m][i][j][k] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c top face
c-------------------------------------------------------------------*/
  zeta = 1.0;
  k = grid_points[2]-1;
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[m][i][j][k] = temp[m];
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsinit(void) {
/*--------------------------------------------------------------------
c Reset the left-hand-side arrays: first clear all 15 coefficient
c planes of lhs, then drop a unit value onto the main diagonal of
c each of the three factors (planes 2, 7 and 12).
c
c Contains orphaned "omp for" directives, so it must be reached from
c inside an enclosing parallel region; each thread clears its own
c i-chunk, and the explicit barrier orders the two phases.
c-------------------------------------------------------------------*/
  int ii, jj, kk, nn;
/*--------------------------------------------------------------------
c zap the whole left hand side for starters
c-------------------------------------------------------------------*/
  for (nn = 0; nn < 15; nn++) {
#pragma omp for nowait
    for (ii = 0; ii < grid_points[0]; ii++) {
      for (jj = 0; jj < grid_points[1]; jj++) {
        for (kk = 0; kk < grid_points[2]; kk++) {
          lhs[nn][ii][jj][kk] = 0.0;
        }
      }
    }
  }
#pragma omp barrier
/*--------------------------------------------------------------------
c next, set all diagonal values to 1. This is overkill, but
c convenient
c-------------------------------------------------------------------*/
  for (nn = 0; nn < 3; nn++) {
#pragma omp for
    for (ii = 0; ii < grid_points[0]; ii++) {
      for (jj = 0; jj < grid_points[1]; jj++) {
        for (kk = 0; kk < grid_points[2]; kk++) {
          lhs[nn*5+2][ii][jj][kk] = 1.0;
        }
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsx(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c This function computes the left hand side for the three x-factors
c-------------------------------------------------------------------*/
double ru1;
int i, j, k;
/*--------------------------------------------------------------------
c NOTE(review): this routine contains orphaned "omp for" directives,
c so it must be called from inside an enclosing parallel region.
c cv[] and rhon[] appear to be shared 1-D scratch arrays indexed by
c i only; the implicit barrier at the end of the first "omp for"
c orders the fill phase before the consuming phase below (which reads
c cv/rhon at i-1 and i+1, i.e. across thread chunk boundaries) --
c confirm against the declarations of cv/rhon elsewhere in the file.
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c first fill the lhs for the u-eigenvalue
c-------------------------------------------------------------------*/
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
#pragma omp for
for (i = 0; i <= grid_points[0]-1; i++) {
ru1 = c3c4*rho_i[i][j][k];
cv[i] = us[i][j][k];
rhon[i] = max(dx2+con43*ru1,
max(dx5+c1c5*ru1,
max(dxmax+ru1,
dx1)));
}
/*--------------------------------------------------------------------
c assemble the five bands of the scalar pentadiagonal system from the
c velocity (cv) and eigenvalue bound (rhon) at i-1, i and i+1
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
lhs[0][i][j][k] = 0.0;
lhs[1][i][j][k] = - dttx2 * cv[i-1] - dttx1 * rhon[i-1];
lhs[2][i][j][k] = 1.0 + c2dttx1 * rhon[i];
lhs[3][i][j][k] = dttx2 * cv[i+1] - dttx1 * rhon[i+1];
lhs[4][i][j][k] = 0.0;
}
}
}
/*--------------------------------------------------------------------
c add fourth order dissipation
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c the first interior rows (i=1,2) use one-sided stencil weights
c (comz5/comz6); interior rows use the full symmetric stencil
c-------------------------------------------------------------------*/
i = 1;
#pragma omp for nowait
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
lhs[2][i][j][k] = lhs[2][i][j][k] + comz5;
lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;
lhs[1][i+1][j][k] = lhs[1][i+1][j][k] - comz4;
lhs[2][i+1][j][k] = lhs[2][i+1][j][k] + comz6;
lhs[3][i+1][j][k] = lhs[3][i+1][j][k] - comz4;
lhs[4][i+1][j][k] = lhs[4][i+1][j][k] + comz1;
}
}
#pragma omp for nowait
for (i = 3; i <= grid_points[0]-4; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;
lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;
lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;
lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;
}
}
}
/*--------------------------------------------------------------------
c mirrored one-sided stencils for the last interior rows
c-------------------------------------------------------------------*/
i = grid_points[0]-3;
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;
lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;
lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;
lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
lhs[0][i+1][j][k] = lhs[0][i+1][j][k] + comz1;
lhs[1][i+1][j][k] = lhs[1][i+1][j][k] - comz4;
lhs[2][i+1][j][k] = lhs[2][i+1][j][k] + comz5;
}
}
/*--------------------------------------------------------------------
c subsequently, fill the other factors (u+c), (u-c) by adding to
c the first
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
lhs[0+5][i][j][k] = lhs[0][i][j][k];
lhs[1+5][i][j][k] = lhs[1][i][j][k] -
dttx2 * speed[i-1][j][k];
lhs[2+5][i][j][k] = lhs[2][i][j][k];
lhs[3+5][i][j][k] = lhs[3][i][j][k] +
dttx2 * speed[i+1][j][k];
lhs[4+5][i][j][k] = lhs[4][i][j][k];
lhs[0+10][i][j][k] = lhs[0][i][j][k];
lhs[1+10][i][j][k] = lhs[1][i][j][k] +
dttx2 * speed[i-1][j][k];
lhs[2+10][i][j][k] = lhs[2][i][j][k];
lhs[3+10][i][j][k] = lhs[3][i][j][k] -
dttx2 * speed[i+1][j][k];
lhs[4+10][i][j][k] = lhs[4][i][j][k];
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsy(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c This function computes the left hand side for the three y-factors
c-------------------------------------------------------------------*/
double ru1;
int i, j, k;
/*--------------------------------------------------------------------
c NOTE(review): mirrors lhsx, sweeping the eta direction.  Contains
c orphaned "omp for" directives (requires an enclosing parallel
c region).  cv[] and rhoq[] appear to be shared 1-D scratch arrays
c indexed by j; the implicit barrier at the end of the first
c "omp for" orders the fill before the j-1/j+1 reads below --
c confirm against the cv/rhoq declarations elsewhere in the file.
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c first fill the lhs for the u-eigenvalue
c-------------------------------------------------------------------*/
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
#pragma omp for
for (j = 0; j <= grid_points[1]-1; j++) {
ru1 = c3c4*rho_i[i][j][k];
cv[j] = vs[i][j][k];
rhoq[j] = max(dy3 + con43 * ru1,
max(dy5 + c1c5*ru1,
max(dymax + ru1,
dy1)));
}
/*--------------------------------------------------------------------
c assemble the five bands of the scalar pentadiagonal system
c-------------------------------------------------------------------*/
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
lhs[0][i][j][k] = 0.0;
lhs[1][i][j][k] = -dtty2 * cv[j-1] - dtty1 * rhoq[j-1];
lhs[2][i][j][k] = 1.0 + c2dtty1 * rhoq[j];
lhs[3][i][j][k] = dtty2 * cv[j+1] - dtty1 * rhoq[j+1];
lhs[4][i][j][k] = 0.0;
}
}
}
/*--------------------------------------------------------------------
c add fourth order dissipation
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c one-sided stencil weights near j=1,2 and the far boundary, full
c symmetric stencil in the interior (same pattern as lhsx)
c-------------------------------------------------------------------*/
j = 1;
#pragma omp for nowait
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
lhs[2][i][j][k] = lhs[2][i][j][k] + comz5;
lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;
lhs[1][i][j+1][k] = lhs[1][i][j+1][k] - comz4;
lhs[2][i][j+1][k] = lhs[2][i][j+1][k] + comz6;
lhs[3][i][j+1][k] = lhs[3][i][j+1][k] - comz4;
lhs[4][i][j+1][k] = lhs[4][i][j+1][k] + comz1;
}
}
#pragma omp for nowait
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 3; j <= grid_points[1]-4; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;
lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;
lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;
lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;
}
}
}
j = grid_points[1]-3;
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;
lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;
lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;
lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
lhs[0][i][j+1][k] = lhs[0][i][j+1][k] + comz1;
lhs[1][i][j+1][k] = lhs[1][i][j+1][k] - comz4;
lhs[2][i][j+1][k] = lhs[2][i][j+1][k] + comz5;
}
}
/*--------------------------------------------------------------------
c subsequently, do the other two factors
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
lhs[0+5][i][j][k] = lhs[0][i][j][k];
lhs[1+5][i][j][k] = lhs[1][i][j][k] -
dtty2 * speed[i][j-1][k];
lhs[2+5][i][j][k] = lhs[2][i][j][k];
lhs[3+5][i][j][k] = lhs[3][i][j][k] +
dtty2 * speed[i][j+1][k];
lhs[4+5][i][j][k] = lhs[4][i][j][k];
lhs[0+10][i][j][k] = lhs[0][i][j][k];
lhs[1+10][i][j][k] = lhs[1][i][j][k] +
dtty2 * speed[i][j-1][k];
lhs[2+10][i][j][k] = lhs[2][i][j][k];
lhs[3+10][i][j][k] = lhs[3][i][j][k] -
dtty2 * speed[i][j+1][k];
lhs[4+10][i][j][k] = lhs[4][i][j][k];
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsz(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c This function computes the left hand side for the three z-factors
c-------------------------------------------------------------------*/
double ru1;
int i, j, k;
/*--------------------------------------------------------------------
c NOTE(review): mirrors lhsx/lhsy, sweeping the zeta direction.
c Contains orphaned "omp for" directives (requires an enclosing
c parallel region).  cv[] and rhos[] appear to be shared 1-D scratch
c arrays indexed by k; the implicit barrier at the end of the first
c "omp for" orders the fill before the k-1/k+1 reads below --
c confirm against the cv/rhos declarations elsewhere in the file.
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c first fill the lhs for the u-eigenvalue
c-------------------------------------------------------------------*/
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
#pragma omp for
for (k = 0; k <= grid_points[2]-1; k++) {
ru1 = c3c4*rho_i[i][j][k];
cv[k] = ws[i][j][k];
rhos[k] = max(dz4 + con43 * ru1,
max(dz5 + c1c5 * ru1,
max(dzmax + ru1,
dz1)));
}
/*--------------------------------------------------------------------
c assemble the five bands of the scalar pentadiagonal system
c-------------------------------------------------------------------*/
#pragma omp for
for (k = 1; k <= grid_points[2]-2; k++) {
lhs[0][i][j][k] = 0.0;
lhs[1][i][j][k] = -dttz2 * cv[k-1] - dttz1 * rhos[k-1];
lhs[2][i][j][k] = 1.0 + c2dttz1 * rhos[k];
lhs[3][i][j][k] = dttz2 * cv[k+1] - dttz1 * rhos[k+1];
lhs[4][i][j][k] = 0.0;
}
}
}
/*--------------------------------------------------------------------
c add fourth order dissipation
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c one-sided stencil weights near k=1,2 and the far boundary, full
c symmetric stencil in the interior (same pattern as lhsx/lhsy)
c-------------------------------------------------------------------*/
k = 1;
#pragma omp for nowait
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
lhs[2][i][j][k] = lhs[2][i][j][k] + comz5;
lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;
lhs[1][i][j][k+1] = lhs[1][i][j][k+1] - comz4;
lhs[2][i][j][k+1] = lhs[2][i][j][k+1] + comz6;
lhs[3][i][j][k+1] = lhs[3][i][j][k+1] - comz4;
lhs[4][i][j][k+1] = lhs[4][i][j][k+1] + comz1;
}
}
#pragma omp for nowait
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 3; k <= grid_points[2]-4; k++) {
lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;
lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;
lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;
lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;
}
}
}
k = grid_points[2]-3;
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;
lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;
lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;
lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
lhs[0][i][j][k+1] = lhs[0][i][j][k+1] + comz1;
lhs[1][i][j][k+1] = lhs[1][i][j][k+1] - comz4;
lhs[2][i][j][k+1] = lhs[2][i][j][k+1] + comz5;
}
}
/*--------------------------------------------------------------------
c subsequently, fill the other factors (u+c), (u-c)
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
lhs[0+5][i][j][k] = lhs[0][i][j][k];
lhs[1+5][i][j][k] = lhs[1][i][j][k] -
dttz2 * speed[i][j][k-1];
lhs[2+5][i][j][k] = lhs[2][i][j][k];
lhs[3+5][i][j][k] = lhs[3][i][j][k] +
dttz2 * speed[i][j][k+1];
lhs[4+5][i][j][k] = lhs[4][i][j][k];
lhs[0+10][i][j][k] = lhs[0][i][j][k];
lhs[1+10][i][j][k] = lhs[1][i][j][k] +
dttz2 * speed[i][j][k-1];
lhs[2+10][i][j][k] = lhs[2][i][j][k];
lhs[3+10][i][j][k] = lhs[3][i][j][k] -
dttz2 * speed[i][j][k+1];
lhs[4+10][i][j][k] = lhs[4][i][j][k];
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void ninvr(void) {
/*--------------------------------------------------------------------
c block-diagonal matrix-vector multiplication: apply the inverse of
c the N transformation matrix to rhs at every interior grid point.
c Self-contained parallel loop (not an orphaned construct).
c-------------------------------------------------------------------*/
  int i, j, k;
  double q0, q1, q2, q3, q4, s1, s2;
#pragma omp parallel for default(shared) private(i,j,k,q0,q1,q2,q3,q4,s1,s2)
  for (i = 1; i <= grid_points[0]-2; i++) {
    for (j = 1; j <= grid_points[1]-2; j++) {
      for (k = 1; k <= grid_points[2]-2; k++) {
        /* snapshot the five rhs components before overwriting them */
        q0 = rhs[0][i][j][k];
        q1 = rhs[1][i][j][k];
        q2 = rhs[2][i][j][k];
        q3 = rhs[3][i][j][k];
        q4 = rhs[4][i][j][k];
        s1 = bt * q2;
        s2 = 0.5 * ( q3 + q4 );
        rhs[0][i][j][k] = -q1;
        rhs[1][i][j][k] = q0;
        rhs[2][i][j][k] = bt * ( q3 - q4 );
        rhs[3][i][j][k] = -s1 + s2;
        rhs[4][i][j][k] = s1 + s2;
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void pinvr(void) {
/*--------------------------------------------------------------------
c block-diagonal matrix-vector multiplication: apply the inverse of
c the P transformation matrix to rhs at every interior grid point.
c Self-contained parallel loop (not an orphaned construct).
c-------------------------------------------------------------------*/
  int i, j, k;
  double q0, q1, q2, q3, q4, s1, s2;
#pragma omp parallel for default(shared) private(i,j,k,q0,q1,q2,q3,q4,s1,s2)
  for (i = 1; i <= grid_points[0]-2; i++) {
    for (j = 1; j <= grid_points[1]-2; j++) {
      for (k = 1; k <= grid_points[2]-2; k++) {
        /* snapshot the five rhs components before overwriting them */
        q0 = rhs[0][i][j][k];
        q1 = rhs[1][i][j][k];
        q2 = rhs[2][i][j][k];
        q3 = rhs[3][i][j][k];
        q4 = rhs[4][i][j][k];
        s1 = bt * q0;
        s2 = 0.5 * ( q3 + q4 );
        rhs[0][i][j][k] = bt * ( q3 - q4 );
        rhs[1][i][j][k] = -q2;
        rhs[2][i][j][k] = q1;
        rhs[3][i][j][k] = -s1 + s2;
        rhs[4][i][j][k] = s1 + s2;
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void compute_rhs(void) {
/*--------------------------------------------------------------------
c compute the right hand side vector:
c   rhs := forcing + xi/eta/zeta flux differences + fourth-order
c   artificial dissipation, finally scaled by the time step dt.
c Also fills the point-wise work arrays rho_i, us, vs, ws, square,
c qs, speed and ainv that the lhs* routines consume later.
c Opens its own parallel region; the explicit barriers separate the
c three directional sweeps.
c-------------------------------------------------------------------*/
#pragma omp parallel
{
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
int i, j, k, m;
double aux, rho_inv, uijk, up1, um1, vijk, vp1, vm1,
wijk, wp1, wm1;
/*--------------------------------------------------------------------
c compute the reciprocal of density, and the kinetic energy,
c and the speed of sound.
c-------------------------------------------------------------------*/
#pragma omp for nowait
for (i = 0; i <= grid_points[0]-1; i++) {
for (j = 0; j <= grid_points[1]-1; j++) {
for (k = 0; k <= grid_points[2]-1; k++) {
rho_inv = 1.0/u[0][i][j][k];
rho_i[i][j][k] = rho_inv;
us[i][j][k] = u[1][i][j][k] * rho_inv;
vs[i][j][k] = u[2][i][j][k] * rho_inv;
ws[i][j][k] = u[3][i][j][k] * rho_inv;
square[i][j][k] = 0.5* (u[1][i][j][k]*u[1][i][j][k] +
u[2][i][j][k]*u[2][i][j][k] +
u[3][i][j][k]*u[3][i][j][k] ) * rho_inv;
qs[i][j][k] = square[i][j][k] * rho_inv;
/*--------------------------------------------------------------------
c (do not need speed and ainx until the lhs computation)
c-------------------------------------------------------------------*/
aux = c1c2*rho_inv* (u[4][i][j][k] - square[i][j][k]);
aux = sqrt(aux);
speed[i][j][k] = aux;
ainv[i][j][k] = 1.0/aux;
}
}
}
/*--------------------------------------------------------------------
c copy the exact forcing term to the right hand side; because
c this forcing term is known, we can store it on the whole grid
c including the boundary
c-------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 0; i <= grid_points[0]-1; i++) {
for (j = 0; j <= grid_points[1]-1; j++) {
for (k = 0; k <= grid_points[2]-1; k++) {
rhs[m][i][j][k] = forcing[m][i][j][k];
}
}
}
}
/*--------------------------------------------------------------------
c compute xi-direction fluxes
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
uijk = us[i][j][k];
up1 = us[i+1][j][k];
um1 = us[i-1][j][k];
rhs[0][i][j][k] = rhs[0][i][j][k] + dx1tx1 *
(u[0][i+1][j][k] - 2.0*u[0][i][j][k] +
u[0][i-1][j][k]) -
tx2 * (u[1][i+1][j][k] - u[1][i-1][j][k]);
rhs[1][i][j][k] = rhs[1][i][j][k] + dx2tx1 *
(u[1][i+1][j][k] - 2.0*u[1][i][j][k] +
u[1][i-1][j][k]) +
xxcon2*con43 * (up1 - 2.0*uijk + um1) -
tx2 * (u[1][i+1][j][k]*up1 -
u[1][i-1][j][k]*um1 +
(u[4][i+1][j][k]- square[i+1][j][k]-
u[4][i-1][j][k]+ square[i-1][j][k])*
c2);
rhs[2][i][j][k] = rhs[2][i][j][k] + dx3tx1 *
(u[2][i+1][j][k] - 2.0*u[2][i][j][k] +
u[2][i-1][j][k]) +
xxcon2 * (vs[i+1][j][k] - 2.0*vs[i][j][k] +
vs[i-1][j][k]) -
tx2 * (u[2][i+1][j][k]*up1 -
u[2][i-1][j][k]*um1);
rhs[3][i][j][k] = rhs[3][i][j][k] + dx4tx1 *
(u[3][i+1][j][k] - 2.0*u[3][i][j][k] +
u[3][i-1][j][k]) +
xxcon2 * (ws[i+1][j][k] - 2.0*ws[i][j][k] +
ws[i-1][j][k]) -
tx2 * (u[3][i+1][j][k]*up1 -
u[3][i-1][j][k]*um1);
rhs[4][i][j][k] = rhs[4][i][j][k] + dx5tx1 *
(u[4][i+1][j][k] - 2.0*u[4][i][j][k] +
u[4][i-1][j][k]) +
xxcon3 * (qs[i+1][j][k] - 2.0*qs[i][j][k] +
qs[i-1][j][k]) +
xxcon4 * (up1*up1 - 2.0*uijk*uijk +
um1*um1) +
xxcon5 * (u[4][i+1][j][k]*rho_i[i+1][j][k] -
2.0*u[4][i][j][k]*rho_i[i][j][k] +
u[4][i-1][j][k]*rho_i[i-1][j][k]) -
tx2 * ( (c1*u[4][i+1][j][k] -
c2*square[i+1][j][k])*up1 -
(c1*u[4][i-1][j][k] -
c2*square[i-1][j][k])*um1 );
}
}
}
/*--------------------------------------------------------------------
c add fourth order xi-direction dissipation
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c rows i=1,2 and i=grid_points[0]-3,-2 use one-sided stencils; the
c interior loop (3 .. grid_points[0]-4) uses the full 5-point stencil
c-------------------------------------------------------------------*/
i = 1;
for (m = 0; m < 5; m++) {
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k]- dssp *
( 5.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] +
u[m][i+2][j][k]);
}
}
}
i = 2;
for (m = 0; m < 5; m++) {
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
(-4.0*u[m][i-1][j][k] + 6.0*u[m][i][j][k] -
4.0*u[m][i+1][j][k] + u[m][i+2][j][k]);
}
}
}
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 3*1; i <= grid_points[0]-3*1-1; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] +
6.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] +
u[m][i+2][j][k] );
}
}
}
}
i = grid_points[0]-3;
for (m = 0; m < 5; m++) {
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] +
6.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] );
}
}
}
i = grid_points[0]-2;
for (m = 0; m < 5; m++) {
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] +
5.0*u[m][i][j][k] );
}
}
}
/* synchronize all threads before switching sweep direction */
#pragma omp barrier
/*--------------------------------------------------------------------
c compute eta-direction fluxes
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
vijk = vs[i][j][k];
vp1 = vs[i][j+1][k];
vm1 = vs[i][j-1][k];
rhs[0][i][j][k] = rhs[0][i][j][k] + dy1ty1 *
(u[0][i][j+1][k] - 2.0*u[0][i][j][k] +
u[0][i][j-1][k]) -
ty2 * (u[2][i][j+1][k] - u[2][i][j-1][k]);
rhs[1][i][j][k] = rhs[1][i][j][k] + dy2ty1 *
(u[1][i][j+1][k] - 2.0*u[1][i][j][k] +
u[1][i][j-1][k]) +
yycon2 * (us[i][j+1][k] - 2.0*us[i][j][k] +
us[i][j-1][k]) -
ty2 * (u[1][i][j+1][k]*vp1 -
u[1][i][j-1][k]*vm1);
rhs[2][i][j][k] = rhs[2][i][j][k] + dy3ty1 *
(u[2][i][j+1][k] - 2.0*u[2][i][j][k] +
u[2][i][j-1][k]) +
yycon2*con43 * (vp1 - 2.0*vijk + vm1) -
ty2 * (u[2][i][j+1][k]*vp1 -
u[2][i][j-1][k]*vm1 +
(u[4][i][j+1][k] - square[i][j+1][k] -
u[4][i][j-1][k] + square[i][j-1][k])
*c2);
rhs[3][i][j][k] = rhs[3][i][j][k] + dy4ty1 *
(u[3][i][j+1][k] - 2.0*u[3][i][j][k] +
u[3][i][j-1][k]) +
yycon2 * (ws[i][j+1][k] - 2.0*ws[i][j][k] +
ws[i][j-1][k]) -
ty2 * (u[3][i][j+1][k]*vp1 -
u[3][i][j-1][k]*vm1);
rhs[4][i][j][k] = rhs[4][i][j][k] + dy5ty1 *
(u[4][i][j+1][k] - 2.0*u[4][i][j][k] +
u[4][i][j-1][k]) +
yycon3 * (qs[i][j+1][k] - 2.0*qs[i][j][k] +
qs[i][j-1][k]) +
yycon4 * (vp1*vp1 - 2.0*vijk*vijk +
vm1*vm1) +
yycon5 * (u[4][i][j+1][k]*rho_i[i][j+1][k] -
2.0*u[4][i][j][k]*rho_i[i][j][k] +
u[4][i][j-1][k]*rho_i[i][j-1][k]) -
ty2 * ((c1*u[4][i][j+1][k] -
c2*square[i][j+1][k]) * vp1 -
(c1*u[4][i][j-1][k] -
c2*square[i][j-1][k]) * vm1);
}
}
}
/*--------------------------------------------------------------------
c add fourth order eta-direction dissipation
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c same boundary/interior stencil split as the xi direction, in j
c-------------------------------------------------------------------*/
j = 1;
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k]- dssp *
( 5.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] +
u[m][i][j+2][k]);
}
}
}
j = 2;
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
(-4.0*u[m][i][j-1][k] + 6.0*u[m][i][j][k] -
4.0*u[m][i][j+1][k] + u[m][i][j+2][k]);
}
}
}
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 3*1; j <= grid_points[1]-3*1-1; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] +
6.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] +
u[m][i][j+2][k] );
}
}
}
}
j = grid_points[1]-3;
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] +
6.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] );
}
}
}
j = grid_points[1]-2;
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] +
5.0*u[m][i][j][k] );
}
}
}
/* synchronize all threads before switching sweep direction */
#pragma omp barrier
/*--------------------------------------------------------------------
c compute zeta-direction fluxes
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
wijk = ws[i][j][k];
wp1 = ws[i][j][k+1];
wm1 = ws[i][j][k-1];
rhs[0][i][j][k] = rhs[0][i][j][k] + dz1tz1 *
(u[0][i][j][k+1] - 2.0*u[0][i][j][k] +
u[0][i][j][k-1]) -
tz2 * (u[3][i][j][k+1] - u[3][i][j][k-1]);
rhs[1][i][j][k] = rhs[1][i][j][k] + dz2tz1 *
(u[1][i][j][k+1] - 2.0*u[1][i][j][k] +
u[1][i][j][k-1]) +
zzcon2 * (us[i][j][k+1] - 2.0*us[i][j][k] +
us[i][j][k-1]) -
tz2 * (u[1][i][j][k+1]*wp1 -
u[1][i][j][k-1]*wm1);
rhs[2][i][j][k] = rhs[2][i][j][k] + dz3tz1 *
(u[2][i][j][k+1] - 2.0*u[2][i][j][k] +
u[2][i][j][k-1]) +
zzcon2 * (vs[i][j][k+1] - 2.0*vs[i][j][k] +
vs[i][j][k-1]) -
tz2 * (u[2][i][j][k+1]*wp1 -
u[2][i][j][k-1]*wm1);
rhs[3][i][j][k] = rhs[3][i][j][k] + dz4tz1 *
(u[3][i][j][k+1] - 2.0*u[3][i][j][k] +
u[3][i][j][k-1]) +
zzcon2*con43 * (wp1 - 2.0*wijk + wm1) -
tz2 * (u[3][i][j][k+1]*wp1 -
u[3][i][j][k-1]*wm1 +
(u[4][i][j][k+1] - square[i][j][k+1] -
u[4][i][j][k-1] + square[i][j][k-1])
*c2);
rhs[4][i][j][k] = rhs[4][i][j][k] + dz5tz1 *
(u[4][i][j][k+1] - 2.0*u[4][i][j][k] +
u[4][i][j][k-1]) +
zzcon3 * (qs[i][j][k+1] - 2.0*qs[i][j][k] +
qs[i][j][k-1]) +
zzcon4 * (wp1*wp1 - 2.0*wijk*wijk +
wm1*wm1) +
zzcon5 * (u[4][i][j][k+1]*rho_i[i][j][k+1] -
2.0*u[4][i][j][k]*rho_i[i][j][k] +
u[4][i][j][k-1]*rho_i[i][j][k-1]) -
tz2 * ( (c1*u[4][i][j][k+1] -
c2*square[i][j][k+1])*wp1 -
(c1*u[4][i][j][k-1] -
c2*square[i][j][k-1])*wm1);
}
}
}
/*--------------------------------------------------------------------
c add fourth order zeta-direction dissipation
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c same boundary/interior stencil split as the xi direction, in k
c-------------------------------------------------------------------*/
k = 1;
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
rhs[m][i][j][k] = rhs[m][i][j][k]- dssp *
( 5.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] +
u[m][i][j][k+2]);
}
}
}
k = 2;
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
(-4.0*u[m][i][j][k-1] + 6.0*u[m][i][j][k] -
4.0*u[m][i][j][k+1] + u[m][i][j][k+2]);
}
}
}
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 3*1; k <= grid_points[2]-3*1-1; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] +
6.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] +
u[m][i][j][k+2] );
}
}
}
}
k = grid_points[2]-3;
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] +
6.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] );
}
}
}
k = grid_points[2]-2;
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] +
5.0*u[m][i][j][k] );
}
}
}
/*--------------------------------------------------------------------
c finally, scale the whole right hand side by the time step
c-------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] * dt;
}
}
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void set_constants(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
ce[0][0] = 2.0;
ce[1][0] = 0.0;
ce[2][0] = 0.0;
ce[3][0] = 4.0;
ce[4][0] = 5.0;
ce[5][0] = 3.0;
ce[6][0] = 0.5;
ce[7][0] = 0.02;
ce[8][0] = 0.01;
ce[9][0] = 0.03;
ce[10][0] = 0.5;
ce[11][0] = 0.4;
ce[12][0] = 0.3;
ce[0][1] = 1.0;
ce[1][1] = 0.0;
ce[2][1] = 0.0;
ce[3][1] = 0.0;
ce[4][1] = 1.0;
ce[5][1] = 2.0;
ce[6][1] = 3.0;
ce[7][1] = 0.01;
ce[8][1] = 0.03;
ce[9][1] = 0.02;
ce[10][1] = 0.4;
ce[11][1] = 0.3;
ce[12][1] = 0.5;
ce[0][2] = 2.0;
ce[1][2] = 2.0;
ce[2][2] = 0.0;
ce[3][2] = 0.0;
ce[4][2] = 0.0;
ce[5][2] = 2.0;
ce[6][2] = 3.0;
ce[7][2] = 0.04;
ce[8][2] = 0.03;
ce[9][2] = 0.05;
ce[10][2] = 0.3;
ce[11][2] = 0.5;
ce[12][2] = 0.4;
ce[0][3] = 2.0;
ce[1][3] = 2.0;
ce[2][3] = 0.0;
ce[3][3] = 0.0;
ce[4][3] = 0.0;
ce[5][3] = 2.0;
ce[6][3] = 3.0;
ce[7][3] = 0.03;
ce[8][3] = 0.05;
ce[9][3] = 0.04;
ce[10][3] = 0.2;
ce[11][3] = 0.1;
ce[12][3] = 0.3;
ce[0][4] = 5.0;
ce[1][4] = 4.0;
ce[2][4] = 3.0;
ce[3][4] = 2.0;
ce[4][4] = 0.1;
ce[5][4] = 0.4;
ce[6][4] = 0.3;
ce[7][4] = 0.05;
ce[8][4] = 0.04;
ce[9][4] = 0.03;
ce[10][4] = 0.1;
ce[11][4] = 0.3;
ce[12][4] = 0.2;
c1 = 1.4;
c2 = 0.4;
c3 = 0.1;
c4 = 1.0;
c5 = 1.4;
bt = sqrt(0.5);
dnxm1 = 1.0 / (double)(grid_points[0]-1);
dnym1 = 1.0 / (double)(grid_points[1]-1);
dnzm1 = 1.0 / (double)(grid_points[2]-1);
c1c2 = c1 * c2;
c1c5 = c1 * c5;
c3c4 = c3 * c4;
c1345 = c1c5 * c3c4;
conz1 = (1.0-c1c5);
tx1 = 1.0 / (dnxm1 * dnxm1);
tx2 = 1.0 / (2.0 * dnxm1);
tx3 = 1.0 / dnxm1;
ty1 = 1.0 / (dnym1 * dnym1);
ty2 = 1.0 / (2.0 * dnym1);
ty3 = 1.0 / dnym1;
tz1 = 1.0 / (dnzm1 * dnzm1);
tz2 = 1.0 / (2.0 * dnzm1);
tz3 = 1.0 / dnzm1;
dx1 = 0.75;
dx2 = 0.75;
dx3 = 0.75;
dx4 = 0.75;
dx5 = 0.75;
dy1 = 0.75;
dy2 = 0.75;
dy3 = 0.75;
dy4 = 0.75;
dy5 = 0.75;
dz1 = 1.0;
dz2 = 1.0;
dz3 = 1.0;
dz4 = 1.0;
dz5 = 1.0;
dxmax = max(dx3, dx4);
dymax = max(dy2, dy4);
dzmax = max(dz2, dz3);
dssp = 0.25 * max(dx1, max(dy1, dz1) );
c4dssp = 4.0 * dssp;
c5dssp = 5.0 * dssp;
dttx1 = dt*tx1;
dttx2 = dt*tx2;
dtty1 = dt*ty1;
dtty2 = dt*ty2;
dttz1 = dt*tz1;
dttz2 = dt*tz2;
c2dttx1 = 2.0*dttx1;
c2dtty1 = 2.0*dtty1;
c2dttz1 = 2.0*dttz1;
dtdssp = dt*dssp;
comz1 = dtdssp;
comz4 = 4.0*dtdssp;
comz5 = 5.0*dtdssp;
comz6 = 6.0*dtdssp;
c3c4tx3 = c3c4*tx3;
c3c4ty3 = c3c4*ty3;
c3c4tz3 = c3c4*tz3;
dx1tx1 = dx1*tx1;
dx2tx1 = dx2*tx1;
dx3tx1 = dx3*tx1;
dx4tx1 = dx4*tx1;
dx5tx1 = dx5*tx1;
dy1ty1 = dy1*ty1;
dy2ty1 = dy2*ty1;
dy3ty1 = dy3*ty1;
dy4ty1 = dy4*ty1;
dy5ty1 = dy5*ty1;
dz1tz1 = dz1*tz1;
dz2tz1 = dz2*tz1;
dz3tz1 = dz3*tz1;
dz4tz1 = dz4*tz1;
dz5tz1 = dz5*tz1;
c2iv = 2.5;
con43 = 4.0/3.0;
con16 = 1.0/6.0;
xxcon1 = c3c4tx3*con43*tx3;
xxcon2 = c3c4tx3*tx3;
xxcon3 = c3c4tx3*conz1*tx3;
xxcon4 = c3c4tx3*con16*tx3;
xxcon5 = c3c4tx3*c1c5*tx3;
yycon1 = c3c4ty3*con43*ty3;
yycon2 = c3c4ty3*ty3;
yycon3 = c3c4ty3*conz1*ty3;
yycon4 = c3c4ty3*con16*ty3;
yycon5 = c3c4ty3*c1c5*ty3;
zzcon1 = c3c4tz3*con43*tz3;
zzcon2 = c3c4tz3*tz3;
zzcon3 = c3c4tz3*conz1*tz3;
zzcon4 = c3c4tz3*con16*tz3;
zzcon5 = c3c4tz3*c1c5*tz3;
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void txinvr(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c block-diagonal matrix-vector multiplication
--------------------------------------------------------------------*/
int i, j, k;
double t1, t2, t3, ac, ru1, uu, vv, ww, r1, r2, r3,
r4, r5, ac2inv;
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
ru1 = rho_i[i][j][k];
uu = us[i][j][k];
vv = vs[i][j][k];
ww = ws[i][j][k];
ac = speed[i][j][k];
ac2inv = ainv[i][j][k]*ainv[i][j][k];
r1 = rhs[0][i][j][k];
r2 = rhs[1][i][j][k];
r3 = rhs[2][i][j][k];
r4 = rhs[3][i][j][k];
r5 = rhs[4][i][j][k];
t1 = c2 * ac2inv * ( qs[i][j][k]*r1 - uu*r2 -
vv*r3 - ww*r4 + r5 );
t2 = bt * ru1 * ( uu * r1 - r2 );
t3 = ( bt * ru1 * ac ) * t1;
rhs[0][i][j][k] = r1 - t1;
rhs[1][i][j][k] = - ru1 * ( ww*r1 - r4 );
rhs[2][i][j][k] = ru1 * ( vv*r1 - r3 );
rhs[3][i][j][k] = - t2 + t3;
rhs[4][i][j][k] = t2 + t3;
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void tzetar(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c block-diagonal matrix-vector multiplication
c-------------------------------------------------------------------*/
int i, j, k;
double t1, t2, t3, ac, xvel, yvel, zvel, r1, r2, r3,
r4, r5, btuz, acinv, ac2u, uzik1;
#pragma omp for private(i,j,k,t1,t2,t3,ac,xvel,yvel,zvel,r1,r2,r3,r4,r5,btuz,ac2u,uzik1)
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
xvel = us[i][j][k];
yvel = vs[i][j][k];
zvel = ws[i][j][k];
ac = speed[i][j][k];
acinv = ainv[i][j][k];
ac2u = ac*ac;
r1 = rhs[0][i][j][k];
r2 = rhs[1][i][j][k];
r3 = rhs[2][i][j][k];
r4 = rhs[3][i][j][k];
r5 = rhs[4][i][j][k];
uzik1 = u[0][i][j][k];
btuz = bt * uzik1;
t1 = btuz*acinv * (r4 + r5);
t2 = r3 + t1;
t3 = btuz * (r4 - r5);
rhs[0][i][j][k] = t2;
rhs[1][i][j][k] = -uzik1*r2 + xvel*t2;
rhs[2][i][j][k] = uzik1*r1 + yvel*t2;
rhs[3][i][j][k] = zvel*t2 + t3;
rhs[4][i][j][k] = uzik1*(-xvel*r2 + yvel*r1) +
qs[i][j][k]*t2 + c2iv*ac2u*t1 + zvel*t3;
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c verify: compares the computed residual and solution-error RMS norms
c against hard-coded reference values for the standard problem classes
c (S, W, A, B, C).  Sets *class to the matched class ('U' if unknown)
c and *verified to TRUE only if every norm is within epsilon of its
c reference.  ("class" is a legal identifier here because this file is
c C, not C++.)
--------------------------------------------------------------------*/
static void verify(int no_time_steps, char *class, boolean *verified) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c  verification routine
--------------------------------------------------------------------*/
  double xcrref[5],xceref[5],xcrdif[5],xcedif[5],
    epsilon, xce[5], xcr[5], dtref;
  int m;
/*--------------------------------------------------------------------
c   tolerance level
--------------------------------------------------------------------*/
  epsilon = 1.0e-08;
/*--------------------------------------------------------------------
c   compute the error norm and the residual norm, and exit if not printing
--------------------------------------------------------------------*/
  error_norm(xce);
  compute_rhs();
  rhs_norm(xcr);
/* residual norms are reported per unit time step */
  for (m = 0; m < 5; m++) {
    xcr[m] = xcr[m] / dt;
  }
/* defaults: unknown class, optimistically verified until a check fails;
   dummy references of 1.0 keep the relative-difference loop below
   well-defined even when no class matches */
  *class = 'U';
  *verified = TRUE;
  for (m = 0; m < 5; m++) {
    xcrref[m] = 1.0;
    xceref[m] = 1.0;
  }
/*--------------------------------------------------------------------
c    reference data for 12X12X12 grids after 100 time steps, with DT = 1.50d-02
--------------------------------------------------------------------*/
  if ( grid_points[0] == 12 &&
       grid_points[1] == 12 &&
       grid_points[2] == 12 &&
       no_time_steps == 100) {
    *class = 'S';
    dtref = 1.5e-2;
/*--------------------------------------------------------------------
c  Reference values of RMS-norms of residual.
--------------------------------------------------------------------*/
    xcrref[0] = 2.7470315451339479e-02;
    xcrref[1] = 1.0360746705285417e-02;
    xcrref[2] = 1.6235745065095532e-02;
    xcrref[3] = 1.5840557224455615e-02;
    xcrref[4] = 3.4849040609362460e-02;
/*--------------------------------------------------------------------
c  Reference values of RMS-norms of solution error.
--------------------------------------------------------------------*/
    xceref[0] = 2.7289258557377227e-05;
    xceref[1] = 1.0364446640837285e-05;
    xceref[2] = 1.6154798287166471e-05;
    xceref[3] = 1.5750704994480102e-05;
    xceref[4] = 3.4177666183390531e-05;
/*--------------------------------------------------------------------
c    reference data for 36X36X36 grids after 400 time steps, with DT = 1.5d-03
--------------------------------------------------------------------*/
  } else if (grid_points[0] == 36 &&
	     grid_points[1] == 36 &&
	     grid_points[2] == 36 &&
	     no_time_steps == 400) {
    *class = 'W';
    dtref = 1.5e-3;
/*--------------------------------------------------------------------
c  Reference values of RMS-norms of residual.
--------------------------------------------------------------------*/
    xcrref[0] = 0.1893253733584e-02;
    xcrref[1] = 0.1717075447775e-03;
    xcrref[2] = 0.2778153350936e-03;
    xcrref[3] = 0.2887475409984e-03;
    xcrref[4] = 0.3143611161242e-02;
/*--------------------------------------------------------------------
c  Reference values of RMS-norms of solution error.
--------------------------------------------------------------------*/
    xceref[0] = 0.7542088599534e-04;
    xceref[1] = 0.6512852253086e-05;
    xceref[2] = 0.1049092285688e-04;
    xceref[3] = 0.1128838671535e-04;
    xceref[4] = 0.1212845639773e-03;
/*--------------------------------------------------------------------
c    reference data for 64X64X64 grids after 400 time steps, with DT = 1.5d-03
--------------------------------------------------------------------*/
  } else if (grid_points[0] == 64 &&
	     grid_points[1] == 64 &&
	     grid_points[2] == 64 &&
	     no_time_steps == 400 ) {
    *class = 'A';
    dtref = 1.5e-3;
/*--------------------------------------------------------------------
c  Reference values of RMS-norms of residual.
--------------------------------------------------------------------*/
    xcrref[0] = 2.4799822399300195;
    xcrref[1] = 1.1276337964368832;
    xcrref[2] = 1.5028977888770491;
    xcrref[3] = 1.4217816211695179;
    xcrref[4] = 2.1292113035138280;
/*--------------------------------------------------------------------
c  Reference values of RMS-norms of solution error.
--------------------------------------------------------------------*/
    xceref[0] = 1.0900140297820550e-04;
    xceref[1] = 3.7343951769282091e-05;
    xceref[2] = 5.0092785406541633e-05;
    xceref[3] = 4.7671093939528255e-05;
    xceref[4] = 1.3621613399213001e-04;
/*--------------------------------------------------------------------
c    reference data for 102X102X102 grids after 400 time steps,
c    with DT = 1.0d-03
--------------------------------------------------------------------*/
  } else if (grid_points[0] == 102 &&
	     grid_points[1] == 102 &&
	     grid_points[2] == 102 &&
	     no_time_steps == 400) {
    *class = 'B';
    dtref = 1.0e-3;
/*--------------------------------------------------------------------
c  Reference values of RMS-norms of residual.
--------------------------------------------------------------------*/
    xcrref[0] = 0.6903293579998e+02;
    xcrref[1] = 0.3095134488084e+02;
    xcrref[2] = 0.4103336647017e+02;
    xcrref[3] = 0.3864769009604e+02;
    xcrref[4] = 0.5643482272596e+02;
/*--------------------------------------------------------------------
c  Reference values of RMS-norms of solution error.
--------------------------------------------------------------------*/
    xceref[0] = 0.9810006190188e-02;
    xceref[1] = 0.1022827905670e-02;
    xceref[2] = 0.1720597911692e-02;
    xceref[3] = 0.1694479428231e-02;
    xceref[4] = 0.1847456263981e-01;
/*--------------------------------------------------------------------
c    reference data for 162X162X162 grids after 400 time steps,
c    with DT = 0.67d-03
--------------------------------------------------------------------*/
  } else if (grid_points[0] == 162 &&
	     grid_points[1] == 162 &&
	     grid_points[2] == 162 &&
	     no_time_steps == 400) {
    *class = 'C';
    dtref = 0.67e-3;
/*--------------------------------------------------------------------
c  Reference values of RMS-norms of residual.
--------------------------------------------------------------------*/
    xcrref[0] = 0.5881691581829e+03;
    xcrref[1] = 0.2454417603569e+03;
    xcrref[2] = 0.3293829191851e+03;
    xcrref[3] = 0.3081924971891e+03;
    xcrref[4] = 0.4597223799176e+03;
/*--------------------------------------------------------------------
c  Reference values of RMS-norms of solution error.
--------------------------------------------------------------------*/
    xceref[0] = 0.2598120500183e+00;
    xceref[1] = 0.2590888922315e-01;
    xceref[2] = 0.5132886416320e-01;
    xceref[3] = 0.4806073419454e-01;
    xceref[4] = 0.5483377491301e+00;
  } else {
    *verified = FALSE;
  }
/*--------------------------------------------------------------------
c    verification test for residuals if gridsize is either 12X12X12 or
c    64X64X64 or 102X102X102 or 162X162X162
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c    Compute the difference of solution values and the known reference values.
--------------------------------------------------------------------*/
  for (m = 0; m < 5; m++) {
    xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]) ;
    xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]);
  }
/*--------------------------------------------------------------------
c    Output the comparison of computed results to known cases.
--------------------------------------------------------------------*/
/* dtref is only read inside this guard, so it is never used
   uninitialized when the class is unknown */
  if (*class != 'U') {
    printf(" Verification being performed for class %1c\n", *class);
    printf(" accuracy setting for epsilon = %20.13e\n", epsilon);
    if (fabs(dt-dtref) > epsilon) {
      *verified = FALSE;
      *class = 'U';
      printf(" DT does not match the reference value of %15.8e\n", dtref);
    }
  } else {
    printf(" Unknown class\n");
  }
  if (*class != 'U') {
    printf(" Comparison of RMS-norms of residual\n");
  } else {
    printf(" RMS-norms of residual\n");
  }
  for (m = 0; m < 5; m++) {
    if (*class == 'U') {
      printf("          %2d%20.13e\n", m, xcr[m]);
    } else if (xcrdif[m] > epsilon) {
      *verified = FALSE;
      printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n",
	     m,xcr[m],xcrref[m],xcrdif[m]);
    } else {
      printf("          %2d%20.13e%20.13e%20.13e\n",
	     m,xcr[m],xcrref[m],xcrdif[m]);
    }
  }
  if (*class != 'U') {
    printf(" Comparison of RMS-norms of solution error\n");
  } else {
    printf(" RMS-norms of solution error\n");
  }
  for (m = 0; m < 5; m++) {
    if (*class == 'U') {
      printf("          %2d%20.13e\n", m, xce[m]);
    } else if (xcedif[m] > epsilon) {
      *verified = FALSE;
      printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n",
	     m,xce[m],xceref[m],xcedif[m]);
    } else {
      printf("          %2d%20.13e%20.13e%20.13e\n",
	     m,xce[m],xceref[m],xcedif[m]);
    }
  }
  if (*class == 'U') {
    printf(" No reference values provided\n");
    printf(" No verification performed\n");
  } else if (*verified) {
    printf(" Verification Successful\n");
  } else {
    printf(" Verification failed\n");
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void x_solve(void) {
#pragma omp parallel
{
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c this function performs the solution of the approximate factorization
c step in the x-direction for all five matrix components
c simultaneously. The Thomas algorithm is employed to solve the
c systems for the x-lines. Boundary conditions are non-periodic
--------------------------------------------------------------------*/
  int i, j, k, n, i1, i2, m;
  double fac1, fac2;
/*--------------------------------------------------------------------
c                          FORWARD ELIMINATION
--------------------------------------------------------------------*/
  lhsx();
/*--------------------------------------------------------------------
c      perform the Thomas algorithm; first, FORWARD ELIMINATION
--------------------------------------------------------------------*/
/* n = 0 selects the scalar pentadiagonal factor shared by the first
   three equations (m = 0..2) */
  n = 0;
/* the recurrence runs along i, so i stays serial and the omp for
   (with its implicit end barrier) parallelizes over j; the barrier
   guarantees rows i1/i2 are fully updated before the next i */
  for (i = 0; i <= grid_points[0]-3; i++) {
    i1 = i  + 1;
    i2 = i  + 2;
#pragma omp for
    for (j = 1; j <= grid_points[1]-2; j++) {
      for (k = 1; k <= grid_points[2]-2; k++) {
	fac1               = 1./lhs[n+2][i][j][k];
	lhs[n+3][i][j][k]  = fac1*lhs[n+3][i][j][k];
	lhs[n+4][i][j][k]  = fac1*lhs[n+4][i][j][k];
	for (m = 0; m < 3; m++) {
	  rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
	}
	lhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -
	  lhs[n+1][i1][j][k]*lhs[n+3][i][j][k];
	lhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -
	  lhs[n+1][i1][j][k]*lhs[n+4][i][j][k];
	for (m = 0; m < 3; m++) {
	  rhs[m][i1][j][k] = rhs[m][i1][j][k] -
	    lhs[n+1][i1][j][k]*rhs[m][i][j][k];
	}
	lhs[n+1][i2][j][k] = lhs[n+1][i2][j][k] -
	  lhs[n+0][i2][j][k]*lhs[n+3][i][j][k];
	lhs[n+2][i2][j][k] = lhs[n+2][i2][j][k] -
	  lhs[n+0][i2][j][k]*lhs[n+4][i][j][k];
	for (m = 0; m < 3; m++) {
	  rhs[m][i2][j][k] = rhs[m][i2][j][k] -
	    lhs[n+0][i2][j][k]*rhs[m][i][j][k];
	}
      }
    }
  }
/*--------------------------------------------------------------------
c      The last two rows in this grid block are a bit different,
c      since they do not have two more rows available for the
c      elimination of off-diagonal entries
--------------------------------------------------------------------*/
  i  = grid_points[0]-2;
  i1 = grid_points[0]-1;
#pragma omp for
  for (j = 1; j <= grid_points[1]-2; j++) {
    for (k = 1; k <= grid_points[2]-2; k++) {
      fac1               = 1.0/lhs[n+2][i][j][k];
      lhs[n+3][i][j][k]  = fac1*lhs[n+3][i][j][k];
      lhs[n+4][i][j][k]  = fac1*lhs[n+4][i][j][k];
      for (m = 0; m < 3; m++) {
	rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
      }
      lhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -
	lhs[n+1][i1][j][k]*lhs[n+3][i][j][k];
      lhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -
	lhs[n+1][i1][j][k]*lhs[n+4][i][j][k];
      for (m = 0; m < 3; m++) {
	rhs[m][i1][j][k] = rhs[m][i1][j][k] -
	  lhs[n+1][i1][j][k]*rhs[m][i][j][k];
      }
/*--------------------------------------------------------------------
c            scale the last row immediately 
--------------------------------------------------------------------*/
      fac2               = 1./lhs[n+2][i1][j][k];
      for (m = 0; m < 3; m++) {
	rhs[m][i1][j][k] = fac2*rhs[m][i1][j][k];
      }
    }
  }
/*--------------------------------------------------------------------
c      do the u+c and the u-c factors                 
--------------------------------------------------------------------*/
/* equations m = 3 (u+c) and m = 4 (u-c) each carry their own lhs
   factor at offsets n = 5 and n = 10 */
  for (m = 3; m < 5; m++) {
    n = (m-3+1)*5;
    for (i = 0; i <= grid_points[0]-3; i++) {
      i1 = i  + 1;
      i2 = i  + 2;
#pragma omp for
      for (j = 1; j <= grid_points[1]-2; j++) {
	for (k = 1; k <= grid_points[2]-2; k++) {
	  fac1               = 1./lhs[n+2][i][j][k];
	  lhs[n+3][i][j][k]  = fac1*lhs[n+3][i][j][k];
	  lhs[n+4][i][j][k]  = fac1*lhs[n+4][i][j][k];
	  rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
	  lhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -
	    lhs[n+1][i1][j][k]*lhs[n+3][i][j][k];
	  lhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -
	    lhs[n+1][i1][j][k]*lhs[n+4][i][j][k];
	  rhs[m][i1][j][k] = rhs[m][i1][j][k] -
	    lhs[n+1][i1][j][k]*rhs[m][i][j][k];
	  lhs[n+1][i2][j][k] = lhs[n+1][i2][j][k] -
	    lhs[n+0][i2][j][k]*lhs[n+3][i][j][k];
	  lhs[n+2][i2][j][k] = lhs[n+2][i2][j][k] -
	    lhs[n+0][i2][j][k]*lhs[n+4][i][j][k];
	  rhs[m][i2][j][k] = rhs[m][i2][j][k] -
	    lhs[n+0][i2][j][k]*rhs[m][i][j][k];
	}
      }
    }
/*--------------------------------------------------------------------
c         And again the last two rows separately
--------------------------------------------------------------------*/
    i  = grid_points[0]-2;
    i1 = grid_points[0]-1;
#pragma omp for
    for (j = 1; j <= grid_points[1]-2; j++) {
      for (k = 1; k <= grid_points[2]-2; k++) {
	fac1               = 1./lhs[n+2][i][j][k];
	lhs[n+3][i][j][k]  = fac1*lhs[n+3][i][j][k];
	lhs[n+4][i][j][k]  = fac1*lhs[n+4][i][j][k];
	rhs[m][i][j][k]    = fac1*rhs[m][i][j][k];
	lhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -
	  lhs[n+1][i1][j][k]*lhs[n+3][i][j][k];
	lhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -
	  lhs[n+1][i1][j][k]*lhs[n+4][i][j][k];
	rhs[m][i1][j][k]   = rhs[m][i1][j][k] -
	  lhs[n+1][i1][j][k]*rhs[m][i][j][k];
/*--------------------------------------------------------------------
c               Scale the last row immediately
--------------------------------------------------------------------*/
	fac2               = 1./lhs[n+2][i1][j][k];
	rhs[m][i1][j][k]   = fac2*rhs[m][i1][j][k];
      }
    }
  }
/*--------------------------------------------------------------------
c                         BACKSUBSTITUTION 
--------------------------------------------------------------------*/
  i  = grid_points[0]-2;
  i1 = grid_points[0]-1;
  n = 0;
  for (m = 0; m < 3; m++) {
#pragma omp for
    for (j = 1; j <= grid_points[1]-2; j++) {
      for (k = 1; k <= grid_points[2]-2; k++) {
	rhs[m][i][j][k] = rhs[m][i][j][k] -
	  lhs[n+3][i][j][k]*rhs[m][i1][j][k];
      }
    }
  }
  for (m = 3; m < 5; m++) {
#pragma omp for
    for (j = 1; j <= grid_points[1]-2; j++) {
      for (k = 1; k <= grid_points[2]-2; k++) {
	n = (m-3+1)*5;
	rhs[m][i][j][k] = rhs[m][i][j][k] -
	  lhs[n+3][i][j][k]*rhs[m][i1][j][k];
      }
    }
  }
/*--------------------------------------------------------------------
c      The first three factors
--------------------------------------------------------------------*/
/* backward sweep: i runs high to low, again serialized with the
   parallel dimension inside the omp for */
  n = 0;
  for (i = grid_points[0]-3; i >= 0; i--) {
    i1 = i  + 1;
    i2 = i  + 2;
#pragma omp for
    for (m = 0; m < 3; m++) {
      for (j = 1; j <= grid_points[1]-2; j++) {
	for (k = 1; k <= grid_points[2]-2; k++) {
	  rhs[m][i][j][k] = rhs[m][i][j][k] -
	    lhs[n+3][i][j][k]*rhs[m][i1][j][k] -
	    lhs[n+4][i][j][k]*rhs[m][i2][j][k];
	}
      }
    }
  }
/*--------------------------------------------------------------------
c      And the remaining two
--------------------------------------------------------------------*/
  for (m = 3; m < 5; m++) {
    n = (m-3+1)*5;
    for (i = grid_points[0]-3; i >= 0; i--) {
      i1 = i  + 1;
      i2 = i  + 2;
#pragma omp for
      for (j = 1; j <= grid_points[1]-2; j++) {
	for (k = 1; k <= grid_points[2]-2; k++) {
	  rhs[m][i][j][k] = rhs[m][i][j][k] -
	    lhs[n+3][i][j][k]*rhs[m][i1][j][k] -
	    lhs[n+4][i][j][k]*rhs[m][i2][j][k];
	}
      }
    }
  }
}
/*--------------------------------------------------------------------
c      Do the block-diagonal inversion          
--------------------------------------------------------------------*/
/* NOTE(review): ninvr() is called after the parallel region closes;
   confirm it opens its own parallel region if parallel inversion is
   intended */
  ninvr();
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void y_solve(void) {
#pragma omp parallel
{
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c this function performs the solution of the approximate factorization
c step in the y-direction for all five matrix components
c simultaneously. The Thomas algorithm is employed to solve the
c systems for the y-lines. Boundary conditions are non-periodic
--------------------------------------------------------------------*/
  int i, j, k, n, j1, j2, m;
  double fac1, fac2;
/*--------------------------------------------------------------------
c                          FORWARD ELIMINATION  
--------------------------------------------------------------------*/
  lhsy();
/* n = 0 selects the scalar pentadiagonal factor shared by the first
   three equations (m = 0..2); the recurrence runs along j, so j stays
   serial and the omp for (with its implicit end barrier) parallelizes
   over i */
  n = 0;
  for (j = 0; j <= grid_points[1]-3; j++) {
    j1 = j  + 1;
    j2 = j  + 2;
#pragma omp for
    for (i = 1; i <= grid_points[0]-2; i++) {
      for (k = 1; k <= grid_points[2]-2; k++) {
	fac1               = 1./lhs[n+2][i][j][k];
	lhs[n+3][i][j][k]  = fac1*lhs[n+3][i][j][k];
	lhs[n+4][i][j][k]  = fac1*lhs[n+4][i][j][k];
	for (m = 0; m < 3; m++) {
	  rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
	}
	lhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -
	  lhs[n+1][i][j1][k]*lhs[n+3][i][j][k];
	lhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -
	  lhs[n+1][i][j1][k]*lhs[n+4][i][j][k];
	for (m = 0; m < 3; m++) {
	  rhs[m][i][j1][k] = rhs[m][i][j1][k] -
	    lhs[n+1][i][j1][k]*rhs[m][i][j][k];
	}
	lhs[n+1][i][j2][k] = lhs[n+1][i][j2][k] -
	  lhs[n+0][i][j2][k]*lhs[n+3][i][j][k];
	lhs[n+2][i][j2][k] = lhs[n+2][i][j2][k] -
	  lhs[n+0][i][j2][k]*lhs[n+4][i][j][k];
	for (m = 0; m < 3; m++) {
	  rhs[m][i][j2][k] = rhs[m][i][j2][k] -
	    lhs[n+0][i][j2][k]*rhs[m][i][j][k];
	}
      }
    }
  }
/*--------------------------------------------------------------------
c      The last two rows in this grid block are a bit different, 
c      since they do not have two more rows available for the
c      elimination of off-diagonal entries
--------------------------------------------------------------------*/
  j  = grid_points[1]-2;
  j1 = grid_points[1]-1;
#pragma omp for
  for (i = 1; i <= grid_points[0]-2; i++) {
    for (k = 1; k <= grid_points[2]-2; k++) {
      fac1               = 1./lhs[n+2][i][j][k];
      lhs[n+3][i][j][k]  = fac1*lhs[n+3][i][j][k];
      lhs[n+4][i][j][k]  = fac1*lhs[n+4][i][j][k];
      for (m = 0; m < 3; m++) {
	rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
      }
      lhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -
	lhs[n+1][i][j1][k]*lhs[n+3][i][j][k];
      lhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -
	lhs[n+1][i][j1][k]*lhs[n+4][i][j][k];
      for (m = 0; m < 3; m++) {
	rhs[m][i][j1][k] = rhs[m][i][j1][k] -
	  lhs[n+1][i][j1][k]*rhs[m][i][j][k];
      }
/*--------------------------------------------------------------------
c            scale the last row immediately 
--------------------------------------------------------------------*/
      fac2               = 1./lhs[n+2][i][j1][k];
      for (m = 0; m < 3; m++) {
	rhs[m][i][j1][k] = fac2*rhs[m][i][j1][k];
      }
    }
  }
/*--------------------------------------------------------------------
c      do the u+c and the u-c factors                 
--------------------------------------------------------------------*/
/* equations m = 3 (u+c) and m = 4 (u-c) each carry their own lhs
   factor at offsets n = 5 and n = 10 */
  for (m = 3; m < 5; m++) {
    n = (m-3+1)*5;
    for (j = 0; j <= grid_points[1]-3; j++) {
      j1 = j  + 1;
      j2 = j  + 2;
#pragma omp for
      for (i = 1; i <= grid_points[0]-2; i++) {
	for (k = 1; k <= grid_points[2]-2; k++) {
	  fac1               = 1./lhs[n+2][i][j][k];
	  lhs[n+3][i][j][k]  = fac1*lhs[n+3][i][j][k];
	  lhs[n+4][i][j][k]  = fac1*lhs[n+4][i][j][k];
	  rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
	  lhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -
	    lhs[n+1][i][j1][k]*lhs[n+3][i][j][k];
	  lhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -
	    lhs[n+1][i][j1][k]*lhs[n+4][i][j][k];
	  rhs[m][i][j1][k] = rhs[m][i][j1][k] -
	    lhs[n+1][i][j1][k]*rhs[m][i][j][k];
	  lhs[n+1][i][j2][k] = lhs[n+1][i][j2][k] -
	    lhs[n+0][i][j2][k]*lhs[n+3][i][j][k];
	  lhs[n+2][i][j2][k] = lhs[n+2][i][j2][k] -
	    lhs[n+0][i][j2][k]*lhs[n+4][i][j][k];
	  rhs[m][i][j2][k] = rhs[m][i][j2][k] -
	    lhs[n+0][i][j2][k]*rhs[m][i][j][k];
	}
      }
    }
/*--------------------------------------------------------------------
c         And again the last two rows separately
--------------------------------------------------------------------*/
    j  = grid_points[1]-2;
    j1 = grid_points[1]-1;
#pragma omp for
    for (i = 1; i <= grid_points[0]-2; i++) {
      for (k = 1; k <= grid_points[2]-2; k++) {
	fac1               = 1./lhs[n+2][i][j][k];
	lhs[n+3][i][j][k]  = fac1*lhs[n+3][i][j][k];
	lhs[n+4][i][j][k]  = fac1*lhs[n+4][i][j][k];
	rhs[m][i][j][k]    = fac1*rhs[m][i][j][k];
	lhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -
	  lhs[n+1][i][j1][k]*lhs[n+3][i][j][k];
	lhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -
	  lhs[n+1][i][j1][k]*lhs[n+4][i][j][k];
	rhs[m][i][j1][k]   = rhs[m][i][j1][k] -
	  lhs[n+1][i][j1][k]*rhs[m][i][j][k];
/*--------------------------------------------------------------------
c               Scale the last row immediately 
--------------------------------------------------------------------*/
	fac2               = 1./lhs[n+2][i][j1][k];
	rhs[m][i][j1][k]   = fac2*rhs[m][i][j1][k];
      }
    }
  }
/*--------------------------------------------------------------------
c                         BACKSUBSTITUTION 
--------------------------------------------------------------------*/
  j  = grid_points[1]-2;
  j1 = grid_points[1]-1;
  n = 0;
  for (m = 0; m < 3; m++) {
#pragma omp for
    for (i = 1; i <= grid_points[0]-2; i++) {
      for (k = 1; k <= grid_points[2]-2; k++) {
	rhs[m][i][j][k] = rhs[m][i][j][k] -
	  lhs[n+3][i][j][k]*rhs[m][i][j1][k];
      }
    }
  }
  for (m = 3; m < 5; m++) {
#pragma omp for
    for (i = 1; i <= grid_points[0]-2; i++) {
      for (k = 1; k <= grid_points[2]-2; k++) {
	n = (m-3+1)*5;
	rhs[m][i][j][k] = rhs[m][i][j][k] -
	  lhs[n+3][i][j][k]*rhs[m][i][j1][k];
      }
    }
  }
/*--------------------------------------------------------------------
c      The first three factors
--------------------------------------------------------------------*/
  n = 0;
  for (m = 0; m < 3; m++) {
    for (j = grid_points[1]-3; j >= 0; j--) {
      j1 = j  + 1;
      j2 = j  + 2;
#pragma omp for
      for (i = 1; i <= grid_points[0]-2; i++) {
	for (k = 1; k <= grid_points[2]-2; k++) {
	  rhs[m][i][j][k] = rhs[m][i][j][k] -
	    lhs[n+3][i][j][k]*rhs[m][i][j1][k] -
	    lhs[n+4][i][j][k]*rhs[m][i][j2][k];
	}
      }
    }
  }
/*--------------------------------------------------------------------
c      And the remaining two
--------------------------------------------------------------------*/
  for (m = 3; m < 5; m++) {
    n = (m-3+1)*5;
    for (j = grid_points[1]-3; j >= 0; j--) {
      j1 = j  + 1;
/* consistency fix: was "j2 = j1 + 1" -- numerically identical, but
   every sibling loop here and in x_solve/z_solve uses the
   "index + 2" form */
      j2 = j  + 2;
#pragma omp for
      for (i = 1; i <= grid_points[0]-2; i++) {
	for (k = 1; k <= grid_points[2]-2; k++) {
	  rhs[m][i][j][k] = rhs[m][i][j][k] -
	    lhs[n+3][i][j][k]*rhs[m][i][j1][k] -
	    lhs[n+4][i][j][k]*rhs[m][i][j2][k];
	}
      }
    }
  }
}
/* block-diagonal inversion for the y sweep (called after the parallel
   region closes, matching x_solve/z_solve) */
  pinvr();
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void z_solve(void) {
#pragma omp parallel
{
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c this function performs the solution of the approximate factorization
c step in the z-direction for all five matrix components
c simultaneously. The Thomas algorithm is employed to solve the
c systems for the z-lines. Boundary conditions are non-periodic
c-------------------------------------------------------------------*/
  int i, j, k, n, k1, k2, m;
  double fac1, fac2;
/*--------------------------------------------------------------------
c                          FORWARD ELIMINATION  
c-------------------------------------------------------------------*/
  lhsz();
/* n = 0 selects the scalar pentadiagonal factor shared by the first
   three equations (m = 0..2).  Unlike x_solve/y_solve, the recurrence
   direction (k) is the innermost loop here, so each thread owns whole
   (i,j) pencils and no per-step barrier is needed: the omp for sits
   outermost. */
  n = 0;
#pragma omp for
  for (i = 1; i <= grid_points[0]-2; i++) {
    for (j = 1; j <= grid_points[1]-2; j++) {
      for (k = 0; k <= grid_points[2]-3; k++) {
	k1 = k  + 1;
	k2 = k  + 2;
	fac1               = 1./lhs[n+2][i][j][k];
	lhs[n+3][i][j][k]  = fac1*lhs[n+3][i][j][k];
	lhs[n+4][i][j][k]  = fac1*lhs[n+4][i][j][k];
	for (m = 0; m < 3; m++) {
	  rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
	}
	lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -
	  lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];
	lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -
	  lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];
	for (m = 0; m < 3; m++) {
	  rhs[m][i][j][k1] = rhs[m][i][j][k1] -
	    lhs[n+1][i][j][k1]*rhs[m][i][j][k];
	}
	lhs[n+1][i][j][k2] = lhs[n+1][i][j][k2] -
	  lhs[n+0][i][j][k2]*lhs[n+3][i][j][k];
	lhs[n+2][i][j][k2] = lhs[n+2][i][j][k2] -
	  lhs[n+0][i][j][k2]*lhs[n+4][i][j][k];
	for (m = 0; m < 3; m++) {
	  rhs[m][i][j][k2] = rhs[m][i][j][k2] -
	    lhs[n+0][i][j][k2]*rhs[m][i][j][k];
	}
      }
    }
  }
/*--------------------------------------------------------------------
c      The last two rows in this grid block are a bit different, 
c      since they do not have two more rows available for the
c      elimination of off-diagonal entries
c-------------------------------------------------------------------*/
  k  = grid_points[2]-2;
  k1 = grid_points[2]-1;
#pragma omp for
  for (i = 1; i <= grid_points[0]-2; i++) {
    for (j = 1; j <= grid_points[1]-2; j++) {
      fac1               = 1./lhs[n+2][i][j][k];
      lhs[n+3][i][j][k]  = fac1*lhs[n+3][i][j][k];
      lhs[n+4][i][j][k]  = fac1*lhs[n+4][i][j][k];
      for (m = 0; m < 3; m++) {
	rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
      }
      lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -
	lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];
      lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -
	lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];
      for (m = 0; m < 3; m++) {
	rhs[m][i][j][k1] = rhs[m][i][j][k1] -
	  lhs[n+1][i][j][k1]*rhs[m][i][j][k];
      }
/*--------------------------------------------------------------------
c               scale the last row immediately
c-------------------------------------------------------------------*/
      fac2               = 1./lhs[n+2][i][j][k1];
      for (m = 0; m < 3; m++) {
	rhs[m][i][j][k1] = fac2*rhs[m][i][j][k1];
      }
    }
  }
/*--------------------------------------------------------------------
c      do the u+c and the u-c factors               
c-------------------------------------------------------------------*/
/* equations m = 3 (u+c) and m = 4 (u-c) each carry their own lhs
   factor at offsets n = 5 and n = 10 */
  for (m = 3; m < 5; m++) {
    n = (m-3+1)*5;
#pragma omp for
    for (i = 1; i <= grid_points[0]-2; i++) {
      for (j = 1; j <= grid_points[1]-2; j++) {
	for (k = 0; k <= grid_points[2]-3; k++) {
	  k1 = k  + 1;
	  k2 = k  + 2;
	  fac1               = 1./lhs[n+2][i][j][k];
	  lhs[n+3][i][j][k]  = fac1*lhs[n+3][i][j][k];
	  lhs[n+4][i][j][k]  = fac1*lhs[n+4][i][j][k];
	  rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
	  lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -
	    lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];
	  lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -
	    lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];
	  rhs[m][i][j][k1] = rhs[m][i][j][k1] -
	    lhs[n+1][i][j][k1]*rhs[m][i][j][k];
	  lhs[n+1][i][j][k2] = lhs[n+1][i][j][k2] -
	    lhs[n+0][i][j][k2]*lhs[n+3][i][j][k];
	  lhs[n+2][i][j][k2] = lhs[n+2][i][j][k2] -
	    lhs[n+0][i][j][k2]*lhs[n+4][i][j][k];
	  rhs[m][i][j][k2] = rhs[m][i][j][k2] -
	    lhs[n+0][i][j][k2]*rhs[m][i][j][k];
	}
      }
    }
/*--------------------------------------------------------------------
c         And again the last two rows separately
c-------------------------------------------------------------------*/
    k  = grid_points[2]-2;
    k1 = grid_points[2]-1;
#pragma omp for
    for (i = 1; i <= grid_points[0]-2; i++) {
      for (j = 1; j <= grid_points[1]-2; j++) {
	fac1               = 1./lhs[n+2][i][j][k];
	lhs[n+3][i][j][k]  = fac1*lhs[n+3][i][j][k];
	lhs[n+4][i][j][k]  = fac1*lhs[n+4][i][j][k];
	rhs[m][i][j][k]    = fac1*rhs[m][i][j][k];
	lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -
	  lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];
	lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -
	  lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];
	rhs[m][i][j][k1]   = rhs[m][i][j][k1] -
	  lhs[n+1][i][j][k1]*rhs[m][i][j][k];
/*--------------------------------------------------------------------
c               Scale the last row immediately (some of this is overkill
c               if this is the last cell)
c-------------------------------------------------------------------*/
	fac2               = 1./lhs[n+2][i][j][k1];
	rhs[m][i][j][k1]   = fac2*rhs[m][i][j][k1];
      }
    }
  }
/*--------------------------------------------------------------------
c                         BACKSUBSTITUTION 
c-------------------------------------------------------------------*/
  k  = grid_points[2]-2;
  k1 = grid_points[2]-1;
  n = 0;
  for (m = 0; m < 3; m++) {
#pragma omp for
    for (i = 1; i <= grid_points[0]-2; i++) {
      for (j = 1; j <= grid_points[1]-2; j++) {
	rhs[m][i][j][k] = rhs[m][i][j][k] -
	  lhs[n+3][i][j][k]*rhs[m][i][j][k1];
      }
    }
  }
  for (m = 3; m < 5; m++) {
    n = (m-3+1)*5;
#pragma omp for
    for (i = 1; i <= grid_points[0]-2; i++) {
      for (j = 1; j <= grid_points[1]-2; j++) {
	rhs[m][i][j][k] = rhs[m][i][j][k] -
	  lhs[n+3][i][j][k]*rhs[m][i][j][k1];
      }
    }
  }
/*--------------------------------------------------------------------
c      Whether or not this is the last processor, we always have
c      to complete the back-substitution 
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c      The first three factors
c-------------------------------------------------------------------*/
  n = 0;
  for (m = 0; m < 3; m++) {
#pragma omp for
    for (i = 1; i <= grid_points[0]-2; i++) {
      for (j = 1; j <= grid_points[1]-2; j++) {
	for (k = grid_points[2]-3; k >= 0; k--) {
	  k1 = k  + 1;
	  k2 = k  + 2;
	  rhs[m][i][j][k] = rhs[m][i][j][k] -
	    lhs[n+3][i][j][k]*rhs[m][i][j][k1] -
	    lhs[n+4][i][j][k]*rhs[m][i][j][k2];
	}
      }
    }
  }
/*--------------------------------------------------------------------
c      And the remaining two
c-------------------------------------------------------------------*/
  for (m = 3; m < 5; m++) {
    n = (m-3+1)*5;
#pragma omp for
    for (i = 1; i <= grid_points[0]-2; i++) {
      for (j = 1; j <= grid_points[1]-2; j++) {
	for (k = grid_points[2]-3; k >= 0; k--) {
	  k1 = k  + 1;
	  k2 = k  + 2;
	  rhs[m][i][j][k] = rhs[m][i][j][k] -
	    lhs[n+3][i][j][k]*rhs[m][i][j][k1] -
	    lhs[n+4][i][j][k]*rhs[m][i][j][k2];
	}
      }
    }
  }
}
/* NOTE(review): tzetar() is invoked after the parallel region closes,
   so its orphaned "omp for" binds to a single thread -- confirm this
   placement is intentional */
  tzetar();
}
|
TransferOP.h | /*
* TransferOP.h
*
* Created on: Jul 20, 2016
* Author: mason
*/
#ifndef TransferOP_H_
#define TransferOP_H_
#include "Param.h"
#include "MyLib.h"
#include "Node.h"
#include "Graph.h"
// Parameters for a label-conditioned linear transfer: one weight matrix
// per element of the alphabet, each mapping nInSize -> nOutSize.
class TransferParams {
  public:
    vector<Param> W;     // one projection matrix per alphabet element
    PAlphabet elems;     // alphabet used to map label strings to indices
    int nVSize;          // number of matrices (== elems->size())
    int nInSize;         // input dimension of every matrix
    int nOutSize;        // output dimension of every matrix

  public:
    TransferParams() {
        nVSize = 0;
    }

    // Register every per-element matrix with the optimizer.
    inline void exportAdaParams(ModelUpdate& ada) {
        for (int pos = 0; pos < nVSize; pos++) {
            ada.addParam(&(W[pos]));
        }
    }

    // Allocate and initialize one nOSize x nISize matrix per element.
    inline void initial(PAlphabet alpha, int nOSize, int nISize) {
        elems = alpha;
        nVSize = elems->size();
        nInSize = nISize;
        nOutSize = nOSize;
        W.resize(nVSize);
        for (int pos = 0; pos < nVSize; pos++) {
            W[pos].initial(nOSize, nISize);
        }
    }

    // Translate a label string into its matrix index (negative if absent).
    inline int getElemId(const string& strFeat) {
        return elems->from_string(strFeat);
    }

    // Serialization is not implemented yet.
    inline void save(std::ofstream &os) const {
    }

    // Deserialization is not implemented yet.
    inline void load(std::ifstream &is) {
    }
};
class TransferNode : public Node {
public:
PNode in;
int xid;
TransferParams* param;
public:
TransferNode() : Node() {
in = NULL;
xid = -1;
param = NULL;
}
inline void setParam(TransferParams* paramInit) {
param = paramInit;
}
inline void clearValue() {
Node::clearValue();
in = NULL;
xid = -1;
}
public:
void forward(Graph *cg, PNode x, const string& strNorm) {
in = x;
xid = param->getElemId(strNorm);
if (xid < 0) {
std::cout << "TransferNode warning: could find the label: " << strNorm << std::endl;
}
degree = 0;
in->addParent(this);
}
public:
void compute() {
if (xid >= 0) {
val.mat() = param->W[xid].val.mat() * in->val.mat();
}
}
void backward() {
if(xid >= 0) {
param->W[xid].grad.mat() += loss.mat() * in->val.tmat();
in->loss.mat() += param->W[xid].val.mat().transpose() * loss.mat();
}
}
public:
inline PExecute generate(bool bTrain, dtype cur_drop_factor);
// better to rewrite for deep understanding
inline bool typeEqual(PNode other) {
bool result = Node::typeEqual(other);
if (!result) return false;
TransferNode* conv_other = (TransferNode*)other;
if (param != conv_other->param) {
return false;
}
if (xid != conv_other->xid) {
return false;
}
return true;
}
};
// Batched executor for TransferNode: runs each queued node's forward
// and backward passes, including dropout handling.
class TransferExecute : public Execute {
  public:
    inline void forward() {
        int total = batch.size();
        //#pragma omp parallel for
        for (int pos = 0; pos < total; pos++) {
            batch[pos]->compute();
            batch[pos]->forward_drop(bTrain, drop_factor);
        }
    }

    inline void backward() {
        int total = batch.size();
        //#pragma omp parallel for
        for (int pos = 0; pos < total; pos++) {
            batch[pos]->backward_drop();
            batch[pos]->backward();
        }
    }
};
// Create a single-node executor for this TransferNode; ownership of the
// returned object passes to the caller (graph execution framework).
inline PExecute TransferNode::generate(bool bTrain, dtype cur_drop_factor) {
    TransferExecute* exec = new TransferExecute();
    exec->batch.push_back(this);
    exec->bTrain = bTrain;
    exec->drop_factor = cur_drop_factor;
    return exec;
}  // fix: dropped the extraneous ';' that followed this function definition
#endif /* TransferOP_H_ */
|
colormap.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO L OOO RRRR M M AAA PPPP %
% C O O L O O R R MM MM A A P P %
% C O O L O O RRRR M M M AAAAA PPPP %
% C O O L O O R R M M A A P %
% CCCC OOO LLLLL OOO R R M M A A P %
% %
% %
% MagickCore Colormap Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% We use linked-lists because splay-trees do not currently support duplicate
% key / value pairs (e.g. X11 green compliance and SVG green compliance).
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/client.h"
#include "MagickCore/configure.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/xml-tree.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageColormap() allocates an image colormap and initializes
% it to a linear gray colorspace. If the image already has a colormap,
% it is replaced. AcquireImageColormap() returns MagickTrue if successful,
% otherwise MagickFalse if there is not enough memory.
%
% The format of the AcquireImageColormap method is:
%
% MagickBooleanType AcquireImageColormap(Image *image,const size_t colors,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colors: the number of colors in the image colormap.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AcquireImageColormap(Image *image,
  const size_t colors,ExceptionInfo *exception)
{
  ssize_t
    i;
  /*
    Allocate image colormap.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (colors > MaxColormapSize)
    {
      /*
        Request exceeds the palette limit: leave the image as DirectClass
        with no colormap and raise a resource-limit error.
      */
      image->colors=0;
      image->storage_class=DirectClass;
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    At least one entry is always allocated, plus one spare slot; an
    existing colormap is resized (replaced) rather than leaked.
  */
  image->colors=MagickMax(colors,1);
  if (image->colormap == (PixelInfo *) NULL)
    image->colormap=(PixelInfo *) AcquireQuantumMemory(image->colors+1,
      sizeof(*image->colormap));
  else
    image->colormap=(PixelInfo *) ResizeQuantumMemory(image->colormap,
      image->colors+1,sizeof(*image->colormap));
  if (image->colormap == (PixelInfo *) NULL)
    {
      image->colors=0;
      image->storage_class=DirectClass;
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Initialize the colormap to an opaque linear gray ramp.
  */
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    double
      pixel;
    GetPixelInfo(image,image->colormap+i);
    /*
      Integer division first: each entry advances by QuantumRange/(colors-1).
      NOTE(review): when colors == 0 the size_t subtraction wraps, making
      the quotient 0 -- harmless here because the loop then runs only once
      (image->colors was clamped to 1), but worth confirming upstream.
    */
    pixel=(double) (i*(QuantumRange/MagickMax(colors-1,1)));
    image->colormap[i].red=pixel;
    image->colormap[i].green=pixel;
    image->colormap[i].blue=pixel;
    image->colormap[i].alpha=(MagickRealType) OpaqueAlpha;
    image->colormap[i].alpha_trait=BlendPixelTrait;
  }
  return(SetImageStorageClass(image,PseudoClass,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C y c l e C o l o r m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CycleColormap() displaces an image's colormap by a given number of
% positions. If you cycle the colormap a number of times you can produce
% a psychedelic effect.
%
% WARNING: this assumes an image's colormap is in a well known and defined
% order. Currently ImageMagick has no way of setting that order.
%
% The format of the CycleColormapImage method is:
%
% MagickBooleanType CycleColormapImage(Image *image,const ssize_t displace,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o displace: displace the colormap this amount.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CycleColormapImage(Image *image,
  const ssize_t displace,ExceptionInfo *exception)
{
  CacheView
    *image_view;
  MagickBooleanType
    status;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Cycling only makes sense on an indexed image; convert if needed.
  */
  if (image->storage_class == DirectClass)
    (void) SetImageType(image,PaletteType,exception);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    ssize_t
      x;
    Quantum
      *magick_restrict q;
    ssize_t
      index;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Compute the displaced index with a *signed* modulo: image->colors
        is a size_t, so without the (ssize_t) cast on the divisor a
        negative dividend would first be converted to a huge unsigned
        value, the remainder would be wrong, and the "index < 0" fix-up
        below could never fire for negative displacements.
      */
      index=((ssize_t) (GetPixelIndex(image,q)+displace)) %
        (ssize_t) image->colors;
      if (index < 0)
        index+=(ssize_t) image->colors;
      SetPixelIndex(image,(Quantum) index,q);
      SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S o r t C o l o r m a p B y I n t e n s i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SortColormapByIntensity() sorts the colormap of a PseudoClass image by
% decreasing color intensity.
%
% The format of the SortColormapByIntensity method is:
%
% MagickBooleanType SortColormapByIntensity(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: A pointer to an Image structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  qsort() comparator: orders colormap entries by *decreasing* intensity.
  Compare in floating point and return only the sign: the previous
  "(int) b - (int) a" form truncated fractional intensities and could
  overflow int at large quantum depths, where intensity spans
  [0..QuantumRange].  Only the sign matters to qsort(), so this change
  is behavior-compatible.
*/
static int IntensityCompare(const void *x,const void *y)
{
  const PixelInfo
    *color_1,
    *color_2;
  double
    intensity;
  color_1=(const PixelInfo *) x;
  color_2=(const PixelInfo *) y;
  intensity=(double) GetPixelInfoIntensity((const Image *) NULL,color_2)-
    (double) GetPixelInfoIntensity((const Image *) NULL,color_1);
  if (intensity < 0.0)
    return(-1);
  if (intensity > 0.0)
    return(1);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
MagickExport MagickBooleanType SortColormapByIntensity(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;
  MagickBooleanType
    status;
  ssize_t
    i;
  ssize_t
    y;
  unsigned short
    *pixels;
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* Nothing to sort unless the image is palette-based. */
  if (image->storage_class != PseudoClass)
    return(MagickTrue);
  /*
    Allocate memory for pixel indexes.
  */
  pixels=(unsigned short *) AcquireQuantumMemory((size_t) image->colors,
    sizeof(*pixels));
  if (pixels == (unsigned short *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Assign index values to colormap entries.  The alpha channel is used as
    scratch space so each entry remembers its original position across the
    qsort() below.
    NOTE(review): colormap alpha is overwritten here and not restored
    afterwards -- confirm callers do not rely on colormap alpha values.
  */
  for (i=0; i < (ssize_t) image->colors; i++)
    image->colormap[i].alpha=(double) i;
  /*
    Sort image colormap by decreasing color popularity.
  */
  qsort((void *) image->colormap,(size_t) image->colors,
    sizeof(*image->colormap),IntensityCompare);
  /*
    Update image colormap indexes to sorted colormap order: pixels[old]
    gives the entry's new position after the sort.
  */
  for (i=0; i < (ssize_t) image->colors; i++)
    pixels[(ssize_t) image->colormap[i].alpha]=(unsigned short) i;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  /* Remap every pixel's index through the old->new translation table. */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      index;
    ssize_t
      x;
    Quantum
      *magick_restrict q;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Clamp the stored index into range before translating it. */
      i=ConstrainColormapIndex(image,GetPixelIndex(image,q),exception);
      index=(Quantum) pixels[i];
      SetPixelIndex(image,index,q);
      SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (status == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  pixels=(unsigned short *) RelinquishMagickMemory(pixels);
  return(status);
}
|
pbkdf2_hmac_sha256_fmt_plug.c | /* This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* Based on hmac-sha512 by magnum
*
* Minor fixes, format unification and OMP support done by Dhiru Kholia
* <dhiru@openwall.com>
*
* Fixed for supporting $ml$ "dave" format as well as GRUB native format by
* magnum 2013. Note: We support a binary size of >512 bits (64 bytes / 128
* chars of hex) but we currently do not calculate it even in cmp_exact(). The
* chance for a 512-bit hash collision should be pretty dang slim.
*
* the pbkdf2_sha256_hmac was so messed up, I simply copied sha512 over the top
* of it, replacing the code in totality. JimF.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_pbkdf2_hmac_sha256;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pbkdf2_hmac_sha256);
#else
#include <ctype.h>
#include <string.h>
#include <assert.h>
#include <stdint.h>
#include "misc.h"
#include "arch.h"
#include "common.h"
#include "formats.h"
#include "base64_convert.h"
#include "sha2.h"
#include "johnswap.h"
#include "pbkdf2_hmac_sha256.h"
#include "pbkdf2_hmac_common.h"
#define FORMAT_LABEL "PBKDF2-HMAC-SHA256"
#define FORMAT_NAME ""
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA256 " SHA256_ALGORITHM_NAME
#else
#if ARCH_BITS >= 64
#define ALGORITHM_NAME "PBKDF2-SHA256 64/" ARCH_BITS_STR " " SHA2_LIB
#else
#define ALGORITHM_NAME "PBKDF2-SHA256 32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#endif
#define MAX_CIPHERTEXT_LENGTH 1024 /* Bump this and code will adopt */
#define SALT_SIZE sizeof(struct custom_salt)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define BENCHMARK_LENGTH -1
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 4
#endif
#endif
#include "memdbg.h"
#define PAD_SIZE 128
#define PLAINTEXT_LENGTH 125
/* Parsed salt: raw salt bytes, their length, and the PBKDF2 round count. */
static struct custom_salt {
	uint8_t length;
	uint8_t salt[PBKDF2_32_MAX_SALT_SIZE + 3];
	uint32_t rounds;
} *cur_salt;
/* Per-candidate buffers, allocated in init() to max_keys_per_crypt entries. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[PBKDF2_SHA256_BINARY_SIZE / sizeof(uint32_t)];
/*
 * One-time format setup: scale the keys-per-crypt parameters for OpenMP,
 * then allocate the candidate-key and result buffers to match.
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out));
}
/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(crypt_out);
}
/*
 * Parse the salt portion of a canonical ciphertext into a static
 * custom_salt (iteration count + raw salt bytes).  Returns a pointer to
 * static storage, per the usual john salt-extraction convention; the
 * ciphertext is assumed to have passed valid() already.
 */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt salt;
	char *p, *c = ciphertext;
	uint32_t rounds;
	memset(&salt, 0, sizeof(salt));
	/* Skip the format tag, then read the decimal round count. */
	c += PBKDF2_SHA256_TAG_LEN;
	rounds = strtol(c, NULL, 10);
	/* Advance past the rounds field; p marks the end of the salt field. */
	c = strchr(c, '$') + 1;
	p = strchr(c, '$');
	if (p-c==14 && rounds==20000) {
		/* for now, assume this is a cisco8 hash: 14-byte salt kept verbatim */
		strnzcpy((char*)(salt.salt), c, 15);
		salt.length = 14;
		salt.rounds = rounds;
		return (void*)&salt;
	}
	/* Otherwise the salt is mime base64 (with '+' encoded as '.'); decode. */
	salt.length = base64_convert(c, e_b64_mime, p-c, salt.salt, e_b64_raw, sizeof(salt.salt), flg_Base64_MIME_PLUS_TO_DOT, 0);
	salt.rounds = rounds;
	return (void *)&salt;
}
/* Install the salt the cracker loop selected for upcoming crypt_all() calls. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"
/*
 * Compute PBKDF2-HMAC-SHA256 for all queued candidate keys against
 * cur_salt, writing results into crypt_out.  With SIMD enabled, keys are
 * processed in groups of SSE_GROUP_SZ_SHA256; otherwise one at a time.
 * Returns the number of candidates processed.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
	{
#ifdef SSE_GROUP_SZ_SHA256
		int lens[SSE_GROUP_SZ_SHA256], i;
		unsigned char *pin[SSE_GROUP_SZ_SHA256];
		/* The union lets us hand the SSE routine a (unsigned char **)
		   view of the per-lane uint32_t output pointers. */
		union {
			uint32_t *pout[SSE_GROUP_SZ_SHA256];
			unsigned char *poutc;
		} x;
		/* Gather one SIMD group's key pointers, lengths and outputs. */
		for (i = 0; i < SSE_GROUP_SZ_SHA256; ++i) {
			lens[i] = strlen(saved_key[index+i]);
			pin[i] = (unsigned char*)saved_key[index+i];
			x.pout[i] = crypt_out[index+i];
		}
		pbkdf2_sha256_sse((const unsigned char **)pin, lens, cur_salt->salt, cur_salt->length, cur_salt->rounds, &(x.poutc), PBKDF2_SHA256_BINARY_SIZE, 0);
#else
		/* Scalar path: derive one key directly into crypt_out. */
		pbkdf2_sha256((const unsigned char*)(saved_key[index]), strlen(saved_key[index]),
			cur_salt->salt, cur_salt->length,
			cur_salt->rounds, (unsigned char*)crypt_out[index], PBKDF2_SHA256_BINARY_SIZE, 0);
#endif
	}
	return count;
}
/*
 * Quick scan: does any computed hash match the candidate binary in its
 * first ARCH_SIZE bytes?  A full-width comparison follows in cmp_one().
 */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (!memcmp(binary, crypt_out[i], ARCH_SIZE))
			return 1;
	return 0;
}
/* Full-width comparison of the candidate binary against one computed hash. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], PBKDF2_SHA256_BINARY_SIZE);
}
/* Check the FULL binary, just for good measure. There is no chance we'll
   have a false positive here but this function is not performance sensitive.
   This function is not done like in pbkdf2_hmac_sha512; it simply returns 1.
*/
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Store one candidate password (NUL-terminated, truncated to fit the buffer). */
static void set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, sizeof(*saved_key));
}
/* Return the stored candidate password for the given slot. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Tunable-cost hook: report the PBKDF2 iteration count of a salt. */
static unsigned int iteration_count(void *salt)
{
	struct custom_salt *cs = (struct custom_salt *)salt;

	return (unsigned int) cs->rounds;
}
/* Format descriptor registered with the john core. */
struct fmt_main fmt_pbkdf2_hmac_sha256 = {
	{ /* fmt_params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		PBKDF2_SHA256_BINARY_SIZE,
		PBKDF2_32_BINARY_ALIGN,
		SALT_SIZE,
		sizeof(ARCH_WORD),
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ /* tunable-cost names, parallel to the hooks below */
			"iteration count",
		},
		{ PBKDF2_SHA256_FORMAT_TAG, FORMAT_TAG_CISCO8 },
		pbkdf2_hmac_sha256_common_tests
	}, { /* fmt_methods */
		init,
		done,
		fmt_default_reset,
		pbkdf2_hmac_sha256_prepare,
		pbkdf2_hmac_sha256_valid,
		fmt_default_split,
		pbkdf2_hmac_sha256_binary,
		get_salt,
		{ /* tunable-cost hooks */
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
Stmt.h | //===- Stmt.h - Classes for representing statements -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Stmt interface and subclasses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMT_H
#define LLVM_CLANG_AST_STMT_H
#include "clang/AST/DeclGroup.h"
#include "clang/AST/DependenceFlags.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <string>
namespace llvm {
class FoldingSetNodeID;
} // namespace llvm
namespace clang {
class ASTContext;
class Attr;
class CapturedDecl;
class Decl;
class Expr;
class AddrLabelExpr;
class LabelDecl;
class ODRHash;
class PrinterHelper;
struct PrintingPolicy;
class RecordDecl;
class SourceManager;
class StringLiteral;
class Token;
class VarDecl;
//===----------------------------------------------------------------------===//
// AST classes for statements.
//===----------------------------------------------------------------------===//
/// Stmt - This represents one statement.
///
class alignas(void *) Stmt {
public:
enum StmtClass {
NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
};
// Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
friend class ASTStmtReader;
friend class ASTStmtWriter;
void *operator new(size_t bytes) noexcept {
llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
}
void operator delete(void *data) noexcept {
llvm_unreachable("Stmts cannot be released with regular 'delete'.");
}
//===--- Statement bitfields classes ---===//
class StmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class Stmt;
/// The statement class.
unsigned sClass : 8;
};
enum { NumStmtBits = 8 };
class NullStmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class NullStmt;
unsigned : NumStmtBits;
/// True if the null statement was preceded by an empty macro, e.g:
/// @code
/// #define CALL(x)
/// CALL(0);
/// @endcode
unsigned HasLeadingEmptyMacro : 1;
/// The location of the semi-colon.
SourceLocation SemiLoc;
};
class CompoundStmtBitfields {
friend class ASTStmtReader;
friend class CompoundStmt;
unsigned : NumStmtBits;
unsigned NumStmts : 32 - NumStmtBits;
/// The location of the opening "{".
SourceLocation LBraceLoc;
};
class LabelStmtBitfields {
friend class LabelStmt;
unsigned : NumStmtBits;
SourceLocation IdentLoc;
};
class AttributedStmtBitfields {
friend class ASTStmtReader;
friend class AttributedStmt;
unsigned : NumStmtBits;
/// Number of attributes.
unsigned NumAttrs : 32 - NumStmtBits;
/// The location of the attribute.
SourceLocation AttrLoc;
};
class IfStmtBitfields {
friend class ASTStmtReader;
friend class IfStmt;
unsigned : NumStmtBits;
/// True if this if statement is a constexpr if.
unsigned IsConstexpr : 1;
/// True if this if statement has storage for an else statement.
unsigned HasElse : 1;
/// True if this if statement has storage for a variable declaration.
unsigned HasVar : 1;
/// True if this if statement has storage for an init statement.
unsigned HasInit : 1;
/// The location of the "if".
SourceLocation IfLoc;
};
class SwitchStmtBitfields {
friend class SwitchStmt;
unsigned : NumStmtBits;
/// True if the SwitchStmt has storage for an init statement.
unsigned HasInit : 1;
/// True if the SwitchStmt has storage for a condition variable.
unsigned HasVar : 1;
/// If the SwitchStmt is a switch on an enum value, records whether all
/// the enum values were covered by CaseStmts. The coverage information
/// value is meant to be a hint for possible clients.
unsigned AllEnumCasesCovered : 1;
/// The location of the "switch".
SourceLocation SwitchLoc;
};
class WhileStmtBitfields {
friend class ASTStmtReader;
friend class WhileStmt;
unsigned : NumStmtBits;
/// True if the WhileStmt has storage for a condition variable.
unsigned HasVar : 1;
/// The location of the "while".
SourceLocation WhileLoc;
};
class DoStmtBitfields {
friend class DoStmt;
unsigned : NumStmtBits;
/// The location of the "do".
SourceLocation DoLoc;
};
class ForStmtBitfields {
friend class ForStmt;
unsigned : NumStmtBits;
/// The location of the "for".
SourceLocation ForLoc;
};
class GotoStmtBitfields {
friend class GotoStmt;
friend class IndirectGotoStmt;
unsigned : NumStmtBits;
/// The location of the "goto".
SourceLocation GotoLoc;
};
class ContinueStmtBitfields {
friend class ContinueStmt;
unsigned : NumStmtBits;
/// The location of the "continue".
SourceLocation ContinueLoc;
};
class BreakStmtBitfields {
friend class BreakStmt;
unsigned : NumStmtBits;
/// The location of the "break".
SourceLocation BreakLoc;
};
class ReturnStmtBitfields {
friend class ReturnStmt;
unsigned : NumStmtBits;
/// True if this ReturnStmt has storage for an NRVO candidate.
unsigned HasNRVOCandidate : 1;
/// The location of the "return".
SourceLocation RetLoc;
};
class SwitchCaseBitfields {
friend class SwitchCase;
friend class CaseStmt;
unsigned : NumStmtBits;
/// Used by CaseStmt to store whether it is a case statement
/// of the form case LHS ... RHS (a GNU extension).
unsigned CaseStmtIsGNURange : 1;
/// The location of the "case" or "default" keyword.
SourceLocation KeywordLoc;
};
//===--- Expression bitfields classes ---===//
class ExprBitfields {
friend class ASTStmtReader; // deserialization
friend class AtomicExpr; // ctor
friend class BlockDeclRefExpr; // ctor
friend class CallExpr; // ctor
friend class CXXConstructExpr; // ctor
friend class CXXDependentScopeMemberExpr; // ctor
friend class CXXNewExpr; // ctor
friend class CXXUnresolvedConstructExpr; // ctor
friend class DeclRefExpr; // computeDependence
friend class DependentScopeDeclRefExpr; // ctor
friend class DesignatedInitExpr; // ctor
friend class Expr;
friend class InitListExpr; // ctor
friend class ObjCArrayLiteral; // ctor
friend class ObjCDictionaryLiteral; // ctor
friend class ObjCMessageExpr; // ctor
friend class OffsetOfExpr; // ctor
friend class OpaqueValueExpr; // ctor
friend class OverloadExpr; // ctor
friend class ParenListExpr; // ctor
friend class PseudoObjectExpr; // ctor
friend class ShuffleVectorExpr; // ctor
unsigned : NumStmtBits;
unsigned ValueKind : 2;
unsigned ObjectKind : 3;
unsigned /*ExprDependence*/ Dependent : llvm::BitWidth<ExprDependence>;
};
enum { NumExprBits = NumStmtBits + 5 + llvm::BitWidth<ExprDependence> };
class ConstantExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class ConstantExpr;
unsigned : NumExprBits;
/// The kind of result that is tail-allocated.
unsigned ResultKind : 2;
/// The kind of Result as defined by APValue::Kind.
unsigned APValueKind : 4;
/// When ResultKind == RSK_Int64, true if the tail-allocated integer is
/// unsigned.
unsigned IsUnsigned : 1;
/// When ResultKind == RSK_Int64. the BitWidth of the tail-allocated
/// integer. 7 bits because it is the minimal number of bits to represent a
/// value from 0 to 64 (the size of the tail-allocated integer).
unsigned BitWidth : 7;
/// When ResultKind == RSK_APValue, true if the ASTContext will cleanup the
/// tail-allocated APValue.
unsigned HasCleanup : 1;
/// True if this ConstantExpr was created for immediate invocation.
unsigned IsImmediateInvocation : 1;
};
class PredefinedExprBitfields {
friend class ASTStmtReader;
friend class PredefinedExpr;
unsigned : NumExprBits;
/// The kind of this PredefinedExpr. One of the enumeration values
/// in PredefinedExpr::IdentKind.
unsigned Kind : 4;
/// True if this PredefinedExpr has a trailing "StringLiteral *"
/// for the predefined identifier.
unsigned HasFunctionName : 1;
/// The location of this PredefinedExpr.
SourceLocation Loc;
};
class DeclRefExprBitfields {
friend class ASTStmtReader; // deserialization
friend class DeclRefExpr;
unsigned : NumExprBits;
unsigned HasQualifier : 1;
unsigned HasTemplateKWAndArgsInfo : 1;
unsigned HasFoundDecl : 1;
unsigned HadMultipleCandidates : 1;
unsigned RefersToEnclosingVariableOrCapture : 1;
unsigned NonOdrUseReason : 2;
/// The location of the declaration name itself.
SourceLocation Loc;
};
class FloatingLiteralBitfields {
friend class FloatingLiteral;
unsigned : NumExprBits;
unsigned Semantics : 3; // Provides semantics for APFloat construction
unsigned IsExact : 1;
};
class StringLiteralBitfields {
friend class ASTStmtReader;
friend class StringLiteral;
unsigned : NumExprBits;
/// The kind of this string literal.
/// One of the enumeration values of StringLiteral::StringKind.
unsigned Kind : 3;
/// The width of a single character in bytes. Only values of 1, 2,
/// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps
/// the target + string kind to the appropriate CharByteWidth.
unsigned CharByteWidth : 3;
unsigned IsPascal : 1;
/// The number of concatenated token this string is made of.
/// This is the number of trailing SourceLocation.
unsigned NumConcatenated;
};
class CharacterLiteralBitfields {
friend class CharacterLiteral;
unsigned : NumExprBits;
unsigned Kind : 3;
};
class UnaryOperatorBitfields {
friend class UnaryOperator;
unsigned : NumExprBits;
unsigned Opc : 5;
unsigned CanOverflow : 1;
//
/// This is only meaningful for operations on floating point
/// types when additional values need to be in trailing storage.
/// It is 0 otherwise.
unsigned HasFPFeatures : 1;
SourceLocation Loc;
};
class UnaryExprOrTypeTraitExprBitfields {
friend class UnaryExprOrTypeTraitExpr;
unsigned : NumExprBits;
unsigned Kind : 3;
unsigned IsType : 1; // true if operand is a type, false if an expression.
};
class ArrayOrMatrixSubscriptExprBitfields {
friend class ArraySubscriptExpr;
friend class MatrixSubscriptExpr;
unsigned : NumExprBits;
SourceLocation RBracketLoc;
};
class CallExprBitfields {
friend class CallExpr;
unsigned : NumExprBits;
unsigned NumPreArgs : 1;
/// True if the callee of the call expression was found using ADL.
unsigned UsesADL : 1;
/// True if the call expression has some floating-point features.
unsigned HasFPFeatures : 1;
/// Padding used to align OffsetToTrailingObjects to a byte multiple.
unsigned : 24 - 3 - NumExprBits;
/// The offset in bytes from the this pointer to the start of the
/// trailing objects belonging to CallExpr. Intentionally byte sized
/// for faster access.
unsigned OffsetToTrailingObjects : 8;
};
enum { NumCallExprBits = 32 };
class MemberExprBitfields {
friend class ASTStmtReader;
friend class MemberExpr;
unsigned : NumExprBits;
/// IsArrow - True if this is "X->F", false if this is "X.F".
unsigned IsArrow : 1;
/// True if this member expression used a nested-name-specifier to
/// refer to the member, e.g., "x->Base::f", or found its member via
/// a using declaration. When true, a MemberExprNameQualifier
/// structure is allocated immediately after the MemberExpr.
unsigned HasQualifierOrFoundDecl : 1;
/// True if this member expression specified a template keyword
/// and/or a template argument list explicitly, e.g., x->f<int>,
/// x->template f, x->template f<int>.
/// When true, an ASTTemplateKWAndArgsInfo structure and its
/// TemplateArguments (if any) are present.
unsigned HasTemplateKWAndArgsInfo : 1;
/// True if this member expression refers to a method that
/// was resolved from an overloaded set having size greater than 1.
unsigned HadMultipleCandidates : 1;
/// Value of type NonOdrUseReason indicating why this MemberExpr does
/// not constitute an odr-use of the named declaration. Meaningful only
/// when naming a static member.
unsigned NonOdrUseReason : 2;
/// This is the location of the -> or . in the expression.
SourceLocation OperatorLoc;
};
class CastExprBitfields {
friend class CastExpr;
friend class ImplicitCastExpr;
unsigned : NumExprBits;
unsigned Kind : 6;
unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.
/// True if the call expression has some floating-point features.
unsigned HasFPFeatures : 1;
/// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough
/// here. ([implimits] Direct and indirect base classes [16384]).
unsigned BasePathSize;
};
class BinaryOperatorBitfields {
friend class BinaryOperator;
unsigned : NumExprBits;
unsigned Opc : 6;
/// This is only meaningful for operations on floating point
/// types when additional values need to be in trailing storage.
/// It is 0 otherwise.
unsigned HasFPFeatures : 1;
SourceLocation OpLoc;
};
class InitListExprBitfields {
friend class InitListExpr;
unsigned : NumExprBits;
/// Whether this initializer list originally had a GNU array-range
/// designator in it. This is a temporary marker used by CodeGen.
unsigned HadArrayRangeDesignator : 1;
};
class ParenListExprBitfields {
friend class ASTStmtReader;
friend class ParenListExpr;
unsigned : NumExprBits;
/// The number of expressions in the paren list.
unsigned NumExprs;
};
class GenericSelectionExprBitfields {
friend class ASTStmtReader;
friend class GenericSelectionExpr;
unsigned : NumExprBits;
/// The location of the "_Generic".
SourceLocation GenericLoc;
};
class PseudoObjectExprBitfields {
friend class ASTStmtReader; // deserialization
friend class PseudoObjectExpr;
unsigned : NumExprBits;
// These don't need to be particularly wide, because they're
// strictly limited by the forms of expressions we permit.
unsigned NumSubExprs : 8;
unsigned ResultIndex : 32 - 8 - NumExprBits;
};
class SourceLocExprBitfields {
friend class ASTStmtReader;
friend class SourceLocExpr;
unsigned : NumExprBits;
/// The kind of source location builtin represented by the SourceLocExpr.
/// Ex. __builtin_LINE, __builtin_FUNCTION, ect.
unsigned Kind : 2;
};
class StmtExprBitfields {
friend class ASTStmtReader;
friend class StmtExpr;
unsigned : NumExprBits;
/// The number of levels of template parameters enclosing this statement
/// expression. Used to determine if a statement expression remains
/// dependent after instantiation.
unsigned TemplateDepth;
};
//===--- C++ Expression bitfields classes ---===//
class CXXOperatorCallExprBitfields {
friend class ASTStmtReader;
friend class CXXOperatorCallExpr;
unsigned : NumCallExprBits;
/// The kind of this overloaded operator. One of the enumerator
/// value of OverloadedOperatorKind.
unsigned OperatorKind : 6;
};
class CXXRewrittenBinaryOperatorBitfields {
friend class ASTStmtReader;
friend class CXXRewrittenBinaryOperator;
unsigned : NumCallExprBits;
unsigned IsReversed : 1;
};
class CXXBoolLiteralExprBitfields {
friend class CXXBoolLiteralExpr;
unsigned : NumExprBits;
/// The value of the boolean literal.
unsigned Value : 1;
/// The location of the boolean literal.
SourceLocation Loc;
};
class CXXNullPtrLiteralExprBitfields {
friend class CXXNullPtrLiteralExpr;
unsigned : NumExprBits;
/// The location of the null pointer literal.
SourceLocation Loc;
};
class CXXThisExprBitfields {
friend class CXXThisExpr;
unsigned : NumExprBits;
/// Whether this is an implicit "this".
unsigned IsImplicit : 1;
/// The location of the "this".
SourceLocation Loc;
};
class CXXThrowExprBitfields {
friend class ASTStmtReader;
friend class CXXThrowExpr;
unsigned : NumExprBits;
/// Whether the thrown variable (if any) is in scope.
unsigned IsThrownVariableInScope : 1;
/// The location of the "throw".
SourceLocation ThrowLoc;
};
class CXXDefaultArgExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultArgExpr;
unsigned : NumExprBits;
/// The location where the default argument expression was used.
SourceLocation Loc;
};
class CXXDefaultInitExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultInitExpr;
unsigned : NumExprBits;
/// The location where the default initializer expression was used.
SourceLocation Loc;
};
class CXXScalarValueInitExprBitfields {
friend class ASTStmtReader;
friend class CXXScalarValueInitExpr;
unsigned : NumExprBits;
SourceLocation RParenLoc;
};
/// Bit-packed state for CXXNewExpr.
class CXXNewExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class CXXNewExpr;

  unsigned : NumExprBits;

  /// Was the usage ::new, i.e. is the global new to be used?
  unsigned IsGlobalNew : 1;

  /// Do we allocate an array? If so, the first trailing "Stmt *" is the
  /// size expression.
  unsigned IsArray : 1;

  /// Should the alignment be passed to the allocation function?
  unsigned ShouldPassAlignment : 1;

  /// If this is an array allocation, does the usual deallocation
  /// function for the allocated type want to know the allocated size?
  unsigned UsualArrayDeleteWantsSize : 1;

  /// What kind of initializer do we have? Could be none, parens, or braces.
  /// In storage, we distinguish between "none, and no initializer expr", and
  /// "none, but an implicit initializer expr".
  unsigned StoredInitializationStyle : 2;

  /// True if the allocated type was expressed as a parenthesized type-id.
  unsigned IsParenTypeId : 1;

  /// The number of placement new arguments.
  unsigned NumPlacementArgs;
};

/// Bit-packed state for CXXDeleteExpr.
class CXXDeleteExprBitfields {
  friend class ASTStmtReader;
  friend class CXXDeleteExpr;

  unsigned : NumExprBits;

  /// Is this a forced global delete, i.e. "::delete"?
  unsigned GlobalDelete : 1;

  /// Is this the array form of delete, i.e. "delete[]"?
  unsigned ArrayForm : 1;

  /// ArrayFormAsWritten can be different from ArrayForm if 'delete' is
  /// applied to pointer-to-array type (ArrayFormAsWritten will be false
  /// while ArrayForm will be true).
  unsigned ArrayFormAsWritten : 1;

  /// Does the usual deallocation function for the element type require
  /// a size_t argument?
  unsigned UsualArrayDeleteWantsSize : 1;

  /// Location of the expression.
  SourceLocation Loc;
};

/// Bit-packed state for TypeTraitExpr.
class TypeTraitExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class TypeTraitExpr;

  unsigned : NumExprBits;

  /// The kind of type trait, which is a value of a TypeTrait enumerator.
  unsigned Kind : 8;

  /// If this expression is not value-dependent, this indicates whether
  /// the trait evaluated true or false.
  unsigned Value : 1;

  /// The number of arguments to this type trait. According to [implimits]
  /// 8 bits would be enough, but we require (and test for) at least 16 bits
  /// to mirror FunctionType.
  unsigned NumArgs;
};

/// Bit-packed state for DependentScopeDeclRefExpr.
class DependentScopeDeclRefExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class DependentScopeDeclRefExpr;

  unsigned : NumExprBits;

  /// Whether the name includes info for explicit template
  /// keyword and arguments.
  unsigned HasTemplateKWAndArgsInfo : 1;
};
/// Bit-packed state for CXXConstructExpr. See CXXConstructExpr's accessors
/// for the meaning of the individual flags.
class CXXConstructExprBitfields {
  friend class ASTStmtReader;
  friend class CXXConstructExpr;

  unsigned : NumExprBits;

  unsigned Elidable : 1;
  unsigned HadMultipleCandidates : 1;
  unsigned ListInitialization : 1;
  unsigned StdInitListInitialization : 1;
  unsigned ZeroInitialization : 1;
  unsigned ConstructionKind : 3;

  SourceLocation Loc;
};

/// Bit-packed state for ExprWithCleanups.
class ExprWithCleanupsBitfields {
  friend class ASTStmtReader; // deserialization
  friend class ExprWithCleanups;

  unsigned : NumExprBits;

  // When false, it must not have side effects.
  unsigned CleanupsHaveSideEffects : 1;

  /// Number of cleanup objects; sized to consume every bit remaining in the
  /// 32-bit word after the Expr bits and the flag above.
  unsigned NumObjects : 32 - 1 - NumExprBits;
};

/// Bit-packed state for CXXUnresolvedConstructExpr.
class CXXUnresolvedConstructExprBitfields {
  friend class ASTStmtReader;
  friend class CXXUnresolvedConstructExpr;

  unsigned : NumExprBits;

  /// The number of arguments used to construct the type.
  unsigned NumArgs;
};

/// Bit-packed state for CXXDependentScopeMemberExpr.
class CXXDependentScopeMemberExprBitfields {
  friend class ASTStmtReader;
  friend class CXXDependentScopeMemberExpr;

  unsigned : NumExprBits;

  /// Whether this member expression used the '->' operator or
  /// the '.' operator.
  unsigned IsArrow : 1;

  /// Whether this member expression has info for explicit template
  /// keyword and arguments.
  unsigned HasTemplateKWAndArgsInfo : 1;

  /// See getFirstQualifierFoundInScope() and the comment listing
  /// the trailing objects.
  unsigned HasFirstQualifierFoundInScope : 1;

  /// The location of the '->' or '.' operator.
  SourceLocation OperatorLoc;
};

/// Bit-packed state shared by the OverloadExpr hierarchy; derived classes
/// (UnresolvedLookupExpr, UnresolvedMemberExpr) extend it via
/// NumOverloadExprBits below.
class OverloadExprBitfields {
  friend class ASTStmtReader;
  friend class OverloadExpr;

  unsigned : NumExprBits;

  /// Whether the name includes info for explicit template
  /// keyword and arguments.
  unsigned HasTemplateKWAndArgsInfo : 1;

  /// Padding used by the derived classes to store various bits. If you
  /// need to add some data here, shrink this padding and add your data
  /// above. NumOverloadExprBits also needs to be updated.
  unsigned : 32 - NumExprBits - 1;

  /// The number of results.
  unsigned NumResults;
};
enum { NumOverloadExprBits = NumExprBits + 1 };

/// Bit-packed state for UnresolvedLookupExpr, layered on OverloadExpr's bits.
class UnresolvedLookupExprBitfields {
  friend class ASTStmtReader;
  friend class UnresolvedLookupExpr;

  unsigned : NumOverloadExprBits;

  /// True if these lookup results should be extended by
  /// argument-dependent lookup if this is the operand of a function call.
  unsigned RequiresADL : 1;

  /// True if these lookup results are overloaded. This is pretty trivially
  /// rederivable if we urgently need to kill this field.
  unsigned Overloaded : 1;
};
static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4,
              "UnresolvedLookupExprBitfields must be <= than 4 bytes to"
              "avoid trashing OverloadExprBitfields::NumResults!");

/// Bit-packed state for UnresolvedMemberExpr, layered on OverloadExpr's bits.
class UnresolvedMemberExprBitfields {
  friend class ASTStmtReader;
  friend class UnresolvedMemberExpr;

  unsigned : NumOverloadExprBits;

  /// Whether this member expression used the '->' operator or
  /// the '.' operator.
  unsigned IsArrow : 1;

  /// Whether the lookup results contain an unresolved using declaration.
  unsigned HasUnresolvedUsing : 1;
};
static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4,
              "UnresolvedMemberExprBitfields must be <= than 4 bytes to"
              "avoid trashing OverloadExprBitfields::NumResults!");
/// Bit-packed state for CXXNoexceptExpr.
class CXXNoexceptExprBitfields {
  friend class ASTStmtReader;
  friend class CXXNoexceptExpr;

  unsigned : NumExprBits;

  /// The value of the noexcept expression (see CXXNoexceptExpr).
  unsigned Value : 1;
};

/// Bit-packed state for SubstNonTypeTemplateParmExpr.
class SubstNonTypeTemplateParmExprBitfields {
  friend class ASTStmtReader;
  friend class SubstNonTypeTemplateParmExpr;

  unsigned : NumExprBits;

  /// The location of the non-type template parameter reference.
  SourceLocation NameLoc;
};

/// Bit-packed state for LambdaExpr.
class LambdaExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class LambdaExpr;

  unsigned : NumExprBits;

  /// The default capture kind, which is a value of type
  /// LambdaCaptureDefault.
  unsigned CaptureDefault : 2;

  /// Whether this lambda had an explicit parameter list vs. an
  /// implicit (and empty) parameter list.
  unsigned ExplicitParams : 1;

  /// Whether this lambda had the result type explicitly specified.
  unsigned ExplicitResultType : 1;

  /// The number of captures.
  unsigned NumCaptures : 16;
};

/// Bit-packed state for RequiresExpr.
class RequiresExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class RequiresExpr;

  unsigned : NumExprBits;

  /// Whether the requires-expression is satisfied (see RequiresExpr).
  unsigned IsSatisfied : 1;

  /// The location of the "requires" keyword.
  SourceLocation RequiresKWLoc;
};

//===--- C++ Coroutines TS bitfields classes ---===//

/// Bit-packed state for CoawaitExpr.
class CoawaitExprBitfields {
  friend class CoawaitExpr;

  unsigned : NumExprBits;

  /// Whether this co_await is implicit (see CoawaitExpr).
  unsigned IsImplicit : 1;
};

//===--- Obj-C Expression bitfields classes ---===//

/// Bit-packed state for ObjCIndirectCopyRestoreExpr.
class ObjCIndirectCopyRestoreExprBitfields {
  friend class ObjCIndirectCopyRestoreExpr;

  unsigned : NumExprBits;

  /// See ObjCIndirectCopyRestoreExpr for the meaning of this flag.
  unsigned ShouldCopy : 1;
};

//===--- Clang Extensions bitfields classes ---===//

/// Bit-packed state for OpaqueValueExpr.
class OpaqueValueExprBitfields {
  friend class ASTStmtReader;
  friend class OpaqueValueExpr;

  unsigned : NumExprBits;

  /// The OVE is a unique semantic reference to its source expression if this
  /// bit is set to true.
  unsigned IsUnique : 1;

  SourceLocation Loc;
};
/// The anonymous union that overlays all of the bitfield classes above.
/// Every Stmt stores exactly one of these, selected by its dynamic class;
/// each member begins with the common StmtBitfields/ExprBitfields prefix so
/// the shared bits (e.g. sClass) are readable through any member.
union {
  // Same order as in StmtNodes.td.

  // Statements
  StmtBitfields StmtBits;
  NullStmtBitfields NullStmtBits;
  CompoundStmtBitfields CompoundStmtBits;
  LabelStmtBitfields LabelStmtBits;
  AttributedStmtBitfields AttributedStmtBits;
  IfStmtBitfields IfStmtBits;
  SwitchStmtBitfields SwitchStmtBits;
  WhileStmtBitfields WhileStmtBits;
  DoStmtBitfields DoStmtBits;
  ForStmtBitfields ForStmtBits;
  GotoStmtBitfields GotoStmtBits;
  ContinueStmtBitfields ContinueStmtBits;
  BreakStmtBitfields BreakStmtBits;
  ReturnStmtBitfields ReturnStmtBits;
  SwitchCaseBitfields SwitchCaseBits;

  // Expressions
  ExprBitfields ExprBits;
  ConstantExprBitfields ConstantExprBits;
  PredefinedExprBitfields PredefinedExprBits;
  DeclRefExprBitfields DeclRefExprBits;
  FloatingLiteralBitfields FloatingLiteralBits;
  StringLiteralBitfields StringLiteralBits;
  CharacterLiteralBitfields CharacterLiteralBits;
  UnaryOperatorBitfields UnaryOperatorBits;
  UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
  ArrayOrMatrixSubscriptExprBitfields ArrayOrMatrixSubscriptExprBits;
  CallExprBitfields CallExprBits;
  MemberExprBitfields MemberExprBits;
  CastExprBitfields CastExprBits;
  BinaryOperatorBitfields BinaryOperatorBits;
  InitListExprBitfields InitListExprBits;
  ParenListExprBitfields ParenListExprBits;
  GenericSelectionExprBitfields GenericSelectionExprBits;
  PseudoObjectExprBitfields PseudoObjectExprBits;
  SourceLocExprBitfields SourceLocExprBits;

  // GNU Extensions.
  StmtExprBitfields StmtExprBits;

  // C++ Expressions
  CXXOperatorCallExprBitfields CXXOperatorCallExprBits;
  CXXRewrittenBinaryOperatorBitfields CXXRewrittenBinaryOperatorBits;
  CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits;
  CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits;
  CXXThisExprBitfields CXXThisExprBits;
  CXXThrowExprBitfields CXXThrowExprBits;
  CXXDefaultArgExprBitfields CXXDefaultArgExprBits;
  CXXDefaultInitExprBitfields CXXDefaultInitExprBits;
  CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits;
  CXXNewExprBitfields CXXNewExprBits;
  CXXDeleteExprBitfields CXXDeleteExprBits;
  TypeTraitExprBitfields TypeTraitExprBits;
  DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits;
  CXXConstructExprBitfields CXXConstructExprBits;
  ExprWithCleanupsBitfields ExprWithCleanupsBits;
  CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits;
  CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits;
  OverloadExprBitfields OverloadExprBits;
  UnresolvedLookupExprBitfields UnresolvedLookupExprBits;
  UnresolvedMemberExprBitfields UnresolvedMemberExprBits;
  CXXNoexceptExprBitfields CXXNoexceptExprBits;
  SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits;
  LambdaExprBitfields LambdaExprBits;
  RequiresExprBitfields RequiresExprBits;

  // C++ Coroutines TS expressions
  CoawaitExprBitfields CoawaitBits;

  // Obj-C Expressions
  ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;

  // Clang Extensions
  OpaqueValueExprBitfields OpaqueValueExprBits;
};
public:
  // Only allow allocation of Stmts using the allocator in ASTContext
  // or by doing a placement new.
  void* operator new(size_t bytes, const ASTContext& C,
                     unsigned alignment = 8);

  /// Convenience overload taking the ASTContext by pointer; forwards to the
  /// by-reference overload above.
  void* operator new(size_t bytes, const ASTContext* C,
                     unsigned alignment = 8) {
    return operator new(bytes, *C, alignment);
  }

  /// Placement new: the caller supplies already-allocated storage.
  void *operator new(size_t bytes, void *mem) noexcept { return mem; }

  /// Matching deletes are deliberate no-ops: Stmt storage comes from the
  /// ASTContext allocator (or placement new) and is never freed individually.
  void operator delete(void *, const ASTContext &, unsigned) noexcept {}
  void operator delete(void *, const ASTContext *, unsigned) noexcept {}
  void operator delete(void *, size_t) noexcept {}
  void operator delete(void *, void *) noexcept {}

public:
  /// A placeholder type used to construct an empty shell of a
  /// type, that will be filled in later (e.g., by some
  /// de-serialization).
  struct EmptyShell {};

  /// The likelihood of a branch being taken.
  enum Likelihood {
    LH_Unlikely = -1, ///< Branch has the [[unlikely]] attribute.
    LH_None,          ///< No attribute set or branches of the IfStmt have
                      ///< the same attribute.
    LH_Likely         ///< Branch has the [[likely]] attribute.
  };
protected:
  /// Iterator for iterating over Stmt * arrays that contain only T *.
  ///
  /// This is needed because AST nodes use Stmt* arrays to store
  /// references to children (to be compatible with StmtIterator).
  template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *>
  struct CastIterator
      : llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *,
                                    std::random_access_iterator_tag, TPtr> {
    using Base = typename CastIterator::iterator_adaptor_base;

    CastIterator() : Base(nullptr) {}
    CastIterator(StmtPtr *I) : Base(I) {}

    /// Dereference casts the stored Stmt* down to T*, mapping null to null.
    typename Base::value_type operator*() const {
      return cast_or_null<T>(*this->I);
    }
  };

  /// Const iterator for iterating over Stmt * arrays that contain only T *.
  template <typename T>
  using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>;

  /// Convenience aliases for the common Expr-element case.
  using ExprIterator = CastIterator<Expr>;
  using ConstExprIterator = ConstCastIterator<Expr>;
private:
  /// Whether statistic collection is enabled.
  static bool StatisticsEnabled;

protected:
  /// Construct an empty statement.
  explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  // Stmts are allocated through the ASTContext allocator (see operator new
  // above) and are neither default-constructible, copyable nor movable.
  Stmt() = delete;
  Stmt(const Stmt &) = delete;
  Stmt(Stmt &&) = delete;
  Stmt &operator=(const Stmt &) = delete;
  Stmt &operator=(Stmt &&) = delete;

  /// Construct a statement of the given class, recording the class in the
  /// shared bitfield word and updating statistics when enabled.
  Stmt(StmtClass SC) {
    // Guard the bit-packed layout: the whole Stmt must stay one word of
    // bitfields plus padding, suitably aligned for pointers.
    static_assert(sizeof(*this) <= 8,
                  "changing bitfields changed sizeof(Stmt)");
    static_assert(sizeof(*this) % alignof(void *) == 0,
                  "Insufficient alignment!");
    StmtBits.sClass = SC;
    if (StatisticsEnabled) Stmt::addStmtClass(SC);
  }

  /// The dynamic class of this statement, as stored in the bitfields.
  StmtClass getStmtClass() const {
    return static_cast<StmtClass>(StmtBits.sClass);
  }

  /// Human-readable name of this statement's class (defined out of line).
  const char *getStmtClassName() const;
  /// SourceLocation tokens are not useful in isolation - they are low level
  /// value objects created/interpreted by SourceManager. We assume AST
  /// clients will have a pointer to the respective SourceManager.
  SourceRange getSourceRange() const LLVM_READONLY;
  SourceLocation getBeginLoc() const LLVM_READONLY;
  SourceLocation getEndLoc() const LLVM_READONLY;

  // global temp stats (until we have a per-module visitor)
  static void addStmtClass(const StmtClass s);
  static void EnableStatistics();
  static void PrintStats();

  /// \returns the likelihood of a set of attributes.
  static Likelihood getLikelihood(ArrayRef<const Attr *> Attrs);

  /// \returns the likelihood of a statement.
  static Likelihood getLikelihood(const Stmt *S);

  /// \returns the likelihood attribute of a statement.
  static const Attr *getLikelihoodAttr(const Stmt *S);

  /// \returns the likelihood of the 'then' branch of an 'if' statement. The
  /// 'else' branch is required to determine whether both branches specify the
  /// same likelihood, which affects the result.
  static Likelihood getLikelihood(const Stmt *Then, const Stmt *Else);

  /// \returns whether the likelihood of the branches of an if statement are
  /// conflicting. When the first element is \c true there's a conflict and
  /// the Attr's are the conflicting attributes of the Then and Else Stmt.
  static std::tuple<bool, const Attr *, const Attr *>
  determineLikelihoodConflict(const Stmt *Then, const Stmt *Else);

  /// Dumps the specified AST fragment and all subtrees to
  /// \c llvm::errs().
  void dump() const;
  void dump(raw_ostream &OS, const ASTContext &Context) const;

  /// \return Unique reproducible object identifier
  int64_t getID(const ASTContext &Context) const;

  /// dumpColor - same as dump(), but forces color highlighting.
  void dumpColor() const;

  /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
  /// back to its original source language syntax.
  void dumpPretty(const ASTContext &Context) const;
  void printPretty(raw_ostream &OS, PrinterHelper *Helper,
                   const PrintingPolicy &Policy, unsigned Indentation = 0,
                   StringRef NewlineSymbol = "\n",
                   const ASTContext *Context = nullptr) const;

  /// Pretty-prints in JSON format.
  void printJson(raw_ostream &Out, PrinterHelper *Helper,
                 const PrintingPolicy &Policy, bool AddQuotes) const;

  /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
  /// works on systems with GraphViz (Mac OS X) or dot+gv installed.
  void viewAST() const;

  /// Skip no-op (attributed, compound) container stmts and skip captured
  /// stmt at the top, if \a IgnoreCaptured is true.
  Stmt *IgnoreContainers(bool IgnoreCaptured = false);

  /// Const overload; forwards to the non-const implementation above.
  const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
    return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
  }

  /// Strip label-like statements (defined out of line); the non-const
  /// overload below forwards to this one.
  const Stmt *stripLabelLikeStatements() const;
  Stmt *stripLabelLikeStatements() {
    return const_cast<Stmt*>(
        const_cast<const Stmt*>(this)->stripLabelLikeStatements());
  }
  /// Child Iterators: All subclasses must implement 'children'
  /// to permit easy iteration over the substatements/subexpessions of an
  /// AST node. This permits easy iteration over all nodes in the AST.
  using child_iterator = StmtIterator;
  using const_child_iterator = ConstStmtIterator;

  using child_range = llvm::iterator_range<child_iterator>;
  using const_child_range = llvm::iterator_range<const_child_iterator>;

  child_range children();

  /// Const overload built on top of the non-const children().
  const_child_range children() const {
    auto Children = const_cast<Stmt *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_iterator child_begin() { return children().begin(); }
  child_iterator child_end() { return children().end(); }

  const_child_iterator child_begin() const { return children().begin(); }
  const_child_iterator child_end() const { return children().end(); }

  /// Produce a unique representation of the given statement.
  ///
  /// \param ID once the profiling operation is complete, will contain
  /// the unique representation of the given statement.
  ///
  /// \param Context the AST context in which the statement resides
  ///
  /// \param Canonical whether the profile should be based on the canonical
  /// representation of this statement (e.g., where non-type template
  /// parameters are identified by index/level rather than their
  /// declaration pointers) or the exact representation of the statement as
  /// written in the source.
  void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
               bool Canonical) const;

  /// Calculate a unique representation for a statement that is
  /// stable across compiler invocations.
  ///
  /// \param ID profile information will be stored in ID.
  ///
  /// \param Hash an ODRHash object which will be called where pointers would
  /// have been used in the Profile function.
  void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};
/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
  /// The group of declarations carried by this statement.
  DeclGroupRef DG;

  /// Source extent of the declaration statement.
  SourceLocation StartLoc, EndLoc;

public:
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
      : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}

  /// Build an empty declaration statement.
  explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}

  /// isSingleDecl - This method returns true if this DeclStmt refers
  /// to a single Decl.
  bool isSingleDecl() const { return DG.isSingleDecl(); }

  const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
  Decl *getSingleDecl() { return DG.getSingleDecl(); }

  const DeclGroupRef getDeclGroup() const { return DG; }
  DeclGroupRef getDeclGroup() { return DG; }
  void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }

  void setStartLoc(SourceLocation L) { StartLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DeclStmtClass;
  }

  // Iterators over subexpressions.
  child_range children() {
    return child_range(child_iterator(DG.begin(), DG.end()),
                       child_iterator(DG.end(), DG.end()));
  }

  const_child_range children() const {
    auto Children = const_cast<DeclStmt *>(this)->children();
    return const_child_range(Children);
  }

  // Iterators over the declarations themselves.
  using decl_iterator = DeclGroupRef::iterator;
  using const_decl_iterator = DeclGroupRef::const_iterator;
  using decl_range = llvm::iterator_range<decl_iterator>;
  using decl_const_range = llvm::iterator_range<const_decl_iterator>;

  decl_range decls() { return decl_range(decl_begin(), decl_end()); }

  decl_const_range decls() const {
    return decl_const_range(decl_begin(), decl_end());
  }

  decl_iterator decl_begin() { return DG.begin(); }
  decl_iterator decl_end() { return DG.end(); }
  const_decl_iterator decl_begin() const { return DG.begin(); }
  const_decl_iterator decl_end() const { return DG.end(); }

  using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;

  reverse_decl_iterator decl_rbegin() {
    return reverse_decl_iterator(decl_end());
  }

  reverse_decl_iterator decl_rend() {
    return reverse_decl_iterator(decl_begin());
  }
};
/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
public:
  /// The semicolon location and the leading-empty-macro flag are stored in
  /// NullStmtBits rather than as data members.
  NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
      : Stmt(NullStmtClass) {
    NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro;
    setSemiLoc(L);
  }

  /// Build an empty null statement.
  explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}

  SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; }
  void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; }

  /// Whether the ";" was the expansion of an empty macro.
  bool hasLeadingEmptyMacro() const {
    return NullStmtBits.HasLeadingEmptyMacro;
  }

  // A null statement covers exactly the semicolon token.
  SourceLocation getBeginLoc() const { return getSemiLoc(); }
  SourceLocation getEndLoc() const { return getSemiLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == NullStmtClass;
  }

  // A null statement has no children; both ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// CompoundStmt - This represents a group of statements like { stmt stmt }.
///
/// The contained statements are stored as trailing objects; their count
/// lives in CompoundStmtBits.NumStmts.
class CompoundStmt final : public Stmt,
                           private llvm::TrailingObjects<CompoundStmt, Stmt *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  /// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits.
  SourceLocation RBraceLoc;

  CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);

  explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}

  void setStmts(ArrayRef<Stmt *> Stmts);

public:
  static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
                              SourceLocation LB, SourceLocation RB);

  // Build an empty compound statement with a location.
  explicit CompoundStmt(SourceLocation Loc)
      : Stmt(CompoundStmtClass), RBraceLoc(Loc) {
    CompoundStmtBits.NumStmts = 0;
    CompoundStmtBits.LBraceLoc = Loc;
  }

  // Build an empty compound statement.
  static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);

  bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
  unsigned size() const { return CompoundStmtBits.NumStmts; }

  // Mutable iteration over the trailing statement array.
  using body_iterator = Stmt **;
  using body_range = llvm::iterator_range<body_iterator>;

  body_range body() { return body_range(body_begin(), body_end()); }
  body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
  body_iterator body_end() { return body_begin() + size(); }
  Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }

  Stmt *body_back() {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  // Read-only iteration over the trailing statement array.
  using const_body_iterator = Stmt *const *;
  using body_const_range = llvm::iterator_range<const_body_iterator>;

  body_const_range body() const {
    return body_const_range(body_begin(), body_end());
  }

  const_body_iterator body_begin() const {
    return getTrailingObjects<Stmt *>();
  }

  const_body_iterator body_end() const { return body_begin() + size(); }

  const Stmt *body_front() const {
    return !body_empty() ? body_begin()[0] : nullptr;
  }

  const Stmt *body_back() const {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  using reverse_body_iterator = std::reverse_iterator<body_iterator>;

  reverse_body_iterator body_rbegin() {
    return reverse_body_iterator(body_end());
  }

  reverse_body_iterator body_rend() {
    return reverse_body_iterator(body_begin());
  }

  using const_reverse_body_iterator =
      std::reverse_iterator<const_body_iterator>;

  const_reverse_body_iterator body_rbegin() const {
    return const_reverse_body_iterator(body_end());
  }

  const_reverse_body_iterator body_rend() const {
    return const_reverse_body_iterator(body_begin());
  }

  // Get the Stmt that StmtExpr would consider to be the result of this
  // compound statement. This is used by StmtExpr to properly emulate the GCC
  // compound expression extension, which ignores trailing NullStmts when
  // getting the result of the expression.
  // i.e. ({ 5;;; })
  //           ^^ ignored
  // If we don't find something that isn't a NullStmt, just return the last
  // Stmt.
  Stmt *getStmtExprResult() {
    for (auto *B : llvm::reverse(body())) {
      if (!isa<NullStmt>(B))
        return B;
    }
    return body_back();
  }

  const Stmt *getStmtExprResult() const {
    return const_cast<CompoundStmt *>(this)->getStmtExprResult();
  }

  SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getEndLoc() const { return RBraceLoc; }

  SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getRBracLoc() const { return RBraceLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CompoundStmtClass;
  }

  // Iterators
  child_range children() { return child_range(body_begin(), body_end()); }

  const_child_range children() const {
    return const_child_range(body_begin(), body_end());
  }
};
// SwitchCase is the base class for CaseStmt and DefaultStmt,
class SwitchCase : public Stmt {
protected:
  /// The location of the ":".
  SourceLocation ColonLoc;

  // The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
  // SourceLocation KeywordLoc;

  /// A pointer to the following CaseStmt or DefaultStmt class,
  /// used by SwitchStmt.
  SwitchCase *NextSwitchCase = nullptr;

  SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
      : Stmt(SC), ColonLoc(ColonLoc) {
    setKeywordLoc(KWLoc);
  }

  /// Build an empty switch case (for deserialization).
  SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
  SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
  void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }

  SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
  void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  // Defined inline after CaseStmt/DefaultStmt; dispatches on the
  // dynamic class.
  inline Stmt *getSubStmt();

  const Stmt *getSubStmt() const {
    return const_cast<SwitchCase *>(this)->getSubStmt();
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  inline SourceLocation getEndLoc() const LLVM_READONLY;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass ||
           T->getStmtClass() == DefaultStmtClass;
  }
};
/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
    : public SwitchCase,
      private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // CaseStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing objects
  // at the end but this would impact children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the LHS of the case statement. Always present.
  //
  // * A "Stmt *" for the RHS of the case statement. This is a GNU extension
  //   which allow ranges in cases statement of the form LHS ... RHS.
  //   Present if and only if caseStmtIsGNURange() is true.
  //
  // * A "Stmt *" for the substatement of the case statement. Always present.
  //
  // * A SourceLocation for the location of the ... if this is a case statement
  //   with a range. Present if and only if caseStmtIsGNURange() is true.
  enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + caseStmtIsGNURange();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return caseStmtIsGNURange();
  }

  // Offsets into the trailing "Stmt *" array; the RHS slot exists only for
  // GNU range cases, which shifts the substatement slot accordingly.
  unsigned lhsOffset() const { return LhsOffset; }
  unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
  unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }

  /// Build a case statement assuming that the storage for the
  /// trailing objects has been properly allocated.
  CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
           SourceLocation ellipsisLoc, SourceLocation colonLoc)
      : SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
    // Handle GNU case statements of the form LHS ... RHS.
    bool IsGNURange = rhs != nullptr;
    SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
    setLHS(lhs);
    setSubStmt(nullptr);
    if (IsGNURange) {
      setRHS(rhs);
      setEllipsisLoc(ellipsisLoc);
    }
  }

  /// Build an empty switch case statement.
  explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
      : SwitchCase(CaseStmtClass, Empty) {
    SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
  }

public:
  /// Build a case statement.
  static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
                          SourceLocation caseLoc, SourceLocation ellipsisLoc,
                          SourceLocation colonLoc);

  /// Build an empty case statement.
  static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange);

  /// True if this case statement is of the form case LHS ... RHS, which
  /// is a GNU extension. In this case the RHS can be obtained with getRHS()
  /// and the location of the ellipsis can be obtained with getEllipsisLoc().
  bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; }

  SourceLocation getCaseLoc() const { return getKeywordLoc(); }
  void setCaseLoc(SourceLocation L) { setKeywordLoc(L); }

  /// Get the location of the ... in a case statement of the form LHS ... RHS.
  SourceLocation getEllipsisLoc() const {
    return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>()
                                : SourceLocation();
  }

  /// Set the location of the ... in a case statement of the form LHS ... RHS.
  /// Assert that this case statement is of this form.
  void setEllipsisLoc(SourceLocation L) {
    assert(
        caseStmtIsGNURange() &&
        "setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!");
    *getTrailingObjects<SourceLocation>() = L;
  }

  Expr *getLHS() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  const Expr *getLHS() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  void setLHS(Expr *Val) {
    getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  /// The RHS exists only for GNU range cases; null otherwise.
  Expr *getRHS() {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  const Expr *getRHS() const {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  void setRHS(Expr *Val) {
    assert(caseStmtIsGNURange() &&
           "setRHS but this is not a case stmt of the form LHS ... RHS!");
    getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; }

  const Stmt *getSubStmt() const {
    return getTrailingObjects<Stmt *>()[subStmtOffset()];
  }

  void setSubStmt(Stmt *S) {
    getTrailingObjects<Stmt *>()[subStmtOffset()] = S;
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }

  SourceLocation getEndLoc() const LLVM_READONLY {
    // Handle deeply nested case statements with iteration instead of recursion.
    const CaseStmt *CS = this;
    while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
      CS = CS2;

    return CS->getSubStmt()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};
/// DefaultStmt - Represents the 'default:' label of a switch, together with
/// its substatement.
class DefaultStmt : public SwitchCase {
  /// The statement following "default:".
  Stmt *SubStmt;

public:
  DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
      : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}

  /// Build an empty default statement.
  explicit DefaultStmt(EmptyShell Empty)
      : SwitchCase(DefaultStmtClass, Empty) {}

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *S) { SubStmt = S; }

  SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
  void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }

  SourceLocation getEndLoc() const LLVM_READONLY {
    return SubStmt->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DefaultStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }
};
// Out-of-line member function defined in a header: must be 'inline',
// otherwise every translation unit including this header emits its own
// external definition, violating the ODR and causing multiple-definition
// link errors.  (It cannot be defined inside SwitchCase because it needs
// the complete CaseStmt/DefaultStmt types declared below it.)
inline SourceLocation SwitchCase::getEndLoc() const {
  // Dispatch to the concrete subclass; every SwitchCase is one of the two.
  if (const auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getEndLoc();
  else if (const auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getEndLoc();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
// Out-of-line member function defined in a header: must be 'inline' to
// avoid ODR violations (multiple external definitions) when this header
// is included from more than one translation unit.
inline Stmt *SwitchCase::getSubStmt() {
  // Dispatch to the concrete subclass; every SwitchCase is one of the two.
  if (auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getSubStmt();
  else if (auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getSubStmt();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
/// Represents a statement that could possibly have a value and type. This
/// covers expression-statements, as well as labels and attributed statements.
///
/// Value statements have a special meaning when they are the last non-null
/// statement in a GNU statement expression, where they determine the value
/// of the statement expression.
class ValueStmt : public Stmt {
protected:
  // Inherit all of Stmt's constructors.
  using Stmt::Stmt;

public:
  /// If this statement ultimately wraps an expression, return it.
  const Expr *getExprStmt() const;
  Expr *getExprStmt() {
    // Delegate to the const overload and cast the result back.
    return const_cast<Expr *>(
        static_cast<const ValueStmt *>(this)->getExprStmt());
  }

  static bool classof(const Stmt *T) {
    auto C = T->getStmtClass();
    return C >= firstValueStmtConstant && C <= lastValueStmtConstant;
  }
};
/// LabelStmt - Represents a label, which has a substatement. For example:
/// foo: return;
class LabelStmt : public ValueStmt {
  LabelDecl *TheDecl;
  Stmt *SubStmt;

public:
  /// Build a label statement for label \p D whose body is \p substmt.
  LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
      : ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
    setIdentLoc(IL);
  }

  /// Build an empty label statement (used during deserialization).
  explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {}

  // Location of the label identifier, stored in the Stmt bit-fields.
  SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }
  void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }

  LabelDecl *getDecl() const { return TheDecl; }
  void setDecl(LabelDecl *D) { TheDecl = D; }

  /// The textual name of the label; defined out of line.
  const char *getName() const;

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *SS) { SubStmt = SS; }

  SourceLocation getBeginLoc() const { return getIdentLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return SubStmt->getEndLoc();
  }

  // Iterators: the single labeled sub-statement.
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == LabelStmtClass;
  }
};
/// Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
/// [[omp::for(...)]] for (...) { ... }
class AttributedStmt final
    : public ValueStmt,
      private llvm::TrailingObjects<AttributedStmt, const Attr *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  /// The statement the attributes appertain to.
  Stmt *SubStmt;

  AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
                 Stmt *SubStmt)
      : ValueStmt(AttributedStmtClass), SubStmt(SubStmt) {
    AttributedStmtBits.NumAttrs = Attrs.size();
    AttributedStmtBits.AttrLoc = Loc;
    // The attribute pointers live in trailing storage after the object.
    std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
  }

  // Used by CreateEmpty: attribute slots are allocated but null-filled
  // until they are populated (e.g. by the ASTStmtReader friend).
  explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
      : ValueStmt(AttributedStmtClass, Empty) {
    AttributedStmtBits.NumAttrs = NumAttrs;
    AttributedStmtBits.AttrLoc = SourceLocation{};
    std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
  }

  // Raw access to the trailing attribute-pointer array.
  const Attr *const *getAttrArrayPtr() const {
    return getTrailingObjects<const Attr *>();
  }
  const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }

public:
  static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
                                ArrayRef<const Attr *> Attrs, Stmt *SubStmt);

  // Build an empty attributed statement.
  static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);

  SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
  ArrayRef<const Attr *> getAttrs() const {
    return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
  }

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }

  SourceLocation getBeginLoc() const { return getAttrLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}

  // Iterators: the single attributed sub-statement.
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == AttributedStmtClass;
  }
};
/// IfStmt - This represents an if/then/else.
class IfStmt final
    : public Stmt,
      private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // IfStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing
  // objects at then end but this would change the order of the children.
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact a "Expr *".
  //
  // * A "Stmt *" for the then statement.
  //   Always present.
  //
  // * A "Stmt *" for the else statement.
  //   Present if and only if hasElseStorage().
  //
  // * A "SourceLocation" for the location of the "else".
  //   Present if and only if hasElseStorage().
  enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
  enum { NumMandatoryStmtPtr = 2 };
  SourceLocation LParenLoc;
  SourceLocation RParenLoc;

  // Number of "Stmt *" trailing objects for this particular IfStmt:
  // cond + then always, plus one each for else/var/init when present.
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
           hasInitStorage();
  }

  // One trailing SourceLocation (the 'else' keyword) iff there is an else.
  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return hasElseStorage();
  }

  // Slot offsets into the trailing "Stmt *" array.  The optional slots
  // (init, var) come first, so later offsets shift by whichever are present.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
  unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }

  /// Build an if/then/else statement.
  IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init,
         VarDecl *Var, Expr *Cond, SourceLocation LParenLoc,
         SourceLocation RParenLoc, Stmt *Then, SourceLocation EL, Stmt *Else);

  /// Build an empty if/then/else statement.
  explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);

public:
  /// Create an IfStmt.
  static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
                        bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
                        SourceLocation LPL, SourceLocation RPL, Stmt *Then,
                        SourceLocation EL = SourceLocation(),
                        Stmt *Else = nullptr);

  /// Create an empty IfStmt optionally with storage for an else statement,
  /// condition variable and init expression.
  static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
                             bool HasInit);

  /// True if this IfStmt has the storage for an init statement.
  bool hasInitStorage() const { return IfStmtBits.HasInit; }

  /// True if this IfStmt has storage for a variable declaration.
  bool hasVarStorage() const { return IfStmtBits.HasVar; }

  /// True if this IfStmt has storage for an else statement.
  bool hasElseStorage() const { return IfStmtBits.HasElse; }

  // The condition slot is typed "Stmt *" but holds an Expr, hence the casts.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }
  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }
  const Stmt *getThen() const {
    return getTrailingObjects<Stmt *>()[thenOffset()];
  }
  void setThen(Stmt *Then) {
    getTrailingObjects<Stmt *>()[thenOffset()] = Then;
  }

  // Else accessors return null when this IfStmt has no else storage.
  Stmt *getElse() {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }
  const Stmt *getElse() const {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }
  void setElse(Stmt *Else) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    getTrailingObjects<Stmt *>()[elseOffset()] = Else;
  }

  /// Retrieve the variable declared in this "if" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// if (int x = foo()) {
  ///   printf("x is %d", x);
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<IfStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable for this if statement.
  /// The if statement must have storage for the condition variable.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this IfStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }
  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  // Init accessors return null when this IfStmt has no init storage.
  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }
  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }
  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This if statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
  void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }

  // The 'else' keyword location is the optional trailing SourceLocation.
  SourceLocation getElseLoc() const {
    return hasElseStorage() ? *getTrailingObjects<SourceLocation>()
                            : SourceLocation();
  }
  void setElseLoc(SourceLocation ElseLoc) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    *getTrailingObjects<SourceLocation>() = ElseLoc;
  }

  bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
  void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }

  /// If this is an 'if constexpr', determine which substatement will be taken.
  /// Otherwise, or if the condition is value-dependent, returns None.
  Optional<const Stmt*> getNondiscardedCase(const ASTContext &Ctx) const;

  bool isObjCAvailabilityCheck() const;

  SourceLocation getBeginLoc() const { return getIfLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    if (getElse())
      return getElse()->getEndLoc();
    return getThen()->getEndLoc();
  }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation Loc) { RParenLoc = Loc; }

  // Iterators over subexpressions. The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }
  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};
/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
                         private llvm::TrailingObjects<SwitchStmt, Stmt *> {
  friend TrailingObjects;

  /// Points to a linked list of case and default statements.
  SwitchCase *FirstCase = nullptr;

  // SwitchStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more convenient to
  // put the optional trailing objects at the end but this would change
  // the order in children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  enum { InitOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };
  SourceLocation LParenLoc;
  SourceLocation RParenLoc;

  // Number of "Stmt *" trailing objects: cond + body always, plus the
  // optional init statement and condition variable when present.
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage();
  }

  // Slot offsets into the trailing "Stmt *" array; optional slots come
  // first, so later offsets shift by whichever are present.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  /// Build a switch statement.
  SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond,
             SourceLocation LParenLoc, SourceLocation RParenLoc);

  /// Build an empty switch statement.
  explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);

public:
  /// Create a switch statement.
  static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
                            Expr *Cond, SourceLocation LParenLoc,
                            SourceLocation RParenLoc);

  /// Create an empty switch statement optionally with storage for
  /// an init expression and a condition variable.
  static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
                                 bool HasVar);

  /// True if this SwitchStmt has storage for an init statement.
  bool hasInitStorage() const { return SwitchStmtBits.HasInit; }

  /// True if this SwitchStmt has storage for a condition variable.
  bool hasVarStorage() const { return SwitchStmtBits.HasVar; }

  // The condition slot is typed "Stmt *" but holds an Expr, hence the casts.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }
  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }
  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  // Init accessors return null when this SwitchStmt has no init storage.
  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }
  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }
  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This switch statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  /// Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  /// case 0: break;
  /// // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<SwitchStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable in this switch statement.
  /// The switch statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *VD);

  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }
  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  // Head of the singly linked list of case/default labels, threaded through
  // SwitchCase::getNextSwitchCase().
  SwitchCase *getSwitchCaseList() { return FirstCase; }
  const SwitchCase *getSwitchCaseList() const { return FirstCase; }
  void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }

  SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation Loc) { RParenLoc = Loc; }

  // Convenience: set body and 'switch' keyword location together.
  void setBody(Stmt *S, SourceLocation SL) {
    setBody(S);
    setSwitchLoc(SL);
  }

  // Prepend a case/default label to the case list.
  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase() &&
           "case/default already added to a switch");
    SC->setNextSwitchCase(FirstCase);
    FirstCase = SC;
  }

  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; }

  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const {
    return SwitchStmtBits.AllEnumCasesCovered;
  }

  SourceLocation getBeginLoc() const { return getSwitchLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // Fall back to the condition's end when there is no body.
    return getBody() ? getBody()->getEndLoc()
                     : reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }
  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};
/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
                        private llvm::TrailingObjects<WhileStmt, Stmt *> {
  friend TrailingObjects;

  // WhileStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more
  // convenient to put the optional trailing object at the end
  // but this would affect children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  //
  enum { VarOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };
  SourceLocation LParenLoc, RParenLoc;

  // Slot offsets into the trailing "Stmt *" array; the optional condition
  // variable slot comes first when present.
  unsigned varOffset() const { return VarOffset; }
  unsigned condOffset() const { return VarOffset + hasVarStorage(); }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  // Number of "Stmt *" trailing objects: cond + body, plus the optional
  // condition variable when present.
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasVarStorage();
  }

  /// Build a while statement.
  WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
            SourceLocation WL, SourceLocation LParenLoc,
            SourceLocation RParenLoc);

  /// Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty, bool HasVar);

public:
  /// Create a while statement.
  static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
                           Stmt *Body, SourceLocation WL,
                           SourceLocation LParenLoc, SourceLocation RParenLoc);

  /// Create an empty while statement optionally with storage for
  /// a condition variable.
  static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar);

  /// True if this WhileStmt has storage for a condition variable.
  bool hasVarStorage() const { return WhileStmtBits.HasVar; }

  // The condition slot is typed "Stmt *" but holds an Expr, hence the casts.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }
  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }
  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  /// Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<WhileStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable of this while statement.
  /// The while statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }
  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getWhileLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }
  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};
/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
  // Fixed-size child array: body first, then the loop condition.
  enum { BODY, COND, END_EXPR };
  Stmt *SubExprs[END_EXPR];
  SourceLocation WhileLoc;
  SourceLocation RParenLoc; // Location of final ')' in do stmt condition.

public:
  /// Build a do/while statement: "do Body while (Cond);".
  DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
      : Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) {
    setCond(Cond);
    setBody(Body);
    setDoLoc(DL);
  }

  /// Build an empty do-while statement.
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}

  // Body accessors.
  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *Body) { SubExprs[BODY] = Body; }

  // Condition accessors; the condition is stored as a "Stmt *", hence the
  // casts to and from Expr.
  Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(SubExprs[COND]);
  }
  void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); }

  // Keyword and paren locations.
  SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; }
  void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; }
  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getDoLoc(); }
  SourceLocation getEndLoc() const { return getRParenLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DoStmtClass;
  }

  // Iterators over the stored sub-statements, in array order.
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};
/// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
  // Fixed-size child array holding all parts of the for statement.
  enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
  Stmt *SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
  SourceLocation LParenLoc, RParenLoc;

public:
  /// Build a for statement; the constructor body is defined out of line.
  ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
          Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
          SourceLocation RP);

  /// Build an empty for statement.
  explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}

  // Clause accessors; any of init/cond/inc may be null when omitted in
  // the source.
  Stmt *getInit() { return SubExprs[INIT]; }
  const Stmt *getInit() const { return SubExprs[INIT]; }
  void setInit(Stmt *S) { SubExprs[INIT] = S; }

  Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(SubExprs[COND]);
  }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }

  Expr *getInc() { return reinterpret_cast<Expr *>(SubExprs[INC]); }
  const Expr *getInc() const { return reinterpret_cast<Expr *>(SubExprs[INC]); }
  void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt *>(E); }

  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  /// Retrieve the variable declared in this "for" statement, if any.
  ///
  /// In the following example, "y" is the condition variable.
  /// \code
  /// for (int x = random(); int y = mangle(x); ++x) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this ForStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt *>(SubExprs[CONDVAR]);
  }

  SourceLocation getForLoc() const { return ForStmtBits.ForLoc; }
  void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getForLoc(); }
  SourceLocation getEndLoc() const { return getBody()->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ForStmtClass;
  }

  // Iterators over all sub-statements, in array order.
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};
/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt {
  LabelDecl *Label;
  SourceLocation LabelLoc;

public:
  /// Build a goto to \p label; \p GL is the 'goto' keyword location and
  /// \p LL the location of the label name.
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
      : Stmt(GotoStmtClass), Label(label), LabelLoc(LL) {
    setGotoLoc(GL);
  }

  /// Build an empty goto statement (used during deserialization).
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}

  LabelDecl *getLabel() const { return Label; }
  void setLabel(LabelDecl *D) { Label = D; }

  // The 'goto' keyword location lives in the Stmt bit-fields.
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }
  void setLabelLoc(SourceLocation L) { LabelLoc = L; }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const { return getLabelLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }

  // Iterators: a direct goto has no sub-statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
  SourceLocation StarLoc;
  Stmt *Target;

public:
  /// Build "goto *target;".
  IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target)
      : Stmt(IndirectGotoStmtClass), StarLoc(starLoc) {
    setTarget(target);
    setGotoLoc(gotoLoc);
  }

  /// Build an empty indirect goto statement.
  explicit IndirectGotoStmt(EmptyShell Empty)
      : Stmt(IndirectGotoStmtClass, Empty) {}

  // Keyword locations; the 'goto' location is kept in the Stmt bit-fields.
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getStarLoc() const { return StarLoc; }
  void setStarLoc(SourceLocation L) { StarLoc = L; }

  // The jump-target expression; stored as a "Stmt *", hence the casts.
  Expr *getTarget() { return reinterpret_cast<Expr *>(Target); }
  const Expr *getTarget() const {
    return reinterpret_cast<const Expr *>(Target);
  }
  void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); }

  /// getConstantTarget - Returns the fixed target of this indirect
  /// goto, if one exists.
  LabelDecl *getConstantTarget();
  const LabelDecl *getConstantTarget() const {
    return const_cast<IndirectGotoStmt *>(this)->getConstantTarget();
  }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IndirectGotoStmtClass;
  }

  // Iterators: the single target expression.
  child_range children() { return child_range(&Target, &Target + 1); }
  const_child_range children() const {
    return const_child_range(&Target, &Target + 1);
  }
};
/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
public:
  /// Build a 'continue' statement at location \p CL.
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) {
    setContinueLoc(CL);
  }

  /// Build an empty continue statement (used during deserialization).
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}

  // The keyword location is kept in the Stmt bit-fields.
  SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; }
  void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; }

  // Begin and end coincide: the statement is the keyword itself.
  SourceLocation getBeginLoc() const { return getContinueLoc(); }
  SourceLocation getEndLoc() const { return getContinueLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }

  // Iterators: a continue statement has no sub-statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
public:
  /// Build a 'break' statement at location \p BL.
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) {
    setBreakLoc(BL);
  }

  /// Build an empty break statement (used during deserialization).
  explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}

  // The keyword location is kept in the Stmt bit-fields.
  SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; }
  void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; }

  // Begin and end coincide: the statement is the keyword itself.
  SourceLocation getBeginLoc() const { return getBreakLoc(); }
  SourceLocation getEndLoc() const { return getBreakLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BreakStmtClass;
  }

  // Iterators: a break statement has no sub-statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// ReturnStmt - This represents a return, optionally of an expression:
/// return;
/// return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void. We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
class ReturnStmt final
    : public Stmt,
      private llvm::TrailingObjects<ReturnStmt, const VarDecl *> {
  friend TrailingObjects;

  /// The return expression.
  Stmt *RetExpr;

  // ReturnStmt is followed optionally by a trailing "const VarDecl *"
  // for the NRVO candidate. Present if and only if hasNRVOCandidate().

  /// True if this ReturnStmt has storage for an NRVO candidate.
  bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; }

  unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const {
    return hasNRVOCandidate();
  }

  /// Build a return statement.
  ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate);

  /// Build an empty return statement.
  explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate);

public:
  /// Create a return statement.
  static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E,
                            const VarDecl *NRVOCandidate);

  /// Create an empty return statement, optionally with
  /// storage for an NRVO candidate.
  static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate);

  // The return value is stored as a "Stmt *" but is really an Expr; it may
  // be null (getEndLoc below handles the bare "return;" case).
  Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); }
  const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); }
  void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); }

  /// Retrieve the variable that might be used for the named return
  /// value optimization.
  ///
  /// The optimization itself can only be performed if the variable is
  /// also marked as an NRVO object.
  const VarDecl *getNRVOCandidate() const {
    return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>()
                              : nullptr;
  }

  /// Set the variable that might be used for the named return value
  /// optimization. The return statement must have storage for it,
  /// which is the case if and only if hasNRVOCandidate() is true.
  void setNRVOCandidate(const VarDecl *Var) {
    assert(hasNRVOCandidate() &&
           "This return statement has no storage for an NRVO candidate!");
    *getTrailingObjects<const VarDecl *>() = Var;
  }

  SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; }
  void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; }

  SourceLocation getBeginLoc() const { return getReturnLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return RetExpr ? RetExpr->getEndLoc() : getReturnLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ReturnStmtClass;
  }

  // Iterators: the return expression when present, else an empty range.
  child_range children() {
    if (RetExpr)
      return child_range(&RetExpr, &RetExpr + 1);
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    if (RetExpr)
      return const_child_range(&RetExpr, &RetExpr + 1);
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
friend class ASTStmtReader;
/// Location of the 'asm' keyword.
SourceLocation AsmLoc;
/// True if the assembly statement does not have any input or output
/// operands.
bool IsSimple;
/// If true, treat this inline assembly as having side effects.
/// This assembly statement should not be optimized, deleted or moved.
bool IsVolatile;
unsigned NumOutputs;
unsigned NumInputs;
unsigned NumClobbers;
// Operand expressions, allocated/owned by the derived class.
// Layout (see the iterator accessors below): slots [0, NumOutputs) are
// output expressions, slots [NumOutputs, NumOutputs + NumInputs) inputs.
Stmt **Exprs = nullptr;
AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
: Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
NumOutputs(numoutputs), NumInputs(numinputs),
NumClobbers(numclobbers) {}
public:
/// Build an empty inline-assembly statement.
explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}
SourceLocation getAsmLoc() const { return AsmLoc; }
void setAsmLoc(SourceLocation L) { AsmLoc = L; }
bool isSimple() const { return IsSimple; }
void setSimple(bool V) { IsSimple = V; }
bool isVolatile() const { return IsVolatile; }
void setVolatile(bool V) { IsVolatile = V; }
// Base-class fallbacks return invalid (default-constructed) locations;
// the derived statement classes shadow these with real source ranges.
SourceLocation getBeginLoc() const LLVM_READONLY { return {}; }
SourceLocation getEndLoc() const LLVM_READONLY { return {}; }
//===--- Asm String Analysis ---===//
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
unsigned getNumOutputs() const { return NumOutputs; }
/// getOutputConstraint - Return the constraint string for the specified
/// output operand. All output constraints are known to be non-empty (either
/// '=' or '+').
StringRef getOutputConstraint(unsigned i) const;
/// isOutputPlusConstraint - Return true if the specified output constraint
/// is a "+" constraint (which is both an input and an output) or false if it
/// is an "=" constraint (just an output).
bool isOutputPlusConstraint(unsigned i) const {
return getOutputConstraint(i)[0] == '+';
}
const Expr *getOutputExpr(unsigned i) const;
/// getNumPlusOperands - Return the number of output operands that have a "+"
/// constraint.
unsigned getNumPlusOperands() const;
//===--- Input operands ---===//
unsigned getNumInputs() const { return NumInputs; }
/// getInputConstraint - Return the specified input constraint. Unlike output
/// constraints, these can be empty.
StringRef getInputConstraint(unsigned i) const;
const Expr *getInputExpr(unsigned i) const;
//===--- Other ---===//
unsigned getNumClobbers() const { return NumClobbers; }
StringRef getClobber(unsigned i) const;
/// LLVM-style RTTI: an AsmStmt is either a GCC- or an MS-style asm.
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass ||
T->getStmtClass() == MSAsmStmtClass;
}
// Input expr iterators: the input slice of Exprs, starting after the
// NumOutputs output expressions.
using inputs_iterator = ExprIterator;
using const_inputs_iterator = ConstExprIterator;
using inputs_range = llvm::iterator_range<inputs_iterator>;
using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;
inputs_iterator begin_inputs() {
return &Exprs[0] + NumOutputs;
}
inputs_iterator end_inputs() {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }
const_inputs_iterator begin_inputs() const {
return &Exprs[0] + NumOutputs;
}
const_inputs_iterator end_inputs() const {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_const_range inputs() const {
return inputs_const_range(begin_inputs(), end_inputs());
}
// Output expr iterators: the leading NumOutputs slots of Exprs.
using outputs_iterator = ExprIterator;
using const_outputs_iterator = ConstExprIterator;
using outputs_range = llvm::iterator_range<outputs_iterator>;
using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;
outputs_iterator begin_outputs() {
return &Exprs[0];
}
outputs_iterator end_outputs() {
return &Exprs[0] + NumOutputs;
}
outputs_range outputs() {
return outputs_range(begin_outputs(), end_outputs());
}
const_outputs_iterator begin_outputs() const {
return &Exprs[0];
}
const_outputs_iterator end_outputs() const {
return &Exprs[0] + NumOutputs;
}
outputs_const_range outputs() const {
return outputs_const_range(begin_outputs(), end_outputs());
}
// Children are the outputs followed by the inputs (labels, if any, are
// handled by the derived class).
child_range children() {
return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
}
const_child_range children() const {
return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
}
};
/// This represents a GCC inline-assembly statement extension.
class GCCAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation RParenLoc;
StringLiteral *AsmStr;
// FIXME: If we wanted to, we could allocate all of these in one big array.
// Parallel arrays indexed like Exprs: entries [0, NumOutputs) describe
// outputs, [NumOutputs, NumOutputs + NumInputs) inputs; Names additionally
// carries NumLabels label identifiers at the end (asm-goto).
StringLiteral **Constraints = nullptr;
StringLiteral **Clobbers = nullptr;
IdentifierInfo **Names = nullptr;
unsigned NumLabels = 0;
public:
GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
bool isvolatile, unsigned numoutputs, unsigned numinputs,
IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
StringLiteral *asmstr, unsigned numclobbers,
StringLiteral **clobbers, unsigned numlabels,
SourceLocation rparenloc);
/// Build an empty inline-assembly statement.
explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {}
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
//===--- Asm String Analysis ---===//
const StringLiteral *getAsmString() const { return AsmStr; }
StringLiteral *getAsmString() { return AsmStr; }
void setAsmString(StringLiteral *E) { AsmStr = E; }
/// AsmStringPiece - this is part of a decomposed asm string specification
/// (for use with the AnalyzeAsmString function below). An asm string is
/// considered to be a concatenation of these parts.
class AsmStringPiece {
public:
enum Kind {
String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
Operand // Operand reference, with optional modifier %c4.
};
private:
Kind MyKind;
std::string Str;
unsigned OperandNo;
// Source range for operand references.
CharSourceRange Range;
public:
AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
SourceLocation End)
: MyKind(Operand), Str(S), OperandNo(OpNo),
Range(CharSourceRange::getCharRange(Begin, End)) {}
bool isString() const { return MyKind == String; }
bool isOperand() const { return MyKind == Operand; }
const std::string &getString() const { return Str; }
/// Operand number referenced; only valid for Operand pieces.
unsigned getOperandNo() const {
assert(isOperand());
return OperandNo;
}
CharSourceRange getRange() const {
assert(isOperand() && "Range is currently used only for Operands.");
return Range;
}
/// getModifier - Get the modifier for this operand, if present. This
/// returns '\0' if there was no modifier.
char getModifier() const;
};
/// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
/// it into pieces. If the asm string is erroneous, emit errors and return a
/// nonzero value, otherwise return zero. (NOTE(review): the return type is
/// unsigned, not bool as the historical "return true" wording suggested;
/// confirm the exact meaning of the value against the implementation.)
/// This handles canonicalization and translation of strings from GCC syntax
/// to LLVM IR syntax, and handles flattening of named references like
/// %[foo] to Operand AsmStringPiece's.
unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
const ASTContext &C, unsigned &DiagOffs) const;
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }
/// Symbolic name of output \p i, or an empty StringRef if unnamed.
StringRef getOutputName(unsigned i) const {
if (IdentifierInfo *II = getOutputIdentifier(i))
return II->getName();
return {};
}
StringRef getOutputConstraint(unsigned i) const;
const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
return Constraints[i];
}
StringLiteral *getOutputConstraintLiteral(unsigned i) {
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
// Input entries follow the outputs in Names/Constraints/Exprs.
IdentifierInfo *getInputIdentifier(unsigned i) const {
return Names[i + NumOutputs];
}
StringRef getInputName(unsigned i) const {
if (IdentifierInfo *II = getInputIdentifier(i))
return II->getName();
return {};
}
StringRef getInputConstraint(unsigned i) const;
const StringLiteral *getInputConstraintLiteral(unsigned i) const {
return Constraints[i + NumOutputs];
}
StringLiteral *getInputConstraintLiteral(unsigned i) {
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
}
//===--- Labels ---===//
/// True for GCC "asm goto" statements, which carry branch-target labels.
bool isAsmGoto() const {
return NumLabels > 0;
}
unsigned getNumLabels() const {
return NumLabels;
}
// Label entries follow the outputs and inputs in Names/Exprs.
IdentifierInfo *getLabelIdentifier(unsigned i) const {
return Names[i + NumOutputs + NumInputs];
}
AddrLabelExpr *getLabelExpr(unsigned i) const;
StringRef getLabelName(unsigned i) const;
using labels_iterator = CastIterator<AddrLabelExpr>;
using const_labels_iterator = ConstCastIterator<AddrLabelExpr>;
using labels_range = llvm::iterator_range<labels_iterator>;
using labels_const_range = llvm::iterator_range<const_labels_iterator>;
labels_iterator begin_labels() {
return &Exprs[0] + NumOutputs + NumInputs;
}
labels_iterator end_labels() {
return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
}
labels_range labels() {
return labels_range(begin_labels(), end_labels());
}
const_labels_iterator begin_labels() const {
return &Exprs[0] + NumOutputs + NumInputs;
}
const_labels_iterator end_labels() const {
return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
}
labels_const_range labels() const {
return labels_const_range(begin_labels(), end_labels());
}
private:
void setOutputsAndInputsAndClobbers(const ASTContext &C,
IdentifierInfo **Names,
StringLiteral **Constraints,
Stmt **Exprs,
unsigned NumOutputs,
unsigned NumInputs,
unsigned NumLabels,
StringLiteral **Clobbers,
unsigned NumClobbers);
public:
//===--- Other ---===//
/// getNamedOperand - Given a symbolic operand reference like %[foo],
/// translate this into a numeric value needed to reference the same operand.
/// This returns -1 if the operand name is invalid.
int getNamedOperand(StringRef SymbolicName) const;
StringRef getClobber(unsigned i) const;
StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
const StringLiteral *getClobberStringLiteral(unsigned i) const {
return Clobbers[i];
}
SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass;
}
};
/// This represents a Microsoft inline-assembly statement extension.
class MSAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation LBraceLoc, EndLoc;
StringRef AsmStr;
unsigned NumAsmToks = 0;
Token *AsmToks = nullptr;
// Constraints holds outputs first, then inputs (see the i + NumOutputs
// indexing in getInputConstraint below).
StringRef *Constraints = nullptr;
StringRef *Clobbers = nullptr;
public:
MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
SourceLocation lbraceloc, bool issimple, bool isvolatile,
ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
ArrayRef<StringRef> constraints,
ArrayRef<Expr*> exprs, StringRef asmstr,
ArrayRef<StringRef> clobbers, SourceLocation endloc);
/// Build an empty MS-style inline-assembly statement.
explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}
SourceLocation getLBraceLoc() const { return LBraceLoc; }
void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
/// True if this was written as a braced __asm { ... } block.
bool hasBraces() const { return LBraceLoc.isValid(); }
unsigned getNumAsmToks() { return NumAsmToks; }
Token *getAsmToks() { return AsmToks; }
//===--- Asm String Analysis ---===//
StringRef getAsmString() const { return AsmStr; }
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
StringRef getOutputConstraint(unsigned i) const {
assert(i < NumOutputs);
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
StringRef getInputConstraint(unsigned i) const {
assert(i < NumInputs);
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
}
//===--- Other ---===//
ArrayRef<StringRef> getAllConstraints() const {
return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
}
ArrayRef<StringRef> getClobbers() const {
return llvm::makeArrayRef(Clobbers, NumClobbers);
}
ArrayRef<Expr*> getAllExprs() const {
return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
NumInputs + NumOutputs);
}
StringRef getClobber(unsigned i) const { return getClobbers()[i]; }
private:
void initialize(const ASTContext &C, StringRef AsmString,
ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);
public:
SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == MSAsmStmtClass;
}
child_range children() {
return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
}
const_child_range children() const {
return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
}
};
/// Represents a Windows SEH __except handler: a filter expression plus the
/// handler's compound-statement block.
class SEHExceptStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
SourceLocation Loc;
// Children[FILTER_EXPR] is the filter expression, Children[BLOCK] the body.
Stmt *Children[2];
enum { FILTER_EXPR, BLOCK };
SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);
explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}
public:
static SEHExceptStmt* Create(const ASTContext &C,
SourceLocation ExceptLoc,
Expr *FilterExpr,
Stmt *Block);
SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); }
SourceLocation getExceptLoc() const { return Loc; }
SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); }
Expr *getFilterExpr() const {
return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
}
CompoundStmt *getBlock() const {
return cast<CompoundStmt>(Children[BLOCK]);
}
child_range children() {
return child_range(Children, Children+2);
}
const_child_range children() const {
return const_child_range(Children, Children + 2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHExceptStmtClass;
}
};
/// Represents a Windows SEH __finally block.
class SEHFinallyStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
SourceLocation Loc;
Stmt *Block;
SEHFinallyStmt(SourceLocation Loc, Stmt *Block);
explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}
public:
static SEHFinallyStmt* Create(const ASTContext &C,
SourceLocation FinallyLoc,
Stmt *Block);
SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); }
SourceLocation getFinallyLoc() const { return Loc; }
SourceLocation getEndLoc() const { return Block->getEndLoc(); }
CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }
child_range children() {
return child_range(&Block,&Block+1);
}
const_child_range children() const {
return const_child_range(&Block, &Block + 1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHFinallyStmtClass;
}
};
/// Represents a Windows SEH try statement ('try' or '__try') together with
/// its handler, which is either an __except or a __finally statement.
class SEHTryStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
bool IsCXXTry;
SourceLocation TryLoc;
// Children[TRY] is the try block, Children[HANDLER] the handler statement.
Stmt *Children[2];
enum { TRY = 0, HANDLER = 1 };
SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
SourceLocation TryLoc,
Stmt *TryBlock,
Stmt *Handler);
explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}
public:
static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); }
SourceLocation getTryLoc() const { return TryLoc; }
SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); }
bool getIsCXXTry() const { return IsCXXTry; }
CompoundStmt* getTryBlock() const {
return cast<CompoundStmt>(Children[TRY]);
}
Stmt *getHandler() const { return Children[HANDLER]; }
/// Returns 0 if not defined
SEHExceptStmt *getExceptHandler() const;
SEHFinallyStmt *getFinallyHandler() const;
child_range children() {
return child_range(Children, Children+2);
}
const_child_range children() const {
return const_child_range(Children, Children + 2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHTryStmtClass;
}
};
/// Represents a __leave statement.
class SEHLeaveStmt : public Stmt {
SourceLocation LeaveLoc;
public:
explicit SEHLeaveStmt(SourceLocation LL)
: Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}
/// Build an empty __leave statement.
explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}
SourceLocation getLeaveLoc() const { return LeaveLoc; }
void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }
// The statement is just the '__leave' keyword, so it begins and ends there.
SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; }
SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHLeaveStmtClass;
}
// Iterators: '__leave' has no sub-statements.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
/// compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
/// The different capture forms: by 'this', by reference, capture for
/// variable-length array type etc.
enum VariableCaptureKind {
VCK_This,
VCK_ByRef,
VCK_ByCopy,
VCK_VLAType,
};
/// Describes the capture of either a variable, or 'this', or
/// variable-length array type.
class Capture {
llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
SourceLocation Loc;
public:
friend class ASTStmtReader;
/// Create a new capture.
///
/// \param Loc The source location associated with this capture.
///
/// \param Kind The kind of capture (this, ByRef, ...).
///
/// \param Var The variable being captured, or null if capturing this.
Capture(SourceLocation Loc, VariableCaptureKind Kind,
VarDecl *Var = nullptr);
/// Determine the kind of capture.
VariableCaptureKind getCaptureKind() const;
/// Retrieve the source location at which the variable or 'this' was
/// first used.
SourceLocation getLocation() const { return Loc; }
/// Determine whether this capture handles the C++ 'this' pointer.
bool capturesThis() const { return getCaptureKind() == VCK_This; }
/// Determine whether this capture handles a variable (by reference).
bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }
/// Determine whether this capture handles a variable by copy.
bool capturesVariableByCopy() const {
return getCaptureKind() == VCK_ByCopy;
}
/// Determine whether this capture handles a variable-length array
/// type.
bool capturesVariableArrayType() const {
return getCaptureKind() == VCK_VLAType;
}
/// Retrieve the declaration of the variable being captured.
///
/// This operation is only valid if this capture captures a variable.
VarDecl *getCapturedVar() const;
};
private:
/// The number of variable captured, including 'this'.
unsigned NumCaptures;
/// The pointer part is the implicit the outlined function and the
/// int part is the captured region kind, 'CR_Default' etc.
llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind;
/// The record for captured variables, a RecordDecl or CXXRecordDecl.
RecordDecl *TheRecordDecl = nullptr;
/// Construct a captured statement.
CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);
/// Construct an empty captured statement.
CapturedStmt(EmptyShell Empty, unsigned NumCaptures);
// Trailing storage layout (directly after 'this'): NumCaptures
// capture-initialization expressions at indices [0, NumCaptures),
// followed by the captured statement itself at index NumCaptures.
Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }
Stmt *const *getStoredStmts() const {
return reinterpret_cast<Stmt *const *>(this + 1);
}
Capture *getStoredCaptures() const;
void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }
public:
friend class ASTStmtReader;
static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
CapturedRegionKind Kind,
ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits,
CapturedDecl *CD, RecordDecl *RD);
static CapturedStmt *CreateDeserialized(const ASTContext &Context,
unsigned NumCaptures);
/// Retrieve the statement being captured.
Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }
/// Retrieve the outlined function declaration.
CapturedDecl *getCapturedDecl();
const CapturedDecl *getCapturedDecl() const;
/// Set the outlined function declaration.
void setCapturedDecl(CapturedDecl *D);
/// Retrieve the captured region kind.
CapturedRegionKind getCapturedRegionKind() const;
/// Set the captured region kind.
void setCapturedRegionKind(CapturedRegionKind Kind);
/// Retrieve the record declaration for captured variables.
const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }
/// Set the record declaration for captured variables.
void setCapturedRecordDecl(RecordDecl *D) {
assert(D && "null RecordDecl");
TheRecordDecl = D;
}
/// True if this variable has been captured.
bool capturesVariable(const VarDecl *Var) const;
/// An iterator that walks over the captures.
using capture_iterator = Capture *;
using const_capture_iterator = const Capture *;
using capture_range = llvm::iterator_range<capture_iterator>;
using capture_const_range = llvm::iterator_range<const_capture_iterator>;
capture_range captures() {
return capture_range(capture_begin(), capture_end());
}
capture_const_range captures() const {
return capture_const_range(capture_begin(), capture_end());
}
/// Retrieve an iterator pointing to the first capture.
capture_iterator capture_begin() { return getStoredCaptures(); }
const_capture_iterator capture_begin() const { return getStoredCaptures(); }
/// Retrieve an iterator pointing past the end of the sequence of
/// captures.
capture_iterator capture_end() const {
return getStoredCaptures() + NumCaptures;
}
/// Retrieve the number of captures, including 'this'.
unsigned capture_size() const { return NumCaptures; }
/// Iterator that walks over the capture initialization arguments.
using capture_init_iterator = Expr **;
using capture_init_range = llvm::iterator_range<capture_init_iterator>;
/// Const iterator that walks over the capture initialization
/// arguments.
using const_capture_init_iterator = Expr *const *;
using const_capture_init_range =
llvm::iterator_range<const_capture_init_iterator>;
capture_init_range capture_inits() {
return capture_init_range(capture_init_begin(), capture_init_end());
}
const_capture_init_range capture_inits() const {
return const_capture_init_range(capture_init_begin(), capture_init_end());
}
/// Retrieve the first initialization argument.
capture_init_iterator capture_init_begin() {
return reinterpret_cast<Expr **>(getStoredStmts());
}
const_capture_init_iterator capture_init_begin() const {
return reinterpret_cast<Expr *const *>(getStoredStmts());
}
/// Retrieve the iterator pointing one past the last initialization
/// argument.
capture_init_iterator capture_init_end() {
return capture_init_begin() + NumCaptures;
}
const_capture_init_iterator capture_init_end() const {
return capture_init_begin() + NumCaptures;
}
// Source range is delegated entirely to the captured statement.
SourceLocation getBeginLoc() const LLVM_READONLY {
return getCapturedStmt()->getBeginLoc();
}
SourceLocation getEndLoc() const LLVM_READONLY {
return getCapturedStmt()->getEndLoc();
}
SourceRange getSourceRange() const LLVM_READONLY {
return getCapturedStmt()->getSourceRange();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CapturedStmtClass;
}
child_range children();
const_child_range children() const;
};
} // namespace clang
#endif // LLVM_CLANG_AST_STMT_H
|
facedetectcnn.h | /*
By downloading, copying, installing or using the software you agree to this license.
If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement For libfacedetection
(3-clause BSD License)
Copyright (c) 2018-2020, Shiqi Yu, all rights reserved.
shiqi.yu@gmail.com
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are disclaimed.
In no event shall copyright holders or contributors be liable for any direct,
indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
*/
#pragma once
#include "facedetection_export.h"
//#define _ENABLE_AVX512 //Please enable it if X64 CPU
//#define _ENABLE_AVX2 //Please enable it if X64 CPU
//#define _ENABLE_NEON //Please enable it if ARM CPU
FACEDETECTION_EXPORT int *facedetect_cnn(
unsigned char *result_buffer, //buffer memory for storing face detection results, !!its size must be 0x20000 Bytes!!
unsigned char *rgb_image_data, int width, int height,
int step); //input image, it must be BGR (three channels) instead of RGB image!
/*
DO NOT EDIT the following code if you don't really understand it.
*/
#if defined(_ENABLE_AVX512) || defined(_ENABLE_AVX2)
#include <immintrin.h>
#endif
#if defined(_ENABLE_NEON)
#include "arm_neon.h"
//NEON does not support UINT8*INT8 dot product,
//so convert the input data to the range [0, 127]
//and then use INT8*INT8 dot product instead.
#define _MAX_UINT8_VALUE 127
#else
#define _MAX_UINT8_VALUE 255
#endif
#if defined(_ENABLE_AVX512)
#define _MALLOC_ALIGN 512
#elif defined(_ENABLE_AVX2)
#define _MALLOC_ALIGN 256
#else
#define _MALLOC_ALIGN 128
#endif
#if defined(_ENABLE_AVX512) && defined(_ENABLE_NEON)
#error Cannot enable the two of AVX512 and NEON at the same time.
#endif
#if defined(_ENABLE_AVX2) && defined(_ENABLE_NEON)
#error Cannot enable the two of AVX and NEON at the same time.
#endif
#if defined(_ENABLE_AVX512) && defined(_ENABLE_AVX2)
#error Cannot enable the two of AVX512 and AVX2 at the same time.
#endif
#if defined(_OPENMP)
#include <omp.h>
#endif
#include <string.h>
#include <vector>
#include <iostream>
#include <typeinfo>
using namespace std;
void *myAlloc(size_t size);
void myFree_(void *ptr);
#define myFree(ptr) (myFree_(*(ptr)), *(ptr)=0);
#ifndef MIN
# define MIN(a, b) ((a) > (b) ? (b) : (a))
#endif
#ifndef MAX
# define MAX(a, b) ((a) < (b) ? (b) : (a))
#endif
// One face detection result.
// NOTE(review): field meanings inferred from names -- confirm against the
// detector implementation that fills this struct.
typedef struct FaceRect_ {
float score; // detection confidence
int x; // bounding box: left
int y; // bounding box: top
int w; // bounding box: width
int h; // bounding box: height
int lm[10]; // presumably 5 facial landmarks as (x, y) pairs -- TODO confirm
} FaceRect;
// Parameters of one quantized convolution layer.
// NOTE(review): field meanings inferred from names -- confirm against the
// convolution code that consumes this struct.
typedef struct ConvInfoStruct_ {
int pad; // spatial padding
int stride; // convolution stride
int kernel_size; // kernel width/height
int channels; // input channels
int num; // presumably the number of filters (output channels)
float scale; // presumably the quantization scale of the weights
signed char *pWeights; // int8 filter weights
signed int *pBias; // int32 bias terms
} ConvInfoStruct;
template<class T>
class CDataBlob {
public:
T *data;
int width;
int height;
int channels;
int channelStep;
float scale;
//when the datablob is a filter, the bias is 0 by default
//if it is the filted data, the bias is 1 by default
int bias;
public:
// Default-construct an empty blob: no storage, zero geometry,
// unit scale, and no bias.
CDataBlob() {
data = 0;
width = height = channels = channelStep = 0;
scale = 1.0f;
bias = 0;
}
// Construct a blob and immediately allocate w x h x c storage.
// NOTE(review): create()'s failure result (false on allocation failure)
// is silently discarded here; callers cannot tell this ctor failed.
CDataBlob(int w, int h, int c) {
data = 0;
create(w, h, c);
}
// Destructor: releases the pixel buffer via setNULL().
~CDataBlob() {
setNULL();
}
// Free the data buffer (if any) and reset geometry and scale.
// NOTE(review): 'bias' is left untouched here; create() resets it to 0 --
// confirm that is the intended contract for other callers of setNULL().
void setNULL() {
if (data)
myFree(&data);
width = height = channels = channelStep = 0;
scale = 1.0f;
}
// Allocate storage for a w x h blob with c channels per pixel.
// Each pixel's run of channels is padded up to a multiple of the SIMD
// alignment (_MALLOC_ALIGN bits, i.e. _MALLOC_ALIGN/8 bytes) and only the
// padding channels are zeroed, so vectorized dot products can safely read
// past 'channels'. Returns false (and reports on cerr) on allocation failure.
bool create(int w, int h, int c) {
setNULL();
width = w;
height = h;
channels = c;
bias = 0;
//alloc space for int8 array
// channelStep = per-pixel byte stride, rounded up to the alignment unit.
int remBytes = (sizeof(T) * channels) % (_MALLOC_ALIGN / 8);
if (remBytes == 0)
this->channelStep = channels * sizeof(T);
else
this->channelStep = (channels * sizeof(T)) + (_MALLOC_ALIGN / 8) - remBytes;
data = (T *) myAlloc(size_t(width) * height * this->channelStep);
if (data == NULL) {
// Fixed typo in the user-visible message ("memeory" -> "memory").
cerr << "Failed to alloc memory for uint8 data blob: "
<< width << "*"
<< height << "*"
<< channels << endl;
return false;
}
//memset(data, 0, width * height * channelStep);
//Unlike the memset above, the loop below zeroes ONLY the padding
//channels [channels, channelStep/sizeof(T)) of each pixel; the payload
//channels are left uninitialized.
//BE CAREFUL!!!
//#if defined(_OPENMP)
//#pragma omp parallel for
//#endif
for (int r = 0; r < this->height; r++) {
for (int c = 0; c < this->width; c++) {
int pixel_end = this->channelStep / sizeof(T);
T *pI = (this->data +
(size_t(r) * this->width + c) * this->channelStep / sizeof(T));
for (int ch = this->channels; ch < pixel_end; ch++)
pI[ch] = 0;
}
}
return true;
}
// Copy planar (channel-major: pData[ch * H * W + row * W + col]) int8
// filter weights into this blob's channel-padded, pixel-interleaved layout,
// and record the filter's bias flag.
// Returns false (with a message on cerr) if pData is null, T is not
// signed char, or the dimensions do not match this blob.
bool setInt8FilterData(signed char *pData, int bias, int dataWidth, int dataHeight,
int dataChannels) {
if (pData == NULL) {
cerr << "The input image data is null." << endl;
return false;
}
// Runtime guard: this overload is only meaningful for int8 blobs.
if (typeid(signed char) != typeid(T)) {
cerr << "Data must be signed char, the same with the source data." << endl;
return false;
}
if (dataWidth != this->width ||
dataHeight != this->height ||
dataChannels != this->channels) {
cerr << "The dimension of the data can not match that of the Blob." << endl;
return false;
}
for (int row = 0; row < height; row++)
for (int col = 0; col < width; col++) {
// Destination pixel: channelStep bytes per pixel (includes padding).
T *p = (this->data + (size_t(width) * row + col) * channelStep / sizeof(T));
for (int ch = 0; ch < channels; ch++) {
p[ch] = pData[ch * height * width + row * width + col];
}
}
this->bias = bias;
return true;
}
// Repack a 3-channel image so that a 3x3, stride-2, pad-1 convolution over
// the original image becomes a 1x1, stride-1, pad-0 convolution over this
// blob: the blob is (W+1)/2 x (H+1)/2 pixels with 27 channels per pixel
// (3x3 spatial taps x 3 image channels). Taps falling outside the image
// are left at the zero written by the memset below.
// Returns false (with a message on cerr) on null data, wrong T, or a
// non-3-channel image.
bool
setDataFrom3x3S2P1to1x1S1P0FromImage(const unsigned char *imgData, int imgWidth, int imgHeight,
int imgChannels, int imgWidthStep) {
if (imgData == NULL) {
cerr << "The input image data is null." << endl;
return false;
}
if (typeid(unsigned char) != typeid(T)) {
cerr << "Data must be unsigned char, the same with the source data." << endl;
return false;
}
if (imgChannels != 3) {
cerr << "The input image must be a 3-channel RGB image." << endl;
return false;
}
create((imgWidth + 1) / 2, (imgHeight + 1) / 2, 27);
//since the pixel assignment cannot fill all the elements in the blob.
//some elements in the blob should be initialized to 0
memset(data, 0, size_t(width) * height * channelStep);
#if defined(_OPENMP)
#pragma omp parallel for
#endif
for (int r = 0; r < this->height; r++) {
for (int c = 0; c < this->width; c++) {
// Destination pixel; channelStep is in bytes, matching the
// unsigned char pointer arithmetic here.
T *pData = (unsigned char *) this->data +
(size_t(r) * this->width + c) * this->channelStep;
// Gather the 3x3 neighborhood centered at (2c, 2r) in the image
// (stride 2, pad 1), skipping out-of-bounds taps.
for (int fy = -1; fy <= 1; fy++) {
int srcy = r * 2 + fy;
if (srcy < 0 || srcy >= imgHeight) //out of the range of the image
continue;
for (int fx = -1; fx <= 1; fx++) {
int srcx = c * 2 + fx;
if (srcx < 0 || srcx >= imgWidth) //out of the range of the image
continue;
const unsigned char *pImgData =
imgData + size_t(imgWidthStep) * srcy + imgChannels * srcx;
int output_channel_offset =
((fy + 1) * 3 + fx + 1) * 3; //3x3 filters, 3-channel image
#if defined(_ENABLE_NEON)
// NEON path: halve pixels into [0, 127] (see _MAX_UINT8_VALUE
// above); the 0.5 factor is recorded in 'scale' below.
pData[output_channel_offset] = (pImgData[0] / 2);
pData[output_channel_offset + 1] = (pImgData[1] / 2);
pData[output_channel_offset + 2] = (pImgData[2] / 2);
#else
pData[output_channel_offset] = (pImgData[0]);
pData[output_channel_offset + 1] = (pImgData[1]);
pData[output_channel_offset + 2] = (pImgData[2]);
#endif
}
}
}
}
// Mark as filtered data (bias = 1; see the class comment on 'bias').
#if defined(_ENABLE_NEON)
this->bias = 1; // 1/2 = 0
this->scale = 0.5f;
#else
this->bias = 1;
this->scale = 1.0f;
#endif
return true;
}
// Read one element at (x, y, channel); returns 0 when the blob has no
// storage or the coordinates fall outside the blob.
T getElement(int x, int y, int channel) {
    const bool inside = this->data != NULL &&
                        x >= 0 && x < this->width &&
                        y >= 0 && y < this->height &&
                        channel >= 0 && channel < this->channels;
    if (!inside)
        return (T) (0);
    const T *p = this->data +
                 (size_t(y) * this->width + x) * this->channelStep / sizeof(T);
    return p[channel];
}
// Pretty-print the blob: a header line with its dimensions/scale/bias,
// then every channel as parenthesized rows of values.
friend ostream &operator<<(ostream &output, const CDataBlob &dataBlob) {
    output << "DataBlob Size (Width, Height, Channel, scale) = ("
           << dataBlob.width
           << ", " << dataBlob.height
           << ", " << dataBlob.channels
           << ", " << dataBlob.scale
           << ", " << dataBlob.bias
           << ")" << endl;
    for (int ch = 0; ch < dataBlob.channels; ch++) {
        output << "Channel " << ch << ": " << endl;
        for (int row = 0; row < dataBlob.height; row++) {
            output << "(";
            for (int col = 0; col < dataBlob.width; col++) {
                // BUGFIX: compute the pixel offset in size_t like every
                // other accessor in this class; the previous int
                // arithmetic could overflow on large blobs.
                T *p = (dataBlob.data +
                        (size_t(dataBlob.width) * row + col) * dataBlob.channelStep / sizeof(T));
                if (sizeof(T) < 4)
                    output << (int) (p[ch]); // print byte types numerically, not as chars
                else
                    output << p[ch];
                if (col != dataBlob.width - 1)
                    output << ", ";
            }
            output << ")" << endl;
        }
    }
    return output;
}
};
// A set of quantized (int8) convolution filters plus the layer's
// hyper-parameters. Owns the CDataBlob pointers in `filters` and deletes
// them on destruction.
// NOTE(review): copying a Filters object would double-delete the blobs;
// consider deleting the copy operations if no caller relies on them.
class Filters {
public:
    vector<CDataBlob<signed char> *> filters;
    int pad;        // spatial padding of the convolution
    int stride;     // spatial stride of the convolution
    float scale;    //element * scale = original value
    Filters() {
        pad = 0;
        stride = 0;
        scale = 0;
    }
    ~Filters() {
        // size_t index avoids the signed/unsigned comparison against
        // vector::size(); null out each slot after deleting it.
        for (size_t i = 0; i < filters.size(); i++) {
            delete filters[i];
            filters[i] = nullptr;
        }
    }
};
// ---- CNN primitive operations; implementations live elsewhere. ----
bool convertInt2Float(CDataBlob<int> *inputData, CDataBlob<float> *outputData);
// int8-filter convolution over a uint8 blob into 32-bit accumulators.
bool convolution(CDataBlob<unsigned char> *inputData, const Filters *filters,
                 CDataBlob<int> *outputData);
// Convolution whose uint8 output presumably has ReLU fused in (per name).
bool convolution_relu(CDataBlob<unsigned char> *inputData, const Filters *filters,
                      CDataBlob<unsigned char> *outputData);
bool
maxpooling2x2S2(const CDataBlob<unsigned char> *inputData, CDataBlob<unsigned char> *outputData);
bool priorbox(const CDataBlob<unsigned char> *featureData, int img_width, int img_height, int step,
              int num_sizes, float *pWinSizes, CDataBlob<float> *outputData);
// Channel-wise concatenation of four blobs with identical spatial size.
template<typename T>
bool concat4(const CDataBlob<T> *inputData1, const CDataBlob<T> *inputData2,
             const CDataBlob<T> *inputData3, const CDataBlob<T> *inputData4,
             CDataBlob<T> *outputData);
/* the input data for softmax must be a vector, the data stored in a multi-channel blob with size 1x1 */
template<typename T>
bool blob2vector(const CDataBlob<T> *inputData, CDataBlob<T> *outputData);
bool softmax1vector2class(CDataBlob<float> *inputOutputData);
bool detection_output(const CDataBlob<float> *priorbox, const CDataBlob<float> *loc,
                      const CDataBlob<float> *conf, float overlap_threshold,
                      float confidence_threshold, int top_k, int keep_top_k,
                      CDataBlob<float> *outputData);
// Run the full detection CNN on an RGB image buffer.
// FIX: parameter renamed "with" -> "width" (declaration-only; callers unaffected).
vector<FaceRect> objectdetect_cnn(unsigned char *rgbImageData, int width, int height, int step);
// ==== end of previous file; beginning of functions.h ====
/*
* This file is part of Quantum++.
*
* MIT License
*
* Copyright (c) 2013 - 2018 Vlad Gheorghiu (vgheorgh@gmail.com)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/**
* \file functions.h
* \brief Generic quantum computing functions
*/
#ifndef FUNCTIONS_H_
#define FUNCTIONS_H_
namespace qpp {
// Eigen function wrappers
/**
* \brief Transpose
*
* \param A Eigen expression
* \return Transpose of \a A, as a dynamic matrix
* over the same scalar field as \a A
*/
template <typename Derived>
dyn_mat<typename Derived::Scalar>
transpose(const Eigen::MatrixBase<Derived>& A) {
    // Materialize the expression, then reject empty matrices.
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::transpose()");
    return mat.transpose();
}
/**
* \brief Complex conjugate
*
* \param A Eigen expression
* \return Complex conjugate of \a A, as a dynamic matrix
* over the same scalar field as \a A
*/
template <typename Derived>
dyn_mat<typename Derived::Scalar>
conjugate(const Eigen::MatrixBase<Derived>& A) {
    // Materialize the expression, then reject empty matrices.
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::conjugate()");
    return mat.conjugate();
}
/**
* \brief Adjoint
*
* \param A Eigen expression
* \return Adjoint (Hermitian conjugate) of \a A, as a dynamic matrix
* over the same scalar field as \a A
*/
template <typename Derived>
dyn_mat<typename Derived::Scalar> adjoint(const Eigen::MatrixBase<Derived>& A) {
    // Materialize the expression, then reject empty matrices.
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::adjoint()");
    // adjoint = conjugate transpose
    return mat.adjoint();
}
/**
* \brief Inverse
*
* \param A Eigen expression
* \return Inverse of \a A, as a dynamic matrix
* over the same scalar field as \a A
*/
template <typename Derived>
dyn_mat<typename Derived::Scalar> inverse(const Eigen::MatrixBase<Derived>& A) {
    // Materialize the expression, then reject empty matrices.
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::inverse()");
    return mat.inverse();
}
/**
* \brief Trace
*
* \param A Eigen expression
* \return Trace of \a A, as a scalar over the same scalar field as \a A
*/
template <typename Derived>
typename Derived::Scalar trace(const Eigen::MatrixBase<Derived>& A) {
    // Materialize the expression, then reject empty matrices.
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::trace()");
    return mat.trace();
}
/**
* \brief Determinant
*
* \param A Eigen expression
* \return Determinant of \a A, as a scalar over the same scalar field as \a A.
* Returns \f$\pm \infty\f$ when the determinant overflows/underflows.
*/
template <typename Derived>
typename Derived::Scalar det(const Eigen::MatrixBase<Derived>& A) {
    // Materialize the expression, then reject empty matrices.
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::det()");
    return mat.determinant();
}
/**
* \brief Logarithm of the determinant
*
* Useful when the determinant overflows/underflows
*
* \param A Eigen expression
* \return Logarithm of the determinant of \a A, as a scalar
* over the same scalar field as \a A
*/
template <typename Derived>
typename Derived::Scalar logdet(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::logdet()");
    // check square matrix
    if (!internal::check_square_mat(rA))
        throw exception::MatrixNotSquare("qpp::logdet()");
    // END EXCEPTION CHECKS
    // LU-factorize A (partial pivoting) and sum the logs of U's diagonal:
    // log|det(A)| = sum_i log U(i,i), which avoids the overflow/underflow
    // of computing det(A) first.
    // NOTE(review): the sign/phase of the pivoting permutation is not
    // accounted for here, and std::log of a negative real diagonal entry
    // is NaN for real scalar types -- presumably callers use complex
    // scalars or positive-determinant inputs; confirm.
    Eigen::PartialPivLU<dyn_mat<typename Derived::Scalar>> lu(rA);
    dyn_mat<typename Derived::Scalar> U =
        lu.matrixLU().template triangularView<Eigen::Upper>();
    typename Derived::Scalar result = std::log(U(0, 0));
    // accumulate the remaining diagonal entries
    for (idx i = 1; i < static_cast<idx>(rA.rows()); ++i)
        result += std::log(U(i, i));
    return result;
}
/**
* \brief Element-wise sum of \a A
*
* \param A Eigen expression
* \return Element-wise sum of \a A, as a scalar
* over the same scalar field as \a A
*/
template <typename Derived>
typename Derived::Scalar sum(const Eigen::MatrixBase<Derived>& A) {
    // Materialize the expression, then reject empty matrices.
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::sum()");
    return mat.sum();
}
/**
* \brief Element-wise product of \a A
*
* \param A Eigen expression
* \return Element-wise product of \a A, as a scalar
* over the same scalar field as \a A
*/
template <typename Derived>
typename Derived::Scalar prod(const Eigen::MatrixBase<Derived>& A) {
    // Materialize the expression, then reject empty matrices.
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::prod()");
    return mat.prod();
}
/**
* \brief Frobenius norm
*
* \param A Eigen expression
* \return Frobenius norm of \a A
*/
template <typename Derived>
double norm(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();
    // reject empty matrices
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::norm()");
    // cast to the complex field so the same code handles real inputs,
    // then take the Frobenius norm
    return (mat.template cast<cplx>()).norm();
}
/**
* \brief Full eigen decomposition
* \see qpp::heig()
*
* \param A Eigen expression
* \return Pair of: 1. Eigenvalues of \a A, as a complex dynamic column vector,
* and 2. Eigenvectors of \a A, as columns of a complex dynamic matrix
*/
template <typename Derived>
std::pair<dyn_col_vect<cplx>, cmat>
eig(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();
    // reject empty matrices, then non-square ones
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::eig()");
    if (!internal::check_square_mat(mat))
        throw exception::MatrixNotSquare("qpp::eig()");
    // general (non-Hermitian) eigensolver over the complex field
    Eigen::ComplexEigenSolver<cmat> solver(mat.template cast<cplx>());
    return std::make_pair(solver.eigenvalues(), solver.eigenvectors());
}
/**
* \brief Eigenvalues
* \see qpp::hevals()
*
* \param A Eigen expression
* \return Eigenvalues of \a A, as a complex dynamic column vector
*/
template <typename Derived>
dyn_col_vect<cplx> evals(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::evals()");
    // check square matrix
    if (!internal::check_square_mat(rA))
        throw exception::MatrixNotSquare("qpp::evals()");
    // END EXCEPTION CHECKS
    // PERF: ask the solver for eigenvalues only, instead of delegating to
    // eig(), which also computed the (unused) eigenvector matrix.
    Eigen::ComplexEigenSolver<cmat> es(rA.template cast<cplx>(),
                                       /*computeEigenvectors=*/false);
    return es.eigenvalues();
}
/**
* \brief Eigenvectors
* \see qpp::hevects()
*
* \param A Eigen expression
* \return Eigenvectors of \a A, as columns of a complex dynamic matrix
*/
template <typename Derived>
cmat evects(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::evects()");
    // check square matrix
    if (!internal::check_square_mat(rA))
        throw exception::MatrixNotSquare("qpp::evects()");
    // END EXCEPTION CHECKS
    // BUGFIX: the solver result was previously discarded and eig(rA) was
    // called afterwards, computing the full eigendecomposition twice.
    Eigen::ComplexEigenSolver<cmat> es(rA.template cast<cplx>());
    return es.eigenvectors();
}
/**
* \brief Full eigen decomposition of Hermitian expression
* \see qpp::eig()
*
* \param A Eigen expression
* \return Pair of: 1. Eigenvalues of \a A, as a real dynamic column vector,
* and 2. Eigenvectors of \a A, as columns of a complex dynamic matrix
*/
template <typename Derived>
std::pair<dyn_col_vect<double>, cmat>
heig(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();
    // reject empty matrices, then non-square ones
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::heig()");
    if (!internal::check_square_mat(mat))
        throw exception::MatrixNotSquare("qpp::heig()");
    // self-adjoint solver: real eigenvalues, unitary eigenvector matrix
    Eigen::SelfAdjointEigenSolver<cmat> solver(mat.template cast<cplx>());
    return std::make_pair(solver.eigenvalues(), solver.eigenvectors());
}
/**
* \brief Hermitian eigenvalues
* \see qpp::evals()
*
* \param A Eigen expression
* \return Eigenvalues of Hermitian \a A, as a real dynamic column vector
*/
template <typename Derived>
dyn_col_vect<double> hevals(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::hevals()");
    // check square matrix
    if (!internal::check_square_mat(rA))
        throw exception::MatrixNotSquare("qpp::hevals()");
    // END EXCEPTION CHECKS
    // PERF: compute eigenvalues only, instead of delegating to heig(),
    // which also computed the (unused) eigenvector matrix.
    Eigen::SelfAdjointEigenSolver<cmat> es(rA.template cast<cplx>(),
                                           Eigen::EigenvaluesOnly);
    return es.eigenvalues();
}
/**
* \brief Hermitian eigenvectors
* \see qpp::evects()
*
* \param A Eigen expression
* \return Eigenvectors of Hermitian \a A, as columns of a complex matrix
*/
template <typename Derived>
cmat hevects(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();
    // reject empty matrices, then non-square ones
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::hevects()");
    if (!internal::check_square_mat(mat))
        throw exception::MatrixNotSquare("qpp::hevects()");
    // heig() performs the actual Hermitian eigendecomposition
    return heig(mat).second;
}
/**
* \brief Full singular value decomposition
*
* \param A Eigen expression
 * \return Tuple of: 1. Left singular vectors of \a A, as columns of a complex
* dynamic matrix, 2. Singular values of \a A, ordered in decreasing order,
* as a real dynamic column vector, and 3. Right singular vectors of \a A,
* as columns of a complex dynamic matrix
*/
template <typename Derived>
std::tuple<cmat, dyn_col_vect<double>, cmat>
svd(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();
    // reject empty matrices
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::svd()");
    // request both full unitaries U and V
    const unsigned int opts = Eigen::DecompositionOptions::ComputeFullU |
                              Eigen::DecompositionOptions::ComputeFullV;
    Eigen::JacobiSVD<dyn_mat<typename Derived::Scalar>> solver(mat, opts);
    return std::make_tuple(solver.matrixU(), solver.singularValues(),
                           solver.matrixV());
}
/**
* \brief Singular values
*
* \param A Eigen expression
* \return Singular values of \a A, ordered in decreasing order,
* as a real dynamic column vector
*/
template <typename Derived>
dyn_col_vect<double> svals(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();
    // reject empty matrices
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::svals()");
    // no decomposition options: singular values only, no U/V matrices
    Eigen::JacobiSVD<dyn_mat<typename Derived::Scalar>> solver(mat);
    return solver.singularValues();
}
/**
* \brief Left singular vectors
*
* \param A Eigen expression
* \return Complex dynamic matrix, whose columns are the left singular
* vectors of \a A
*/
template <typename Derived>
cmat svdU(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();
    // reject empty matrices
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::svdU()");
    // only the full left unitary U is requested
    Eigen::JacobiSVD<dyn_mat<typename Derived::Scalar>> solver(
        mat, Eigen::DecompositionOptions::ComputeFullU);
    return solver.matrixU();
}
/**
* \brief Right singular vectors
*
* \param A Eigen expression
* \return Complex dynamic matrix, whose columns are the right singular
* vectors of \a A
*/
template <typename Derived>
cmat svdV(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();
    // reject empty matrices
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::svdV()");
    // only the full right unitary V is requested
    Eigen::JacobiSVD<dyn_mat<typename Derived::Scalar>> solver(
        mat, Eigen::DecompositionOptions::ComputeFullV);
    return solver.matrixV();
}
// Matrix functional calculus
/**
* \brief Functional calculus f(A)
*
* \param A Eigen expression
* \param f Pointer-to-function from complex to complex
* \return \a \f$f(A)\f$
*/
template <typename Derived>
cmat funm(const Eigen::MatrixBase<Derived>& A, cplx (*f)(const cplx&)) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::funm()");
    // check square matrix
    if (!internal::check_square_mat(rA))
        throw exception::MatrixNotSquare("qpp::funm()");
    // END EXCEPTION CHECKS
    // Spectral functional calculus: diagonalize A = V D V^{-1}, apply f to
    // each eigenvalue, then recompose f(A) = V f(D) V^{-1}.
    // NOTE(review): this presumes A is diagonalizable; for defective
    // matrices evects.inverse() is singular/ill-conditioned -- confirm
    // callers only pass diagonalizable inputs.
    Eigen::ComplexEigenSolver<cmat> es(rA.template cast<cplx>());
    cmat evects = es.eigenvectors();
    cmat evals = es.eigenvalues();
    for (idx i = 0; i < static_cast<idx>(evals.rows()); ++i)
        evals(i) = (*f)(evals(i)); // apply f(x) to each eigenvalue
    cmat evalsdiag = evals.asDiagonal();
    return evects * evalsdiag * evects.inverse();
}
/**
* \brief Matrix square root
*
* \param A Eigen expression
* \return Matrix square root of \a A
*/
template <typename Derived>
cmat sqrtm(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();
    // reject empty matrices, then non-square ones
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::sqrtm()");
    if (!internal::check_square_mat(mat))
        throw exception::MatrixNotSquare("qpp::sqrtm()");
    // delegate to the spectral functional calculus with f = sqrt
    return funm(mat, &std::sqrt);
}
/**
* \brief Matrix absolute value
*
* \param A Eigen expression
* \return Matrix absolute value of \a A
*/
template <typename Derived>
cmat absm(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();
    // reject empty matrices, then non-square ones
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::absm()");
    if (!internal::check_square_mat(mat))
        throw exception::MatrixNotSquare("qpp::absm()");
    // |A| = sqrt(A^dagger A)
    return sqrtm(adjoint(mat) * mat);
}
/**
* \brief Matrix exponential
*
* \param A Eigen expression
* \return Matrix exponential of \a A
*/
template <typename Derived>
cmat expm(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();
    // reject empty matrices, then non-square ones
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::expm()");
    if (!internal::check_square_mat(mat))
        throw exception::MatrixNotSquare("qpp::expm()");
    // delegate to the spectral functional calculus with f = exp
    return funm(mat, &std::exp);
}
/**
* \brief Matrix logarithm
*
* \param A Eigen expression
* \return Matrix logarithm of \a A
*/
template <typename Derived>
cmat logm(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();
    // reject empty matrices, then non-square ones
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::logm()");
    if (!internal::check_square_mat(mat))
        throw exception::MatrixNotSquare("qpp::logm()");
    // delegate to the spectral functional calculus with f = log
    return funm(mat, &std::log);
}
/**
* \brief Matrix sin
*
* \param A Eigen expression
* \return Matrix sine of \a A
*/
template <typename Derived>
cmat sinm(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();
    // reject empty matrices, then non-square ones
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::sinm()");
    if (!internal::check_square_mat(mat))
        throw exception::MatrixNotSquare("qpp::sinm()");
    // delegate to the spectral functional calculus with f = sin
    return funm(mat, &std::sin);
}
/**
* \brief Matrix cos
*
* \param A Eigen expression
* \return Matrix cosine of \a A
*/
template <typename Derived>
cmat cosm(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();
    // reject empty matrices, then non-square ones
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::cosm()");
    if (!internal::check_square_mat(mat))
        throw exception::MatrixNotSquare("qpp::cosm()");
    // delegate to the spectral functional calculus with f = cos
    return funm(mat, &std::cos);
}
/**
* \brief Matrix power
* \see qpp::powm()
*
* Uses the spectral decomposition of \a A to compute the matrix power.
* By convention \f$A^0 = I\f$.
*
* \param A Eigen expression
* \param z Complex number
* \return Matrix power \f$A^z\f$
*/
template <typename Derived>
cmat spectralpowm(const Eigen::MatrixBase<Derived>& A, const cplx z) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::spectralpowm()");
    // check square matrix
    if (!internal::check_square_mat(rA))
        throw exception::MatrixNotSquare("qpp::spectralpowm()");
    // END EXCEPTION CHECKS
    // Define A^0 = Id, for z IDENTICALLY zero
    if (real(z) == 0 && imag(z) == 0)
        return cmat::Identity(rA.rows(), rA.rows());
    // A^z via the spectral decomposition A = V D V^{-1}:
    // A^z = V D^z V^{-1}, raising each eigenvalue to the power z.
    // NOTE(review): std::pow uses the principal branch of the complex
    // power, and the recomposition presumes A is diagonalizable --
    // results for defective matrices or other branch choices will differ.
    Eigen::ComplexEigenSolver<cmat> es(rA.template cast<cplx>());
    cmat evects = es.eigenvectors();
    cmat evals = es.eigenvalues();
    for (idx i = 0; i < static_cast<idx>(evals.rows()); ++i)
        evals(i) = std::pow(evals(i), z);
    cmat evalsdiag = evals.asDiagonal();
    return evects * evalsdiag * evects.inverse();
}
/**
* \brief Fast matrix power based on the SQUARE-AND-MULTIPLY algorithm
* \see qpp::spectralpowm()
*
* Explicitly multiplies the matrix \a A with itself \a n times.
* By convention \f$A^0 = I\f$.
*
* \param A Eigen expression
* \param n Non-negative integer
* \return Matrix power \f$A^n\f$, as a dynamic matrix
* over the same scalar field as \a A
*/
template <typename Derived>
dyn_mat<typename Derived::Scalar> powm(const Eigen::MatrixBase<Derived>& A,
                                       idx n) {
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(A))
        throw exception::ZeroSize("qpp::powm()");
    // check square matrix
    if (!internal::check_square_mat(A))
        throw exception::MatrixNotSquare("qpp::powm()");
    // END EXCEPTION CHECKS
    // if n = 1, return the matrix unchanged
    if (n == 1)
        return A;
    dyn_mat<typename Derived::Scalar> result =
        dyn_mat<typename Derived::Scalar>::Identity(A.rows(), A.rows());
    // if n = 0, return the identity (as just prepared in result)
    if (n == 0)
        return result;
    dyn_mat<typename Derived::Scalar> cA = A.derived(); // copy
    // Square-and-multiply. PERF: stop squaring at n == 1 and fold the last
    // factor in afterwards -- the previous loop always performed one final
    // (discarded) cA * cA matrix product.
    for (; n > 1; n /= 2) {
        if (n % 2) // current bit set: multiply the accumulator
            result = (result * cA).eval();
        cA = (cA * cA).eval();
    }
    // n == 1 here (n >= 2 on entry), so one last multiply remains
    return (result * cA).eval();
}
/**
* \brief Schatten matrix norm
*
* \param A Eigen expression
* \param p Real number, greater or equal to 1,
* use qpp::infty for \f$p = \infty\f$
* \return Schatten-\a p matrix norm of \a A
*/
template <typename Derived>
double schatten(const Eigen::MatrixBase<Derived>& A, double p) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();
    // reject empty matrices and p outside [1, infty]
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::schatten()");
    if (p < 1)
        throw exception::OutOfRange("qpp::schatten()");
    const dyn_col_vect<double> sv = svals(mat);
    // Schatten-infinity norm = operator norm = largest singular value
    // (svals() returns them in decreasing order).
    if (p == infty)
        return sv(0);
    // otherwise the p-norm of the singular-value vector
    double acc = 0;
    for (idx i = 0; i < static_cast<idx>(sv.rows()); ++i)
        acc += std::pow(sv[i], p);
    return std::pow(acc, 1. / p);
}
// other functions
/**
* \brief Functor
*
* \param A Eigen expression
* \param f Pointer-to-function from scalars of \a A to \a OutputScalar
* \return Component-wise \f$f(A)\f$, as a dynamic matrix
* over the \a OutputScalar scalar field
*/
template <typename OutputScalar, typename Derived>
dyn_mat<OutputScalar>
cwise(const Eigen::MatrixBase<Derived>& A,
      OutputScalar (*f)(const typename Derived::Scalar&)) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::cwise()");
    // END EXCEPTION CHECKS
    // Apply f element-by-element into a result of the same shape but
    // (possibly) different scalar type.
    dyn_mat<OutputScalar> result(rA.rows(), rA.cols());
#ifdef WITH_OPENMP_
#pragma omp parallel for collapse(2)
#endif // WITH_OPENMP_
    // column major order for speed
    // (outer loop over columns matches Eigen's default storage order;
    // collapse(2) lets OpenMP parallelize both loop levels)
    for (idx j = 0; j < static_cast<idx>(rA.cols()); ++j)
        for (idx i = 0; i < static_cast<idx>(rA.rows()); ++i)
            result(i, j) = (*f)(rA(i, j));
    return result;
}
// Kronecker product of multiple matrices, preserve return type
// variadic template
/**
* \brief Kronecker product
* \see qpp::kronpow()
*
* Used to stop the recursion for the variadic template version of
* qpp::kron()
*
* \param head Eigen expression
* \return Its argument \a head
*/
template <typename T>
dyn_mat<typename T::Scalar> kron(const T& head) {
    // Base case of the variadic recursion: the Kronecker product of a
    // single argument is the argument itself (materialized as dyn_mat).
    return head;
}
/**
* \brief Kronecker product
* \see qpp::kronpow()
*
* \param head Eigen expression
* \param tail Variadic Eigen expression (zero or more parameters)
* \return Kronecker product of all input parameters,
* evaluated from left to right, as a dynamic matrix
* over the same scalar field as its arguments
*/
template <typename T, typename... Args>
dyn_mat<typename T::Scalar> kron(const T& head, const Args&... tail) {
    // Peel off the first argument and recurse on the rest; the Kronecker
    // product is associative, so the grouping does not affect the result.
    return internal::kron2(head, kron(tail...));
}
/**
* \brief Kronecker product
* \see qpp::kronpow()
*
* \param As std::vector of Eigen expressions
* \return Kronecker product of all elements in \a As,
* evaluated from left to right, as a dynamic matrix
* over the same scalar field as its arguments
*/
template <typename Derived>
dyn_mat<typename Derived::Scalar> kron(const std::vector<Derived>& As) {
    // the product of zero factors is undefined here
    if (As.empty())
        throw exception::ZeroSize("qpp::kron()");
    for (const auto& mat : As)
        if (!internal::check_nonzero_size(mat))
            throw exception::ZeroSize("qpp::kron()");
    // left fold: ((A0 (x) A1) (x) A2) (x) ...
    dyn_mat<typename Derived::Scalar> acc = As[0].derived();
    for (idx i = 1; i < As.size(); ++i)
        acc = kron(acc, As[i]);
    return acc;
}
// Kronecker product of a list of matrices, preserve return type
// deduce the template parameters from initializer_list
/**
* \brief Kronecker product
* \see qpp::kronpow()
*
* \param As std::initializer_list of Eigen expressions,
* such as \a {A1, A2, ... ,Ak}
* \return Kronecker product of all elements in \a As,
* evaluated from left to right, as a dynamic matrix
* over the same scalar field as its arguments
*/
template <typename Derived>
dyn_mat<typename Derived::Scalar>
kron(const std::initializer_list<Derived>& As) {
    // Delegate to the std::vector overload; the list must hold matrices
    // of one concrete Derived type.
    return kron(std::vector<Derived>(As));
}
/**
* \brief Kronecker power
* \see qpp::kron()
*
* \param A Eigen expression
* \param n Non-negative integer
* \return Kronecker product of \a A with itself \a n times \f$A^{\otimes n}\f$,
* as a dynamic matrix over the same scalar field as \a A
*/
template <typename Derived>
dyn_mat<typename Derived::Scalar> kronpow(const Eigen::MatrixBase<Derived>& A,
                                          idx n) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();
    // reject empty matrices and the undefined zeroth Kronecker power
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::kronpow()");
    if (n == 0)
        throw exception::OutOfRange("qpp::kronpow()");
    // fold n copies of the matrix through the vector overload of kron()
    std::vector<dyn_mat<typename Derived::Scalar>> copies(n, mat);
    return kron(copies);
}
// Direct sum of multiple matrices, preserve return type
// variadic template
/**
* \brief Direct sum
* \see qpp::dirsumpow()
*
* Used to stop the recursion for the variadic template version of
* qpp::dirsum()
*
* \param head Eigen expression
* \return Its argument \a head
*/
template <typename T>
dyn_mat<typename T::Scalar> dirsum(const T& head) {
    // Base case of the variadic recursion: the direct sum of a single
    // argument is the argument itself (materialized as dyn_mat).
    return head;
}
/**
* \brief Direct sum
* \see qpp::dirsumpow()
*
* \param head Eigen expression
* \param tail Variadic Eigen expression (zero or more parameters)
* \return Direct sum of all input parameters,
* evaluated from left to right, as a dynamic matrix
* over the same scalar field as its arguments
*/
template <typename T, typename... Args>
dyn_mat<typename T::Scalar> dirsum(const T& head, const Args&... tail) {
    // Peel off the first argument and recurse on the rest; the direct sum
    // is associative, so the grouping does not affect the result.
    return internal::dirsum2(head, dirsum(tail...));
}
/**
* \brief Direct sum
* \see qpp::dirsumpow()
*
* \param As std::vector of Eigen expressions
* \return Direct sum of all elements in \a As,
* evaluated from left to right, as a dynamic matrix
* over the same scalar field as its arguments
*/
template <typename Derived>
dyn_mat<typename Derived::Scalar> dirsum(const std::vector<Derived>& As) {
    // the sum of zero summands is undefined here
    if (As.empty())
        throw exception::ZeroSize("qpp::dirsum()");
    for (const auto& mat : As)
        if (!internal::check_nonzero_size(mat))
            throw exception::ZeroSize("qpp::dirsum()");
    // the direct sum is block-diagonal: sizes add along both dimensions
    idx rows = 0, cols = 0;
    for (const auto& mat : As) {
        rows += static_cast<idx>(mat.rows());
        cols += static_cast<idx>(mat.cols());
    }
    dyn_mat<typename Derived::Scalar> out =
        dyn_mat<typename Derived::Scalar>::Zero(rows, cols);
    // place each summand on the diagonal, advancing both offsets
    idx r = 0, c = 0;
    for (const auto& mat : As) {
        out.block(r, c, mat.rows(), mat.cols()) = mat;
        r += static_cast<idx>(mat.rows());
        c += static_cast<idx>(mat.cols());
    }
    return out;
}
// Direct sum of a list of matrices, preserve return type
// deduce the template parameters from initializer_list
/**
* \brief Direct sum
* \see qpp::dirsumpow()
*
* \param As std::initializer_list of Eigen expressions,
* such as \a {A1, A2, ... ,Ak}
* \return Direct sum of all elements in \a As,
* evaluated from left to right, as a dynamic matrix
* over the same scalar field as its arguments
*/
template <typename Derived>
dyn_mat<typename Derived::Scalar>
dirsum(const std::initializer_list<Derived>& As) {
    // Delegate to the std::vector overload; the list must hold matrices
    // of one concrete Derived type.
    return dirsum(std::vector<Derived>(As));
}
/**
* \brief Direct sum power
* \see qpp::dirsum()
*
* \param A Eigen expression
* \param n Non-negative integer
* \return Direct sum of \a A with itself \a n times \f$A^{\oplus n}\f$,
* as a dynamic matrix over the same scalar field as \a A
*/
template <typename Derived>
dyn_mat<typename Derived::Scalar> dirsumpow(const Eigen::MatrixBase<Derived>& A,
                                            idx n) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();
    // reject empty matrices and the undefined zeroth direct-sum power
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::dirsumpow()");
    if (n == 0)
        throw exception::OutOfRange("qpp::dirsumpow()");
    // fold n copies of the matrix through the vector overload of dirsum()
    std::vector<dyn_mat<typename Derived::Scalar>> copies(n, mat);
    return dirsum(copies);
}
/**
* \brief Reshape
*
* Uses column-major order when reshaping (same as MATLAB)
*
* \param A Eigen expression
* \param rows Number of rows of the reshaped matrix
* \param cols Number of columns of the reshaped matrix
* \return Reshaped matrix with \a rows rows and \a cols columns,
* as a dynamic matrix over the same scalar field as \a A
*/
template <typename Derived>
dyn_mat<typename Derived::Scalar> reshape(const Eigen::MatrixBase<Derived>& A,
                                          idx rows, idx cols) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    idx Arows = static_cast<idx>(rA.rows());
    idx Acols = static_cast<idx>(rA.cols());
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::reshape()");
    // the element count must be preserved by the reshape
    if (Arows * Acols != rows * cols)
        throw exception::DimsMismatchMatrix("qpp::reshape()");
    // END EXCEPTION CHECKS
    // Reinterpret the same column-major buffer with the new dimensions.
    // The const_cast is needed because Eigen::Map wants a non-const
    // pointer; it is safe here because the Map is immediately copied into
    // the returned dyn_mat and the source is never written.
    return Eigen::Map<dyn_mat<typename Derived::Scalar>>(
        const_cast<typename Derived::Scalar*>(rA.data()), rows, cols);
}
/**
 * \brief Commutator
 * \see qpp::anticomm()
 *
 * Commutator \f$ [A,B] = AB - BA \f$.
 * Both \a A and \a B must be Eigen expressions over the same scalar field.
 *
 * \param A Eigen expression
 * \param B Eigen expression
 * \return Commutator \f$AB - BA\f$, as a dynamic matrix
 * over the same scalar field as \a A
 */
template <typename Derived1, typename Derived2>
dyn_mat<typename Derived1::Scalar> comm(const Eigen::MatrixBase<Derived1>& A,
                                        const Eigen::MatrixBase<Derived2>& B) {
    const dyn_mat<typename Derived1::Scalar>& rA = A.derived();
    const dyn_mat<typename Derived2::Scalar>& rB = B.derived();
    // EXCEPTION CHECKS
    // both operands must live over the same scalar field
    constexpr bool same_scalar =
        std::is_same<typename Derived1::Scalar,
                     typename Derived2::Scalar>::value;
    if (!same_scalar)
        throw exception::TypeMismatch("qpp::comm()");
    // neither operand may be empty
    if (!internal::check_nonzero_size(rA) || !internal::check_nonzero_size(rB))
        throw exception::ZeroSize("qpp::comm()");
    // commutators are defined for square matrices only ...
    if (!internal::check_square_mat(rA) || !internal::check_square_mat(rB))
        throw exception::MatrixNotSquare("qpp::comm()");
    // ... of identical dimension
    if (rA.rows() != rB.rows())
        throw exception::DimsNotEqual("qpp::comm()");
    // END EXCEPTION CHECKS
    return rA * rB - rB * rA;
}
/**
 * \brief Anti-commutator
 * \see qpp::comm()
 *
 * Anti-commutator \f$ \{A,B\} = AB + BA \f$.
 * Both \a A and \a B must be Eigen expressions over the same scalar field.
 *
 * \param A Eigen expression
 * \param B Eigen expression
 * \return Anti-commutator \f$AB + BA\f$, as a dynamic matrix
 * over the same scalar field as \a A
 */
template <typename Derived1, typename Derived2>
dyn_mat<typename Derived1::Scalar>
anticomm(const Eigen::MatrixBase<Derived1>& A,
         const Eigen::MatrixBase<Derived2>& B) {
    const dyn_mat<typename Derived1::Scalar>& rA = A.derived();
    const dyn_mat<typename Derived2::Scalar>& rB = B.derived();
    // EXCEPTION CHECKS
    // both operands must live over the same scalar field
    constexpr bool same_scalar =
        std::is_same<typename Derived1::Scalar,
                     typename Derived2::Scalar>::value;
    if (!same_scalar)
        throw exception::TypeMismatch("qpp::anticomm()");
    // neither operand may be empty
    if (!internal::check_nonzero_size(rA) || !internal::check_nonzero_size(rB))
        throw exception::ZeroSize("qpp::anticomm()");
    // anti-commutators are defined for square matrices only ...
    if (!internal::check_square_mat(rA) || !internal::check_square_mat(rB))
        throw exception::MatrixNotSquare("qpp::anticomm()");
    // ... of identical dimension
    if (rA.rows() != rB.rows())
        throw exception::DimsNotEqual("qpp::anticomm()");
    // END EXCEPTION CHECKS
    return rA * rB + rB * rA;
}
/**
 * \brief Projector
 *
 * Normalized projector onto state vector
 *
 * \param A Eigen expression (column vector)
 * \return Projector onto the state vector \a A, or the matrix \a Zero
 * if \a A has norm zero (i.e. smaller than qpp::eps),
 * as a dynamic matrix over the same scalar field as \a A
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar> prj(const Eigen::MatrixBase<Derived>& A) {
    using Scalar = typename Derived::Scalar;
    const dyn_mat<Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // zero-sized input is not allowed
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::prj()");
    // the input must be a column vector
    if (!internal::check_cvector(rA))
        throw exception::MatrixNotCvector("qpp::prj()");
    // END EXCEPTION CHECKS
    const double nrm = norm(rA);
    // |A><A| / <A|A> for non-zero vectors
    if (nrm > eps)
        return rA * adjoint(rA) / (nrm * nrm);
    // numerically-zero vector: return the zero matrix
    return dyn_mat<Scalar>::Zero(rA.rows(), rA.rows());
}
/**
 * \brief Gram-Schmidt orthogonalization
 *
 * Zero (norm < qpp::eps) input vectors are skipped, so the result may have
 * fewer columns than \a As has elements.
 *
 * \param As std::vector of Eigen expressions as column vectors
 * \return Gram-Schmidt vectors of \a As as columns of a dynamic matrix
 * over the same scalar field as its arguments
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar> grams(const std::vector<Derived>& As) {
    // EXCEPTION CHECKS
    // check empty list
    if (!internal::check_nonzero_size(As))
        throw exception::ZeroSize("qpp::grams()");
    for (auto&& it : As)
        if (!internal::check_nonzero_size(it))
            throw exception::ZeroSize("qpp::grams()");
    // check that As[0] is a column vector
    if (!internal::check_cvector(As[0]))
        throw exception::MatrixNotCvector("qpp::grams()");
    // now check that all the rest match the size of the first vector
    for (auto&& it : As)
        if (it.rows() != As[0].rows() || it.cols() != 1)
            throw exception::DimsNotEqual("qpp::grams()");
    // END EXCEPTION CHECKS
    // 'cut' accumulates the projector onto the orthogonal complement of the
    // vectors accepted so far; it starts as the identity
    dyn_mat<typename Derived::Scalar> cut =
        dyn_mat<typename Derived::Scalar>::Identity(As[0].rows(), As[0].rows());
    dyn_mat<typename Derived::Scalar> vi =
        dyn_mat<typename Derived::Scalar>::Zero(As[0].rows(), 1);
    std::vector<dyn_mat<typename Derived::Scalar>> outvecs;
    // find the first non-zero vector in the list
    idx pos = 0;
    for (pos = 0; pos < As.size(); ++pos) {
        if (norm(As[pos]) > eps) // add it as the first element
        {
            outvecs.push_back(As[pos]);
            break;
        }
    }
    // start the process
    // NOTE: if every input vector was (numerically) zero, pos == As.size(),
    // this loop never runs and an empty block is returned below
    for (idx i = pos + 1; i < As.size(); ++i) {
        // subtract the projector of the most recently added vector
        // (outvecs[i - 1 - pos] is always outvecs' last element here)
        cut -= prj(outvecs[i - 1 - pos]);
        // project As[i] onto the current orthogonal complement
        vi = cut * As[i];
        outvecs.push_back(vi);
    }
    dyn_mat<typename Derived::Scalar> result(As[0].rows(), outvecs.size());
    idx cnt = 0;
    for (auto&& it : outvecs) {
        double normA = norm(it);
        if (normA > eps) // we add only the non-zero vectors
        {
            // normalize before storing
            result.col(cnt) = it / normA;
            cnt++;
        }
    }
    // keep only the cnt non-zero (now orthonormal) columns
    return result.block(0, 0, As[0].rows(), cnt);
}
// deduce the template parameters from initializer_list
/**
 * \brief Gram-Schmidt orthogonalization
 *
 * \param As std::initializer_list of Eigen expressions as column vectors
 * \return Gram-Schmidt vectors of \a As as columns of a dynamic matrix
 * over the same scalar field as its arguments
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar>
grams(const std::initializer_list<Derived>& As) {
    // delegate to the std::vector overload
    const std::vector<Derived> vAs(As);
    return grams(vAs);
}
/**
 * \brief Gram-Schmidt orthogonalization
 *
 * \param A Eigen expression, the input vectors are the columns of \a A
 * \return Gram-Schmidt vectors of the columns of \a A,
 * as columns of a dynamic matrix over the same scalar field as \a A
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar> grams(const Eigen::MatrixBase<Derived>& A) {
    using Scalar = typename Derived::Scalar;
    const dyn_mat<Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // zero-sized input is not allowed
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::grams()");
    // END EXCEPTION CHECKS
    // split A into its columns and orthogonalize them
    const idx n_cols = static_cast<idx>(rA.cols());
    std::vector<dyn_mat<Scalar>> cols;
    cols.reserve(n_cols);
    for (idx i = 0; i < n_cols; ++i)
        cols.emplace_back(rA.col(i));
    return grams<dyn_mat<Scalar>>(cols);
}
/**
 * \brief Non-negative integer index to multi-index
 * \see qpp::multiidx2n()
 *
 * Uses standard lexicographical order, i.e. 00...0, 00...1 etc.
 *
 * \param n Non-negative integer index
 * \param dims Dimensions of the multi-partite system
 * \return Multi-index of the same size as \a dims
 */
inline std::vector<idx> n2multiidx(idx n, const std::vector<idx>& dims) {
    // EXCEPTION CHECKS
    // dimensions must be valid
    if (!internal::check_dims(dims))
        throw exception::DimsInvalid("qpp::n2multiidx()");
    // n must fit inside the total dimension prod(dims)
    const idx D = std::accumulate(std::begin(dims), std::end(dims),
                                  static_cast<idx>(1), std::multiplies<idx>());
    if (n >= D)
        throw exception::OutOfRange("qpp::n2multiidx()");
    // END EXCEPTION CHECKS
    // double the size for matrices reshaped as vectors
    idx buf[2 * maxn];
    internal::n2multiidx(n, dims.size(), dims.data(), buf);
    return std::vector<idx>(buf, buf + dims.size());
}
/**
 * \brief Multi-index to non-negative integer index
 * \see qpp::n2multiidx()
 *
 * Uses standard lexicographical order, i.e. 00...0, 00...1 etc.
 *
 * \param midx Multi-index, must have the same size as \a dims
 * \param dims Dimensions of the multi-partite system
 * \return Non-negative integer index
 */
inline idx multiidx2n(const std::vector<idx>& midx,
                      const std::vector<idx>& dims) {
    // EXCEPTION CHECKS
    if (!internal::check_dims(dims))
        throw exception::DimsInvalid("qpp::multiidx2n()");
    // midx must supply exactly one digit per subsystem; without this check
    // the range-check loop below (and internal::multiidx2n()) would read
    // midx out of bounds whenever midx.size() < dims.size()
    if (midx.size() != dims.size())
        throw exception::SubsysMismatchDims("qpp::multiidx2n()");
    // every digit must fit its local dimension
    for (idx i = 0; i < dims.size(); ++i)
        if (midx[i] >= dims[i])
            throw exception::OutOfRange("qpp::multiidx2n()");
    // END EXCEPTION CHECKS
    return internal::multiidx2n(midx.data(), dims.size(), dims.data());
}
/**
 * \brief Multi-partite qudit ket
 * \see ket template<char... Bits> qpp::operator "" _ket()
 *
 *
 * Constructs the multi-partite qudit ket \f$|\mathrm{mask}\rangle\f$,
 * where \a mask is a std::vector of non-negative integers.
 * Each element in \a mask has to be smaller than the corresponding element
 * in \a dims.
 *
 * \param mask std::vector of non-negative integers
 * \param dims Dimensions of the multi-partite system
 * \return Multi-partite qudit state vector, as a complex dynamic column vector
 */
inline ket mket(const std::vector<idx>& mask, const std::vector<idx>& dims) {
    const idx n = mask.size();
    // total Hilbert-space dimension
    const idx D = std::accumulate(std::begin(dims), std::end(dims),
                                  static_cast<idx>(1), std::multiplies<idx>());
    // EXCEPTION CHECKS
    // empty mask
    if (n == 0)
        throw exception::ZeroSize("qpp::mket()");
    // invalid dimensions
    if (!internal::check_dims(dims))
        throw exception::DimsInvalid("qpp::mket()");
    // mask must specify exactly one digit per subsystem
    if (n != dims.size())
        throw exception::SubsysMismatchDims("qpp::mket()");
    // every digit must fit its local dimension
    for (idx i = 0; i < n; ++i)
        if (mask[i] >= dims[i])
            throw exception::SubsysMismatchDims("qpp::mket()");
    // END EXCEPTION CHECKS
    // set the single non-zero amplitude at the linear index of mask
    ket psi = ket::Zero(D);
    psi(multiidx2n(mask, dims)) = 1;
    return psi;
}
/**
 * \brief Multi-partite qudit ket
 * \see ket template<char... Bits> qpp::operator "" _ket()
 *
 * Constructs the multi-partite qudit ket \f$|\mathrm{mask}\rangle\f$,
 * all subsystem having equal dimension \a d.
 * \a mask is a std::vector of non-negative integers, and
 * each element in \a mask has to be strictly smaller than \a d.
 *
 * \param mask std::vector of non-negative integers
 * \param d Subsystem dimensions
 * \return Multi-partite qudit state vector, as a complex dynamic column vector
 */
inline ket mket(const std::vector<idx>& mask, idx d = 2) {
    const idx n = mask.size();
    // total Hilbert-space dimension d^n
    const idx D = static_cast<idx>(std::llround(std::pow(d, n)));
    // EXCEPTION CHECKS
    // empty mask
    if (n == 0)
        throw exception::ZeroSize("qpp::mket()");
    // the local dimension must be at least 1
    if (d == 0)
        throw exception::DimsInvalid("qpp::mket()");
    // every digit must fit the local dimension
    for (idx i = 0; i < n; ++i)
        if (mask[i] >= d)
            throw exception::SubsysMismatchDims("qpp::mket()");
    // END EXCEPTION CHECKS
    // set the single non-zero amplitude at the linear index of mask
    const std::vector<idx> dims(n, d);
    ket psi = ket::Zero(D);
    psi(multiidx2n(mask, dims)) = 1;
    return psi;
}
/**
 * \brief Projector onto multi-partite qudit ket
 * \see cmat template<char... Bits> qpp::operator "" _prj()
 *
 * Constructs the projector onto the multi-partite qudit ket
 * \f$|\mathrm{mask}\rangle\f$,
 * where \a mask is a std::vector of non-negative integers.
 * Each element in \a mask has to be smaller than the corresponding element
 * in \a dims.
 *
 * \param mask std::vector of non-negative integers
 * \param dims Dimensions of the multi-partite system
 * \return Projector onto multi-partite qudit state vector,
 * as a complex dynamic matrix
 */
inline cmat mprj(const std::vector<idx>& mask, const std::vector<idx>& dims) {
    const idx n = mask.size();
    // total Hilbert-space dimension
    const idx D = std::accumulate(std::begin(dims), std::end(dims),
                                  static_cast<idx>(1), std::multiplies<idx>());
    // EXCEPTION CHECKS
    // empty mask
    if (n == 0)
        throw exception::ZeroSize("qpp::mprj()");
    // invalid dimensions
    if (!internal::check_dims(dims))
        throw exception::DimsInvalid("qpp::mprj()");
    // mask must specify exactly one digit per subsystem
    if (n != dims.size())
        throw exception::SubsysMismatchDims("qpp::mprj()");
    // every digit must fit its local dimension
    for (idx i = 0; i < n; ++i)
        if (mask[i] >= dims[i])
            throw exception::SubsysMismatchDims("qpp::mprj()");
    // END EXCEPTION CHECKS
    // rank-one projector: single 1 on the diagonal at the linear index
    cmat prj_mat = cmat::Zero(D, D);
    const idx pos = multiidx2n(mask, dims);
    prj_mat(pos, pos) = 1;
    return prj_mat;
}
/**
 * \brief Projector onto multi-partite qudit ket
 * \see cmat template<char... Bits> qpp::operator "" _prj()
 *
 * Constructs the projector onto the multi-partite qudit ket
 * \f$|\mathrm{mask}\rangle\f$,
 * all subsystem having equal dimension \a d.
 * \a mask is a std::vector of non-negative integers, and
 * each element in \a mask has to be strictly smaller than \a d.
 *
 * \param mask std::vector of non-negative integers
 * \param d Subsystem dimensions
 * \return Projector onto multi-partite qudit state vector,
 * as a complex dynamic matrix
 */
inline cmat mprj(const std::vector<idx>& mask, idx d = 2) {
    const idx n = mask.size();
    // total Hilbert-space dimension d^n
    const idx D = static_cast<idx>(std::llround(std::pow(d, n)));
    // EXCEPTION CHECKS
    // empty mask
    if (n == 0)
        throw exception::ZeroSize("qpp::mprj()");
    // the local dimension must be at least 1
    if (d == 0)
        throw exception::DimsInvalid("qpp::mprj()");
    // every digit must fit the local dimension
    for (idx i = 0; i < n; ++i)
        if (mask[i] >= d)
            throw exception::SubsysMismatchDims("qpp::mprj()");
    // END EXCEPTION CHECKS
    // rank-one projector: single 1 on the diagonal at the linear index
    const std::vector<idx> dims(n, d);
    cmat prj_mat = cmat::Zero(D, D);
    const idx pos = multiidx2n(mask, dims);
    prj_mat(pos, pos) = 1;
    return prj_mat;
}
/**
* \brief Computes the absolute values squared of an STL-like range
* of complex numbers
* \param first Iterator to the first element of the range
* \param last Iterator to the last element of the range
* \return Real vector consisting of the range absolute values squared
*/
template <typename InputIterator>
std::vector<double> abssq(InputIterator first, InputIterator last) {
std::vector<double> weights(std::distance(first, last));
std::transform(first, last, std::begin(weights),
[](cplx z) -> double { return std::norm(z); });
return weights;
}
/**
 * \brief Computes the absolute values squared of an STL-like container
 *
 * \param c STL-like container
 * \return Real vector consisting of the container's absolute values squared
 */
template <typename Container>
std::vector<double>
abssq(const Container& c,
      typename std::enable_if<is_iterable<Container>::value>::type* = nullptr)
// we need the std::enable_if to SFINAE out Eigen expressions
// that will otherwise match, instead of matching
// the overload below:
// template<typename Derived>
// abssq(const Eigen::MatrixBase<Derived>& A)
{
    // delegate to the iterator-pair overload
    auto first = std::begin(c);
    auto last = std::end(c);
    return abssq(first, last);
}
/**
 * \brief Computes the absolute values squared of an Eigen expression
 * \param A Eigen expression
 * \return Real vector consisting of the absolute values squared
 */
template <typename Derived>
std::vector<double> abssq(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // zero-sized input is not allowed
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::abssq()");
    // END EXCEPTION CHECKS
    // iterate over the underlying (column-major) storage
    const auto* first = rA.data();
    return abssq(first, first + rA.size());
}
/**
 * \brief Element-wise sum of an STL-like range
 *
 * \param first Iterator to the first element of the range
 * \param last Iterator to the last element of the range
 * \return Element-wise sum of the range,
 * as a scalar over the same scalar field as the range
 */
template <typename InputIterator>
typename std::iterator_traits<InputIterator>::value_type
sum(InputIterator first, InputIterator last) {
    using value_type = typename std::iterator_traits<InputIterator>::value_type;
    // accumulate by hand, starting from the additive identity
    value_type result = static_cast<value_type>(0);
    while (first != last) {
        result = result + *first;
        ++first;
    }
    return result;
}
/**
 * \brief Element-wise sum of the elements of an STL-like container
 *
 * \param c STL-like container
 * \return Element-wise sum of the elements of the container,
 * as a scalar over the same scalar field as the container
 */
template <typename Container>
typename Container::value_type
sum(const Container& c,
    typename std::enable_if<is_iterable<Container>::value>::type* = nullptr) {
    // delegate to the iterator-pair overload
    auto first = std::begin(c);
    auto last = std::end(c);
    return sum(first, last);
}
/**
 * \brief Element-wise product of an STL-like range
 *
 * \param first Iterator to the first element of the range
 * \param last Iterator to the last element of the range
 * \return Element-wise product of the range,
 * as a scalar over the same scalar field as the range
 */
template <typename InputIterator>
typename std::iterator_traits<InputIterator>::value_type
prod(InputIterator first, InputIterator last) {
    using value_type = typename std::iterator_traits<InputIterator>::value_type;
    // accumulate by hand, starting from the multiplicative identity
    value_type result = static_cast<value_type>(1);
    while (first != last) {
        result = result * *first;
        ++first;
    }
    return result;
}
/**
 * \brief Element-wise product of the elements of an STL-like container
 *
 * \param c STL-like container
 * \return Element-wise product of the elements of the container,
 * as a scalar over the same scalar field as the container
 */
template <typename Container>
typename Container::value_type
prod(const Container& c,
     typename std::enable_if<is_iterable<Container>::value>::type* = nullptr) {
    // delegate to the iterator-pair overload
    auto first = std::begin(c);
    auto last = std::end(c);
    return prod(first, last);
}
/**
 * \brief Finds the pure state representation of a matrix
 * proportional to a projector onto a pure state
 *
 * \note No purity check is done, the input state \a A must have rank one,
 * otherwise the function returns the first non-zero eigenvector of \a A
 *
 * \param A Eigen expression, assumed to be proportional
 * to a projector onto a pure state, i.e. \a A is assumed to have rank one
 * \return The unique non-zero eigenvector of \a A (up to a phase),
 * as a dynamic column vector over the same scalar field as \a A
 */
template <typename Derived>
dyn_col_vect<typename Derived::Scalar>
rho2pure(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // zero-sized input is not allowed
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::rho2pure()");
    // the input must be a square matrix
    if (!internal::check_square_mat(rA))
        throw exception::MatrixNotSquare("qpp::rho2pure()");
    // END EXCEPTION CHECKS
    dyn_col_vect<double> evals = hevals(rA);
    cmat evects = hevects(rA);
    // scan for the first eigenvector with non-zero eigenvalue; for a
    // rank-one input there is exactly one such eigenvector
    const idx d = static_cast<idx>(rA.rows());
    for (idx k = 0; k < d; ++k) {
        if (std::abs(evals(k)) > eps)
            return evects.col(k);
    }
    // all eigenvalues numerically zero: return the zero vector
    return dyn_col_vect<typename Derived::Scalar>::Zero(rA.rows());
}
/**
 * \brief Constructs the complement of a subsystem vector
 *
 * \param subsys Subsystem vector
 * \param N Total number of systems
 * \return Complement of \a subsys with respect to the set
 * \f$\{0, 1, \ldots, N - 1\}\f$
 */
template <typename T>
std::vector<T> complement(std::vector<T> subsys, idx N) {
    // EXCEPTION CHECKS
    // subsys cannot contain more elements than there are systems
    if (N < subsys.size())
        throw exception::OutOfRange("qpp::complement()");
    // END EXCEPTION CHECKS
    // set_difference requires sorted input (subsys is taken by value,
    // so sorting here does not affect the caller)
    std::sort(std::begin(subsys), std::end(subsys));
    // the full set {0, 1, ..., N - 1}
    std::vector<T> all(N);
    std::iota(std::begin(all), std::end(all), 0);
    std::vector<T> result(N - subsys.size());
    std::set_difference(std::begin(all), std::end(all), std::begin(subsys),
                        std::end(subsys), std::begin(result));
    return result;
}
/**
 * \brief Computes the 3-dimensional real Bloch vector
 * corresponding to the qubit density matrix \a A
 * \see qpp::bloch2rho()
 *
 * \note It is implicitly assumed that the density matrix is Hermitian
 *
 * \param A Eigen expression (2 x 2 qubit matrix)
 * \return 3-dimensional Bloch vector
 */
template <typename Derived>
std::vector<double> rho2bloch(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // the input must be a qubit (2 x 2) matrix
    if (!internal::check_qubit_matrix(rA))
        throw exception::NotQubitMatrix("qpp::rho2bloch()");
    // END EXCEPTION CHECKS
    // Pauli matrices
    cmat sigma_x(2, 2), sigma_y(2, 2), sigma_z(2, 2);
    sigma_x << 0, 1, 1, 0;
    sigma_y << 0, -1_i, 1_i, 0;
    sigma_z << 1, 0, 0, -1;
    // r_k = Tr(rho * sigma_k); real for Hermitian input
    return {std::real(trace(rA * sigma_x)), std::real(trace(rA * sigma_y)),
            std::real(trace(rA * sigma_z))};
}
/**
 * \brief Computes the density matrix corresponding to
 * the 3-dimensional real Bloch vector \a r
 * \see qpp::rho2bloch()
 *
 * \param r 3-dimensional real vector
 * \return Qubit density matrix
 */
inline cmat bloch2rho(const std::vector<double>& r) {
    // EXCEPTION CHECKS
    // the Bloch vector must have exactly 3 components
    if (r.size() != 3)
        throw exception::CustomException("qpp::bloch2rho",
                                         "r is not a 3-dimensional vector!");
    // END EXCEPTION CHECKS
    // Pauli matrices and the identity
    cmat sigma_x(2, 2), sigma_y(2, 2), sigma_z(2, 2), Id2(2, 2);
    sigma_x << 0, 1, 1, 0;
    sigma_y << 0, -1_i, 1_i, 0;
    sigma_z << 1, 0, 0, -1;
    Id2 << 1, 0, 0, 1;
    // rho = (I + r . sigma) / 2
    return (Id2 + r[0] * sigma_x + r[1] * sigma_y + r[2] * sigma_z) / 2.;
}
inline namespace literals {
// Idea taken from http://techblog.altplus.co.jp/entry/2017/11/08/130921
/**
 * \brief Multi-partite qubit ket user-defined literal
 * \see qpp::mket()
 *
 * Constructs the multi-partite qubit ket \f$|\mathrm{Bits}\rangle\f$
 *
 * \tparam Bits String of binary numbers representing the qubit ket
 * \return Multi-partite qubit ket, as a complex dynamic column vector
 */
template <char... Bits>
ket operator"" _ket() {
    constexpr idx n = sizeof...(Bits);
    constexpr char bits[n + 1] = {Bits..., '\0'};
    ket q = ket::Zero(std::pow(2, n));
    // EXCEPTION CHECKS
    // only the binary digits '0' and '1' are allowed
    for (idx i = 0; i < n; ++i) {
        if (bits[i] != '0' && bits[i] != '1')
            throw exception::OutOfRange(R"xxx(qpp::operator "" _ket())xxx");
    }
    // END EXCEPTION CHECKS
    // the bit string, read as a base-2 number, is the index of the
    // single non-zero amplitude
    const idx pos = std::stoi(bits, nullptr, 2);
    q(pos) = 1;
    return q;
}
/**
 * \brief Multi-partite qubit bra user-defined literal
 * \see qpp::mket() and qpp::adjoint()
 *
 * Constructs the multi-partite qubit bra \f$\langle\mathrm{Bits}|\f$
 *
 * \tparam Bits String of binary numbers representing the qubit bra
 * \return Multi-partite qubit bra, as a complex dynamic row vector
 */
template <char... Bits>
bra operator"" _bra() {
    constexpr idx n = sizeof...(Bits);
    constexpr char bits[n + 1] = {Bits..., '\0'};
    bra q = ket::Zero(std::pow(2, n));
    // EXCEPTION CHECKS
    // only the binary digits '0' and '1' are allowed
    for (idx i = 0; i < n; ++i) {
        if (bits[i] != '0' && bits[i] != '1')
            throw exception::OutOfRange(R"xxx(qpp::operator "" _bra())xxx");
    }
    // END EXCEPTION CHECKS
    // the bit string, read as a base-2 number, is the index of the
    // single non-zero amplitude
    const idx pos = std::stoi(bits, nullptr, 2);
    q(pos) = 1;
    return q;
}
/**
 * \brief Multi-partite qubit projector user-defined literal
 * \see qpp::mprj()
 *
 * Constructs the multi-partite qubit projector
 * \f$|\mathrm{Bits}\rangle\langle\mathrm{Bits}|\f$ (in the computational basis)
 *
 * \tparam Bits String of binary numbers representing the qubit state
 * to project on
 * \return Multi-partite qubit projector, as a complex dynamic matrix
 */
template <char... Bits>
cmat operator"" _prj() {
    constexpr idx n = sizeof...(Bits);
    constexpr char bits[n + 1] = {Bits..., '\0'};
    // EXCEPTION CHECKS
    // only the binary digits '0' and '1' are allowed
    // (the range-for also visits the trailing '\0', which is skipped)
    for (const char c : bits) {
        if (c != '\0' && c != '0' && c != '1')
            throw exception::OutOfRange(R"xxx(qpp::operator "" _prj())xxx");
    }
    // END EXCEPTION CHECKS
    // |Bits><Bits| as the outer product of the ket and bra literals
    return kron(operator""_ket<Bits...>(), operator""_bra<Bits...>());
}
} /* inline namespace literals */
} /* namespace qpp */
#endif /* FUNCTIONS_H_ */
|
gt.filter.c | /*
* PROJECT: GEM-Tools library
* FILE: gt.filter.c
* DATE: 02/08/2012
* AUTHOR(S): Santiago Marco-Sola <santiagomsola@gmail.com>
* DESCRIPTION: Application to filter {MAP,SAM,FASTQ} files and output the filtered result
*/
#ifdef HAVE_OPENMP
#include <omp.h>
#endif
#include "gem_tools.h"
#define GT_FILTER_FLOAT_NO_VALUE (-1.0)
#define gt_filter_cond_fatal_error_msg(condition,error_msg,args...) \
gt_cond_fatal_error_msg(condition,error_msg ". File '%s', line %"PRIu64"\n",##args, \
parameters.name_input_file,__buffered_input->current_line_num-1)
#define gt_filter_fatal_error_msg(error_msg,args...) \
gt_fatal_error_msg(error_msg ". File '%s', line %"PRIu64"\n",##args, \
parameters.name_input_file,__buffered_input->current_line_num-1)
/*
 * Inclusive [min,max] range of accepted quality scores
 * (consumed by gt_filter_is_quality_value_allowed())
 */
typedef struct {
uint64_t min;
uint64_t max;
} gt_filter_quality_range;
/*
 * All command-line options of the gt.filter tool, grouped by concern.
 * A single global instance (`parameters`) holds the parsed values; sentinel
 * defaults (-1, GT_ALL, UINT64_MAX, GT_FILTER_FLOAT_NO_VALUE) appear to mean
 * "option not set / filter disabled" — see the `parameters` initializer.
 */
typedef struct {
/* I/O */
char* name_input_file;
char* name_output_file;
char* name_reference_file;
char* name_gem_index_file;
char* annotation; // path to the GTF annotation file, presumably
gt_gtf* gtf; // parsed annotation (loaded from `annotation`)
bool mmap_input;
bool paired_end;
bool no_output;
gt_file_format output_format;
bool discarded_output; // also write the records that were filtered out
bool check_duplicates;
char* name_discarded_output_file;
gt_file_format discarded_output_format;
/* Filter Read/Qualities */
bool hard_trim;
uint64_t left_trim;
uint64_t right_trim;
bool restore_trim;
bool uniform_read;
bool uniform_read_strict;
bool qualities_to_offset_33;
bool qualities_to_offset_64;
bool remove_qualities;
bool add_qualities;
/* Filter Template/Alignments */
bool mapped;
bool unmapped;
int64_t unique_level; // -1 = disabled (see defaults below)
float min_length;
float max_length;
int64_t min_maps;
int64_t max_maps;
float max_strata_after_map;
/* Make templates unique */
int64_t reduce_to_unique_strata;
int64_t reduce_by_quality;
bool reduce_to_pairs;
uint64_t reduce_to_unique; // UINT64_MAX = disabled (see defaults below)
bool reduce_by_gene_id;
bool reduce_to_protein_coding;
/* RNA Seq to recalculate counters */
bool reduce_by_junctions;
bool no_split_maps;
bool only_split_maps;
bool no_penalty_for_splitmaps;
uint64_t min_intron_length;
uint64_t min_block_length;
/* Filter SE-Maps */
bool first_map;
bool keep_first_map;
bool keep_unique;
bool matches_pruning;
uint64_t max_decoded_matches; // GT_ALL = no cap (see gt_filter_prune_matches())
uint64_t min_decoded_strata;
uint64_t max_output_matches; // GT_ALL = no cap
uint64_t max_input_matches; // GT_ALL = no cap
bool make_counters;
bool only_unmapped;
bool only_mapped;
float min_event_distance; // GT_FILTER_FLOAT_NO_VALUE = not set
float max_event_distance;
float min_levenshtein_distance;
float max_levenshtein_distance;
gt_vector* map_ids; // (gt_string*) allowed sequence names
gt_shash* gtf_types;
bool filter_by_strand_se;
bool allow_strand_r;
bool allow_strand_f;
gt_vector* quality_score_ranges; /* (gt_filter_quality_range) */
/* Filter PE-Maps */
int64_t max_inss; // insert-size bounds, presumably
int64_t min_inss;
bool filter_by_strand_pe;
bool allow_strand_rf;
bool allow_strand_fr;
bool allow_strand_ff;
bool allow_strand_rr;
/* Filter-Realign */
bool mismatch_recovery;
bool realign_hamming;
bool realign_levenshtein;
/* Checking/Report */
bool check;
bool check_format;
gt_file_format check_file_format;
/* Hidden */
bool special_functionality;
bool error_plot; // Print error distribution (depreciated)
bool insert_size_plot; // Print insert size distribution (depreciated)
bool show_sequence_list; // Display sequence list in the GEMindex/.fa...
bool display_pretty; // Display pretty printed map(s)
bool group_reads; // Group previously split reads
bool sample_read; // Sample the read in chunks (annotated by chunk group)
float split_chunk_size;
float split_step_size;
float split_left_trim;
float split_right_trim;
float split_min_remainder;
/* Misc */
uint64_t num_threads;
bool verbose;
/* Control flags */
bool perform_dna_map_filter; // Any DNA-filtering criteria activated
bool perform_rna_map_filter; // Any RNA-filtering criteria activated
bool perform_annotation_filter; // Any annotation based filtering criteria activated
bool load_index;
} gt_filter_args;
/*
 * Global tool configuration with its defaults.
 * Conventions visible below: -1 / GT_FILTER_FLOAT_NO_VALUE / UINT64_MAX mean
 * "option not set", GT_ALL means "no limit", and the widest possible
 * insert-size window (INT64_MIN..INT64_MAX) means "accept everything".
 */
gt_filter_args parameters = {
/* I/O */
.name_input_file=NULL,
.name_output_file=NULL,
.name_reference_file=NULL,
.name_gem_index_file=NULL,
.annotation = NULL,
.gtf = NULL,
.mmap_input=false,
.paired_end=false,
.no_output=false,
.output_format=FILE_FORMAT_UNKNOWN,
.discarded_output = false,
.name_discarded_output_file=NULL,
.discarded_output_format=FILE_FORMAT_UNKNOWN,
.check_duplicates=false,
/* Filter Read/Qualities */
.hard_trim=false,
.left_trim=0,
.right_trim=0,
.restore_trim=false,
.uniform_read=false,
.uniform_read_strict=false,
.qualities_to_offset_33=false,
.qualities_to_offset_64=false,
.remove_qualities=false,
.add_qualities=false,
/* Filter Template/Alignments */
.mapped=false,
.unmapped=false,
.unique_level=-1,
.min_length=-1.0,
.max_length=-1.0,
.min_maps=-1,
.max_strata_after_map=-1.0,
.max_maps=-1,
/* Make templates unique */
.reduce_to_unique_strata=-1,
.reduce_by_gene_id=false,
.reduce_by_junctions=false,
.reduce_to_protein_coding=false,
.reduce_to_unique=UINT64_MAX,
.reduce_to_pairs=false,
.reduce_by_quality=-1,
/* RNA Seq */
.no_split_maps=false,
.only_split_maps=false,
.no_penalty_for_splitmaps=false,
.min_intron_length=0,
.min_block_length=0,
/* Filter SE-Maps */
.first_map=false,
.keep_first_map=false,
.keep_unique=false,
.matches_pruning=false,
.max_decoded_matches=GT_ALL,
.min_decoded_strata=0,
.max_output_matches=GT_ALL,
.max_input_matches=GT_ALL,
.make_counters=false,
.only_unmapped=false,
.only_mapped=false,
.min_event_distance=GT_FILTER_FLOAT_NO_VALUE,
.max_event_distance=GT_FILTER_FLOAT_NO_VALUE,
.min_levenshtein_distance=GT_FILTER_FLOAT_NO_VALUE,
.max_levenshtein_distance=GT_FILTER_FLOAT_NO_VALUE,
.map_ids=NULL,
.gtf_types=NULL,
.filter_by_strand_se=false,
.allow_strand_r=false,
.allow_strand_f=false,
.quality_score_ranges = NULL,
/* Filter PE-Maps */
.max_inss=INT64_MAX,
.min_inss=INT64_MIN,
.filter_by_strand_pe=false,
.allow_strand_rf=false,
.allow_strand_fr=false,
.allow_strand_ff=false,
.allow_strand_rr=false,
/* Filter-Realign */
.mismatch_recovery=false,
.realign_hamming=false,
.realign_levenshtein=false,
/* Checking/Report */
.check = false,
.check_format = false,
/* Hidden */
.special_functionality = false,
.error_plot = false,
.insert_size_plot = false,
.show_sequence_list = false,
.display_pretty = false,
.group_reads = false,
.sample_read = false,
.split_chunk_size = -1.0,
.split_step_size = -1.0,
.split_left_trim = -1.0,
.split_right_trim = -1.0,
.split_min_remainder = 0.0,
/* Misc */
.num_threads=1,
.verbose=false,
/* Control flags */
.perform_dna_map_filter=false,
.perform_rna_map_filter=false,
.perform_annotation_filter=false,
.load_index=false
};
/*
 * Helper to get num maps correctly also for unpaired
 * mapped pairs.
 * SE templates and properly paired PE templates report the multimap count;
 * a PE template whose ends are not paired reports the sum of the per-end
 * map counts instead.
 */
GT_INLINE uint64_t gt_filter_get_num_maps(gt_template* template){
GT_TEMPLATE_IF_SE_ALINGMENT(template) { // NB: macro name is misspelled upstream ("ALINGMENT")
// single-end: the template's mmap counter is the answer
return gt_template_get_num_mmaps(template);
} else {
if (!gt_template_is_mapped(template)) {
// paired-end but not paired-mapped: count each end separately
GT_TEMPLATE_REDUCE_BOTH_ENDS(template,alignment_end1,alignment_end2);
return gt_alignment_get_num_maps(alignment_end1) + gt_alignment_get_num_maps(alignment_end2);
} else {
return gt_template_get_num_mmaps(template);
}
}
}
/*
 * Checking/(Re)Aligning/MismsRecovery
 */
/*
 * Runs mismatch recovery (gt_map_recover_mismatches_sa) on every map of every
 * alignment in the template against the sequence archive, printing any map
 * that could not be recovered. Counters are recalculated afterwards
 * (template-level only when the template has more than one block).
 */
GT_INLINE void gt_filter_mismatch_recovery_maps(
char* const name_input_file,const uint64_t current_line_num,
gt_template* const template,gt_sequence_archive* const sequence_archive) {
// Unfolded as to report errors in the recovery
gt_status error_code;
uint64_t alignment_pos = 0;
GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) {
uint64_t map_pos = 0;
GT_ALIGNMENT_ITERATE(alignment,map) {
// non-zero status => recovery failed; report file/line/read/position
if ((error_code=gt_map_recover_mismatches_sa(map,alignment->read,sequence_archive))) {
gt_error_msg("Unrecoverable Alignment '%s':%"PRIu64"\n\tREAD::'"PRIgts"':%"PRIu64":%"PRIu64" ",
name_input_file,current_line_num,PRIgts_content(template->tag),alignment_pos,map_pos);
gt_output_map_fprint_map_pretty_sa(stdout,map,alignment->read,sequence_archive);
}
++map_pos;
}
// counters may have changed after recovery
gt_alignment_recalculate_counters(alignment);
++alignment_pos;
}
if (gt_template_get_num_blocks(template)>1) gt_template_recalculate_counters(template);
}
/*
 * Verifies every map of every alignment in the template against the
 * sequence archive (gt_map_check_alignment_sa), pretty-printing the maps
 * that fail. Updates the checked/correct counters for both alignments
 * and individual maps.
 * Returns true iff all maps of the template verified correctly.
 */
GT_INLINE bool gt_filter_check_maps(
char* const name_input_file,const uint64_t current_line_num,
gt_template* const template,gt_sequence_archive* const sequence_archive,
uint64_t* const total_algs_checked,uint64_t* const total_algs_correct,
uint64_t* const total_maps_checked,uint64_t* const total_maps_correct) {
  bool all_correct = true;
  gt_status check_status;
  uint64_t alg_idx = 0;
  GT_TEMPLATE_ITERATE_ALIGNMENT(template,alg) {
    uint64_t map_idx = 0;
    GT_ALIGNMENT_ITERATE(alg,current_map) {
      check_status = gt_map_check_alignment_sa(current_map,alg->read,sequence_archive);
      if (check_status) {
        // Wrong map: report its location and pretty-print it
        gt_error_msg("Wrong Alignment '%s':%"PRIu64"\n\tREAD::'"PRIgts"':%"PRIu64":%"PRIu64" ",
            name_input_file,current_line_num,PRIgts_content(template->tag),alg_idx,map_idx);
        gt_output_map_fprint_map_pretty_sa(stdout,current_map,alg->read,sequence_archive);
        all_correct = false;
      } else {
        ++(*total_maps_correct);
      }
      ++(*total_maps_checked);
      ++map_idx;
    }
    ++alg_idx;
  }
  ++(*total_algs_checked);
  if (all_correct) ++(*total_algs_correct);
  return all_correct;
}
/*
 * Filtering MAPs functions
 */
/*
 * Frees a vector of map-id strings: each contained gt_string, then the
 * vector itself. NULL-safe (does nothing when the vector is NULL).
 */
void gt_filter_delete_map_ids(gt_vector* filter_map_ids) {
  if (filter_map_ids==NULL) return; // nothing to free
  GT_VECTOR_ITERATE(filter_map_ids,map_id,pos,gt_string*) {
    gt_string_delete(*map_id);
  }
  gt_vector_delete(filter_map_ids);
}
GT_INLINE bool gt_filter_is_sequence_name_allowed(gt_string* const seq_name) {
  // Linear scan of the user-supplied sequence-name whitelist (parameters.map_ids)
  bool allowed = false;
  GT_VECTOR_ITERATE(parameters.map_ids,allowed_id,pos,gt_string*) {
    if (gt_string_equals(seq_name,*allowed_id)) {
      allowed = true;
      break;
    }
  }
  return allowed;
}
GT_INLINE bool gt_filter_is_quality_value_allowed(const uint64_t quality_score) {
GT_VECTOR_ITERATE(parameters.quality_score_ranges,quality_range,pos,gt_filter_quality_range) {
if (quality_score >= quality_range->min && quality_score <= quality_range->max) return true;
}
return false;
}
GT_INLINE void gt_filter_prune_matches(gt_template* const template) {
  /*
   * Caps the number of multimaps kept in the template according to the
   * decoding limits (min strata / max decoded matches) and the hard
   * output-matches limit.
   */
  uint64_t matches_cap = GT_ALL;
  const bool has_decode_limits =
      (parameters.max_decoded_matches!=GT_ALL || parameters.min_decoded_strata!=0);
  if (has_decode_limits) {
    uint64_t max_strata; // required output argument; value unused here
    gt_counters_calculate_num_maps(gt_template_get_counters_vector(template),
        parameters.min_decoded_strata,parameters.max_decoded_matches,&max_strata,&matches_cap);
  }
  // Tighten further by the explicit output limit, if any
  if (parameters.max_output_matches!=GT_ALL && parameters.max_output_matches < matches_cap) {
    matches_cap = parameters.max_output_matches;
  }
  // Drop all multimaps beyond the cap
  if (matches_cap < GT_ALL) {
    gt_template_reduce_mmaps(template,matches_cap);
  }
}
GT_INLINE bool gt_filter_has_junction(gt_map* const map,const uint64_t start,const uint64_t end) {
  /*
   * Scans every adjacent pair of blocks of the split-map and tests whether
   * the junction between them spans exactly [start,end].
   */
  GT_MAP_ITERATE(map,map_block) {
    if (!gt_map_has_next_block(map_block)) continue;
    gt_map* const next_block = gt_map_get_next_block(map_block);
    const bool is_forward = (gt_map_get_strand(map_block) == FORWARD);
    // On the reverse strand the two blocks are laid out in the opposite order
    const uint64_t jct_start = gt_map_get_end_mapping_position(is_forward ? map_block : next_block) + 1;
    const uint64_t jct_end = gt_map_get_begin_mapping_position(is_forward ? next_block : map_block) - 1;
    if (jct_start == start && jct_end == end) return true;
  }
  return false;
}
GT_INLINE uint64_t gt_filter_count_junctions_in_region(gt_map* const map,const uint64_t start,const uint64_t end) {
  // Counts the split-map junctions whose span intersects the closed interval [start,end]
  uint64_t num_junctions = 0;
  GT_MAP_ITERATE(map,map_block) {
    if (!gt_map_has_next_block(map_block)) continue;
    gt_map* const next_block = gt_map_get_next_block(map_block);
    const bool is_forward = (gt_map_get_strand(map_block) == FORWARD);
    // On the reverse strand the two blocks are laid out in the opposite order
    const uint64_t jct_start = gt_map_get_end_mapping_position(is_forward ? map_block : next_block) + 1;
    const uint64_t jct_end = gt_map_get_begin_mapping_position(is_forward ? next_block : map_block) - 1;
    // Standard interval-overlap test
    if (jct_end >= start && jct_start <= end) ++num_junctions;
  }
  return num_junctions;
}
/*
 * Checks whether the split-map junctions of a paired mmap agree inside the
 * region where its two ends overlap. Returns true when the pair is coherent,
 * which includes the trivial cases (neither end is a split-map, or the ends
 * do not overlap at all).
 */
GT_INLINE bool gt_filter_are_overlapping_pairs_coherent(gt_map** const mmap) {
  if (!gt_map_has_next_block(mmap[0]) && !gt_map_has_next_block(mmap[1])) return true;
  // Check overlap
  uint64_t overlap_start, overlap_end;
  if (gt_map_block_overlap(mmap[0],mmap[1],&overlap_start,&overlap_end)) {
    // Both ends must expose the same number of junctions inside the overlap
    uint64_t junctions_in_1 = gt_filter_count_junctions_in_region(mmap[0], overlap_start, overlap_end);
    uint64_t junctions_in_2 = gt_filter_count_junctions_in_region(mmap[1], overlap_start, overlap_end);
    if(junctions_in_1 != junctions_in_2) return false;
    // Every junction of end-1 that starts inside the overlap must exist in end-2
    GT_MAP_ITERATE(mmap[0],map_block) {
      if (gt_map_has_next_block(map_block)) {
        const bool forward = (gt_map_get_strand(map_block) == FORWARD);
        // Is the junction in the overlap ?
        const uint64_t junctions_start = gt_map_get_end_mapping_position(forward ? map_block: gt_map_get_next_block(map_block)) + 1;
        const uint64_t junctions_end = gt_map_get_begin_mapping_position(forward ? gt_map_get_next_block(map_block): map_block) - 1;
        // Find the junctions start in the other map
        // NOTE(review): this test is half-open (start < overlap_end) whereas
        // gt_filter_count_junctions_in_region uses a closed interval — confirm intended
        if (junctions_start >= overlap_start && junctions_start < overlap_end &&
            !gt_filter_has_junction(mmap[1],junctions_start,junctions_end)) {
          return false; // Start not found, not overlapping split maps
        }
      }
    }
  }
  return true;
}
/*
 * Inserts the map(s) referenced by a GTF hit into the template.
 * PE hits (hit->mmap set) are inserted as a multimap; SE hits (hit->map set)
 * are inserted into the end selected by target_block: 1 or 2 address the
 * corresponding end of a paired template, 0 addresses the single-end reduction.
 */
GT_INLINE void gt_filter_add_from_hit(gt_template* const template,gt_gtf_hit* hit, uint64_t target_block) {
  if (hit->mmap != NULL) {
    // add PE
    gt_map** mmap_copy = gt_mmap_array_copy(hit->mmap, hit->num_template_blocks);
    gt_template_insert_mmap(template,mmap_copy,hit->map_attributes, parameters.check_duplicates);
    // Freed right after insertion — gt_template_insert_mmap presumably copies
    // the array (the same insert+free pattern is used throughout this file)
    free(mmap_copy);
  } else if(hit->map != NULL) {
    if(target_block > 0){
      // SE hit addressed to one specific end of a paired template
      GT_TEMPLATE_REDUCE_BOTH_ENDS(template,alignment_1, alignment_2);
      if(target_block == 1){
        gt_alignment_insert_map(alignment_1,gt_map_copy(hit->map), parameters.check_duplicates);
      }else{
        gt_alignment_insert_map(alignment_2,gt_map_copy(hit->map), parameters.check_duplicates);
      }
    }else{
      // SE hit for a single-end template
      GT_TEMPLATE_REDUCTION(template,alignment_dst);
      gt_alignment_insert_map(alignment_dst,gt_map_copy(hit->map), parameters.check_duplicates);
    }
  }
}
/*
 * Annotation-based reduction for one alignment: searches GTF hits for the
 * alignment and copies into template_dst the hits that satisfy the enabled
 * protein-coding / gene-id / junction-ratio criteria ('block' selects the
 * target end, see gt_filter_add_from_hit). Returns true iff at least one
 * hit was inserted.
 */
GT_INLINE bool gt_filter_make_reduce_by_annotation_alignment(gt_template* const template_dst,gt_alignment* const alignment, uint64_t block, gt_gtf_hits* hits) {
  bool filtered = false;
  gt_gtf_search_alignment_hits(parameters.gtf, hits, alignment);
  // A criterion is active only if requested by the user AND supported by the hit statistics
  bool prot_coding = (parameters.reduce_to_protein_coding && hits->num_protein_coding >= 1);
  bool gene_id = (parameters.reduce_by_gene_id && hits->num_paired_genes >= 1);
  bool junction_hits = (parameters.reduce_by_junctions && hits->junction_hit_ration > 0.0);
  if(gene_id || prot_coding){
    GT_VECTOR_ITERATE(hits->exon_hits, e, c, gt_gtf_hit*){
      gt_gtf_hit* hit = *e;
      if(junction_hits){
        // Skip hits whose junction ratio is positive but not the overall best
        double junction_ratio = hit->num_junctions == 0 ? -1.0 : (double)hit->num_junctions_hits/(double)hit->num_junctions;
        if(junction_ratio > 0.0 && junction_ratio != hits->junction_hit_ration) continue;
      }
      if(gene_id && !hit->pairs_gene)continue;
      if(prot_coding && !hit->is_protein_coding)continue;
      filtered = true;
      gt_filter_add_from_hit(template_dst, hit, block);
    }
  }
  return filtered;
}
/*
 * Annotation-based reduction for a whole template.
 * SE templates delegate to the per-alignment reduction; unpaired PE templates
 * process each end separately (carefully preserving the non-filtered end's
 * hits so one end being reduced does not drop the other); mapped PE templates
 * are reduced from paired template hits directly.
 * Returns true iff any reduction actually inserted hits.
 */
GT_INLINE bool gt_filter_make_reduce_by_annotation(gt_template* const template_dst,gt_template* const template_src) {
  bool filtered = false;
  GT_TEMPLATE_IF_SE_ALINGMENT(template_src) {
    GT_TEMPLATE_REDUCTION(template_src,alignment_src);
    gt_gtf_hits* hits = gt_gtf_hits_new();
    filtered = gt_filter_make_reduce_by_annotation_alignment(template_dst, alignment_src, 0, hits);
    gt_gtf_hits_delete(hits);
    return filtered;
  } else {
    if (!gt_template_is_mapped(template_src)) {
      // Unpaired PE: reduce each end independently
      GT_TEMPLATE_REDUCE_BOTH_ENDS(template_src,alignment_end1,alignment_end2);
      gt_gtf_hits* hits = gt_gtf_hits_new();
      filtered = gt_filter_make_reduce_by_annotation_alignment(template_dst, alignment_end1, 1, hits);
      if(!filtered){
        // add all as we want to preserve them in case second alignment is filtered.
        GT_VECTOR_ITERATE(hits->exon_hits, e, c, gt_gtf_hit*){
          gt_filter_add_from_hit(template_dst, *e, 1);
        }
      }
      // Reuse the same hits container for end 2 (cleared, not deleted)
      gt_gtf_hits_clear(hits);
      if(gt_filter_make_reduce_by_annotation_alignment(template_dst, alignment_end2, 2, hits)){
        filtered = true;
      }else if(filtered){
        // alignment 1 was filtered, so we have to copy all from alignment 2
        GT_VECTOR_ITERATE(hits->exon_hits, e, c, gt_gtf_hit*){
          gt_filter_add_from_hit(template_dst, *e, 2);
        }
      }
      gt_gtf_hits_delete(hits);
      return filtered;
    } else {
      // Mapped PE: search paired hits over the whole template
      gt_gtf_hits* hits = gt_gtf_hits_new();
      gt_gtf_search_template_hits(parameters.gtf, hits, template_src);
      // A criterion is active only if requested AND supported by the hit statistics
      bool prot_coding = (parameters.reduce_to_protein_coding && hits->num_protein_coding >= 1);
      bool gene_id = (parameters.reduce_by_gene_id && hits->num_paired_genes >= 1);
      bool junction_hits = (parameters.reduce_by_junctions && hits->junction_hit_ration > 0.0);
      if(gene_id || prot_coding || junction_hits){
        GT_VECTOR_ITERATE(hits->exon_hits, e, c, gt_gtf_hit*){
          gt_gtf_hit* hit = *e;
          if(junction_hits){
            // Skip hits whose junction ratio is positive but not the overall best
            double junction_ratio = hit->num_junctions == 0 ? -1.0 : (double)hit->num_junctions_hits/(double)hit->num_junctions;
            if(junction_ratio > 0.0 && junction_ratio != hits->junction_hit_ration)continue;
          }
          if(gene_id && !hit->pairs_gene)continue;
          if(prot_coding && !hit->is_protein_coding)continue;
          filtered = true;
          gt_filter_add_from_hit(template_dst, hit, 0);
        }
      }
      gt_gtf_hits_delete(hits);
    }
  }
  return filtered;
}
void gt_alignment_reduction_filter(gt_alignment* const alignment_dst,gt_alignment* const alignment_src,const gt_file_format file_format) {
  /*
   * Copies maps from src to dst subject to the reduction parameters.
   * Both conditions below are loop-invariant; they are kept inside the loop
   * so that they are only evaluated when the alignment has at least one map.
   */
  GT_ALIGNMENT_ITERATE(alignment_src,src_map) {
    // Unique-strata reduction: keep only the primary map and stop
    const bool reduce_uniq = (parameters.reduce_to_unique_strata >= 0) &&
        (gt_alignment_get_uniq_degree(alignment_src) >= parameters.reduce_to_unique_strata);
    if (reduce_uniq) {
      gt_alignment_insert_map(alignment_dst,gt_map_copy(src_map), parameters.check_duplicates);
      break;
    }
    // Too many maps for reduce-to-unique: keep none
    if (gt_alignment_get_num_maps(alignment_src) > parameters.reduce_to_unique) break;
    gt_alignment_insert_map(alignment_dst,gt_map_copy(src_map), parameters.check_duplicates);
  }
}
/*
 * DNA single-end filter: copies maps from alignment_src into alignment_dst,
 * applying the sequence-name / strata / levenshtein-distance / strand /
 * quality filters configured in the global 'parameters'.
 */
void gt_alignment_dna_filter(gt_alignment* const alignment_dst,gt_alignment* const alignment_src,const gt_file_format file_format) {
  // NOTE(review): underflows to UINT64_MAX if the counters report 0 — presumably
  // only reached with at least one matching stratum; confirm against callers
  const uint64_t first_matching_distance = gt_counters_get_min_matching_strata(gt_alignment_get_counters_vector(alignment_src)) - 1;
  const uint64_t max_mismatch_quality = gt_alignment_get_max_mismatch_quality(alignment_src);
  // Reduction by unique level (can be calculated beforehand)
  bool pick_only_first_map = false; // NOTE(review): never set true in this function
  /*
   * (1) Pre-filtering steps
   */
  gt_map* first_map = NULL;
  // Remember the primary map so it can be restored if everything gets filtered out
  if (parameters.keep_first_map && gt_alignment_get_num_maps(alignment_src)>0) {
    first_map = gt_map_copy(gt_alignment_get_map(alignment_src,0));
  }
  /*
   * (2) Filtering of maps
   */
  GT_ALIGNMENT_ITERATE(alignment_src,map) {
    // Check sequence name
    if (parameters.map_ids!=NULL) {
      if (!gt_filter_is_sequence_name_allowed(map->seq_name)) continue;
    }
    // Filter strata beyond first mapping
    const int64_t current_stratum = parameters.no_penalty_for_splitmaps ? gt_map_get_no_split_distance(map) : gt_map_get_global_distance(map);
    if (parameters.max_strata_after_map >= 0.0 &&
        (current_stratum-first_matching_distance) > gt_alignment_get_read_proportion(alignment_src,parameters.max_strata_after_map)) break;
    // Check strata (event distance expressed as a proportion of the read length)
    if (parameters.min_event_distance != GT_FILTER_FLOAT_NO_VALUE || parameters.max_event_distance != GT_FILTER_FLOAT_NO_VALUE) {
      const uint64_t total_distance = parameters.no_penalty_for_splitmaps ? gt_map_get_no_split_distance(map) : gt_map_get_global_distance(map);
      if (parameters.min_event_distance != GT_FILTER_FLOAT_NO_VALUE) {
        if (total_distance < gt_alignment_get_read_proportion(alignment_src,parameters.min_event_distance)) continue;
      }
      if (parameters.max_event_distance != GT_FILTER_FLOAT_NO_VALUE) {
        if (total_distance > gt_alignment_get_read_proportion(alignment_src,parameters.max_event_distance)) continue;
      }
    }
    // Check levenshtein distance
    if (parameters.min_levenshtein_distance != GT_FILTER_FLOAT_NO_VALUE || parameters.max_levenshtein_distance != GT_FILTER_FLOAT_NO_VALUE) {
      const uint64_t total_distance = gt_map_get_global_levenshtein_distance(map);
      if (parameters.min_levenshtein_distance != GT_FILTER_FLOAT_NO_VALUE) {
        if (total_distance < gt_alignment_get_read_proportion(alignment_src,parameters.min_levenshtein_distance)) continue;
      }
      if (parameters.max_levenshtein_distance != GT_FILTER_FLOAT_NO_VALUE) {
        if (total_distance > gt_alignment_get_read_proportion(alignment_src,parameters.max_levenshtein_distance)) continue;
      }
    }
    // Filter strand
    if (parameters.filter_by_strand_se) {
      if (map->strand==FORWARD && !parameters.allow_strand_f) continue;
      if (map->strand==REVERSE && !parameters.allow_strand_r) continue;
    }
    // Filter quality scores (SAM carries phred scores; MAP carries gt scores)
    if (parameters.quality_score_ranges!=NULL) {
      if (!gt_filter_is_quality_value_allowed((file_format==SAM) ? map->phred_score : map->gt_score)) continue;
    }
    /*
     * (3) Reduction of all maps
     */
    if (parameters.reduce_by_quality >= 0) {
      // Drop maps whose mismatch-quality sum is too close to the best one
      const int64_t q = gt_alignment_sum_mismatch_qualities(alignment_src,map);
      if (q!=0 && q!=max_mismatch_quality && abs(max_mismatch_quality-q)<=parameters.reduce_by_quality) continue;
    }
    /*
     * Insert the map
     */
    gt_alignment_insert_map(alignment_dst,gt_map_copy(map), parameters.check_duplicates);
    // Skip the rest if first map is enabled
    if (parameters.first_map || pick_only_first_map) break;
  }
  /*
   * (4) Post-filtering steps
   */
  if (parameters.keep_first_map) {
    if (gt_alignment_get_num_maps(alignment_dst)==0) {
      // Nothing survived: restore the primary map (ownership passes to dst)
      gt_alignment_insert_map(alignment_dst,first_map, parameters.check_duplicates);
    } else {
      gt_map_delete(first_map);
    }
  }
}
/*
 * Template-level reduction filter. SE templates delegate to
 * gt_alignment_reduction_filter; unpaired PE templates reduce each end
 * separately (unless reduce_to_pairs is set, in which case unpaired maps are
 * dropped entirely); mapped PE templates reduce at the multimap level.
 */
void gt_template_reduction_filter(gt_template* const template_dst,gt_template* const template_src,const gt_file_format file_format) {
  GT_TEMPLATE_IF_SE_ALINGMENT(template_src) {
    GT_TEMPLATE_REDUCTION(template_src,alignment_src);
    GT_TEMPLATE_REDUCTION(template_dst,alignment_dst);
    gt_alignment_reduction_filter(alignment_dst,alignment_src,file_format);
  } else {
    if (!gt_template_is_mapped(template_src)) {
      // reduce_to_pairs: unpaired templates contribute nothing
      if(!parameters.reduce_to_pairs){
        GT_TEMPLATE_REDUCE_BOTH_ENDS(template_src,alignment_src_end1,alignment_src_end2);
        GT_TEMPLATE_REDUCE_BOTH_ENDS(template_dst,alignment_dst_end1,alignment_dst_end2);
        gt_alignment_reduction_filter(alignment_dst_end1,alignment_src_end1,file_format);
        gt_alignment_reduction_filter(alignment_dst_end2,alignment_src_end2,file_format);
      }
    } else {
      GT_TEMPLATE_ITERATE_MMAP__ATTR(template_src,mmap,mmap_attributes) {
        // Unique-strata reduction: keep only the primary multimap and stop
        if (parameters.reduce_to_unique_strata >= 0 && (gt_template_get_uniq_degree(template_src) >= parameters.reduce_to_unique_strata)) {
          gt_map** mmap_copy = gt_mmap_array_copy(mmap,__mmap_num_blocks);
          gt_template_insert_mmap(template_dst,mmap_copy,mmap_attributes, parameters.check_duplicates);
          free(mmap_copy);
          break;
        }
        // NOTE(review): this uses '>=' while the alignment-level filter uses '>'
        // for the same reduce_to_unique check — confirm the asymmetry is intended
        if(gt_template_get_num_mmaps(template_src) >= parameters.reduce_to_unique) break;
        gt_map** mmap_copy = gt_mmap_array_copy(mmap,__mmap_num_blocks);
        gt_template_insert_mmap(template_dst,mmap_copy,mmap_attributes, parameters.check_duplicates);
        free(mmap_copy);
      }
    }
  }
}
/*
 * DNA filter for a whole template. SE templates and unpaired PE templates
 * delegate to gt_alignment_dna_filter; mapped PE templates are filtered at
 * the multimap level (per-pair distances, insert size, strandness, quality).
 */
void gt_template_dna_filter(gt_template* const template_dst,gt_template* const template_src,const gt_file_format file_format) {
  /*
   * Filtering workflow
   *   (1) Pre-filtering steps
   *   (2) Filtering of maps (taking them into account individually)
   *   (3) Reduction of all maps (taking them into account as a whole)
   *   (4) Post-filtering steps
   */
  GT_TEMPLATE_IF_SE_ALINGMENT(template_src) {
    GT_TEMPLATE_REDUCTION(template_src,alignment_src);
    GT_TEMPLATE_REDUCTION(template_dst,alignment_dst);
    gt_alignment_dna_filter(alignment_dst,alignment_src,file_format);
  } else {
    if (!gt_template_is_mapped(template_src)) {
      // Unpaired PE: filter each end independently
      GT_TEMPLATE_REDUCE_BOTH_ENDS(template_src,alignment_src_end1,alignment_src_end2);
      GT_TEMPLATE_REDUCE_BOTH_ENDS(template_dst,alignment_dst_end1,alignment_dst_end2);
      gt_alignment_dna_filter(alignment_dst_end1,alignment_src_end1,file_format);
      gt_alignment_dna_filter(alignment_dst_end2,alignment_src_end2,file_format);
    } else {
      // NOTE(review): underflows to UINT64_MAX if the counters report 0 — presumably
      // only reached with at least one matching stratum; confirm against callers
      const uint64_t first_matching_distance = gt_counters_get_min_matching_strata(gt_template_get_counters_vector(template_src))-1;
      const uint64_t max_mismatch_quality = gt_template_get_max_mismatch_quality(template_src);
      // Reduction by unique level (can be calculated beforehand)
      bool pick_only_first_map = false; // NOTE(review): never set true in this function
      /*
       * (1) Pre-filtering steps
       */
      gt_map** first_mmap = NULL;
      gt_mmap_attributes first_mmap_attributes = {0, 0, 0};
      // Remember the primary multimap so it can be restored if everything is filtered out
      if (parameters.keep_first_map && gt_template_get_num_mmaps(template_src)>0) {
        gt_mmap* const mmap = gt_template_get_mmap(template_src,0);
        first_mmap = gt_mmap_array_copy(mmap->mmap,gt_template_get_num_blocks(template_src));
        first_mmap_attributes = mmap->attributes;
      }
      /*
       * (2) Filtering of maps
       */
      GT_TEMPLATE_ITERATE_MMAP__ATTR(template_src,mmap,mmap_attributes) {
        // Filter strata beyond first mapping (pair distance = sum of both ends)
        const int64_t current_stratum = parameters.no_penalty_for_splitmaps ?
            gt_map_get_no_split_distance(mmap[0]) + gt_map_get_no_split_distance(mmap[1]):
            gt_map_get_global_distance(mmap[0]) + gt_map_get_global_distance(mmap[1]);
        if (parameters.max_strata_after_map >= 0.0 &&
            (current_stratum-first_matching_distance) > gt_template_get_read_proportion(template_src,parameters.max_strata_after_map)) break;
        // Check sequence name
        if (parameters.map_ids!=NULL) {
          if (!gt_filter_is_sequence_name_allowed(mmap[0]->seq_name)) continue;
          if (!gt_filter_is_sequence_name_allowed(mmap[1]->seq_name)) continue;
        }
        // Check strata
        if (parameters.min_event_distance != GT_FILTER_FLOAT_NO_VALUE || parameters.max_event_distance != GT_FILTER_FLOAT_NO_VALUE) {
          const int64_t total_distance = parameters.no_penalty_for_splitmaps ?
              gt_map_get_no_split_distance(mmap[0]) + gt_map_get_no_split_distance(mmap[1]):
              gt_map_get_global_distance(mmap[0]) + gt_map_get_global_distance(mmap[1]);
          if (parameters.min_event_distance != GT_FILTER_FLOAT_NO_VALUE) {
            if (total_distance < gt_template_get_read_proportion(template_src,parameters.min_event_distance)) continue;
          }
          if (parameters.max_event_distance != GT_FILTER_FLOAT_NO_VALUE) {
            if (total_distance > gt_template_get_read_proportion(template_src,parameters.max_event_distance)) continue;
          }
        }
        // Check levenshtein distance
        if (parameters.min_levenshtein_distance != GT_FILTER_FLOAT_NO_VALUE || parameters.max_levenshtein_distance != GT_FILTER_FLOAT_NO_VALUE) {
          const int64_t total_distance = gt_map_get_global_levenshtein_distance(mmap[0])+gt_map_get_global_levenshtein_distance(mmap[1]);
          if (parameters.min_levenshtein_distance != GT_FILTER_FLOAT_NO_VALUE) {
            if (total_distance < gt_template_get_read_proportion(template_src,parameters.min_levenshtein_distance)) continue;
          }
          if (parameters.max_levenshtein_distance != GT_FILTER_FLOAT_NO_VALUE) {
            if (total_distance > gt_template_get_read_proportion(template_src,parameters.max_levenshtein_distance)) continue;
          }
        }
        // Check inss (insert size)
        if (parameters.min_inss > INT64_MIN || parameters.max_inss < INT64_MAX) {
          gt_status error_code;
          const int64_t inss = gt_template_get_insert_size(mmap,&error_code,0,0);
          if (parameters.min_inss > inss || inss > parameters.max_inss) continue;
        }
        // Check strandness (per-end, then per-pair orientation)
        if (parameters.filter_by_strand_se) {
          if (!parameters.allow_strand_f && (mmap[0]->strand==FORWARD || mmap[1]->strand==FORWARD)) continue;
          if (!parameters.allow_strand_r && (mmap[0]->strand==REVERSE || mmap[1]->strand==REVERSE)) continue;
        }
        if (parameters.filter_by_strand_pe) {
          if (mmap[0]->strand==FORWARD && mmap[1]->strand==REVERSE && !parameters.allow_strand_fr) continue;
          if (mmap[0]->strand==REVERSE && mmap[1]->strand==FORWARD && !parameters.allow_strand_rf) continue;
          if (mmap[0]->strand==FORWARD && mmap[1]->strand==FORWARD && !parameters.allow_strand_ff) continue;
          if (mmap[0]->strand==REVERSE && mmap[1]->strand==REVERSE && !parameters.allow_strand_rr) continue;
        }
        // Filter quality scores (SAM carries phred scores; MAP carries gt scores)
        if (parameters.quality_score_ranges!=NULL) {
          if (!gt_filter_is_quality_value_allowed((file_format==SAM) ? mmap_attributes->phred_score : mmap_attributes->gt_score)) continue;
        }
        /*
         * (3) Reduction of all maps
         */
        if (parameters.reduce_by_quality >= 0) {
          // Drop pairs whose mismatch-quality sum is too close to the best one
          const int64_t q = gt_alignment_sum_mismatch_qualities(gt_template_get_block(template_src,0), mmap[0]) +
                            gt_alignment_sum_mismatch_qualities(gt_template_get_block(template_src,1), mmap[1]);
          if (q!=0 && q!=max_mismatch_quality && abs(max_mismatch_quality-q)<=parameters.reduce_by_quality) continue;
        }
        /*
         * Insert the map
         */
        gt_map** mmap_copy = gt_mmap_array_copy(mmap,__mmap_num_blocks);
        gt_template_insert_mmap(template_dst,mmap_copy,mmap_attributes, parameters.check_duplicates);
        free(mmap_copy);
        // Skip the rest if first map is enabled
        if (parameters.first_map || pick_only_first_map) break;
      }
      /*
       * (4) Post-filtering steps
       */
      if (parameters.keep_first_map) {
        if (gt_template_get_num_mmaps(template_dst)==0) {
          // Nothing survived: restore the primary multimap
          gt_template_insert_mmap(template_dst,first_mmap,&first_mmap_attributes, parameters.check_duplicates);
        }
        // NOTE(review): the array is freed unconditionally here (insert_mmap
        // presumably copies it, matching the insert+free pattern above) — but
        // the maps pointed to are not deleted on the non-inserted path, unlike
        // the gt_map_delete in the alignment version; confirm no leak
        free(first_mmap);
      }
    }
  }
}
void gt_alignment_rna_filter(gt_alignment* const alignment_dst,gt_alignment* const alignment_src,const gt_file_format file_format) {
  /*
   * RNA-specific single-end filter: drops maps by sequence name, split-map
   * presence, minimum intron length and minimum block length; copies the
   * survivors into alignment_dst (file_format is unused here).
   */
  GT_ALIGNMENT_ITERATE(alignment_src,candidate) {
    // Sequence-name whitelist
    if (parameters.map_ids!=NULL && !gt_filter_is_sequence_name_allowed(candidate->seq_name)) continue;
    // Split-map presence constraints
    const uint64_t num_blocks = gt_map_get_num_blocks(candidate);
    const bool is_split = (num_blocks > 1);
    if (parameters.no_split_maps && is_split) continue;
    if (parameters.only_split_maps && !is_split) continue;
    // Minimum intron length (only meaningful for split maps)
    if (parameters.min_intron_length > 0 && is_split &&
        gt_map_get_min_intron_length(candidate) < parameters.min_intron_length) continue;
    // Minimum block length (only meaningful for split maps)
    if (parameters.min_block_length > 0 && is_split &&
        gt_map_get_min_block_length(candidate) < parameters.min_block_length) continue;
    // Passed every filter: keep a copy
    gt_alignment_insert_map(alignment_dst,gt_map_copy(candidate), parameters.check_duplicates);
    // Best-only mode: stop after the first surviving map
    if (parameters.first_map) return;
  }
}
/*
 * RNA filter for a whole template. SE templates and unpaired PE templates
 * delegate to gt_alignment_rna_filter; mapped PE templates are filtered at
 * the multimap level (split-map presence, minimum intron/block length over
 * both ends of each pair).
 *
 * Fixes vs previous revision (behavior unchanged):
 *  - has_sm was declared uint64_t but only ever holds a boolean; now bool.
 *  - removed the tautological 'mil >= 0' / 'mbl >= 0' guards: mil and mbl
 *    are uint64_t, so the comparisons were always true (-Wtype-limits).
 */
void gt_template_rna_filter(gt_template* const template_dst,gt_template* const template_src,const gt_file_format file_format) {
  GT_TEMPLATE_IF_SE_ALINGMENT(template_src) {
    GT_TEMPLATE_REDUCTION(template_src,alignment_src);
    GT_TEMPLATE_REDUCTION(template_dst,alignment_dst);
    /*
     * SE
     */
    gt_alignment_rna_filter(alignment_dst,alignment_src,file_format);
  } else {
    /*
     * PE
     */
    if (!gt_template_is_mapped(template_src)) {
      // Unpaired PE: filter each end independently
      GT_TEMPLATE_REDUCE_BOTH_ENDS(template_src,alignment_src_end1,alignment_src_end2);
      GT_TEMPLATE_REDUCE_BOTH_ENDS(template_dst,alignment_dst_end1,alignment_dst_end2);
      gt_alignment_rna_filter(alignment_dst_end1,alignment_src_end1,file_format);
      gt_alignment_rna_filter(alignment_dst_end2,alignment_src_end2,file_format);
    } else {
      const uint64_t num_blocks = gt_template_get_num_blocks(template_src);
      GT_TEMPLATE_ITERATE_MMAP__ATTR(template_src,mmap,mmap_attributes) {
        // Check SM contained and get minimum intron/block length over both ends
        bool has_sm = false;
        uint64_t min_intron_length = UINT64_MAX, min_block_length = UINT64_MAX;
        if (parameters.no_split_maps || parameters.only_split_maps || parameters.min_intron_length >= 0) {
          GT_MMAP_ITERATE(mmap,map,end_p) {
            if (gt_map_get_num_blocks(map) > 1) {
              const uint64_t mil = gt_map_get_min_intron_length(map);
              const uint64_t mbl = gt_map_get_min_block_length(map);
              has_sm = true;
              if (mil < min_intron_length) min_intron_length = mil;
              if (mbl < min_block_length) min_block_length = mbl;
            }
          }
        }
        if (parameters.no_split_maps && has_sm) continue;
        if (parameters.only_split_maps && !has_sm) continue;
        // Filter intron length (only when some end actually is a split map)
        if (parameters.min_intron_length > 0 && min_intron_length != UINT64_MAX){
          if(min_intron_length < parameters.min_intron_length){
            continue;
          }
        }
        // Filter block length
        if (parameters.min_block_length > 0 && min_block_length != UINT64_MAX){
          if(min_block_length < parameters.min_block_length) continue;
        }
        // Add the mmap (insert copies the array; free our temporary)
        gt_map** mmap_copy = gt_mmap_array_copy(mmap,num_blocks);
        gt_template_insert_mmap(template_dst,mmap_copy,mmap_attributes, parameters.check_duplicates);
        free(mmap_copy);
        // Skip the rest if best-only mode is enabled
        if (parameters.first_map) return;
      }
    }
  }
}
/*
 * Runs the full filtering pipeline over one template, in place:
 * read/quality processing, template-level filters, trimming/realignment,
 * split-pair coherence, DNA/RNA/annotation/reduction map filters, pruning
 * and counter recalculation. Returns false when the template is to be
 * discarded (mapped/unmapped, uniqueness, length or map-count filters),
 * true otherwise. The order of the stages is significant.
 */
GT_INLINE bool gt_filter_apply_filters(
    const gt_file_format file_format,const uint64_t line_no,
    gt_sequence_archive* const sequence_archive,gt_template* const template) {
  /*
   * Recalculate counters without penalty for splitmaps
   */
  if (parameters.no_penalty_for_splitmaps) {
    gt_template_recalculate_counters_no_splits(template);
    gt_template_sort_by_distance__score_no_split(template);
  }
  /*
   * Process Read/Qualities // TODO: move out of filter (this is processing)
   */
  const uint64_t has_qualities = gt_template_has_qualities(template);
  if (parameters.remove_qualities && has_qualities) {
    GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) {
      gt_string_clear(alignment->qualities);
    }
  } else if (parameters.add_qualities && !has_qualities) {
    // Synthesize maximum-quality strings ('~' == highest printable phred)
    GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) {
      const uint64_t read_length = gt_alignment_get_read_length(alignment);
      gt_string_resize(alignment->qualities,read_length+1);
      gt_string_set_length(alignment->qualities,read_length);
      GT_STRING_ITERATE(alignment->qualities,buffer,i) {
        buffer[i]='~';
      }
    }
  }
  if (parameters.uniform_read) {
    if (parameters.uniform_read_strict) {
      GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) {
        gt_dna_read_uniform_strict_content(alignment->read,alignment->qualities);
      }
    } else {
      GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) {
        gt_dna_read_uniform_content(alignment->read,alignment->qualities);
      }
    }
  }
  // Quality-offset conversion (33 <-> 64)
  if (has_qualities) {
    if (parameters.qualities_to_offset_33) {
      GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) {
        gt_qualities_adapt_from_offset64_to_offset33(alignment->qualities);
      }
    }
    if (parameters.qualities_to_offset_64) {
      GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) {
        gt_qualities_adapt_from_offset33_to_offset64(alignment->qualities);
      }
    }
  }
  /*
   * Template/Alignment Filter
   */
  // Consider mapped/unmapped
  const bool is_mapped = gt_template_is_mapped(template);
  if (parameters.mapped && !is_mapped) return false;
  if (parameters.unmapped && is_mapped) return false;
  // Unique based filtering
  if (parameters.unique_level>=0.0 && is_mapped) {
    if (parameters.unique_level > gt_template_get_uniq_degree(template)) return false;
  }
  // Filter by read length (limits given as proportions of the read)
  if (parameters.min_length>=0.0 || parameters.max_length>=0.0) {
    GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) {
      const uint64_t read_length = gt_alignment_get_read_length(alignment);
      if (parameters.min_length>=0.0) {
        const uint64_t min_length = gt_alignment_get_read_proportion(alignment,parameters.min_length);
        if (read_length < min_length) return false;
      }
      if (parameters.max_length>=0.0) {
        const uint64_t max_length = gt_alignment_get_read_proportion(alignment,parameters.max_length);
        if (read_length > max_length) return false;
      }
    }
  }
  // Filter by number of maps
  if (parameters.min_maps>=0 || parameters.max_maps>=0) {
    const uint64_t num_maps = gt_template_get_num_mmaps(template);
    if (parameters.min_maps>=0 && num_maps<parameters.min_maps) return false;
    if (parameters.max_maps>=0 && num_maps>parameters.max_maps) return false;
  }
  /*
   * MAP Filter
   */
  // Trim
  if (parameters.hard_trim) {
    gt_template_hard_trim(template,parameters.left_trim,parameters.right_trim);
    gt_template_recalculate_counters(template);
  } else if (parameters.restore_trim) {
    gt_template_restore_trim(template);
    gt_template_recalculate_counters(template);
  }
  // (Re)Align
  if (parameters.realign_levenshtein) {
    gt_template_realign_levenshtein(template,sequence_archive);
  } else if (parameters.realign_hamming) {
    gt_template_realign_hamming(template,sequence_archive);
  } else if (parameters.mismatch_recovery) {
    gt_filter_mismatch_recovery_maps(parameters.name_input_file,line_no,template,sequence_archive);
  }
  // check the split-map pairs for all paired alignments and
  // remove mapping pairs where the split are not coherent
  if(gt_template_get_num_blocks(template) == 2 && gt_template_is_mapped(template)){
    gt_template *template_filtered = gt_template_dup(template,false,false);
    const uint64_t num_blocks = gt_template_get_num_blocks(template);
    GT_TEMPLATE_ITERATE_MMAP__ATTR_(template,mmap,mmap_attributes) {
      if (!gt_filter_are_overlapping_pairs_coherent(mmap))continue;
      gt_map** mmap_copy = gt_mmap_array_copy(mmap,num_blocks);
      gt_template_insert_mmap(template_filtered,mmap_copy,mmap_attributes, parameters.check_duplicates);
      free(mmap_copy);
    }
    // Swap the filtered copy into 'template' and dispose of the old content
    gt_template_swap(template,template_filtered);
    gt_template_delete(template_filtered);
    if (parameters.no_penalty_for_splitmaps) {
      gt_template_recalculate_counters_no_splits(template);
      gt_template_sort_by_distance__score_no_split(template);
    }else{
      gt_template_recalculate_counters(template);
    }
  }
  // Map DNA-filtering
  uint64_t num_maps = gt_filter_get_num_maps(template);
  if (parameters.perform_dna_map_filter && (!parameters.keep_unique || num_maps > 1)) {
    gt_template *template_filtered = gt_template_dup(template,false,false);
    gt_template_dna_filter(template_filtered,template,file_format);
    // if keep_unique is on, we only flip if we have at least one
    // alignment left
    if(!parameters.keep_unique || gt_filter_get_num_maps(template_filtered) > 0){
      gt_template_swap(template,template_filtered);
    }
    gt_template_delete(template_filtered);
    if (parameters.no_penalty_for_splitmaps) {
      gt_template_recalculate_counters_no_splits(template);
      gt_template_sort_by_distance__score_no_split(template);
    }else{
      gt_template_recalculate_counters(template);
    }
  }
  // Map RNA-filtering
  num_maps = gt_filter_get_num_maps(template);
  if (parameters.perform_rna_map_filter && (!parameters.keep_unique || num_maps > 1)) {
    gt_template *template_filtered = gt_template_dup(template,false,false);
    gt_template_rna_filter(template_filtered,template,file_format);
    // if keep_unique is on, we only flip if we have at least one
    // alignment left
    if(!parameters.keep_unique || gt_filter_get_num_maps(template_filtered) > 0){
      gt_template_swap(template,template_filtered);
    }
    // delete filtered and recalculate counters
    gt_template_delete(template_filtered);
    if (parameters.no_penalty_for_splitmaps) {
      gt_template_recalculate_counters_no_splits(template);
      gt_template_sort_by_distance__score_no_split(template);
    }else{
      gt_template_recalculate_counters(template);
    }
  }
  // Map Annotation-filtering (requires a loaded GTF)
  num_maps = gt_filter_get_num_maps(template);
  if (parameters.gtf != NULL && parameters.perform_annotation_filter && num_maps > 1) {
    gt_template *template_filtered = gt_template_dup(template,false,false);
    bool filtered = gt_filter_make_reduce_by_annotation(template_filtered,template);
    if(filtered && (!parameters.keep_unique || gt_filter_get_num_maps(template_filtered) > 0)){
      gt_template_swap(template,template_filtered);
    }
    gt_template_delete(template_filtered);
    if (parameters.no_penalty_for_splitmaps) {
      gt_template_recalculate_counters_no_splits(template);
      gt_template_sort_by_distance__score_no_split(template);
    }else{
      gt_template_recalculate_counters(template);
    }
  }
  // reduce by level filter
  num_maps = gt_filter_get_num_maps(template);
  if ((parameters.reduce_to_unique_strata >= 0 || parameters.reduce_to_unique != UINT64_MAX|| parameters.reduce_to_pairs) && (num_maps > 1)) {
    gt_template *template_filtered = gt_template_dup(template,false,false);
    gt_template_reduction_filter(template_filtered,template,file_format);
    gt_template_swap(template,template_filtered);
    gt_template_delete(template_filtered);
    if (parameters.no_penalty_for_splitmaps) {
      gt_template_recalculate_counters_no_splits(template);
      gt_template_sort_by_distance__score_no_split(template);
    }else{
      gt_template_recalculate_counters(template);
    }
  }
  // Map pruning
  if (parameters.matches_pruning) gt_filter_prune_matches(template);
  // Make counters
  if (parameters.make_counters || parameters.no_penalty_for_splitmaps) {
    gt_template_recalculate_counters(template);
  }
  // Ok, go on
  return true;
}
GT_INLINE void gt_filter__print(
    const gt_file_format file_format,const uint64_t line_no,
    gt_sequence_archive* const sequence_archive,gt_template* const template,
    uint64_t* const total_algs_checked,uint64_t* const total_algs_correct,
    uint64_t* const total_maps_checked,uint64_t* const total_maps_correct,
    gt_buffered_output_file* const buffered_output,gt_generic_printer_attributes* const generic_printer_attributes,
    gt_buffered_output_file* const buffered_discarded_output,gt_generic_printer_attributes* const discarded_output_attributes) {
  /*
   * Runs the filter pipeline over the template and prints it to the main
   * output (kept) or to the discarded output (filtered out / failed check).
   */
  // Apply the filters; a false return marks the template as discarded
  bool discarded = !gt_filter_apply_filters(file_format,line_no,sequence_archive,template);
  // Uniformed reads may end up empty; such templates are silently dropped
  if (parameters.uniform_read) {
    GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) {
      if (gt_alignment_get_read_length(alignment)==0) return;
    }
  }
  // Optionally validate all surviving maps against the sequence archive
  if (!discarded && parameters.check) {
    const bool maps_ok = gt_filter_check_maps(parameters.name_input_file,line_no,
        template,sequence_archive,total_algs_checked,total_algs_correct,total_maps_checked,total_maps_correct);
    if (!maps_ok) discarded = true;
  }
  // Emit the template to the appropriate stream
  if (!parameters.no_output && !discarded) {
    if (gt_output_generic_bofprint_template(buffered_output,template,generic_printer_attributes)) {
      gt_error_msg("Fatal error outputting read '"PRIgts"'(InputLine:%"PRIu64")\n",
          PRIgts_content(gt_template_get_string_tag(template)),line_no);
    }
  } else if (discarded && buffered_discarded_output!=NULL) {
    if (gt_output_generic_bofprint_template(buffered_discarded_output,template,discarded_output_attributes)) {
      gt_error_msg("Fatal error outputting read '"PRIgts"'(InputLine:%"PRIu64")\n",
          PRIgts_content(gt_template_get_string_tag(template)),line_no);
    }
  }
}
/*
* Special funcionality
*/
/*
 * Prints one (possibly trimmed/segmented) read chunk in FASTQ format.
 * The trimmed-off left/right portions are preserved in 'lt:Z:'/'rt:Z:' tag
 * annotations on the header line so they can be restored later; the read
 * and quality bodies are printed with the trims applied.
 */
GT_INLINE void gt_filter_sample_read_print_fastq(
    gt_buffered_output_file* const buffered_output,gt_string* const tag,gt_string* const read,gt_string* const qualities,
    const bool print_segmented_read_info,const uint64_t segment_id,const uint64_t total_segments,
    const uint64_t left_trim,const uint64_t right_trim,const uint64_t chunk_size) {
  gt_bofprintf(buffered_output,"@"PRIgts,PRIgts_content(tag));
  if (print_segmented_read_info) gt_output_bofprint_segmented_read_info(buffered_output,segment_id,total_segments); // Segmented Read
  if (left_trim > 0) {
    gt_bofprintf(buffered_output," lt:Z:%"PRIu64":"PRIgts":"PRIgts,left_trim,
        PRIgts_range_content(read,0,left_trim),
        PRIgts_range_content(qualities,0,left_trim)); // Left-trim
  }
  if (right_trim > 0) {
    // The right trim starts after the left trim plus the kept chunk
    gt_bofprintf(buffered_output," rt:Z:%"PRIu64":"PRIgts":"PRIgts,right_trim,
        PRIgts_range_content(read,left_trim+chunk_size,right_trim),
        PRIgts_range_content(qualities,left_trim+chunk_size,right_trim)); // Right-trim
  }
  // Print READ + QUALITIES (trimmed)
  gt_bofprintf(buffered_output,"\n"PRIgts"\n+\n"PRIgts"\n",
      PRIgts_trimmed_content(read,left_trim,right_trim),
      PRIgts_trimmed_content(qualities,left_trim,right_trim));
}
/*
 * Prints one (possibly trimmed) read chunk as a FASTA record.
 * Mirrors gt_filter_sample_read_print_fastq() but without qualities: the
 * trimmed-away prefix/suffix are attached to the tag line as
 * " lt:Z:<len>:<seq>" / " rt:Z:<len>:<seq>" fields.
 */
GT_INLINE void gt_filter_sample_read_print_fasta(
    gt_buffered_output_file* const buffered_output,gt_string* const tag,gt_string* const read,
    const bool print_segmented_read_info,const uint64_t segment_id,const uint64_t total_segments,
    const uint64_t left_trim,const uint64_t right_trim,const uint64_t chunk_size) {
  gt_bofprintf(buffered_output,">"PRIgts,PRIgts_content(tag));
  if (print_segmented_read_info) gt_output_bofprint_segmented_read_info(buffered_output,segment_id,total_segments); // Segmented Read
  if (left_trim > 0) {
    // Attach the trimmed-off prefix as an 'lt' field
    gt_bofprintf(buffered_output," lt:Z:%"PRIu64":"PRIgts,left_trim,
        PRIgts_range_content(read,0,left_trim)); // Left-trim
  }
  if (right_trim > 0) {
    // Attach the trimmed-off suffix as an 'rt' field
    gt_bofprintf(buffered_output," rt:Z:%"PRIu64":"PRIgts,right_trim,
        PRIgts_range_content(read,left_trim+chunk_size,right_trim)); // Right-trim
  }
  // Print READ (trimmed)
  gt_bofprintf(buffered_output,"\n"PRIgts"\n",
      PRIgts_trimmed_content(read,left_trim,right_trim));
}
/*
 * Re-assembles reads previously split into chunks (segments): consecutive
 * input records carrying SegmentedRead info with the same tag are merged into
 * 'group_template' and printed once the final segment
 * (segment_id == total_segments) arrives. Records without SegmentedRead info
 * are passed through (after restoring any trims). No OpenMP region is opened
 * here — grouping depends on the sequential segment_id checks below.
 */
GT_INLINE void gt_filter_group_reads() {
  // Open file IN/OUT
  gt_input_file* input_file = (parameters.name_input_file==NULL) ?
      gt_input_stream_open(stdin) : gt_input_file_open(parameters.name_input_file,parameters.mmap_input);
  gt_output_file* output_file = (parameters.name_output_file==NULL) ?
      gt_output_stream_new(stdout,SORTED_FILE) : gt_output_file_new(parameters.name_output_file,SORTED_FILE);
  // Prepare out-printers
  if (parameters.output_format==FILE_FORMAT_UNKNOWN) parameters.output_format = input_file->file_format; // Select output format
  gt_generic_printer_attributes* const generic_printer_attributes = gt_generic_printer_attributes_new(parameters.output_format);
  // SegmentedRead aux variables (last_segment_id==total_segments <=> no group is open)
  gt_template* const group_template = gt_template_new();
  uint64_t total_segments = 0, last_segment_id = 0;
  GT_BEGIN_READING_WRITING_LOOP(input_file,output_file,parameters.paired_end,buffered_output,template) {
    // Get group attribute
    gt_segmented_read_info* const segmented_read_info = gt_attributes_get_segmented_read_info(template->attributes);
    if (segmented_read_info==NULL) {
      // Plain (non-segmented) record: only legal while no group is open
      gt_filter_cond_fatal_error_msg(total_segments!=last_segment_id,
          "Expected SegmentedRead Info => lastRead(%"PRIu64"/%"PRIu64")",last_segment_id,total_segments);
      gt_template_restore_trim(template); // If any
      // NOTE(review): this iterates group_template while 'template' is the one
      // printed below — looks copy-pasted from the group-closing branch;
      // confirm the intended iteration target
      GT_TEMPLATE_ITERATE_ALIGNMENT(group_template,alignment) {
        gt_attributes_remove(alignment->attributes,GT_ATTR_ID_SEGMENTED_READ_INFO); // If any
      }
      gt_output_generic_bofprint_template(buffered_output,template,generic_printer_attributes); // Print it, as it is
    } else {
      // First, undo the trim
      gt_template_restore_trim(template);
      // Tackle the group merging
      if (last_segment_id==total_segments) {
        /*
         * New group (must start at segment_id 1 with a non-empty group)
         */
        gt_filter_cond_fatal_error_msg(segmented_read_info->total_segments==0 || segmented_read_info->segment_id!=1,
            "Wrong SegmentedRead Info (Zero reads in group or not properly sorted)");
        gt_template_clear(group_template,true);
        gt_template_copy(group_template,template,true,true);
        total_segments = segmented_read_info->total_segments;
        last_segment_id = segmented_read_info->segment_id;
      } else if (segmented_read_info->segment_id==last_segment_id+1 && segmented_read_info->segment_id <= total_segments) {
        /*
         * Old group (Keep merging); segments must arrive consecutively
         */
        gt_filter_cond_fatal_error_msg(!gt_string_equals(template->tag,group_template->tag),
            "Wrong TAG in Segmented Reads Sequence ('"PRIgts"'/'"PRIgts"')",PRIgts_content(group_template->tag),PRIgts_content(template->tag));
        gt_template_merge_template_mmaps(group_template,template);
        last_segment_id = segmented_read_info->segment_id;
        if (last_segment_id==total_segments) { // Close group
          GT_TEMPLATE_ITERATE_ALIGNMENT(group_template,alignment) {
            gt_attributes_remove(alignment->attributes,GT_ATTR_ID_SEGMENTED_READ_INFO); // If any
          }
          gt_output_generic_bofprint_template(buffered_output,group_template,generic_printer_attributes);
        }
      } else {
        gt_filter_fatal_error_msg("Wrong SegmentedRead Info => Expected(%"PRIu64"/%"PRIu64")::Found(%"PRIu64"/%"PRIu64").",
            segmented_read_info->segment_id,segmented_read_info->total_segments,last_segment_id,total_segments);
      }
    }
  } GT_END_READING_WRITING_LOOP(input_file,output_file,template);
  // Check proper end of merging groups (a group left open is a fatal error)
  gt_filter_cond_fatal_error_msg(total_segments!=last_segment_id,
      "Expected SegmentedRead Info => lastRead(%"PRIu64"/%"PRIu64")",last_segment_id,total_segments);
  // Clean
  gt_template_delete(group_template);
  gt_generic_printer_attributes_delete(generic_printer_attributes);
  gt_input_file_close(input_file);
  gt_output_file_close(output_file);
}
/*
 * Splits every read into sliding-window chunks ("samples") and prints each
 * chunk as an independent FASTQ/FASTA record annotated with SegmentedRead
 * info (segment_id/total_segments), so the chunks can be regrouped later.
 * The split_* parameters may be absolute values or proportions of the read
 * length — gt_get_integer_proportion() resolves them per read.
 */
GT_INLINE void gt_filter_sample_read() {
  // Open file IN/OUT
  gt_input_file* input_file = (parameters.name_input_file==NULL) ?
      gt_input_stream_open(stdin) : gt_input_file_open(parameters.name_input_file,parameters.mmap_input);
  gt_output_file* output_file = (parameters.name_output_file==NULL) ?
      gt_output_stream_new(stdout,SORTED_FILE) : gt_output_file_new(parameters.name_output_file,SORTED_FILE);
  // Parallel I/O
  #ifdef HAVE_OPENMP
  #pragma omp parallel num_threads(parameters.num_threads)
  #endif
  {
    GT_BEGIN_READING_WRITING_LOOP(input_file,output_file,parameters.paired_end,buffered_output,template) {
      GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) {
        // Calculate the chunks
        const uint64_t read_length = gt_alignment_get_read_length(alignment);
        const uint64_t split_chunk_size = gt_get_integer_proportion(parameters.split_chunk_size,read_length);
        const uint64_t split_min_remainder = gt_get_integer_proportion(parameters.split_min_remainder,read_length);
        // Check boundaries: chunking impossible/meaningless => print the read whole, untrimmed
        if (split_chunk_size >= read_length || split_chunk_size <= split_min_remainder) {
          if (gt_alignment_has_qualities(alignment)) {
            gt_filter_sample_read_print_fastq(buffered_output,alignment->tag,alignment->read,alignment->qualities,false,1,1,0,0,read_length); // FASTQ
          } else {
            gt_filter_sample_read_print_fasta(buffered_output,alignment->tag,alignment->read,false,1,1,0,0,read_length); // FASTA
          }
          continue;
        }
        uint64_t split_step_size = gt_get_integer_proportion(parameters.split_step_size,read_length);
        if (split_step_size==0) split_step_size=1; // Guard against a zero step (infinite loop / div-by-zero)
        const uint64_t split_left_trim = gt_get_integer_proportion(parameters.split_left_trim,read_length);
        const uint64_t split_right_trim = gt_get_integer_proportion(parameters.split_right_trim,read_length);
        // Number of full chunk_size windows obtainable by stepping split_step_size
        // over the region between the global left/right trims
        const uint64_t full_chunks = ((read_length-split_left_trim-split_right_trim-split_chunk_size)/split_step_size)+1;
        uint64_t total_chunks = full_chunks;
        // left_trim/right_trim delimit the current window (right_trim counts from the read's end)
        uint64_t left_trim=split_left_trim, right_trim=read_length-split_left_trim-split_chunk_size;
        // Check last chunk (remainder): a shorter final chunk is emitted only if
        // it is at least split_min_remainder long
        const uint64_t last_left_trim = left_trim+(split_step_size*full_chunks);
        const uint64_t remainder_chunk = read_length-split_right_trim-last_left_trim;
        bool print_remainder_chunk = false;
        if (remainder_chunk > 0 && split_min_remainder > 0 &&
            remainder_chunk < split_chunk_size && remainder_chunk >= split_min_remainder) {
          print_remainder_chunk = true; ++total_chunks;
        }
        uint64_t i;
        for (i=0;i<full_chunks;++i,left_trim+=split_step_size,right_trim-=split_step_size) {
          if (gt_alignment_has_qualities(alignment)) {
            gt_filter_sample_read_print_fastq(
                buffered_output,alignment->tag,alignment->read,alignment->qualities,true,
                i+1,total_chunks,left_trim,right_trim,split_chunk_size); // FASTQ
          } else {
            gt_filter_sample_read_print_fasta(
                buffered_output,alignment->tag,alignment->read,true,
                i+1,total_chunks,left_trim,right_trim,split_chunk_size); // FASTA
          }
        }
        // Print last chunk (remainder)
        if (print_remainder_chunk) {
          if (gt_alignment_has_qualities(alignment)) {
            gt_filter_sample_read_print_fastq(
                buffered_output,alignment->tag,alignment->read,alignment->qualities,true,
                total_chunks,total_chunks,last_left_trim,split_right_trim,remainder_chunk); // FASTQ
          } else {
            gt_filter_sample_read_print_fasta(
                buffered_output,alignment->tag,alignment->read,true,
                total_chunks,total_chunks,last_left_trim,split_right_trim,remainder_chunk); // FASTA
          }
        }
      }
    } GT_END_READING_WRITING_LOOP(input_file,output_file,template);
  }
  // Clean
  gt_input_file_close(input_file);
  gt_output_file_close(output_file);
}
/*
 * Prints one insert size per line for every paired (2-block) template; with
 * --first-map only the first map-pair of each template contributes.
 * NOTE(review): the status returned via 'error_code' is ignored and the value
 * is printed as unsigned — confirm insert sizes cannot be negative/invalid.
 */
GT_INLINE void gt_filter_print_insert_size_distribution() {
  // Open file IN/OUT
  gt_input_file* input_file = (parameters.name_input_file==NULL) ?
      gt_input_stream_open(stdin) : gt_input_file_open(parameters.name_input_file,parameters.mmap_input);
  gt_output_file* output_file = (parameters.name_output_file==NULL) ?
      gt_output_stream_new(stdout,SORTED_FILE) : gt_output_file_new(parameters.name_output_file,SORTED_FILE);
  // Parallel I/O
  #ifdef HAVE_OPENMP
  #pragma omp parallel num_threads(parameters.num_threads)
  #endif
  {
    GT_BEGIN_READING_WRITING_LOOP(input_file,output_file,parameters.paired_end,buffered_output,template) {
      // Print insert size (single-end templates have none and are skipped)
      if (gt_template_get_num_blocks(template)!=2) continue;
      GT_TEMPLATE_ITERATE_(template,mmap) {
        gt_status error_code;
        gt_bofprintf(buffered_output,"%"PRIu64"\n",gt_template_get_insert_size(mmap,&error_code,0,0));
        if (parameters.first_map) break;
      }
    } GT_END_READING_WRITING_LOOP(input_file,output_file,template);
  }
  // Clean
  gt_input_file_close(input_file);
  gt_output_file_close(output_file);
}
/*
 * Prints one global Levenshtein distance per line: with --first-map the
 * minimum distance over each template's maps (one line per template, omitted
 * for unmapped templates), otherwise one line per map.
 */
GT_INLINE void gt_filter_print_error_distribution() {
  // Open file IN/OUT
  gt_input_file* input_file = (parameters.name_input_file==NULL) ?
      gt_input_stream_open(stdin) : gt_input_file_open(parameters.name_input_file,parameters.mmap_input);
  gt_output_file* output_file = (parameters.name_output_file==NULL) ?
      gt_output_stream_new(stdout,SORTED_FILE) : gt_output_file_new(parameters.name_output_file,SORTED_FILE);
  // Parallel I/O
  #ifdef HAVE_OPENMP
  #pragma omp parallel num_threads(parameters.num_threads)
  #endif
  {
    GT_BEGIN_READING_WRITING_LOOP(input_file,output_file,parameters.paired_end,buffered_output,template) {
      // Print levenshtein distance of the maps
      if (parameters.first_map) {
        uint64_t best_distance = UINT64_MAX; // Sentinel: no map seen yet
        GT_TEMPLATE_ITERATE_(template,mmap) {
          const uint64_t dist = gt_map_get_global_levenshtein_distance(*mmap);
          if (dist < best_distance) best_distance = dist;
        }
        if (best_distance < UINT64_MAX) gt_bofprintf(buffered_output,"%"PRIu64"\n",best_distance);
      } else {
        GT_TEMPLATE_ITERATE_(template,mmap) {
          gt_bofprintf(buffered_output,"%"PRIu64"\n",gt_map_get_global_levenshtein_distance(*mmap));
        }
      }
    } GT_END_READING_WRITING_LOOP(input_file,output_file,template);
  }
  // Clean
  gt_input_file_close(input_file);
  gt_output_file_close(output_file);
}
/*
* Handler for opening an archive (GEMIndex/MULTIFastaFile)
*/
/*
 * Opens the reference archive: from a GEM index when one was given on the
 * command line, otherwise by parsing the MULTIFasta reference file.
 * Aborts with a fatal error if the MULTIFasta reference cannot be parsed.
 * Caller owns the returned archive.
 */
gt_sequence_archive* gt_filter_open_sequence_archive(const bool load_sequences) {
  gt_log("Loading reference file ...");
  gt_sequence_archive* archive = NULL;
  if (parameters.name_gem_index_file==NULL) {
    // Parse a plain MULTIFasta reference into a CDNA archive
    gt_input_file* const multifasta_input = gt_input_file_open(parameters.name_reference_file,false);
    archive = gt_sequence_archive_new(GT_CDNA_ARCHIVE);
    if (gt_input_multifasta_parser_get_archive(multifasta_input,archive)!=GT_IFP_OK) {
      gt_fatal_error_msg("Error parsing reference file '%s'\n",parameters.name_reference_file);
    }
    gt_input_file_close(multifasta_input);
  } else { // Load GEM-IDX
    archive = gt_sequence_archive_new(GT_BED_ARCHIVE);
    gt_gemIdx_load_archive(parameters.name_gem_index_file,archive,load_sequences);
  }
  gt_log("Done.");
  return archive;
}
/*
 * Prints a "<sequence-name>\t<length>" summary line to stdout for every
 * sequence in the reference archive (loaded without sequence payloads).
 */
GT_INLINE void gt_filter_display_sequence_list(){
  // Show sequence archive summary
  gt_sequence_archive* sequence_archive = gt_filter_open_sequence_archive(false);
  gt_sequence_archive_iterator sequence_archive_it;
  gt_sequence_archive_new_iterator(sequence_archive,&sequence_archive_it);
  gt_segmented_sequence* seq;
  while ((seq=gt_sequence_archive_iterator_next(&sequence_archive_it))) {
    fprintf(stdout,"%s\t%"PRIu64"\n",seq->seq_name->buffer,seq->sequence_total_length);
  }
  // Fix: release the archive (it was previously leaked; cf. the cleanup in
  // gt_filter_read__write, which deletes its archive)
  gt_sequence_archive_delete(sequence_archive);
}
/*
* I/O Filtering Loop
*/
/*
 * Bumps the record counter and, on a parsing error, reports the failing
 * record/line and skips to the next record. Expects the locals 'record_num',
 * 'error_code' and 'buffered_input' in scope; the trailing 'continue'
 * requires expansion inside a loop body. FORMAT is pasted into the message
 * (e.g. "FASTA " -> "...parsing FASTA file...").
 */
#define GT_FILTER_CHECK_PARSING_ERROR(FORMAT) \
  ++record_num; \
  if (error_code!=GT_IMP_OK) { \
    gt_error_msg("[#%"PRIu64"]Fatal error parsing "FORMAT"file '%s', line %"PRIu64"\n", \
        record_num,parameters.name_input_file,buffered_input->current_line_num-1); \
    continue; \
  }
/*
 * Main filtering loop: reads templates from the input (FASTA/MAP/SAM when the
 * input format is forced via --check-format, otherwise the generic parser),
 * applies all filters/checks via gt_filter__print(), and writes kept records
 * to the output file (discarded ones to the optional discarded-output file).
 * Runs one buffered reader/writer pair per thread under OpenMP; checking
 * counters are combined with an OpenMP reduction.
 */
void gt_filter_read__write() {
  // Open file IN/OUT
  gt_input_file* input_file = (parameters.name_input_file==NULL) ?
      gt_input_stream_open(stdin) : gt_input_file_open(parameters.name_input_file,parameters.mmap_input);
  // Fix: initialize to NULL (previously indeterminate when no_output was set)
  // and correct the 'dicarded' spelling of the local name
  gt_output_file *output_file = NULL, *discarded_output_file = NULL;
  // Open out file
  if (!parameters.no_output) {
    output_file = (parameters.name_output_file==NULL) ?
        gt_output_stream_new(stdout,SORTED_FILE) : gt_output_file_new(parameters.name_output_file,SORTED_FILE);
    if (parameters.discarded_output) {
      // The discarded stream may be routed to stdout/stderr or a named file
      if (gt_streq(parameters.name_discarded_output_file,"stdout")) {
        discarded_output_file = gt_output_stream_new(stdout,SORTED_FILE);
      } else if (gt_streq(parameters.name_discarded_output_file,"stderr")) {
        discarded_output_file = gt_output_stream_new(stderr,SORTED_FILE);
      } else {
        discarded_output_file = gt_output_file_new(parameters.name_discarded_output_file,SORTED_FILE);
      }
    }
  }
  // Open reference file (only needed when some filter/check uses the index)
  gt_sequence_archive* sequence_archive = NULL;
  if (parameters.load_index) {
    sequence_archive = gt_filter_open_sequence_archive(true);
  }
  // Read annotation if specified
  if (parameters.annotation != NULL && parameters.perform_annotation_filter) {
    parameters.gtf = gt_gtf_read_from_file(parameters.annotation, parameters.num_threads);
  }
  // Parallel reading+process
  uint64_t total_algs_checked=0, total_algs_correct=0, total_maps_checked=0, total_maps_correct=0;
  #ifdef HAVE_OPENMP
  #pragma omp parallel num_threads(parameters.num_threads) reduction(+:total_algs_checked,total_algs_correct,total_maps_checked,total_maps_correct)
  #endif
  {
    // Prepare IN/OUT buffers & printers
    gt_status error_code;
    gt_buffered_input_file* buffered_input = gt_buffered_input_file_new(input_file);
    gt_buffered_output_file *buffered_output = NULL, *buffered_discarded_output = NULL;
    if (!parameters.no_output) {
      buffered_output = gt_buffered_output_file_new(output_file);
      gt_buffered_input_file_attach_buffered_output(buffered_input,buffered_output);
      if (parameters.discarded_output) {
        buffered_discarded_output = gt_buffered_output_file_new(discarded_output_file);
        gt_buffered_input_file_attach_buffered_output(buffered_input,buffered_discarded_output);
      }
    }
    // Prepare IN/OUT parser/printer attributes
    gt_generic_printer_attributes *generic_printer_attributes=NULL, *discarded_output_attributes=NULL;
    // NOTE(review): shared 'parameters.output_format' is written inside the
    // parallel region (all threads store the same value) — confirm benign
    if (parameters.output_format==FILE_FORMAT_UNKNOWN) parameters.output_format = input_file->file_format; // Select output format
    generic_printer_attributes = gt_generic_printer_attributes_new(parameters.output_format);
    if (parameters.discarded_output) {
      gt_file_format output_format = input_file->file_format;
      if (parameters.discarded_output_format!=FILE_FORMAT_UNKNOWN) output_format=parameters.discarded_output_format;
      discarded_output_attributes = gt_generic_printer_attributes_new(output_format);
    }
    /*
     * READ + PROCESS Loop
     */
    uint64_t record_num = 0;
    gt_template* template = gt_template_new();
    if (parameters.check_format && parameters.check_file_format==FASTA) {
      /*
       * FASTA I/O loop
       */
      while ((error_code=gt_input_fasta_parser_get_template(buffered_input,template,parameters.paired_end))) {
        GT_FILTER_CHECK_PARSING_ERROR("FASTA ");
        // Apply all filters and print
        gt_filter__print(input_file->file_format,buffered_input->current_line_num-1,sequence_archive,template,
            &total_algs_checked,&total_algs_correct,&total_maps_checked,&total_maps_correct,
            buffered_output,generic_printer_attributes,buffered_discarded_output,discarded_output_attributes);
      }
    } else if (parameters.check_format && parameters.check_file_format==MAP) {
      /*
       * MAP I/O loop
       */
      gt_map_parser_attributes* const attr = gt_input_map_parser_attributes_new(parameters.paired_end);
      while ((error_code=gt_input_map_parser_get_template(buffered_input,template,attr))) {
        GT_FILTER_CHECK_PARSING_ERROR("MAP ");
        // Apply all filters and print
        gt_filter__print(input_file->file_format,buffered_input->current_line_num-1,sequence_archive,template,
            &total_algs_checked,&total_algs_correct,&total_maps_checked,&total_maps_correct,
            buffered_output,generic_printer_attributes,buffered_discarded_output,discarded_output_attributes);
      }
      gt_input_map_parser_attributes_delete(attr);
    } else if (parameters.check_format && parameters.check_file_format==SAM) {
      /*
       * SAM I/O loop
       */
      gt_sam_parser_attributes* const attr = gt_input_sam_parser_attributes_new();
      while ((error_code=gt_input_sam_parser_get_template(buffered_input,template,attr))) {
        GT_FILTER_CHECK_PARSING_ERROR("SAM ");
        // Apply all filters and print
        gt_filter__print(input_file->file_format,buffered_input->current_line_num-1,sequence_archive,template,
            &total_algs_checked,&total_algs_correct,&total_maps_checked,&total_maps_correct,
            buffered_output,generic_printer_attributes,buffered_discarded_output,discarded_output_attributes);
      }
      gt_input_sam_parser_attributes_delete(attr);
    } else {
      /*
       * Generic I/O loop (input format auto-detected)
       */
      gt_generic_parser_attributes* generic_parser_attributes = gt_input_generic_parser_attributes_new(parameters.paired_end);
      gt_input_map_parser_attributes_set_max_parsed_maps(generic_parser_attributes->map_parser_attributes,parameters.max_input_matches); // Limit max-matches
      while ((error_code=gt_input_generic_parser_get_template(buffered_input,template,generic_parser_attributes))) {
        GT_FILTER_CHECK_PARSING_ERROR("");
        // Apply all filters and print
        gt_filter__print(input_file->file_format,buffered_input->current_line_num-1,sequence_archive,template,
            &total_algs_checked,&total_algs_correct,&total_maps_checked,&total_maps_correct,
            buffered_output,generic_printer_attributes,buffered_discarded_output,discarded_output_attributes);
      }
      gt_input_generic_parser_attributes_delete(generic_parser_attributes);
    }
    // Clean
    gt_template_delete(template);
    gt_buffered_input_file_close(buffered_input);
    gt_generic_printer_attributes_delete(generic_printer_attributes);
    if (!parameters.no_output) {
      gt_buffered_output_file_close(buffered_output);
      if (parameters.discarded_output) gt_buffered_output_file_close(buffered_discarded_output);
    }
  }
  /*
   * Print check report
   */
  if (parameters.check) {
    // Fix: use PRIu64 for uint64_t (the previous %lu is wrong on platforms
    // where unsigned long is 32-bit; the rest of the file uses PRIu64)
    gt_log("Checked %"PRIu64" alignments. Total.Correct %"PRIu64" (%2.3f %%). Total.Maps.Correct %"PRIu64" (%2.3f %%)",
        total_algs_checked,total_algs_correct,GT_GET_PERCENTAGE(total_algs_correct,total_algs_checked),
        total_maps_correct,GT_GET_PERCENTAGE(total_maps_correct,total_maps_checked));
  }
  // Release archive & Clean
  if (sequence_archive) gt_sequence_archive_delete(sequence_archive);
  gt_filter_delete_map_ids(parameters.map_ids);
  if (parameters.quality_score_ranges!=NULL) gt_vector_delete(parameters.quality_score_ranges);
  gt_input_file_close(input_file);
  if (!parameters.no_output) {
    gt_output_file_close(output_file);
    if (parameters.discarded_output) gt_output_file_close(discarded_output_file);
  }
}
/*
* Argument Parsing
*/
/*
 * Parses up to @num_params comma-separated integers from @parameters_list
 * into the uint64_t* targets passed as varargs (the list is modified in
 * place by strtok). Extra tokens beyond @num_params are ignored.
 */
void gt_filter_get_coma_separated_arguments_long(char* const parameters_list,const uint64_t num_params,...) {
  uint64_t num_params_parsed = 0;
  // Start va_args
  va_list v_args;
  va_start(v_args,num_params);
  // Start parsing
  char *opt = strtok(parameters_list,",");
  while (opt!=NULL && num_params_parsed<num_params) {
    uint64_t* const uint64_arg = va_arg(v_args,uint64_t*);
    *uint64_arg = atoll(opt);
    opt = strtok(NULL,",");
    // Fix: the counter was never advanced, so the num_params bound had no
    // effect and surplus tokens made va_arg read past the supplied arguments
    // (cf. gt_filter_get_coma_separated_arguments_float, which increments)
    ++num_params_parsed;
  }
  // End va_args
  va_end(v_args);
}
/*
 * Parses up to @num_params comma-separated floats from @parameters_list into
 * the float* targets passed as varargs (the list is modified in place by
 * strtok). Returns how many values were actually parsed.
 */
GT_INLINE uint64_t gt_filter_get_coma_separated_arguments_float(char* const parameters_list,const uint64_t num_params,...) {
  va_list extra_args;
  va_start(extra_args,num_params);
  uint64_t parsed = 0;
  // Consume tokens until either the list or the argument slots run out
  for (char* token = strtok(parameters_list,","); token!=NULL && parsed<num_params; token = strtok(NULL,",")) {
    float* const destination = va_arg(extra_args,float*);
    *destination = atof(token);
    ++parsed;
  }
  va_end(extra_args);
  return parsed;
}
/*
 * Parses the --discarded-output argument "<file>[,<FORMAT>]" where FORMAT is
 * one of FASTA/MAP/SAM ("stdout"/"stderr" are valid file names); an
 * unrecognized format aborts.
 */
void gt_filter_get_discarded_output_arguments(char* const optarg) {
  // First token: destination file name
  parameters.name_discarded_output_file = strtok(optarg,",");
  // Optional second token: output format
  char* const format_token = strtok(NULL,",");
  if (format_token==NULL) return;
  if (gt_streq(format_token,"FASTA")) {
    parameters.discarded_output_format = FASTA;
  } else if (gt_streq(format_token,"MAP")) {
    parameters.discarded_output_format = MAP;
  } else if (gt_streq(format_token,"SAM")) {
    parameters.discarded_output_format = SAM;
  } else {
    gt_fatal_error_msg("Output format '%s' not recognized",format_token);
  }
}
/*
 * Parses the --pair-strandness argument: a comma-separated list of allowed
 * pair orientations (FR, RF, FF, RR). Any list enables paired-end strand
 * filtering; unknown tokens abort.
 */
void gt_filter_get_argument_pair_strandness(char* const strandness_opt) {
  // Each comma-separated token switches on one allowed orientation
  for (char* token = strtok(strandness_opt,","); token!=NULL; token = strtok(NULL,",")) {
    if (gt_streq(token,"FR")) {
      parameters.allow_strand_fr = true;
    } else if (gt_streq(token,"RF")) {
      parameters.allow_strand_rf = true;
    } else if (gt_streq(token,"FF")) {
      parameters.allow_strand_ff = true;
    } else if (gt_streq(token,"RR")) {
      parameters.allow_strand_rr = true;
    } else {
      gt_fatal_error_msg("Strandedness option not recognized '%s'\n",token);
    }
  }
  // Supplying an explicit orientation list turns the PE strand filter on
  parameters.filter_by_strand_pe = true;
}
void gt_filter_get_argument_map_id(char* const maps_ids) {
// Allocate vector
parameters.map_ids = gt_vector_new(20,sizeof(gt_string*));
// Add all the valid map Ids (sequence names)
char *opt;
opt = strtok(maps_ids,",");
while (opt!=NULL) {
// Get id
gt_string* map_id = gt_string_new(0);
gt_string_set_string(map_id,opt);
// Add to the vector
gt_vector_insert(parameters.map_ids,map_id,gt_string*);
// Next
opt = strtok(NULL,","); // Reload
}
}
/*
 * Parses a comma-separated list of GTF feature types into the
 * parameters.gtf_types hash (each entry stored as a 'true' bool flag).
 */
void gt_filter_get_argument_gtf_type(char* const maps_ids) {
  parameters.gtf_types = gt_shash_new();
  // Register every comma-separated type token
  for (char* token = strtok(maps_ids,","); token!=NULL; token = strtok(NULL,",")) {
    gt_shash_insert(parameters.gtf_types, token, true, bool);
  }
}
/*
 * Parses all command-line options via getopt_long into the global
 * 'parameters' struct (option tables come from gt_filter_options /
 * gt_filter_groups). -h/-H/-J print usage and exit(1); unknown options are
 * fatal. After parsing, verifies that a reference (FASTA or GEM index) was
 * supplied whenever some selected functionality needs the index loaded.
 */
void parse_arguments(int argc,char** argv) {
  struct option* gt_filter_getopt = gt_options_adaptor_getopt(gt_filter_options);
  gt_string* const gt_filter_short_getopt = gt_options_adaptor_getopt_short(gt_filter_options);
  int option, option_index;
  while (true) {
    // Get option & Select case
    if ((option=getopt_long(argc,argv,
        gt_string_get_string(gt_filter_short_getopt),gt_filter_getopt,&option_index))==-1) break;
    switch (option) {
    /* I/O */
    case 'i':
      parameters.name_input_file = optarg;
      break;
    case 'o':
      parameters.name_output_file = optarg;
      // "-o null" is a shorthand for suppressing output entirely
      if (gt_streq(optarg,"null")) parameters.no_output = true;
      break;
    case 'r':
      parameters.name_reference_file = optarg;
      break;
    case 'I':
      parameters.name_gem_index_file = optarg;
      break;
    case 200: // annotation
      parameters.annotation = optarg;
      break;
    case 201:
      parameters.mmap_input = true;
      break;
    case 'p':
      parameters.paired_end = true;
      break;
    case 202: // output-format
      if (gt_streq(optarg,"FASTA")) {
        parameters.output_format = FASTA;
      } else if (gt_streq(optarg,"MAP")) {
        parameters.output_format = MAP;
      } else if (gt_streq(optarg,"SAM")) {
        parameters.output_format = SAM;
      } else {
        gt_fatal_error_msg("Output format '%s' not recognized",optarg);
      }
      break;
    case 203: // discarded-output ("<file>[,<FORMAT>]")
      parameters.discarded_output = true;
      gt_filter_get_discarded_output_arguments(optarg);
      break;
    case 204: // no-output
      parameters.no_output = true;
      break;
    case 205: // check-duplicates
      parameters.check_duplicates = true;
      break;
    /* Filter Read/Qualities */
    case 300: // hard-trim ("<left>,<right>")
      parameters.hard_trim = true;
      gt_filter_get_coma_separated_arguments_long(optarg,2,&(parameters.left_trim),&(parameters.right_trim));
      break;
    case 301: // quality-trim
      gt_fatal_error(NOT_IMPLEMENTED);
      break;
    case 302: // restore-trim
      parameters.restore_trim = true;
      break;
    case 303: // uniform-read
      parameters.uniform_read = true;
      if (optarg && gt_streq(optarg,"strict")) parameters.uniform_read_strict = true;
      break;
    case 304: // qualities-to-offset-33
      parameters.qualities_to_offset_33 = true;
      break;
    case 305: // qualities-to-offset-64
      parameters.qualities_to_offset_64 = true;
      break;
    case 306: // remove-qualities
      parameters.remove_qualities = true;
      break;
    case 307: // add-qualities
      parameters.add_qualities = true;
      break;
    /* Filter Template/Alignments */
    case 400:
      parameters.mapped = true;
      break;
    case 401:
      parameters.unmapped = true;
      break;
    case 402:
      parameters.unique_level = atoll(optarg);
      break;
    case 403:
      parameters.min_length = atof(optarg);
      break;
    case 404:
      parameters.max_length = atof(optarg);
      break;
    case 405:
      parameters.min_maps = atof(optarg);
      break;
    case 406:
      parameters.max_maps = atof(optarg);
      break;
    /* Filter Maps */
    case 500: // first-map
      parameters.perform_dna_map_filter = true;
      parameters.first_map = true;
      break;
    case 'k': // keep-first-map
      parameters.keep_first_map = true;
      break;
    case 'u': // keep-unique
      parameters.keep_unique = true;
      break;
    case 'd': // max-decoded-matches
      parameters.matches_pruning = true;
      parameters.max_decoded_matches = atoll(optarg);
      break;
    case 'D': // min-decoded-strata
      parameters.matches_pruning = true;
      parameters.min_decoded_strata = atoll(optarg);
      break;
    case 501: // max-output-matches
      parameters.matches_pruning = true;
      parameters.max_output_matches = atoll(optarg);
      break;
    case 502: // max-input-matches
      parameters.max_input_matches = atoll(optarg);
      break;
    case 503: // max-strata-after-map
      parameters.perform_dna_map_filter = true;
      parameters.max_strata_after_map = atof(optarg);
      break;
    case 504: // make-counters
      parameters.make_counters = true;
      break;
    case 505: // min-strata
      parameters.perform_dna_map_filter = true;
      parameters.min_event_distance = atof(optarg);
      break;
    case 506: // max-strata
      parameters.perform_dna_map_filter = true;
      parameters.max_event_distance = atof(optarg);
      break;
    case 507: // min-levenshtein-error
      parameters.perform_dna_map_filter = true;
      parameters.min_levenshtein_distance = atof(optarg);
      break;
    case 508: // max-levenshtein-error
      parameters.perform_dna_map_filter = true;
      parameters.max_levenshtein_distance = atof(optarg);
      break;
    case 509: // map-id (comma-separated sequence names)
      parameters.perform_dna_map_filter = true;
      gt_filter_get_argument_map_id(optarg);
      break;
    case 510: // strandedness (single-end: F or R)
      parameters.perform_dna_map_filter = true;
      parameters.filter_by_strand_se = true;
      if (gt_streq(optarg,"F")) {
        parameters.allow_strand_f = true;
      } else if (gt_streq(optarg,"R")) {
        parameters.allow_strand_r = true;
      } else {
        gt_fatal_error_msg("Strand '%s' not recognized {'F','R'}",optarg);
      }
      break;
    case 511: // filter-quality ("<min>,<max>"); may be given multiple times
      parameters.perform_dna_map_filter = true;
      gt_filter_quality_range qrange;
      gt_filter_get_coma_separated_arguments_long(optarg,2,&(qrange.min),&(qrange.max));
      // Add it to the vector of ranges (lazily allocated on first use)
      if (parameters.quality_score_ranges==NULL) {
        parameters.quality_score_ranges = gt_vector_new(4,sizeof(gt_filter_quality_range));
      }
      gt_vector_insert(parameters.quality_score_ranges,qrange,gt_filter_quality_range);
      break;
    case 512: // reduce-to-level
      parameters.perform_dna_map_filter = true;
      parameters.reduce_to_unique_strata = atol(optarg);
      break;
    case 513: // reduce-by-quality
      parameters.perform_dna_map_filter = true;
      parameters.reduce_by_quality = atol(optarg);
      break;
    case 514: // reduce-by-annotation
      parameters.reduce_by_gene_id = true;
      parameters.perform_annotation_filter = true;
      break;
    case 515: // reduce-to-unique
      parameters.reduce_to_unique = atol(optarg);
      break;
    case 516: // reduce-to-pairs
      parameters.reduce_to_pairs = true;
      break;
    case 517: // reduce-to-protein-coding
      parameters.reduce_to_protein_coding = true;
      parameters.perform_annotation_filter = true;
      break;
    case 518: // reduce-by_junctions
      parameters.reduce_by_junctions = true;
      parameters.perform_annotation_filter = true;
      break;
    /* Filter RNA-Maps */
    case 600: // no-split-maps
      parameters.no_split_maps = true;
      parameters.perform_rna_map_filter = true;
      break;
    case 601: // only-split-maps
      parameters.only_split_maps = true;
      parameters.perform_rna_map_filter = true;
      break;
    case 's': // no-penalty-for-splitmaps
      parameters.no_penalty_for_splitmaps = true;
      break;
    case 603: // min-intron-length
      parameters.min_intron_length = atol(optarg);
      parameters.perform_rna_map_filter = true;
      break;
    case 604: // min-block-length
      parameters.min_block_length = atol(optarg);
      parameters.perform_rna_map_filter = true;
      break;
    /* Filter PE-Maps */
    case 700: // pair-strandness (comma-separated FR/RF/FF/RR)
      parameters.perform_dna_map_filter = true;
      gt_filter_get_argument_pair_strandness(optarg);
      break;
    case 701: // min-inss
      parameters.perform_dna_map_filter = true;
      parameters.min_inss = atoll(optarg);
      break;
    case 702: // max-inss
      parameters.perform_dna_map_filter = true;
      parameters.max_inss = atoll(optarg);
      break;
    /* Realign/Check */
    case 800: // mismatch-recovery
      parameters.load_index = true;
      parameters.mismatch_recovery = true;
      break;
    case 801: // hamming-realign
      parameters.load_index = true;
      parameters.realign_hamming = true;
      break;
    case 802: // levenshtein-realign
      parameters.load_index = true;
      parameters.realign_levenshtein = true;
      break;
    /* Checking/Report */
    case 'c': // check
      parameters.load_index = true;
      parameters.check = true;
      break;
    case 'C': // check-only (check, but suppress regular output)
      parameters.load_index = true;
      parameters.check = true;
      parameters.no_output = true;
      break;
    case 803: // check-format (forces the input parser used)
      parameters.check_format = true;
      if (gt_streq(optarg,"FASTA")) {
        parameters.check_file_format = FASTA;
      } else if (gt_streq(optarg,"MAP")) {
        parameters.check_file_format = MAP;
      } else if (gt_streq(optarg,"SAM")) {
        parameters.check_file_format = SAM;
      } else {
        gt_fatal_error_msg("Check format '%s' not recognized",optarg);
      }
      break;
    /* Split/Grouping */
    case 900: // split-read
      gt_fatal_error(NOT_IMPLEMENTED);
      break;
    case 901: // sample-read ("chunk,step,ltrim,rtrim[,min-remainder]")
      parameters.special_functionality = true;
      parameters.sample_read = true;
      // NOTE(review): the message below says --split-read although this is
      // the sample-read case — confirm the intended option name
      gt_cond_fatal_error_msg(gt_filter_get_coma_separated_arguments_float(optarg,5,
          &(parameters.split_chunk_size),&(parameters.split_step_size),
          &(parameters.split_left_trim),&(parameters.split_right_trim),
          &(parameters.split_min_remainder))<4,
          "Too few parameters provided to option --split-read");
      break;
    case 902: // group-read-chunks
      parameters.special_functionality = true;
      parameters.group_reads = true;
      break;
    /* Display/Information */
    case 1000:
      parameters.special_functionality = true;
      parameters.error_plot = true;
      break;
    case 1001:
      parameters.special_functionality = true;
      parameters.insert_size_plot = true;
      break;
    case 1002:
      parameters.special_functionality = true;
      parameters.load_index = true;
      parameters.show_sequence_list = true;
      break;
    case 1003:
      parameters.special_functionality = true;
      parameters.load_index = true;
      parameters.display_pretty = true;
      break;
    /* Misc */
    case 't': // threads
      // NOTE(review): without OpenMP num_threads keeps its default value and
      // the bound check below still runs — confirm that is intended
#ifdef HAVE_OPENMP
      parameters.num_threads = atol(optarg);
#endif
      gt_cond_fatal_error_msg(parameters.num_threads > GT_MAX_OUTPUT_BUFFERS,
          "Excessive number of threads (maximum %"PRId32")",GT_MAX_OUTPUT_BUFFERS);
      break;
    case 'v': // verbose
      parameters.verbose = true;
      break;
    case 'h': // help
      fprintf(stderr, "USE: ./gt.filter [ARGS]...\n");
      gt_options_fprint_menu(stderr,gt_filter_options,gt_filter_groups,false,false);
      exit(1);
    case 'H': // full-help
      fprintf(stderr, "USE: ./gt.filter [ARGS]...\n");
      gt_options_fprint_menu(stderr,gt_filter_options,gt_filter_groups,false,true);
      exit(1);
    case 'J':
      gt_options_fprint_json_menu(stderr,gt_filter_options,gt_filter_groups,true,false);
      exit(1);
      break;
    case '?':
    default:
      gt_fatal_error_msg("Option not recognized");
    }
  }
  /*
   * Parameters check
   */
  if (parameters.load_index && parameters.name_reference_file==NULL && parameters.name_gem_index_file==NULL) {
    gt_fatal_error_msg("Reference file required");
  }
  // Free
  // NOTE(review): gt_filter_getopt (from the adaptor) is not freed here —
  // confirm who owns that allocation
  gt_string_delete(gt_filter_short_getopt);
}
/*
* Main
*/
/*
 * Entry point: installs the GT signal handlers, parses the command line and
 * dispatches to the selected functionality (defaulting to the filter loop).
 */
int main(int argc,char** argv) {
  // GT error handler
  gt_handle_error_signals();
  // Parsing command-line options
  parse_arguments(argc,argv);
  /*
   * Select functionality
   */
  if (parameters.show_sequence_list) {
    gt_filter_display_sequence_list();
  } else if (parameters.group_reads) {
    gt_filter_group_reads();
  } else if (parameters.sample_read) {
    gt_filter_sample_read();
  // Deprecated
  } else if (parameters.error_plot) {
    // Fix: error_plot previously invoked the INSERT-SIZE printer (and
    // insert_size_plot the error printer) — the two calls were swapped
    gt_filter_print_error_distribution();
  } else if (parameters.insert_size_plot) {
    gt_filter_print_insert_size_distribution();
  // Deprecated
  } else {
    gt_filter_read__write(); // Filter !!
  }
  return 0;
}
|
convdw5x5s2_neon.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "option.h"
#include "mat.h"
namespace ncnn{
// Depthwise 5x5 convolution with stride 2 (one input channel per output
// channel, i.e. group count == channel count). The inner row loop is
// hand-vectorized for NEON: ld2 de-interleaves even/odd input columns so
// that a stride-2 window slide becomes plain vector ext/shift operations.
// The scalar tail loop handles the leftover output columns per row.
static void convdw5x5s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    //int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    //int outch = top_blob.c;
    // After producing one output row the row pointers have advanced by
    // 2*outw input floats; tailstep (= 2*w - 2*outw) moves them to the start
    // of the row two input rows below (vertical stride 2).
    const int tailstep = w - 2*outw + w;
    const int group = bottom_blob.c;
    const float* kernel = _kernel;
    const float* bias = _bias;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g=0; g<group; g++)
    {
        Mat out = top_blob.channel(g);
        // Per-channel bias; a null bias blob means bias 0.
        const float bias0 = bias ? bias[g] : 0.f;
        // 25 kernel weights (5x5) per group/channel.
        const float* kernel0 = kernel + g*25;
        float* outptr = out;
        const float* img0 = bottom_blob.channel(g);
        // Five consecutive input rows covered by the 5x5 window.
        const float* r0 = img0;
        const float* r1 = img0 + w;
        const float* r2 = img0 + w*2;
        const float* r3 = img0 + w*3;
        const float* r4 = img0 + w*4;
        // Kernel rows, 5 weights each.
        const float* k0 = kernel0;
        const float* k1 = kernel0 + 5;
        const float* k2 = kernel0 + 10;
        const float* k3 = kernel0 + 15;
        const float* k4 = kernel0 + 20;
#if __ARM_NEON
        // Preload the 25 weights as overlapping 4-lane vectors; the asm
        // below indexes individual lanes (%14.s[0] etc.).
        float32x4_t _k0123 = vld1q_f32(kernel0);
        float32x4_t _k4567 = vld1q_f32(kernel0+4);
        float32x4_t _k891011 = vld1q_f32(kernel0+8);
        float32x4_t _k12131415 = vld1q_f32(kernel0+12);
        float32x4_t _k16171819 = vld1q_f32(kernel0+16);
        float32x4_t _k20212223 = vld1q_f32(kernel0+20);
        float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]);
        float32x4_t _bias0 = vdupq_n_f32(bias0);
#endif // __ARM_NEON
        int i = 0;
        // NOTE unroll outh 2 results somewhat speed drop :| (about -4%)
        // so we do not implement it here
        for (; i < outh; i++)
        {
#if __ARM_NEON
#if __aarch64__
            // aarch64 path emits 8 outputs per asm iteration.
            int nn = outw >> 3;
            int remain = outw & 7;
#else
            // armv7 path emits 4 outputs per asm iteration.
            int nn = outw >> 2;
            int remain = outw & 3;
#endif // __aarch64__
#else
            int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
            if (nn > 0)
            {
            // The loop body pre-loads the next iteration's r0 data before
            // the branch; the trailing "sub %2, %2, #64" rewinds r0 by the
            // over-read 64 bytes after the last iteration.
            asm volatile(
                // r0
                "prfm pldl1keep, [%2, #256] \n"
                "ld2 {v16.4s, v17.4s}, [%2], #32 \n"// v16 v17 = r00 r01
                "mov v8.16b, %21.16b \n"// v8 = _bias0
                "mov v9.16b, %21.16b \n"// v9 = _bias0
                "prfm pldl1keep, [%2, #256] \n"
                "ld2 {v18.4s, v19.4s}, [%2], #32 \n"// v18 v19 = r08 r09
                "0: \n"
                "fmul v10.4s, v16.4s, %14.s[0] \n"
                "prfm pldl1keep, [%2, #256] \n"
                "ld2 {v20.4s, v21.4s}, [%2] \n"// v20 v21 = r016 r017
                "fmul v11.4s, v18.4s, %14.s[0] \n"
                "ext v22.16b, v16.16b, v18.16b, #4 \n"// v22 = r02
                "fmla v8.4s, v17.4s, %14.s[1] \n"
                "ext v25.16b, v18.16b, v20.16b, #4 \n"// v25 = r010
                "fmla v9.4s, v19.4s, %14.s[1] \n"
                "ext v23.16b, v17.16b, v19.16b, #4 \n"// v23 = r03
                "fmla v10.4s, v22.4s, %14.s[2] \n"
                "ext v26.16b, v19.16b, v21.16b, #4 \n"// v26 = r011
                "fmla v11.4s, v25.4s, %14.s[2] \n"
                "ext v24.16b, v16.16b, v18.16b, #8 \n"// v24 = r04
                "fmla v8.4s, v23.4s, %14.s[3] \n"
                "ext v27.16b, v18.16b, v20.16b, #8 \n"// v27 = r012
                "fmla v9.4s, v26.4s, %14.s[3] \n"
                // r1
                "prfm pldl1keep, [%3, #256] \n"
                "ld2 {v12.4s, v13.4s}, [%3], #32 \n"// v12 v13 = r10 r11
                "fmla v10.4s, v24.4s, %15.s[0] \n"
                "prfm pldl1keep, [%3, #256] \n"
                "ld2 {v14.4s, v15.4s}, [%3], #32 \n"// v14 v15 = r18 r19
                "fmla v11.4s, v27.4s, %15.s[0] \n"
                "fmla v8.4s, v12.4s, %15.s[1] \n"
                "prfm pldl1keep, [%3, #256] \n"
                "ld2 {v20.4s, v21.4s}, [%3] \n"// v20 v21 = r116 r117
                "fmla v9.4s, v14.4s, %15.s[1] \n"
                "ext v22.16b, v12.16b, v14.16b, #4 \n"// v22 = r12
                "fmla v10.4s, v13.4s, %15.s[2] \n"
                "ext v25.16b, v14.16b, v20.16b, #4 \n"// v25 = r110
                "fmla v11.4s, v15.4s, %15.s[2] \n"
                "ext v23.16b, v13.16b, v15.16b, #4 \n"// v23 = r13
                "fmla v8.4s, v22.4s, %15.s[3] \n"
                "ext v26.16b, v15.16b, v21.16b, #4 \n"// v26 = r111
                "fmla v9.4s, v25.4s, %15.s[3] \n"
                "ext v24.16b, v12.16b, v14.16b, #8 \n"// v24 = r14
                "fmla v10.4s, v23.4s, %16.s[0] \n"
                "ext v27.16b, v14.16b, v20.16b, #8 \n"// v27 = r112
                "fmla v11.4s, v26.4s, %16.s[0] \n"
                // r2
                "prfm pldl1keep, [%4, #256] \n"
                "ld2 {v16.4s, v17.4s}, [%4], #32 \n"// v16 v17 = r20 r21
                "fmla v8.4s, v24.4s, %16.s[1] \n"
                "prfm pldl1keep, [%4, #256] \n"
                "ld2 {v18.4s, v19.4s}, [%4], #32 \n"// v18 v19 = r28 r29
                "fmla v9.4s, v27.4s, %16.s[1] \n"
                "fmla v10.4s, v16.4s, %16.s[2] \n"
                "prfm pldl1keep, [%4, #256] \n"
                "ld2 {v20.4s, v21.4s}, [%4] \n"// v20 v21 = r216 r217
                "fmla v11.4s, v18.4s, %16.s[2] \n"
                "ext v22.16b, v16.16b, v18.16b, #4 \n"// v22 = r22
                "fmla v8.4s, v17.4s, %16.s[3] \n"
                "ext v25.16b, v18.16b, v20.16b, #4 \n"// v25 = r210
                "fmla v9.4s, v19.4s, %16.s[3] \n"
                "ext v23.16b, v17.16b, v19.16b, #4 \n"// v23 = r23
                "fmla v10.4s, v22.4s, %17.s[0] \n"
                "ext v26.16b, v19.16b, v21.16b, #4 \n"// v26 = r211
                "fmla v11.4s, v25.4s, %17.s[0] \n"
                "ext v24.16b, v16.16b, v18.16b, #8 \n"// v24 = r24
                "fmla v8.4s, v23.4s, %17.s[1] \n"
                "ext v27.16b, v18.16b, v20.16b, #8 \n"// v27 = r212
                "fmla v9.4s, v26.4s, %17.s[1] \n"
                // r3
                "prfm pldl1keep, [%5, #256] \n"
                "ld2 {v12.4s, v13.4s}, [%5], #32 \n"// v12 v13 = r30 r31
                "fmla v10.4s, v24.4s, %17.s[2] \n"
                "prfm pldl1keep, [%5, #256] \n"
                "ld2 {v14.4s, v15.4s}, [%5], #32 \n"// v14 v15 = r38 r39
                "fmla v11.4s, v27.4s, %17.s[2] \n"
                "fmla v8.4s, v12.4s, %17.s[3] \n"
                "prfm pldl1keep, [%5, #256] \n"
                "ld2 {v20.4s, v21.4s}, [%5] \n"// v20 v21 = r316 r317
                "fmla v9.4s, v14.4s, %17.s[3] \n"
                "ext v22.16b, v12.16b, v14.16b, #4 \n"// v22 = r32
                "fmla v10.4s, v13.4s, %18.s[0] \n"
                "ext v25.16b, v14.16b, v20.16b, #4 \n"// v25 = r310
                "fmla v11.4s, v15.4s, %18.s[0] \n"
                "ext v23.16b, v13.16b, v15.16b, #4 \n"// v23 = r33
                "fmla v8.4s, v22.4s, %18.s[1] \n"
                "ext v26.16b, v15.16b, v21.16b, #4 \n"// v26 = r311
                "fmla v9.4s, v25.4s, %18.s[1] \n"
                "ext v24.16b, v12.16b, v14.16b, #8 \n"// v24 = r34
                "fmla v10.4s, v23.4s, %18.s[2] \n"
                "ext v27.16b, v14.16b, v20.16b, #8 \n"// v27 = r312
                "fmla v11.4s, v26.4s, %18.s[2] \n"
                // r4
                "prfm pldl1keep, [%6, #256] \n"
                "ld2 {v16.4s, v17.4s}, [%6], #32 \n"// v16 v17 = r40 r41
                "fmla v8.4s, v24.4s, %18.s[3] \n"
                "prfm pldl1keep, [%6, #256] \n"
                "ld2 {v18.4s, v19.4s}, [%6], #32 \n"// v18 v19 = r48 r49
                "fmla v9.4s, v27.4s, %18.s[3] \n"
                "fmla v10.4s, v16.4s, %19.s[0] \n"
                "prfm pldl1keep, [%6, #256] \n"
                "ld2 {v20.4s, v21.4s}, [%6] \n"// v20 v21 = r416 r417
                "fmla v11.4s, v18.4s, %19.s[0] \n"
                "ext v22.16b, v16.16b, v18.16b, #4 \n"// v22 = r42
                "fmla v8.4s, v17.4s, %19.s[1] \n"
                "ext v25.16b, v18.16b, v20.16b, #4 \n"// v25 = r410
                "fmla v9.4s, v19.4s, %19.s[1] \n"
                "ext v23.16b, v17.16b, v19.16b, #4 \n"// v23 = r43
                "fmla v10.4s, v22.4s, %19.s[2] \n"
                "ext v26.16b, v19.16b, v21.16b, #4 \n"// v26 = r411
                "fmla v11.4s, v25.4s, %19.s[2] \n"
                "ext v24.16b, v16.16b, v18.16b, #8 \n"// v24 = r44
                "fmla v8.4s, v23.4s, %19.s[3] \n"
                "ext v27.16b, v18.16b, v20.16b, #8 \n"// v27 = r412
                "fmla v9.4s, v26.4s, %19.s[3] \n"
                "fmla v10.4s, v24.4s, %20.s[0] \n"
                // r0
                "prfm pldl1keep, [%2, #256] \n"
                "ld2 {v16.4s, v17.4s}, [%2], #32 \n"// v16 v17 = r00 r01
                "fmla v11.4s, v27.4s, %20.s[0] \n"
                "prfm pldl1keep, [%2, #256] \n"
                "ld2 {v18.4s, v19.4s}, [%2], #32 \n"// v18 v19 = r08 r09
                "fadd v10.4s, v8.4s, v10.4s \n"
                "fadd v11.4s, v9.4s, v11.4s \n"
                "subs %w0, %w0, #1 \n"
                "mov v8.16b, %21.16b \n"// v8 = _bias0
                "mov v9.16b, %21.16b \n"// v9 = _bias0
                "st1 {v10.4s, v11.4s}, [%1], #32 \n"
                "bne 0b \n"
                "sub %2, %2, #64 \n"
                : "=r"(nn), // %0
                "=r"(outptr), // %1
                "=r"(r0), // %2
                "=r"(r1), // %3
                "=r"(r2), // %4
                "=r"(r3), // %5
                "=r"(r4) // %6
                : "0"(nn),
                "1"(outptr),
                "2"(r0),
                "3"(r1),
                "4"(r2),
                "5"(r3),
                "6"(r4),
                "w"(_k0123), // %14
                "w"(_k4567), // %15
                "w"(_k891011), // %16
                "w"(_k12131415), // %17
                "w"(_k16171819), // %18
                "w"(_k20212223), // %19
                "w"(_k24242424), // %20
                "w"(_bias0) // %21
                : "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"
            );
            }
#else
            if (nn > 0)
            {
            // armv7 variant of the same scheme, 4 outputs per iteration;
            // "sub %2, #32" rewinds the pre-loaded r0 pointer at loop exit.
            asm volatile(
                // r0
                "pld [%2, #256] \n"
                "vld2.f32 {d20-d23}, [%2]! \n"// q10 q11 = r00 r01
                "vmov q8, %q21 \n"
                "pld [%2, #128] \n"
                "vld2.f32 {d24-d25}, [%2] \n"// q12 = r08 x x
                "0: \n"
                "vmul.f32 q9, q10, %e14[0] \n"
                "vmov d26, d25 \n"// q13 = r09 x x
                "vext.32 q14, q10, q12, #1 \n"// q14 = r02
                "vmla.f32 q8, q11, %e14[1] \n"
                "vext.32 q15, q11, q13, #1 \n"// q15 = r03
                "vmla.f32 q9, q14, %f14[0] \n"
                "vext.32 q14, q10, q12, #2 \n"// q14 = r04
                "vmla.f32 q8, q15, %f14[1] \n"
                // r1
                "pld [%3, #256] \n"
                "vld2.f32 {d20-d23}, [%3]! \n"// q10 q11 = r10 r11
                "vmla.f32 q9, q14, %e15[0] \n"
                "pld [%3, #128] \n"
                "vld2.f32 {d24-d25}, [%3] \n"// q12 = r18 x x
                "vmla.f32 q8, q10, %e15[1] \n"
                "vmov d26, d25 \n"// q13 = r19 x x
                "vext.32 q14, q10, q12, #1 \n"// q14 = r12
                "vmla.f32 q9, q11, %f15[0] \n"
                "vext.32 q15, q11, q13, #1 \n"// q15 = r13
                "vmla.f32 q8, q14, %f15[1] \n"
                "vext.32 q14, q10, q12, #2 \n"// q14 = r14
                "vmla.f32 q9, q15, %e16[0] \n"
                // r2
                "pld [%4, #256] \n"
                "vld2.f32 {d20-d23}, [%4]! \n"// q10 q11 = r20 r21
                "vmla.f32 q8, q14, %e16[1] \n"
                "pld [%4, #128] \n"
                "vld2.f32 {d24-d25}, [%4] \n"// q12 = r28 x x
                "vmla.f32 q9, q10, %f16[0] \n"
                "vmov d26, d25 \n"// q13 = r29 x x
                "vext.32 q14, q10, q12, #1 \n"// q14 = r22
                "vmla.f32 q8, q11, %f16[1] \n"
                "vext.32 q15, q11, q13, #1 \n"// q15 = r23
                "vmla.f32 q9, q14, %e17[0] \n"
                "vext.32 q14, q10, q12, #2 \n"// q14 = r24
                "vmla.f32 q8, q15, %e17[1] \n"
                // r3
                "pld [%5, #256] \n"
                "vld2.f32 {d20-d23}, [%5]! \n"// q10 q11 = r30 r31
                "vmla.f32 q9, q14, %f17[0] \n"
                "pld [%5, #128] \n"
                "vld2.f32 {d24-d25}, [%5] \n"// q12 = r38 x x
                "vmla.f32 q8, q10, %f17[1] \n"
                "vmov d26, d25 \n"// q13 = r39 x x
                "vext.32 q14, q10, q12, #1 \n"// q14 = r32
                "vmla.f32 q9, q11, %e18[0] \n"
                "vext.32 q15, q11, q13, #1 \n"// q15 = r33
                "vmla.f32 q8, q14, %e18[1] \n"
                "vext.32 q14, q10, q12, #2 \n"// q14 = r34
                "vmla.f32 q9, q15, %f18[0] \n"
                // r4
                "pld [%6, #256] \n"
                "vld2.f32 {d20-d23}, [%6]! \n"// q10 q11 = r40 r41
                "vmla.f32 q8, q14, %f18[1] \n"
                "pld [%6, #128] \n"
                "vld2.f32 {d24-d25}, [%6] \n"// q12 = r48 x x
                "vmla.f32 q9, q10, %e19[0] \n"
                "vmov d26, d25 \n"// q13 = r49 x x
                "vext.32 q14, q10, q12, #1 \n"// q14 = r42
                "vmla.f32 q8, q11, %e19[1] \n"
                "vext.32 q15, q11, q13, #1 \n"// q15 = r43
                "vmla.f32 q9, q14, %f19[0] \n"
                "vext.32 q14, q10, q12, #2 \n"// q14 = r44
                "vmla.f32 q8, q15, %f19[1] \n"
                // r0
                "pld [%2, #256] \n"
                "vld2.f32 {d20-d23}, [%2]! \n"// q10 q11 = r00 r01
                "vmla.f32 q9, q14, %e20[0] \n"
                "pld [%2, #128] \n"
                "vld2.f32 {d24-d25}, [%2] \n"// q12 = r08 x x
                "vadd.f32 q9, q8, q9 \n"
                "vmov q8, %q21 \n"
                "subs %0, #1 \n"
                "vst1.f32 {d18-d19}, [%1]! \n"
                "bne 0b \n"
                "sub %2, #32 \n"
                : "=r"(nn), // %0
                "=r"(outptr), // %1
                "=r"(r0), // %2
                "=r"(r1), // %3
                "=r"(r2), // %4
                "=r"(r3), // %5
                "=r"(r4) // %6
                : "0"(nn),
                "1"(outptr),
                "2"(r0),
                "3"(r1),
                "4"(r2),
                "5"(r3),
                "6"(r4),
                "w"(_k0123), // %14
                "w"(_k4567), // %15
                "w"(_k891011), // %16
                "w"(_k12131415), // %17
                "w"(_k16171819), // %18
                "w"(_k20212223), // %19
                "w"(_k24242424), // %20
                "w"(_bias0) // %21
                : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
            );
            }
#endif // __aarch64__
#endif // __ARM_NEON
            // Scalar tail: one output at a time. On NEON the first four
            // columns of each kernel row are done as a 4-lane MAC and the
            // fifth column is accumulated in the scalar `sum`.
            for (; remain>0; remain--)
            {
                float sum = bias0;
#if __ARM_NEON
                // TODO neon assembly optimize
                float32x4_t _r0 = vld1q_f32(r0);
                float32x4_t _sum = vmulq_f32(_r0, _k0123);
                float32x4_t _r1 = vld1q_f32(r1);
                _sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1));
                float32x4_t _r2 = vld1q_f32(r2);
                _sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2));
                float32x4_t _r3 = vld1q_f32(r3);
                _sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3));
                float32x4_t _r4 = vld1q_f32(r4);
                _sum = vmlaq_f32(_sum, _r4, _k20212223);
                sum += r0[4] * k0[4];
                sum += r1[4] * k1[4];
                sum += r2[4] * k2[4];
                sum += r3[4] * k3[4];
                sum += r4[4] * k4[4];
                // Horizontal add of the 4 vector lanes into sum.
                float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                _ss = vpadd_f32(_ss, _ss);
                sum += vget_lane_f32(_ss, 0);
#else
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r0[3] * k0[3];
                sum += r0[4] * k0[4];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r1[3] * k1[3];
                sum += r1[4] * k1[4];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];
                sum += r2[3] * k2[3];
                sum += r2[4] * k2[4];
                sum += r3[0] * k3[0];
                sum += r3[1] * k3[1];
                sum += r3[2] * k3[2];
                sum += r3[3] * k3[3];
                sum += r3[4] * k3[4];
                sum += r4[0] * k4[0];
                sum += r4[1] * k4[1];
                sum += r4[2] * k4[2];
                sum += r4[3] * k4[3];
                sum += r4[4] * k4[4];
#endif
                *outptr = sum;
                // Horizontal stride 2.
                r0 += 2;
                r1 += 2;
                r2 += 2;
                r3 += 2;
                r4 += 2;
                outptr++;
            }
            // Advance to the input row two rows below (vertical stride 2).
            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
            r3 += tailstep;
            r4 += tailstep;
        }
    }
}
}
|
utils.h | /**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDQUANTUM_UTILS_H_
#define MINDQUANTUM_UTILS_H_
#ifdef ENABLE_OPENMP
# include <omp.h>
#endif // ENABLE_OPENMP // NOLINT
#include <x86intrin.h>
#include <complex>
#include <cstdint>
#include <ctime>
#include <iostream>
#include <map>
#include <numeric>
#include <random>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "core/type.h"
namespace mindquantum {
extern const VT<CT<MT>> POLAR;
// Inner product <v1|v2> of two state vectors stored as interleaved
// (real, imag) scalars. `len` counts scalars, i.e. 2 per amplitude.
template <typename T, typename ST>
CT<T> ComplexInnerProduct(const ST *v1, const ST *v2, Index len) {
    // len is (1UL>>n_qubits)*2
    ST re = 0;
    ST im = 0;
    const Index n_amps = len / 2;
#pragma omp parallel for reduction(+ : re, im)
    for (Index k = 0; k < n_amps; k++) {
        const ST a_re = v1[2 * k];
        const ST a_im = v1[2 * k + 1];
        const ST b_re = v2[2 * k];
        const ST b_im = v2[2 * k + 1];
        re += a_re * b_re + a_im * b_im;
        im += a_re * b_im - a_im * b_re;
    }
    return CT<T>{static_cast<T>(re), static_cast<T>(im)};
}
// Same inner product as ComplexInnerProduct, but only amplitudes whose
// index has every bit of `ctrlmask` set contribute to the sum.
template <typename T, typename ST>
CT<T> ComplexInnerProductWithControl(const ST *v1, const ST *v2, Index len, Index ctrlmask) {
    // len is (1UL>>n_qubits)*2
    ST re = 0;
    ST im = 0;
    const Index n_amps = len / 2;
#pragma omp parallel for reduction(+ : re, im)
    for (Index k = 0; k < n_amps; k++) {
        if ((k & ctrlmask) != ctrlmask) {
            continue;  // control bits not all set: skip this amplitude
        }
        const ST a_re = v1[2 * k];
        const ST a_im = v1[2 * k + 1];
        const ST b_re = v2[2 * k];
        const ST b_im = v2[2 * k + 1];
        re += a_re * b_re + a_im * b_im;
        im += a_re * b_im - a_im * b_re;
    }
    return CT<T>{static_cast<T>(re), static_cast<T>(im)};
}
Index GetControlMask(const VT<Index> &ctrls);
PauliMask GetPauliMask(const VT<PauliWord> &pws);
// inline int CountOne(uint32_t n) { return __popcntd(n); }
// inline int CountOne(uint64_t n) { return __popcntq(n);}
// Count the number of set bits in a 32-bit word.
// Replaces the raw `popcnt` inline asm (which silently required a
// POPCNT-capable x86 CPU) with the compiler builtin: identical result,
// portable across targets, and foldable by the optimizer.
inline int CountOne(uint32_t n) {
    return __builtin_popcount(n);
}
// Count set bits in a 64-bit value.
// The previous version reinterpret_cast'ed &n to uint32_t* and summed two
// 32-bit popcounts — a strict-aliasing violation. Converting through an
// unsigned 64-bit value is well defined and yields the same bit count.
inline int CountOne(int64_t n) {
    return __builtin_popcountll(static_cast<uint64_t>(n));
}
// inline int CountOne(uint64_t n) {
// uint8_t *p = reinterpret_cast<uint8_t *>(&n);
// return POPCNTTABLE[p[0]] + POPCNTTABLE[p[1]] + POPCNTTABLE[p[2]] +
// POPCNTTABLE[p[3]] + POPCNTTABLE[p[4]] + POPCNTTABLE[p[5]] +
// POPCNTTABLE[p[6]] + POPCNTTABLE[p[7]];
// }
// inline int CountOne(uint32_t n) {
// uint8_t *p = reinterpret_cast<uint8_t *>(&n);
// return POPCNTTABLE[p[0]] + POPCNTTABLE[p[1]] + POPCNTTABLE[p[2]] +
// POPCNTTABLE[p[3]];
// }
// Build a random Pauli term on `n_qubits` qubits: a uniform coefficient in
// [-1, 1] and, per qubit, one of X/Y/Z or identity (identity qubits are
// simply omitted from the word list).
template <typename T>
PauliTerm<T> GenerateRandomPauliTerm(Index n_qubits) {
    // Wall-clock seed: successive calls within the same clock tick will
    // repeat — acceptable for test-data generation, not for statistics.
    std::default_random_engine e(std::clock());
    std::uniform_real_distribution<T> ut(-1.0, 1.0);
    auto coeff = ut(e);
    // BUGFIX: std::uniform_int_distribution<char> is undefined behavior —
    // the standard only permits short/int/long/long long and their
    // unsigned counterparts as IntType. Draw an int and narrow instead.
    std::uniform_int_distribution<int> uit(0, 3);
    VT<PauliWord> pws;
    for (Index i = 0; i < n_qubits; i++) {
        auto p = uit(e);
        if (p != 3) {  // 3 acts as "identity": skip this qubit
            // 0 -> 'X', 1 -> 'Y', 2 -> 'Z'
            pws.push_back(std::make_pair(i, static_cast<char>('X' + p)));
        }
    }
    return std::make_pair(pws, coeff);
}
template <typename T>
void ShowPauliTerm(const PauliTerm<T> &pt) {
std::cout << pt.second << " [";
for (Index i = 0; i < static_cast<Index>(pt.first.size()); i++) {
auto &pw = pt.first[i];
std::cout << pw.second << pw.first;
if (i != static_cast<Index>(pt.first.size()) - 1) {
std::cout << " ";
}
}
std::cout << "]" << std::endl;
}
TimePoint NOW();
int TimeDuration(TimePoint start, TimePoint end);
// Dump a state vector to stdout, one complex amplitude per line.
// `len` counts real scalars, so len/2 complex entries are printed.
template <typename T>
void PrintVec(T *vec, size_t len) {
    auto amplitudes = reinterpret_cast<CTP<T>>(vec);
    const size_t n_amps = len / 2;
    for (size_t idx = 0; idx != n_amps; ++idx) {
        std::cout << amplitudes[idx] << std::endl;
    }
}
} // namespace mindquantum
#endif // MINDQUANTUM_UTILS_H_
|
opencl_odf_aes_fmt_plug.c | /* Modified by Dhiru Kholia <dhiru at openwall.com> for ODF AES format.
*
* This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted. */
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_odf_aes;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_odf_aes);
#else
#include <string.h>
#include "aes.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "formats.h"
#include "common.h"
#include "stdint.h"
#include "misc.h"
#include "options.h"
#include "common.h"
#include "formats.h"
#include "common-opencl.h"
#include "sha2.h"
#define FORMAT_LABEL "ODF-AES-opencl"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "SHA256 OpenCL AES"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define BINARY_SIZE 20
#define PLAINTEXT_LENGTH 64
#define SALT_SIZE sizeof(odf_cpu_salt)
typedef struct {
uint32_t length;
uint8_t v[32]; // hash of password
} odf_password;
typedef struct {
uint32_t v[32/4];
} odf_hash;
typedef struct {
uint32_t iterations;
uint32_t outlen;
uint8_t length;
uint8_t salt[64];
} odf_salt;
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[32 / sizeof(ARCH_WORD_32)];
typedef struct {
int cipher_type;
int checksum_type;
int iterations;
int key_size;
int iv_length;
int salt_length;
int content_length;
unsigned char iv[16];
unsigned char salt[32];
unsigned char content[1024];
} odf_cpu_salt;
static odf_cpu_salt *cur_salt;
static struct fmt_tests tests[] = {
{"$odf$*1*1*1024*32*61802eba18eab842de1d053809ba40927fd40b26c69ddeca6a8a652ed9c16a28*16*c5c0815b931f313627100d592a9c972f*16*e9a48b7daff738deaabe442007fb2ec4*0*be3b65ea09642c2b4fdc23e553e1f5304bc5df222b624c6373d53e674f5df01fdb8873cdab7a5a685fa45ad5441a9d8869401b7fa076c488ad53fd9971e97244ecc9416484450d4fb2ee4ec08af4044d7def937e6545dea2ce36bd5c57b1f46b11b9cf90c8fb3accff149ce2d54820b181b9124db9aac131f6436d77cf716423f04d42438eed6f9ca14bd24b9b17d3478176addd5fa0254bf986fccd879e326485790e28b94ad5306868734b5ac1b1ddb3f876382dee6e9428e8230e84bf11b7e85ccbae8b4b424cd73160c380f874b37fbe3c7e88c13ef4bde74b56507d17095c2c32bb8bcded0637e4403107bb33252f72f5886a91b7720fe32a8659a09c217717e4c74a7c2e09fc40b46aa288309a36e86b9f1856e1bce176bc9690555431e05c7b67ff95df64f8f40053079bfc9dda021ab2714fecf74398b867ebef675958f29eaa15eb631845e358a0c5caff0b824a2a69a6eabee069d3d6236d77709fd60438c9e3ad9e42b26810375e1e587eff105ac295327ef8bf66f6462388b7727ec32d6abde2f8d6126b185124bb437753663f6ab1f321ddfdb36d9f1f528729492e0b1bb8d3b9eda3c86c1997c92b902f5160f77587c37e45b5c133b5d9709fea910a2e9b54c0960b0ebc870cdbb858aabe07ed27cba86d29a7e64c6e3863131859314a14e64c1168d4a2d5ca0697853fb1fe969ba968e31359881d51edce287eff415de8e60cec2068bb82157fbcf0cf9a95e92cb23f32e6156daced4bee6ba8c8b41174d01fcd7662911bcc10d5b4478f8209ce3b91075d10529780be4f17e841a1f1833d432c3dc854908643e58b03c8860dfbc710a29f79f75ea262cfcef9cd67fb67d73f55b300d42f4577445af2b9f224620204cfb88de2cbf57931ac0e0f8d98259a41d744cad6a58abc7761c266f4e93aca19356b07073c09ae9d1976f4f2e1a76c350cc7764c27ae257eb69ba4213dd0a7794fa83d220439a398efd988b6dbf0de4c08bc3e4830c9e482b9e0fd1679f14e6f132cf06bae1d763dde7ce6f525ff9a0ebad28aeca16496194f2a6263a20e7afeb43d83c8c936130d6508f2bf68b5ca50375948424193a7fb1106fdf63ff72896e1b2633907f01a693218e3303436542bcf2af24cc4a41621c36768ce9a84d32cc9f3c2b108bfc78c25b1c2ea94e6e0d65406f78bdb8bc33c94a9550e5cc3e995cfbd31da03afb929418acdc89b099415f9bdb7dab7a75d44a696e14b031d601ad8d907e14a28044706c0c2955df2cb34ffea82af367e487b6cc928dc87a33fc7555
173e7faa5cfd1af6d3d6f496f23a9579db22dd4a2c16e950fdc90696d95a81183765a4fbddb42c488d40ac1de28483cf1cdddf821d3f859c57b13cb7f21a916bd0d89438a17634c68637f23e2544589e8ae5ee5bced91680c087cb3105cd74a09e88d3aae17d75e", "test"},
/* CMIYC 2013 "pro" hard hash */
{"$odf$*1*1*1024*32*7db40092b3857fa319bc0d717b60cefc40b1d51ef92ebc893c518ffebffdf200*16*5f7c8ab6e5d1c41dbd23c384fee957ed*16*9ff092f2dd29dab6ce5fb43ad7bbdd5a*0*bac8343436715b40aaf4690a7dc57b0f82b8f25f8ad0f9833e32468410d4dd02e387a067872b5847adc9a276c86a03113e11b903854202eec361c5b7ba74bcb254a4f76d97ca45dbe30fe49f78ce9cf7df0246ae4524b8f13ad28357838559c116d9ed59267f4df91da3ea9758c132e2ebc40fd4ee8e9978921a0847d7ca5c30ef911e0b88f9fc84039633eacf5e023c82dd1a573abd7663b8f36a039d42ed91b4a0665902f174be8cefefd367ba9b5da95768550e567242f1b2e2c3866eb8aa3c12d0b34277929616319ea29dd9a3b9addb963d45c7d4c2b54a99b0c1cf24cac3e981ed4e178e621938b83be30f54d37d6425a0b7ac9dff5504830fe1d1f136913c32d8f732eb55e6179ad2699fd851af3a44f8ca914117344e6fadf501bf6f6e0ae7970a2b58eb3af0d89c78411c6adde8aa1f0e8b69c261fd04835cdc3ddf0a6d67ddff33995b5cc7439db83f90c8a2e07e2513771fffcf8b55ce1a382b14ffbf22be9bdd6f83a9b7602995c9793dfffb32c9eb16930c0bb55e5a8364fa06a59fca5af27df4a02565db2b4718ed44405f67a052738692c189039a7fd63713207616eeeebace3c0a3963dd882c485523f49fa0bc2663fc6ef090a220dd5c6554bc0702da8c3122383ea8a009837d549d58ad688c9cc4b8461fe70f4600539cd1d82edd4e110b1c1472dae40adc3126e2a09dd2753dcd83799841745160e235652f601d1257268321f22d19bd9dc811afaf143765c7cb53717ea329e9e4064a3cf54b33d006e93b83102e2ad3327f6d995cb598bd96466b1287e6da9967f4f034c63fd06c6e5c7ec25008c122385f271d18918cff3823f9fbdb37791e7371ce1d6a4ab08c12eca5fceb7c9aa7ce25a8bd640a68c622ddd858973426cb28e65c4c3421b98ebf4916b8c2bfe71b2afec4ab2f99291a4c4d3312521850d46436aecd9e2e93a8619dbc3c1caf4507bb488ce921cd8d13a1640e6c49403e0416924b3b1a01c9939c7bcdec50f057d6f4dccf0afc8c2ad37c4f8429c77cf19ad49db5e5219e965a3ed5d56d799689bd93642602d7959df0493ea62cccff83e66d85bf45d6b5b03e8cfca84daf37ecfccb60f85f3c5102900a02a5df015b1bf1ef55dfb2ab20321bcf3325d1adce22d4456837dcc589ef36d4f06ccdcc96ef10ff806d76f0044e92e192b946ae0f09860a38c2a6052fe84c3e9bb9380e2b344812376c6bbd5c9858745dbd072798a3d7eff31ae5d509c11b5269ec6f2108cb6e72a5ab495ea7aed5bf3dabedbb517dc4ceff818a8e890a6ea9a91ba
b37e8a463a9d04993c5ba7e40e743e033842540806d4a65258d0f4d5988e1e0011f0e85fcae3b2819c1f17f5c7980ecd87aee425cdab4f34bfb7a31ee7936c60f2f4f52aea67aef4736a419dc9c559279b569f61995eb2d6b7c204c3e9f56ca5c8a889812a30c33", "juNK^r00M!"},
{NULL}
};
static cl_int cl_error;
static odf_password *inbuffer;
static odf_hash *outbuffer;
static odf_salt currentsalt;
static cl_mem mem_in, mem_out, mem_setting;
static struct fmt_main *self;
size_t insize, outsize, settingsize, cracked_size;
#define STEP 0
#define SEED 256
// This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"
static const char * warn[] = {
"xfer: ", ", crypt: ", ", xfer: "
};
/* ------- Helper functions ------- */
/* Largest local work-group size the crypt kernel supports on this device
 * (queried via the shared autotune helper; no per-format limit applies). */
static size_t get_task_max_work_group_size()
{
	return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
}
/* Allocate host buffers and device memory for `gws` in-flight candidates
 * and bind the three kernel arguments (input hashes, output keys, salt).
 * Called by the autotuner for each trial global work size; release_clobj()
 * frees everything between trials. */
static void create_clobj(size_t gws, struct fmt_main *self)
{
	insize = sizeof(odf_password) * gws;
	outsize = sizeof(odf_hash) * gws;
	settingsize = sizeof(odf_salt);
	cracked_size = sizeof(*crypt_out) * gws;

	/* Host-side mirrors of the device buffers plus per-index key storage. */
	inbuffer = mem_calloc(1, insize);
	outbuffer = mem_alloc(outsize);
	saved_key = mem_calloc(gws, sizeof(*saved_key));
	crypt_out = mem_calloc(1, cracked_size);

	/// Allocate memory
	mem_in =
	    clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL,
	    &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem in");
	mem_setting =
	    clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize,
	    NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem setting");
	mem_out =
	    clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL,
	    &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem out");

	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
		&mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out),
		&mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting),
		&mem_setting), "Error while setting mem_salt kernel argument");
}
/* Free the device buffers and host mirrors created by create_clobj().
 * `crypt_out` doubles as the "allocated" flag, so a second call is a no-op. */
static void release_clobj(void)
{
	if (crypt_out) {
		HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
		HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
		HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");

		MEM_FREE(inbuffer);
		MEM_FREE(outbuffer);
		MEM_FREE(saved_key);
		MEM_FREE(crypt_out);
	}
}
/* Format teardown: undo reset()'s work once per completed autotune pass
 * (the `autotuned` counter guards against double release). */
static void done(void)
{
	if (autotuned) {
		release_clobj();

		HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
		HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");

		autotuned--;
	}
}
/* Format init: remember our fmt_main and prepare the chosen OpenCL device.
 * Kernel build and buffer setup are deferred to reset(). */
static void init(struct fmt_main *_self)
{
	self = _self;
	opencl_prepare_dev(gpu_id);
}
/* First-call setup: build the generic PBKDF2-HMAC-SHA1 kernel with buffer
 * sizes baked in as compile-time defines, then run the shared autotuner to
 * pick global/local work sizes. Subsequent calls are no-ops (`autotuned`). */
static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[64];

		snprintf(build_opts, sizeof(build_opts),
		         "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d",
		         (int)sizeof(inbuffer->v),
		         (int)sizeof(currentsalt.salt),
		         (int)sizeof(outbuffer->v));
		opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl",
		            gpu_id, build_opts);

		crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error);
		HANDLE_CLERROR(cl_error, "Error creating kernel");

		// Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self,
		                       create_clobj, release_clobj,
		                       sizeof(odf_password), 0, db);

		// Auto tune execution from shared/included code.
		autotune_run(self, 1, 0, 1000);
	}
}
/* Validate a "$odf$*..." ciphertext line field by field without modifying
 * the original (works on a strdup'ed copy). Accepts only cipher type 1
 * (AES), checksum type 0/1, key size 16/32, iv <= 16 bytes, salt <= 32
 * bytes and a hex content blob of at most 2048 hex digits. Returns 1 if
 * the line is parseable by get_salt()/get_binary(), else 0.
 * NOTE(review): the iterations field is only checked for presence, not
 * range — presumably any atoi()-able value is accepted; confirm upstream. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy;
	char *keeptr;
	char *p;
	int res;
	if (strncmp(ciphertext, "$odf$*", 6))
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += 6;
	if ((p = strtokm(ctcopy, "*")) == NULL)	/* cipher type */
		goto err;
	res = atoi(p);
	if (res != 1) {
		goto err;
	}
	if ((p = strtokm(NULL, "*")) == NULL)	/* checksum type */
		goto err;
	res = atoi(p);
	if (res != 0 && res != 1)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* iterations */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* key size */
		goto err;
	res = atoi(p);
	if (res != 16 && res != 32)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* checksum field (skipped) */
		goto err;
	/* checksum must be exactly key_size bytes of hex */
	if (hexlenl(p) != res * 2)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* iv length */
		goto err;
	res = atoi(p);
	if (res > 16)	/* must fit the 16-byte iv field in odf_cpu_salt */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* iv */
		goto err;
	if (hexlenl(p) != res * 2)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* salt length */
		goto err;
	res = atoi(p);
	if (res > 32)	/* must fit the 32-byte salt field */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* salt */
		goto err;
	if (hexlenl(p) != res * 2)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* something */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* content */
		goto err;
	res = strlen(p);
	/* content: even number of hex digits, max 1024 bytes */
	if (res > 2048 || res & 1)
		goto err;
	if (!ishexlc(p))
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}
/* Parse a valid()-checked ciphertext into an odf_cpu_salt: numeric fields
 * via atoi, hex fields decoded with the atoi16 nibble table. Returns a
 * pointer to a function-local static (standard JtR pattern — the caller
 * copies it; not reentrant). The checksum field is skipped here; it is
 * extracted separately by get_binary(). */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static odf_cpu_salt cs;
	ctcopy += 6;	/* skip over "$odf$*" */
	p = strtokm(ctcopy, "*");
	cs.cipher_type = atoi(p);
	p = strtokm(NULL, "*");
	cs.checksum_type = atoi(p);
	p = strtokm(NULL, "*");
	cs.iterations = atoi(p);
	p = strtokm(NULL, "*");
	cs.key_size = atoi(p);
	p = strtokm(NULL, "*");
	/* skip checksum field */
	p = strtokm(NULL, "*");
	cs.iv_length = atoi(p);
	p = strtokm(NULL, "*");
	for (i = 0; i < cs.iv_length; i++)
		cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	cs.salt_length = atoi(p);
	p = strtokm(NULL, "*");
	for (i = 0; i < cs.salt_length; i++)
		cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	p = strtokm(NULL, "*");
	/* decode content: stops at NUL or at the 1024-byte buffer limit */
	memset(cs.content, 0, sizeof(cs.content));
	for (i = 0; p[i * 2] && i < 1024; i++)
		cs.content[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	cs.content_length = i;
	MEM_FREE(keeptr);
	return (void *)&cs;
}
/* Extract the comparable binary from the ciphertext: skip four fields
 * (cipher type, checksum type, iterations, key size) and hex-decode the
 * first BINARY_SIZE (20) bytes of the fifth field, the stored checksum.
 * NOTE(review): only the leading 20 of up to 32 checksum bytes are kept —
 * presumably enough to compare against the SHA-256 in crypt_out; confirm. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE+1];
		ARCH_WORD dummy;	/* force word alignment of c[] */
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	ctcopy += 6;	/* skip over "$odf$*" */
	p = strtokm(ctcopy, "*");
	p = strtokm(NULL, "*");
	p = strtokm(NULL, "*");
	p = strtokm(NULL, "*");
	p = strtokm(NULL, "*");
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] =
		    (atoi16[ARCH_INDEX(*p)] << 4) |
		    atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	MEM_FREE(keeptr);
	return out;
}
/* Select the current salt: keep the CPU-side copy for the post-kernel AES
 * step and push the PBKDF2 parameters (salt bytes, iteration count, output
 * length) to the device-side settings buffer. */
static void set_salt(void *salt)
{
	cur_salt = (odf_cpu_salt*)salt;
	memcpy((char*)currentsalt.salt, cur_salt->salt, cur_salt->salt_length);
	currentsalt.length = cur_salt->salt_length;
	currentsalt.iterations = cur_salt->iterations;
	currentsalt.outlen = cur_salt->key_size;

	/* BUGFIX: "&currentsalt" had been mojibake'd into "¤tsalt"
	 * ("&curr" read as the HTML entity for U+00A4), which does not
	 * compile; restored the address-of expression. */
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
		CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL),
	    "Copy salt to gpu");
}
/* Partial-hash accessors for the cracker's hash tables: each masks the
 * first 32-bit word of the computed hash to the table's bucket width. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
#undef set_key
/* Store candidate password `key` at slot `index`, truncated to
 * PLAINTEXT_LENGTH and always NUL-terminated. */
static void set_key(char *key, int index)
{
	int saved_len = strlen(key);
	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}
/* Return the (possibly truncated) plaintext stored at slot `index`. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Hash all queued candidates:
 *   1. CPU: SHA-256 each password (the ODF "start key").
 *   2. GPU: PBKDF2-HMAC-SHA1 key derivation over the salt.
 *   3. CPU: AES-CBC-decrypt the stored content with the derived key and
 *      SHA-256 the plaintext into crypt_out[] for comparison. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for(index = 0; index < count; index++)
	{
		unsigned char hash[32];
		SHA256_CTX ctx;

		SHA256_Init(&ctx);
		SHA256_Update(&ctx, (unsigned char *)saved_key[index], strlen(saved_key[index]));
		SHA256_Final((unsigned char *)hash, &ctx);
		memcpy(inbuffer[index].v, hash, 32);
		inbuffer[index].length = 32;
	}

	/// Copy data to gpu
	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
		insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
	        "Copy data to gpu");

	/// Run kernel
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
		NULL, &global_work_size, lws, 0, NULL,
	        multi_profilingEvent[1]), "Run kernel");

	/// Read the result back
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
		outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back");

	if (ocl_autotune_running)
		return count;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for(index = 0; index < count; index++)
	{
		AES_KEY akey;
		unsigned char iv[16];
		SHA256_CTX ctx;
		unsigned char pt[1024];

		/* BUGFIX: previously copied 32 bytes out of the 16-byte
		 * cur_salt->iv field, reading past its end. AES-CBC only
		 * consumes a 16-byte IV. */
		memcpy(iv, cur_salt->iv, 16);
		memset(&akey, 0, sizeof(AES_KEY));
		/* BUGFIX: key length was hard-coded to 256 bits although
		 * valid() also accepts key_size 16 (AES-128); derive the bit
		 * count from the salt. */
		AES_set_decrypt_key((unsigned char*)outbuffer[index].v,
		                    cur_salt->key_size * 8, &akey);
		AES_cbc_encrypt(cur_salt->content, pt, cur_salt->content_length, &akey, iv, AES_DECRYPT);
		SHA256_Init(&ctx);
		SHA256_Update(&ctx, pt, cur_salt->content_length);
		SHA256_Final((unsigned char*)crypt_out[index], &ctx);
	}
	return count;
}
/* Quick scan: does any computed hash match the stored binary in its
   first ARCH_SIZE bytes?  cmp_one() confirms the full BINARY_SIZE. */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (memcmp(binary, crypt_out[i], ARCH_SIZE) == 0)
			return 1;
	}
	return 0;
}
/* Full-width comparison for a single candidate flagged by cmp_all(). */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* No further verification is needed beyond cmp_one(), so always match. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/*
* The format tests all have iteration count 1024.
* Just in case the iteration count is tunable, let's report it.
*/
static unsigned int iteration_count(void *salt)
{
odf_salt *my_salt;
my_salt = salt;
return (unsigned int) my_salt->iterations;
}
/* Format descriptor wiring this plugin into John's format registry. */
struct fmt_main fmt_opencl_odf_aes = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0, /* plaintext min length */
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		4, /* binary align */
		SALT_SIZE,
		4, /* salt align */
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			/* tunable cost, reported by iteration_count() */
			"iteration count",
		},
		tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL, /* salt_compare — not needed for this format */
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
cnn.c | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <assert.h>
#include <unistd.h>
#include <stdint.h>
#include <sys/time.h>
#include <string.h>
#include <inttypes.h>
// Include SSE intrinsics
#if defined(_MSC_VER)
#include <intrin.h>
#elif defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
#include <immintrin.h>
#include <x86intrin.h>
#endif
// Include OpenMP
#include <omp.h>
// Helper functions -----------------------------------------------------------
/*
 * Get a current timestamp with microsecond accuracy.  The absolute value
 * is not meaningful on its own; subtract two timestamps to measure the
 * elapsed time between them.
 */
static inline uint64_t timestamp_us() {
  struct timeval tv;
  gettimeofday(&tv, NULL);
  // Widen before multiplying: on 32-bit platforms `1000000L * tv_sec`
  // overflows `long` (tv_sec ~1.7e9 => product ~1.7e15 >> 2^31).
  return (uint64_t)tv.tv_sec * 1000000u + (uint64_t)tv.tv_usec;
}
// Vol ------------------------------------------------------------------------
// Volumes are used to represent the activations (i.e., state) between the
// different layers of the CNN. They all have three dimensions. The inter-
// pretation of their content depends on the layer that produced them. Before
// the first iteration, the Volume holds the data of the image we want to
// classify (the depth are the three color dimensions). After the last stage
// of the CNN, the Volume holds the probabilities that an image is part of
// a specific category.
/*
 * Represents a three-dimensional array of numbers, and its size. The numbers
 * at (x,y,d) are stored in array w at location ((v->sx * y)+x)*v->depth+d.
 */
typedef struct vol {
  uint64_t sx,sy,depth;  // width (x), height (y), channel count (d)
  double* w;             // flat storage, length sx*sy*depth, layout above
} vol_t;
/*
 * Get the value at a specific entry of the array.
 * (Original comment said "Set" — it was swapped with set_vol's.)
 */
static inline double get_vol(vol_t* v, int x, int y, int d) {
  return v->w[((v->sx * y)+x)*v->depth+d];
}
/*
 * Set the value at a specific entry of the array.
 * (Original comment said "Get" — it was swapped with get_vol's.)
 */
static inline void set_vol(vol_t* v, int x, int y, int d, double val) {
  v->w[((v->sx * y)+x)*v->depth+d] = val;
}
/*
 * Allocate a new sx*sy*d volume with every entry initialized to v.
 */
static vol_t* make_vol(int sx, int sy, int d, double v) {
  vol_t* out = (vol_t*)malloc(sizeof(struct vol));
  const int count = sx * sy * d;
  out->sx = sx;
  out->sy = sy;
  out->depth = d;
  out->w = (double*)malloc(sizeof(double) * count);
  // The layout is dense, so a single flat fill is equivalent to
  // visiting every (x, y, z) coordinate.
  for (int i = 0; i < count; i++)
    out->w[i] = v;
  return out;
}
/*
 * Copy the contents of one Volume to another (assuming same dimensions).
 * Returns dest.  The original body had no return statement at all, so
 * reading the result was undefined behavior; callers that ignore the
 * return value are unaffected by this fix.
 */
static vol_t* copy_vol(vol_t* dest, vol_t* src) {
  for (int x = 0; x < dest->sx; x++)
    for (int y = 0; y < dest->sy; y++)
      for (int z = 0; z < dest->depth; z++)
        set_vol(dest, x, y, z, get_vol(src, x, y, z));
  return dest;
}
/*
 * Deallocate the array: first the flat data buffer, then the header.
 */
void free_vol(vol_t* v) {
  free(v->w);
  free(v);
}
// A note about layers --------------------------------------------------------
/*
* What follows are the different layers of the CNN. You will not have to
* understand what these layers are actually doing. In general terms, each
* layer performs a "forward" operation on a batch of inputs. During this
* forward operation, the layer takes a set of input Volumes and transforms
* them into a set of output Volumes (one output for each input). What differs
* is the operation performed by each layer.
*
* In addition to the _forward function, each layer also provides a data
* structure, holding (fixed) parameters for that layer, a make_ function to
* allocate an instance of the layer with a particular set of parameters and
* a load function to load training data for that layer from a file. Note that
* you will not have to make any changes to any of these functions. The only
* function you need to consider is the _forward function.
*/
// Convolutional Layer --------------------------------------------------------
typedef struct conv_layer {
  // required
  int out_depth;         // number of filters == output channels
  int sx;                // filter width
  int in_depth;
  int in_sx;
  int in_sy;
  // optional
  int sy;                // filter height (set equal to sx)
  int stride;
  int pad;               // zero-padding on each border
  double l1_decay_mul;   // training-time decay factors; unused in forward
  double l2_decay_mul;
  // computed
  int out_sx;
  int out_sy;
  double bias;           // initial bias value used to fill `biases`
  vol_t* biases;         // 1x1xout_depth volume of per-filter biases
  vol_t** filters;       // out_depth filters, each sx*sy*in_depth
} conv_layer_t;
/* Allocate a conv layer and its (zero-initialized) filters and biases. */
conv_layer_t* make_conv_layer(int in_sx, int in_sy, int in_depth,
                              int sx, int filters, int stride, int pad) {
  conv_layer_t* l = (conv_layer_t*)malloc(sizeof(conv_layer_t));
  // required
  l->out_depth = filters;
  l->sx = sx;
  l->in_depth = in_depth;
  l->in_sx = in_sx;
  l->in_sy = in_sy;
  // optional
  l->sy = l->sx;        // square filters only
  l->stride = stride;
  l->pad = pad;
  l->l1_decay_mul = 0.0;
  l->l2_decay_mul = 1.0;
  // computed: standard conv output size formula
  l->out_sx = floor((l->in_sx + l->pad * 2 - l->sx) / l->stride + 1);
  l->out_sy = floor((l->in_sy + l->pad * 2 - l->sy) / l->stride + 1);
  l->filters = (vol_t**)malloc(sizeof(vol_t*)*filters);
  for (int i = 0; i < filters; i++) {
    l->filters[i] = make_vol(l->sx, l->sy, l->in_depth, 0.0);
  }
  l->bias = 0.0;
  l->biases = make_vol(1, 1, l->out_depth, l->bias);
  return l;
}
/*
 * Inner-product helper for conv_forward: accumulates the dot product of
 * one filter row with one input row (f_depth channels at the given base
 * indices) into `val` and returns the result.
 *
 * f_depth is the channel count of the volumes in this network: 3 for the
 * RGB input, 16 or 20 for the inner conv layers.  16 and 20 are both
 * multiples of 4, so a single AVX loop handling 4 doubles per iteration
 * replaces the two hand-unrolled copies the original spelled out; the
 * accumulation order (vector adds, then lane 0+1+2+3) is unchanged, so
 * results are bit-identical.
 *
 * V_w_sum / f_w_sum are scratch pointer parameters kept only so the
 * existing caller signature stays the same.
 */
double return_val(int f_depth, double* V_w, double* f_w, int V_w_index, int f_w_index, double* V_w_sum, double* f_w_sum, double val) {
  if (f_depth == 3) {
    // Too narrow for a 4-wide vector: do the three channels scalar.
    val += f_w[f_w_index] * V_w[V_w_index]
         + f_w[f_w_index+1] * V_w[V_w_index+1]
         + f_w[f_w_index+2] * V_w[V_w_index+2];
  }
  else if (f_depth == 16 || f_depth == 20) {
    __m256d sum = _mm256_setzero_pd();
    V_w_sum = V_w + V_w_index;
    f_w_sum = f_w + f_w_index;
    for (int i = 0; i < f_depth; i += 4) {
      __m256d v_vector = _mm256_loadu_pd(V_w_sum + i);
      __m256d f_vector = _mm256_loadu_pd(f_w_sum + i);
      sum = _mm256_add_pd(sum, _mm256_mul_pd(f_vector, v_vector));
    }
    // Horizontal reduction in the same left-to-right lane order as the
    // original `sum[0] + sum[1] + sum[2] + sum[3]`.
    double lanes[4];
    _mm256_storeu_pd(lanes, sum);
    val += lanes[0] + lanes[1] + lanes[2] + lanes[3];
  }
  return val;
}
/*
 * Convolution forward pass for batch entry 0 (this implementation reads
 * only in[0]/out[0]; the start/end batch range is ignored here and
 * parallelism is applied one image at a time by the caller).
 *
 * NOTE(review): the hard-coded `y = -2` / `x = -2` starting offsets
 * assume pad == 2 and stride == 1, which matches every conv layer built
 * in make_network() — confirm before reusing this layer elsewhere.
 */
void conv_forward(conv_layer_t* l, vol_t** in, vol_t** out, int start, int end) {
  int l_out_sy = l->out_sy;
  int l_out_sx = l->out_sx;
  vol_t* l_biases = l->biases;
  int l_out_depth = l->out_depth;
  // Scratch pointers handed to return_val (overwritten there before use).
  double* V_w_sum;
  double* f_w_sum;
  int y;
  int x;
  vol_t* V = in[0];
  vol_t* A = out[0];
  int V_sx = V->sx;
  int V_sy = V->sy;
  int V_depth = V->depth;
  double* V_w = V->w;
  int d;
  int ay;
  int ax;
  int fy;
  int fx;
  double val;
  int oy;
  int V_sum;
  int f_sum;
  int ox;
  int V_w_index;
  int f_w_index;
  int f_sx;
  int f_sy;
  int f_depth;
  double* f_w;
  double end_add;
  for(d = 0; d < l_out_depth; d++) {
    vol_t* f = l->filters[d];
    y = -2;  // == -pad for this network's conv layers
    f_sx= f->sx;
    f_sy = f->sy;
    f_depth = f->depth;
    f_w = f->w;
    end_add = l_biases->w[d];  // per-filter bias, added once per pixel
    //printf("%lf\n", end_add);
    for(ay = 0; ay < l_out_sy; y += 1, ay++) {
      x = -2;
      for(ax = 0; ax < l_out_sx; x += 1, ax++) {
        val = 0.0;
        // Accumulate filter window; rows/columns falling outside the
        // input act as zero padding (they are simply skipped).
        for(fy = 0; fy < f_sy; fy++) {
          oy = y + fy;
          if (oy > -1 && oy < V_sy) {
            V_sum = V_sx * oy;
            f_sum = f_sx * fy;
            for(fx = 0; fx < f_sx; fx++) {
              ox = x + fx;
              if(ox > -1 && ox < V_sx) {
                V_w_index = ((V_sum)+ox)*V_depth;
                f_w_index = ((f_sum)+fx)*f_depth;
                val = return_val(f_depth, V_w, f_w, V_w_index, f_w_index, V_w_sum, f_w_sum, val); // SIMD
              }
            }
          }
        }
        val += end_add;
        set_vol(A, ax, ay, d, val);
      }
    }
  }
}
/*
 * Load trained filter weights and biases for a conv layer from a text
 * file: a "sx sy depth filters" header followed by filter values (in
 * d,x,y,z order) and then one bias per filter.  Exits with a message on
 * a missing or malformed file instead of dereferencing a NULL FILE* or
 * silently loading garbage (fopen/fscanf were previously unchecked).
 */
void conv_load(conv_layer_t* l, const char* fn) {
  int sx, sy, depth, filters;
  FILE* fin = fopen(fn, "r");
  if (fin == NULL) {
    fprintf(stderr, "conv_load: cannot open %s\n", fn);
    exit(1);
  }
  if (fscanf(fin, "%d %d %d %d", &sx, &sy, &depth, &filters) != 4) {
    fprintf(stderr, "conv_load: malformed header in %s\n", fn);
    exit(1);
  }
  assert(sx == l->sx);
  assert(sy == l->sy);
  assert(depth == l->in_depth);
  assert(filters == l->out_depth);
  for(int d = 0; d < l->out_depth; d++)
    for (int x = 0; x < sx; x++)
      for (int y = 0; y < sy; y++)
        for (int z = 0; z < depth; z++) {
          double val;
          if (fscanf(fin, "%lf", &val) != 1) {
            fprintf(stderr, "conv_load: truncated weights in %s\n", fn);
            exit(1);
          }
          set_vol(l->filters[d], x, y, z, val);
        }
  for(int d = 0; d < l->out_depth; d++) {
    double val;
    if (fscanf(fin, "%lf", &val) != 1) {
      fprintf(stderr, "conv_load: truncated biases in %s\n", fn);
      exit(1);
    }
    set_vol(l->biases, 0, 0, d, val);
  }
  fclose(fin);
}
// Relu Layer -----------------------------------------------------------------
typedef struct relu_layer {
  // required
  int in_depth;
  int in_sx;
  int in_sy;
  // computed: ReLU is elementwise, so output dims equal input dims
  int out_depth;
  int out_sx;
  int out_sy;
} relu_layer_t;
/* Allocate a ReLU layer; it is elementwise, so output dims mirror input. */
relu_layer_t* make_relu_layer(int in_sx, int in_sy, int in_depth) {
  relu_layer_t* layer = (relu_layer_t*)malloc(sizeof(relu_layer_t));
  layer->in_sx = in_sx;
  layer->in_sy = in_sy;
  layer->in_depth = in_depth;
  layer->out_sx = in_sx;
  layer->out_sy = in_sy;
  layer->out_depth = in_depth;
  return layer;
}
/* Elementwise ReLU: out = max(in, 0), applied to every image in
   [start, end].  Keeps the original `< 0.0 ? 0.0 : x` form so NaN
   inputs pass through unchanged. */
void relu_forward(relu_layer_t* l, vol_t** in, vol_t** out, int start, int end) {
  const int n = l->in_sx * l->in_sy * l->in_depth;
  for (int b = start; b <= end; b++) {
    const double* src = in[b]->w;
    double* dst = out[b]->w;
    for (int i = 0; i < n; i++)
      dst[i] = (src[i] < 0.0) ? 0.0 : src[i];
  }
}
// Pool Layer -----------------------------------------------------------------
typedef struct pool_layer {
  // required
  int sx;          // pooling window width
  int in_depth;
  int in_sx;
  int in_sy;
  // optional
  int sy;          // pooling window height (set equal to sx)
  int stride;
  int pad;         // always 0 in make_pool_layer
  // computed
  int out_depth;   // pooling is per-channel: equals in_depth
  int out_sx;
  int out_sy;
} pool_layer_t;
/* Allocate a max-pool layer (square window, no padding). */
pool_layer_t* make_pool_layer(int in_sx, int in_sy, int in_depth,
                              int sx, int stride) {
  pool_layer_t* l = (pool_layer_t*)malloc(sizeof(pool_layer_t));
  // required
  l->sx = sx;
  l->in_depth = in_depth;
  l->in_sx = in_sx;
  l->in_sy = in_sy;
  // optional
  l->sy = l->sx;      // square window only
  l->stride = stride;
  l->pad = 0;
  // computed: pooling preserves depth, shrinks spatial dims
  l->out_depth = in_depth;
  l->out_sx = floor((l->in_sx + l->pad * 2 - l->sx) / l->stride + 1);
  l->out_sy = floor((l->in_sy + l->pad * 2 - l->sy) / l->stride + 1);
  return l;
}
/*
 * Max-pooling forward pass: for every image in [start, end], every
 * channel, and every output pixel, write the maximum of the
 * corresponding input window.  Window taps falling outside the input
 * are skipped.  The dead `n` counter the original incremented but never
 * read has been removed.
 */
void pool_forward(pool_layer_t* l, vol_t** in, vol_t** out, int start, int end) {
  for (int i = start; i <= end; i++) {
    vol_t* V = in[i];
    vol_t* A = out[i];
    int l_pad = -l->pad;
    int V_sx = V->sx;
    int V_sy = V->sy;
    int l_out_sy = l->out_sy;
    int l_out_sx = l->out_sx;
    int xy_stride = l->stride;
    int l_sx = l->sx;
    int l_sy = l->sy;
    int l_out_depth = l->out_depth;
    for (int d = 0; d < l_out_depth; d++) {
      int x = l_pad;
      for (int ax = 0; ax < l_out_sx; x += xy_stride, ax++) {
        int y = l_pad;
        for (int ay = 0; ay < l_out_sy; y += xy_stride, ay++) {
          // Sentinel below any realistic activation; it survives only
          // if the whole window lies outside the input.
          double a = -99999;
          for (int fx = 0; fx < l_sx; fx++) {
            for (int fy = 0; fy < l_sy; fy++) {
              int oy = y + fy;
              int ox = x + fx;
              if (oy >= 0 && oy < V_sy && ox >= 0 && ox < V_sx) {
                double v = get_vol(V, ox, oy, d);
                if (v > a) { a = v; }
              }
            }
          }
          set_vol(A, ax, ay, d, a);
        }
      }
    }
  }
}
// FC Layer -------------------------------------------------------------------
typedef struct fc_layer {
  // required
  int out_depth;         // number of neurons
  int in_depth;
  int in_sx;
  int in_sy;
  // optional
  double l1_decay_mul;   // training-time decay factors; unused in forward
  double l2_decay_mul;
  // computed
  int out_sx;            // FC output is 1x1xout_depth
  int out_sy;
  int num_inputs;        // in_sx * in_sy * in_depth, flattened
  double bias;           // initial bias value used to fill `biases`
  vol_t* biases;         // 1x1xout_depth per-neuron biases
  vol_t** filters;       // out_depth weight vectors of length num_inputs
} fc_layer_t;
/* Allocate a fully-connected layer with zero-initialized weights/biases. */
fc_layer_t* make_fc_layer(int in_sx, int in_sy, int in_depth,
                          int num_neurons) {
  fc_layer_t* l = (fc_layer_t*)malloc(sizeof(fc_layer_t));
  // required
  l->out_depth = num_neurons;
  l->in_depth = in_depth;
  l->in_sx = in_sx;
  l->in_sy = in_sy;
  // optional
  l->l1_decay_mul = 0.0;
  l->l2_decay_mul = 1.0;
  // computed: the input volume is flattened into one vector per neuron
  l->num_inputs = l->in_sx * l->in_sy * l->in_depth;
  l->out_sx = 1;
  l->out_sy = 1;
  l->filters = (vol_t**)malloc(sizeof(vol_t*)*num_neurons);
  for (int i = 0; i < l->out_depth; i++) {
    l->filters[i] = make_vol(1, 1, l->num_inputs, 0.0);
  }
  l->bias = 0.0;
  l->biases = make_vol(1, 1, l->out_depth, l->bias);
  return l;
}
/* Fully-connected forward pass: for each image in [start, end], each
   output neuron is the dot product of its weight vector with the
   flattened input, plus its bias.  Accumulation order matches the
   original, so results are bit-identical. */
void fc_forward(fc_layer_t* l, vol_t** in, vol_t** out, int start, int end) {
  const int n_out = l->out_depth;
  const int n_in = l->num_inputs;
  const double* bias = l->biases->w;
  for (int b = start; b <= end; b++) {
    const double* x = in[b]->w;
    double* y = out[b]->w;
    for (int i = 0; i < n_out; i++) {
      const double* w = l->filters[i]->w;
      double acc = 0.0;
      for (int d = 0; d < n_in; d++)
        acc += x[d] * w[d];
      y[i] = acc + bias[i];
    }
  }
}
/*
 * Load trained weights and biases for an FC layer from a text file:
 * a "num_inputs out_depth" header, then one weight vector per neuron,
 * then one bias per neuron.  Exits with a message on a missing or
 * malformed file instead of dereferencing a NULL FILE* or silently
 * loading garbage (fopen/fscanf were previously unchecked).
 */
void fc_load(fc_layer_t* l, const char* fn) {
  FILE* fin = fopen(fn, "r");
  int num_inputs;
  int out_depth;
  if (fin == NULL) {
    fprintf(stderr, "fc_load: cannot open %s\n", fn);
    exit(1);
  }
  if (fscanf(fin, "%d %d", &num_inputs, &out_depth) != 2) {
    fprintf(stderr, "fc_load: malformed header in %s\n", fn);
    exit(1);
  }
  assert(out_depth == l->out_depth);
  assert(num_inputs == l->num_inputs);
  for(int i = 0; i < l->out_depth; i++)
    for(int d = 0; d < l->num_inputs; d++) {
      double val;
      if (fscanf(fin, "%lf", &val) != 1) {
        fprintf(stderr, "fc_load: truncated weights in %s\n", fn);
        exit(1);
      }
      l->filters[i]->w[d] = val;
    }
  for(int i = 0; i < l->out_depth; i++) {
    double val;
    if (fscanf(fin, "%lf", &val) != 1) {
      fprintf(stderr, "fc_load: truncated biases in %s\n", fn);
      exit(1);
    }
    l->biases->w[i] = val;
  }
  fclose(fin);
}
// Softmax Layer --------------------------------------------------------------
// Maximum supported out_depth
#define MAX_ES 16
typedef struct softmax_layer {
  // required
  int in_depth;
  int in_sx;
  int in_sy;
  double* es;      // exp buffer sized out_depth; NOTE: softmax_forward
                   // uses a thread-local array instead and never reads it
  // computed
  int out_depth;   // flattened input size: in_sx * in_sy * in_depth
  int out_sx;      // softmax output is 1x1xout_depth
  int out_sy;
} softmax_layer_t;
/* Allocate a softmax layer over the flattened input. */
softmax_layer_t* make_softmax_layer(int in_sx, int in_sy, int in_depth) {
  softmax_layer_t* l = (softmax_layer_t*)malloc(sizeof(softmax_layer_t));
  // required
  l->in_depth = in_depth;
  l->in_sx = in_sx;
  l->in_sy = in_sy;
  // computed
  l->out_sx = 1;
  l->out_sy = 1;
  l->out_depth = l->in_sx * l->in_sy * l->in_depth;
  // NOTE(review): this buffer is never used by softmax_forward (which
  // keeps a stack-local array for thread safety) and is never freed.
  l->es = (double*)malloc(sizeof(double)*l->out_depth);
  return l;
}
/*
 * Numerically-stable softmax: subtract the max activation before
 * exponentiating so exp() cannot overflow, then normalize to sum to 1.
 */
void softmax_forward(softmax_layer_t* l, vol_t** in, vol_t** out, int start, int end) {
  // Stack-local scratch (not l->es) so concurrent OpenMP callers on the
  // same layer do not race on a shared buffer.
  double es[MAX_ES];
  int l_out_depth = l->out_depth;
  // Guard the fixed-size scratch: overflowing it would silently smash
  // the stack for any network with out_depth > MAX_ES.
  assert(l_out_depth <= MAX_ES);
  for (int j = start; j <= end; j++) {
    double* V_w = in[j]->w;
    double* A_w = out[j]->w;
    // compute max activation
    double amax = V_w[0];
    for(int i=1;i < l_out_depth; i++) {
      if(V_w[i] > amax) amax = V_w[i];
    }
    // compute exponentials (carefully to not blow up)
    double esum = 0.0;
    for(int i=0; i < l_out_depth; i++) {
      double e = exp(V_w[i] - amax);
      esum += e;
      es[i] = e;
    }
    // normalize and output to sum to one
    for(int i=0; i < l_out_depth; i++) {
      es[i] /= esum;
      A_w[i] = es[i];
    }
  }
}
// Neural Network -------------------------------------------------------------
/*
* This represents the CNN we will use in this project. It consists of 11
* layers, which means that there are 12 volumes of data (where the first one
* is the input data and the last one the classification result).
*/
#define LAYERS 11
typedef struct network {
  // v[k] holds the prototype activation volume entering layer k;
  // v[LAYERS] is the final classification output.
  vol_t* v[LAYERS+1];
  // Three conv -> relu -> pool stages, then FC and softmax.
  conv_layer_t* l0;
  relu_layer_t* l1;
  pool_layer_t* l2;
  conv_layer_t* l3;
  relu_layer_t* l4;
  pool_layer_t* l5;
  conv_layer_t* l6;
  relu_layer_t* l7;
  pool_layer_t* l8;
  fc_layer_t* l9;
  softmax_layer_t* l10;
} network_t;
/*
 * Instantiate our specific CNN for 32x32x3 (CIFAR-style) inputs:
 * three [conv 5x5 pad 2 stride 1 -> relu -> pool 2x2 stride 2] stages
 * (16, 20, 20 filters), then a 10-way FC layer and softmax.  Each
 * v[k+1] is sized from the layer that produces it.
 */
network_t* make_network() {
  network_t* net = (network_t*)malloc(sizeof(network_t));
  net->v[0] = make_vol(32, 32, 3, 0.0);
  net->l0 = make_conv_layer(32, 32, 3, 5, 16, 1, 2);
  net->v[1] = make_vol(net->l0->out_sx, net->l0->out_sy, net->l0->out_depth, 0.0);
  net->l1 = make_relu_layer(net->v[1]->sx, net->v[1]->sy, net->v[1]->depth);
  net->v[2] = make_vol(net->l1->out_sx, net->l1->out_sy, net->l1->out_depth, 0.0);
  net->l2 = make_pool_layer(net->v[2]->sx, net->v[2]->sy, net->v[2]->depth, 2, 2);
  net->v[3] = make_vol(net->l2->out_sx, net->l2->out_sy, net->l2->out_depth, 0.0);
  net->l3 = make_conv_layer(net->v[3]->sx, net->v[3]->sy, net->v[3]->depth, 5, 20, 1, 2);
  net->v[4] = make_vol(net->l3->out_sx, net->l3->out_sy, net->l3->out_depth, 0.0);
  net->l4 = make_relu_layer(net->v[4]->sx, net->v[4]->sy, net->v[4]->depth);
  net->v[5] = make_vol(net->l4->out_sx, net->l4->out_sy, net->l4->out_depth, 0.0);
  net->l5 = make_pool_layer(net->v[5]->sx, net->v[5]->sy, net->v[5]->depth, 2, 2);
  net->v[6] = make_vol(net->l5->out_sx, net->l5->out_sy, net->l5->out_depth, 0.0);
  net->l6 = make_conv_layer(net->v[6]->sx, net->v[6]->sy, net->v[6]->depth, 5, 20, 1, 2);
  net->v[7] = make_vol(net->l6->out_sx, net->l6->out_sy, net->l6->out_depth, 0.0);
  net->l7 = make_relu_layer(net->v[7]->sx, net->v[7]->sy, net->v[7]->depth);
  net->v[8] = make_vol(net->l7->out_sx, net->l7->out_sy, net->l7->out_depth, 0.0);
  net->l8 = make_pool_layer(net->v[8]->sx, net->v[8]->sy, net->v[8]->depth, 2, 2);
  net->v[9] = make_vol(net->l8->out_sx, net->l8->out_sy, net->l8->out_depth, 0.0);
  net->l9 = make_fc_layer(net->v[9]->sx, net->v[9]->sy, net->v[9]->depth, 10);
  net->v[10] = make_vol(net->l9->out_sx, net->l9->out_sy, net->l9->out_depth, 0.0);
  net->l10 = make_softmax_layer(net->v[10]->sx, net->v[10]->sy, net->v[10]->depth);
  net->v[11] = make_vol(net->l10->out_sx, net->l10->out_sy, net->l10->out_depth, 0.0);
  return net;
}
/*
* Free our specific CNN.
*/
void free_network(network_t* net) {
for (int i = 0; i < LAYERS+1; i++)
free_vol(net->v[i]);
free(net->l0);
free(net->l1);
free(net->l2);
free(net->l3);
free(net->l4);
free(net->l5);
free(net->l6);
free(net->l7);
free(net->l8);
free(net->l9);
free(net->l10);
free(net);
}
/*
* We organize data as "batches" of volumes. Each batch consists of a number of samples,
* each of which contains a volume for every intermediate layer. Say we have L layers
* and a set of N input images. Then batch[l][n] contains the volume at layer l for
* input image n.
*
* By using batches, we can process multiple images at once in each run of the forward
* functions of the different layers.
*/
typedef vol_t** batch_t;
/*
 * Allocate a batch for network old_net holding `size` samples: one
 * vol_t* per (layer, sample), each sized like the network's prototype
 * volume for that layer.
 */
batch_t* make_batch(network_t* old_net, int size) {
  batch_t* layers = (batch_t*)malloc(sizeof(vol_t**) * (LAYERS + 1));
  for (int layer = 0; layer <= LAYERS; layer++) {
    vol_t* proto = old_net->v[layer];
    layers[layer] = (vol_t**)malloc(sizeof(vol_t*) * size);
    for (int s = 0; s < size; s++)
      layers[layer][s] = make_vol(proto->sx, proto->sy, proto->depth, 0.0);
  }
  return layers;
}
/*
 * Free a previously allocated batch: every per-sample volume, then each
 * per-layer array, then the batch itself.
 */
void free_batch(batch_t* v, int size) {
  for (int layer = 0; layer <= LAYERS; layer++) {
    for (int s = 0; s < size; s++)
      free_vol(v[layer][s]);
    free(v[layer]);
  }
  free(v);
}
/*
* Apply our network to a specific batch of inputs. The batch has to be given
* as input to v and start/end are the first and the last image in that batch
* to process (start and end are inclusive).
*/
// uint64_t conv_l1_time = 0;
// uint64_t relu_l1_time = 0;
// uint64_t pool_l1_time = 0;
// uint64_t conv_l2_time = 0;
// uint64_t relu_l2_time = 0;
// uint64_t pool_l2_time = 0;
// uint64_t conv_l3_time = 0;
// uint64_t relu_l3_time = 0;
// uint64_t pool_l3_time = 0;
// uint64_t fc_time = 0;
// uint64_t softmax_time = 0;
// uint64_t total_conv_time = 0;
// uint64_t total_relu_time = 0;
// uint64_t total_pool_time = 0;
/*
 * Run images [start, end] of batch v through all 11 layers.  v[k] holds
 * the activations entering layer k; v[k+1] receives that layer's output,
 * so v[11] ends up with the softmax class probabilities.
 */
void net_forward(network_t* net, batch_t* v, int start, int end) {
  conv_forward(net->l0, v[0], v[1], start, end);
  relu_forward(net->l1, v[1], v[2], start, end);
  pool_forward(net->l2, v[2], v[3], start, end);
  conv_forward(net->l3, v[3], v[4], start, end);
  relu_forward(net->l4, v[4], v[5], start, end);
  pool_forward(net->l5, v[5], v[6], start, end);
  conv_forward(net->l6, v[6], v[7], start, end);
  relu_forward(net->l7, v[7], v[8], start, end);
  pool_forward(net->l8, v[8], v[9], start, end);
  fc_forward(net->l9, v[9], v[10], start, end);
  softmax_forward(net->l10, v[10], v[11], start, end);
}
/*
* Putting everything together: Take a set of n input images as 3-dimensional
* Volumes and process them using the CNN in batches of 1. Then look at the
* output (which is a set of 10 labels, each of which tells us the likelihood
* of a specific category) and classify the image as a cat iff the likelihood
* of "cat" is larger than 50%. Writes the cat likelihood for all images into
* an output array (0 = definitely no cat, 1 = definitely cat).
*/
#define CAT_LABEL 3
/*
 * Classify n input images: run each through the CNN (batch size 1) and
 * write the "cat" class probability into output[i].
 *
 * Parallelization: each OpenMP thread owns a private single-image batch
 * and the loop iterations are disjoint in `input`/`output`, so no
 * further synchronization is needed.  The layer parameters in `net` are
 * only read.
 *
 * The ~90 lines of commented-out batching/timing experiments that
 * previously surrounded this implementation have been removed; the live
 * code is unchanged.
 */
void net_classify_cats(network_t* net, vol_t** input, double* output, int n) {
  #pragma omp parallel
  {
    batch_t* batch = make_batch(net, 1);
    #pragma omp for
    for (int i = 0; i < n; i += 1) {
      copy_vol(batch[0][0], input[i]);
      net_forward(net, batch, 0, 0);
      // batch[11] holds the softmax output; CAT_LABEL indexes the class.
      output[i] = batch[11][0]->w[CAT_LABEL];
    }
    free_batch(batch, 1);
  }
}
// IGNORE EVERYTHING BELOW THIS POINT -----------------------------------------
// Including C files in other C files is very bad style and should be avoided
// in any real application. We do it here since we want everything that you
// may edit to be in one file, without having to fix the interfaces between
// the different components of the system.
#include "util.c"
#include "main.c" |
VerletClusterListsTest.h | /**
* @file VerletClusterListsTest.h
* @author nguyen
* @date 21.10.18
*/
#pragma once
#include <gtest/gtest.h>
#include "AutoPasTestBase.h"
#include "autopas/autopasIncludes.h"
#include "autopas/cells/FullParticleCell.h"
#include "autopas/containers/verletClusterLists/traversals/VerletClustersColoringTraversal.h"
#include "autopas/particles/Particle.h"
#include "autopas/utils/WrapOpenMP.h"
#include "mocks/MockFunctor.h"
#include "mocks/MockVerletLists.h"
#include "testingHelpers/RandomGenerator.h"
#include "testingHelpers/commonTypedefs.h"
/// Test fixture for VerletClusterLists; all shared state lives in AutoPasTestBase.
class VerletClusterListsTest : public AutoPasTestBase {};
#if defined(AUTOPAS_OPENMP)
/// Functor that records, per color and per OpenMP thread, which particles each
/// thread touched during a colored traversal.  A test can then verify that no
/// two threads share a particle within the same color.
class CollectParticlesPerThreadFunctor
    : public autopas::Functor<autopas::Particle, autopas::FullParticleCell<autopas::Particle>> {
public:
  /// Color currently being processed; threadprivate so every thread carries
  /// its own copy, updated via nextColor() inside the parallel region.
  static int _currentColor;
#pragma omp threadprivate(_currentColor)
  /// [color][thread] -> set of particles that thread touched in that color.
  std::array<std::vector<std::set<Particle *>>, 8> _particlesPerThreadPerColor;
public:
  CollectParticlesPerThreadFunctor() : Functor(0) {}
  /// Size each color's bookkeeping vector to the maximum thread count.
  void initTraversal() override {
    for (int i = 0; i < 8; i++) {
      _particlesPerThreadPerColor[i].resize(autopas::autopas_get_max_threads());
    }
  }
  /// Record both interaction partners under (current color, current thread).
  void AoSFunctor(Particle &i, Particle &j, bool newton3) override {
    auto threadNum = autopas::autopas_get_thread_num();
    _particlesPerThreadPerColor[_currentColor][threadNum].insert(&i);
    _particlesPerThreadPerColor[_currentColor][threadNum].insert(&j);
  }
  bool isRelevantForTuning() override { return false; }
  bool allowsNewton3() override { return true; }
  bool allowsNonNewton3() override { return true; }
  /// Called by the traversal hook below whenever the active color changes.
  static void nextColor(int newColor) { _currentColor = newColor; }
};
int CollectParticlesPerThreadFunctor::_currentColor = 0;
/// Coloring traversal that invokes a user-supplied callback whenever the
/// traversal switches to a new color, so the functor above can tag its records.
class ColoringTraversalWithColorChangeNotify
    : public autopas::VerletClustersColoringTraversal<FPCell, CollectParticlesPerThreadFunctor,
                                                      autopas::DataLayoutOption::aos, true> {
public:
  ColoringTraversalWithColorChangeNotify(CollectParticlesPerThreadFunctor *functor,
                                         std::function<void(int)> whenColorChanges)
      : autopas::VerletClustersColoringTraversal<FPCell, CollectParticlesPerThreadFunctor,
                                                 autopas::DataLayoutOption::aos, true>(functor) {
    _whenColorChanges = std::move(whenColorChanges);
  }
  /// Hook from the base traversal; forwards the new color to the callback.
  void notifyColorChange(unsigned long newColor) override { _whenColorChanges(newColor); }
private:
  std::function<void(int)> _whenColorChanges;
};
#endif |
convolution_1x1_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// 1x1 stride-1 convolution, pack4 layout.
// A 1x1/s1 convolution is exactly a GEMM over channels: collapse the
// spatial dimensions (w x h -> (w*h) x 1) on a shallow copy of the input
// and delegate to the shared pack4 im2col/sgemm kernel.
static void conv1x1s1_sgemm_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    Mat bottom_im2col = bottom_blob;
    bottom_im2col.w = bottom_blob.w * bottom_blob.h;
    bottom_im2col.h = 1;

    im2col_sgemm_pack4_neon(bottom_im2col, top_blob, kernel, _bias, opt);
}
// 1x1 stride-2 convolution, pack4 layout: first gather every second input
// element in both dimensions into a shrunken blob, then run the stride-1
// sgemm path on it.
static void conv1x1s2_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // Floats to skip after finishing a row: the unread tail of the current
    // row (w - 2*outw) plus one whole skipped row (w), times 4 floats per
    // pack4 element — this lands r0 on the row two below.
    const int tailstep = (w - 2 * outw + w) * 4;

    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < channels; p++)
    {
        const float* r0 = bottom_blob.channel(p);
        float* outptr = bottom_blob_shrinked.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // Copy one pack4 element, then advance two elements (8
                // floats) on the input for the horizontal stride of 2.
                float32x4_t _v = vld1q_f32(r0);
                vst1q_f32(outptr, _v);

                r0 += 8;
                outptr += 4;
            }

            r0 += tailstep;
        }
    }

    conv1x1s1_sgemm_pack4_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
ex5.c | /*
*
* ** PROGRAM: A simple serial producer/consumer program
* **
* ** One function generates (i.e. produces) an array of random values.
* ** A second functions consumes that array and sums it.
* **
* ** HISTORY: Written by Tim Mattson, April 2007.
* */
#include <omp.h>
#ifdef APPLE
#include <stdlib.h>
#else
#include <malloc.h>
#endif
#include <stdio.h>
#define N 10000
/* Some random number constants from numerical recipies */
#define SEED 2531
#define RAND_MULT 1366
#define RAND_ADD 150889
#define RAND_MOD 714025
int randy = SEED;
/* function to fill an array with random numbers */
/* Fill a[0..length-1] with pseudo-random doubles in [0, 1) using a linear
   congruential generator. Advances the file-global seed `randy`, so
   successive calls continue the same sequence.
   NOTE(review): not thread-safe — `randy` is shared, unsynchronized state;
   in this example only one thread (the producer section) calls it. */
void fill_rand(int length, double *a)
{
    int i;
    for (i=0;i<length;i++) {
        randy = (RAND_MULT * randy + RAND_ADD) % RAND_MOD;
        *(a+i) = ((double) randy)/((double) RAND_MOD);
    }
}
/* function to sum the elements of an array */
/* Return the sum of the first `length` elements of `a`.
   A length of 0 (or negative) yields 0.0. */
double Sum_array(int length, double *a)
{
    double total = 0.0;
    int idx;
    for (idx = 0; idx < length; idx++) {
        total += a[idx];
    }
    return total;
}
/* Producer/consumer pairing via OpenMP sections: one section fills the
   array, sets a flag; the other spins on the flag, then sums the array.
   Returns 0 on success. */
int main()
{
    double *A, sum, runtime;
    int flag = 0, flg_temp;

    A = (double *)malloc(N*sizeof(double));

    runtime = omp_get_wtime();

    #pragma omp parallel sections
    {
        #pragma omp section
        {
            fill_rand(N, A); // Producer: fill an array of data
            #pragma omp flush
            /* BUG FIX: was "#pragma atomic write" — without the "omp"
               keyword the pragma is silently ignored and the store to
               flag is a plain (racy) write. */
            #pragma omp atomic write
            flag = 1;
            #pragma omp flush(flag)
        }
        #pragma omp section
        {
            #pragma omp flush(flag)
            /* Spin until the producer publishes flag = 1. */
            while(1)
            {
                #pragma omp flush(flag)
                /* BUG FIX: was "#pragma atomic read" (missing "omp"). */
                #pragma omp atomic read
                flg_temp = flag;
                if(flg_temp==1)break;
            }
            #pragma omp flush
            sum = Sum_array(N, A); // Consumer: sum the array
        }
    }

    runtime = omp_get_wtime() - runtime;

    printf(" In %f seconds, The sum is %f \n",runtime,sum);

    free(A);  /* fix: buffer was leaked */
    return 0;
}
|
GB_unop__identity_uint8_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint8_uint32)
// op(A') function: GB (_unop_tran__identity_uint8_uint32)
// C type: uint8_t
// A type: uint32_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = (uint8_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = (uint8_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (uint8_t) Ax [p] for all anz entries, in parallel.
// Handles both the full/sparse case (Ab == NULL: every slot present) and
// the bitmap case (Ab [p] selects which slots hold entries).
GrB_Info GB (_unop_apply__identity_uint8_uint32)
(
    uint8_t *Cx,                // Cx and Ax may be aliased
    const uint32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full or sparse: apply to every position
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint32_t aij = Ax [p] ;
            uint8_t z = (uint8_t) aij ;   // typecast uint32_t -> uint8_t
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;       // slot p holds no entry
            uint32_t aij = Ax [p] ;
            uint8_t z = (uint8_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (cast (A')): transpose A while typecasting uint32_t -> uint8_t.
// The entire loop nest lives in the shared template, which is specialized
// here through the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_uint8_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,     // per-task workspaces from the caller
    const int64_t *restrict A_slice,   // how A is partitioned across tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__bget_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bget_int8
// A.*B function (eWiseMult): GB_AemultB__bget_int8
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bget_int8
// C+=b function (dense accum): GB_Cdense_accumb__bget_int8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bget_int8
// C=scalar+B GB_bind1st__bget_int8
// C=scalar+B' GB_bind1st_tran__bget_int8
// C=A+scalar GB_bind2nd__bget_int8
// C=A'+scalar GB_bind2nd_tran__bget_int8
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = GB_BITGET (aij, bij, int8_t, 8)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_BITGET (x, y, int8_t, 8) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BGET || GxB_NO_INT8 || GxB_NO_BGET_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled: dense C += A+B is only generated when the op is
// MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV — BGET is not one of
// them, so "(none)" is the generator's placeholder name.
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense. The loop nest comes from the
// shared template, specialized for int8_t and the BGET operator via the
// GB_* macros defined above.
GrB_Info GB_Cdense_ewise3_noaccum__bget_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// ek_slice task partition (kfirst/klast/pstart arrays) prepared by the
// caller for ntasks tasks on nthreads threads.
GrB_Info GB_Cdense_accumB__bget_int8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed type-erased via p_bwork) into a
// dense matrix C.
GrB_Info GB_Cdense_accumb__bget_int8
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the inner block always returns first (generator artifact)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled: no specialized column-scale (A*D) kernel is generated for the
// BGET operator; the generic path handles it. "(none)" is the generator's
// placeholder name.
GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled: no specialized row-scale (D*B) kernel is generated for the
// BGET operator; the generic path handles it.
// FIX: the placeholder name was "(node)", a typo for the "(none)"
// placeholder used by every other disabled kernel in these generated files
// (and by the file's own header comment for colscale).
GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Workspace cleanup shared by GB_AaddB / GB_AemultB below: frees the
// ek_slice partitions of M, A, and B (each free is NULL-safe, so it is
// valid even when a slice was never allocated).
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B, with the BGET operator on int8_t. The
// pattern of C is the set union of A and B. The slice pointers are filled
// in (if needed) by the included template and released by GB_FREE_ALL.
GrB_Info GB_AaddB__bget_int8
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, with the BGET operator on int8_t.
// The pattern of C is the set intersection of A and B.
GrB_Info GB_AemultB__bget_int8
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = BITGET (x, Bx [p]): apply the operator with the scalar x bound
// as the first operand, elementwise over B. Type-erased pointers are cast
// back to int8_t. GBB(Bb,p) is true when slot p holds an entry (always
// true when Bb is NULL, i.e. B is not bitmap).
GrB_Info GB_bind1st__bget_int8
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;   // skip absent bitmap entries
        int8_t bij = Bx [p] ;
        Cx [p] = GB_BITGET (x, bij, int8_t, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = BITGET (Ax [p], y): apply the operator with the scalar y bound
// as the second operand, elementwise over A. Mirror image of GB_bind1st.
GrB_Info GB_bind2nd__bget_int8
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;   // skip absent bitmap entries
        int8_t aij = Ax [p] ;
        Cx [p] = GB_BITGET (aij, y, int8_t, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = GB_BITGET (x, aij, int8_t, 8) ; \
}
// C = op (x, A'): transpose A and apply the operator with scalar x bound
// as the first operand; the loop nest comes from GB_unop_transpose.c,
// driven by the GB_CAST_OP macro redefined just above.
GrB_Info GB_bind1st_tran__bget_int8
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for the rest of the file (generator artifact: the
    // redefinition here is identical to the one above)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = GB_BITGET (aij, y, int8_t, 8) ; \
}
// C = op (A', y): transpose A and apply the operator with scalar y bound
// as the second operand; loop nest from GB_unop_transpose.c via the
// GB_CAST_OP macro redefined just above.
GrB_Info GB_bind2nd_tran__bget_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
DRB044-adi-tile-no.c | /**
* adi.c: This file is part of the PolyBench/C 3.2 test suite.
* Alternating Direction Implicit solver with tiling and nested SIMD.
*
* Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://polybench.sourceforge.net
* License: /LICENSE.OSU.txt
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include "polybench/polybench.h"
/* Include benchmark-specific header. */
/* Default data type is double, default size is 10x1024x1024. */
#include "polybench/adi.h"
/* Array initialization. */
/* Array initialization: fill X, A, B with deterministic values derived from
   their indices, using 16x16 tiled loops (auto-generated tiling; the inline
   conditional expressions are macro-expanded ceiling/floor divisions).
   NOTE(review): the inner "#pragma omp parallel for" directives create
   nested parallel regions; unless nested parallelism is enabled they run
   serially inside the outermost region. */
static void init_array(int n,double X[500 + 0][500 + 0],double A[500 + 0][500 + 0],double B[500 + 0][500 + 0])
{
  //int i;
  //int j;
{
    int c1;
    int c3;
    int c2;
    int c4;
    if (n >= 1) {
/* tile indices c1 (rows) and c2 (cols), each covering ceil(n/16) tiles */
#pragma omp parallel for private(c1 ,c4 ,c2 ,c3 )
      for (c1 = 0; c1 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c1++) {
#pragma omp parallel for private(c2 ,c4 ,c3 )
        for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {
/* element indices c3, c4 inside the current 16x16 tile, clipped to n-1 */
#pragma omp parallel for private(c3 ,c4 )
          for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < n + -1?16 * c1 + 15 : n + -1)); c3++) {
#pragma omp parallel for private(c4 )
            for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c4++) {
              X[c3][c4] = (((double )c3) * (c4 + 1) + 1) / n;
              A[c3][c4] = (((double )c3) * (c4 + 2) + 2) / n;
              B[c3][c4] = (((double )c3) * (c4 + 3) + 3) / n;
            }
          }
        }
      }
    }
  }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code. Must scan the entire live-out data: write the n x n block of X
   to stderr ("%0.2lf " per value), breaking the line whenever the flat
   index (row * 500 + col) is a multiple of 20, and ending with a newline.
   Can be used also to check the correctness of the output. */
static void print_array(int n,double X[500 + 0][500 + 0])
{
  int row;
  int col;

  for (row = 0; row < n; row++) {
    for (col = 0; col < n; col++) {
      fprintf(stderr,"%0.2lf ",X[row][col]);
      if ((row * 500 + col) % 20 == 0)
        fprintf(stderr,"\n");
    }
  }
  fprintf(stderr,"\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel: Alternating Direction Implicit solver,
   auto-tiled (16x16). Per time step: a row-direction sweep (forward
   elimination on B and X along columns, then back substitution on X),
   normalization of the last column, then the symmetric column-direction
   sweep and normalization of the last row. The macro-expanded conditional
   expressions are ceiling divisions computing the tile counts.
   The whole function will be timed, including the call and return. */
static void kernel_adi(int tsteps,int n,double X[500 + 0][500 + 0],double A[500 + 0][500 + 0],double B[500 + 0][500 + 0])
{
  //int t;
  //int i1;
  //int i2;
//#pragma scop
{
    int c0;
    int c2;
    int c8;
    int c9;
    int c15;
    if (n >= 1 && tsteps >= 1) {
      for (c0 = 0; c0 <= tsteps + -1; c0++) {
        if (n >= 2) {
/* sweep along dim 1: rows are independent, so parallelize over row tiles */
#pragma omp parallel for private(c2 ,c15 ,c9 ,c8 )
          for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {
/* forward elimination of B (sequential in c9, so c8/c9 stay serial) */
            for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {
              for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {
                for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
                  B[c15][c9] = B[c15][c9] - A[c15][c9] * A[c15][c9] / B[c15][c9 - 1];
                }
              }
            }
/* forward elimination of X */
            for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {
              for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {
                for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
                  X[c15][c9] = X[c15][c9] - X[c15][c9 - 1] * A[c15][c9] / B[c15][c9 - 1];
                }
              }
            }
/* back substitution (c9 counts from the high end via n - 2 - c9) */
            for (c8 = 0; c8 <= (((n + -3) * 16 < 0?((16 < 0?-((-(n + -3) + 16 + 1) / 16) : -((-(n + -3) + 16 - 1) / 16))) : (n + -3) / 16)); c8++) {
              for (c9 = 16 * c8; c9 <= ((16 * c8 + 15 < n + -3?16 * c8 + 15 : n + -3)); c9++) {
                for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
                  X[c15][n - c9 - 2] = (X[c15][n - 2 - c9] - X[c15][n - 2 - c9 - 1] * A[c15][n - c9 - 3]) / B[c15][n - 3 - c9];
                }
              }
            }
          }
        }
/* normalize the last column of X */
#pragma omp parallel for private(c2 ,c15 )
        for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {
#pragma omp parallel for
          for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
            X[c15][n - 1] = X[c15][n - 1] / B[c15][n - 1];
          }
        }
        if (n >= 2) {
/* symmetric sweep along dim 0: columns independent, rows sequential */
#pragma omp parallel for private(c2 ,c15 ,c9 ,c8 )
          for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {
            for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {
              for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {
                for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
                  B[c9][c15] = B[c9][c15] - A[c9][c15] * A[c9][c15] / B[c9 - 1][c15];
                }
              }
            }
            for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {
              for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {
                for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
                  X[c9][c15] = X[c9][c15] - X[c9 - 1][c15] * A[c9][c15] / B[c9 - 1][c15];
                }
              }
            }
            for (c8 = 0; c8 <= (((n + -3) * 16 < 0?((16 < 0?-((-(n + -3) + 16 + 1) / 16) : -((-(n + -3) + 16 - 1) / 16))) : (n + -3) / 16)); c8++) {
              for (c9 = 16 * c8; c9 <= ((16 * c8 + 15 < n + -3?16 * c8 + 15 : n + -3)); c9++) {
                for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
                  X[n - 2 - c9][c15] = (X[n - 2 - c9][c15] - X[n - c9 - 3][c15] * A[n - 3 - c9][c15]) / B[n - 2 - c9][c15];
                }
              }
            }
          }
        }
/* normalize the last row of X */
#pragma omp parallel for private(c2 ,c15 )
        for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {
#pragma omp parallel for
          for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
            X[n - 1][c15] = X[n - 1][c15] / B[n - 1][c15];
          }
        }
      }
    }
  }
//#pragma endscop
}
/* Driver: allocate the three 500x500 arrays on the heap, initialize them,
   time the ADI kernel, and print the runtime. */
int main(int argc,char **argv)
{
/* Retrieve problem size. */
  int n = 500;
  int tsteps = 10;
/* Variable declaration/allocation. */
  double (*X)[500 + 0][500 + 0];
  X = ((double (*)[500 + 0][500 + 0])(polybench_alloc_data(((500 + 0) * (500 + 0)),(sizeof(double )))));
  ;
  double (*A)[500 + 0][500 + 0];
  A = ((double (*)[500 + 0][500 + 0])(polybench_alloc_data(((500 + 0) * (500 + 0)),(sizeof(double )))));
  ;
  double (*B)[500 + 0][500 + 0];
  B = ((double (*)[500 + 0][500 + 0])(polybench_alloc_data(((500 + 0) * (500 + 0)),(sizeof(double )))));
  ;
/* Initialize array(s). */
  init_array(n, *X, *A, *B);
/* Start timer. */
  polybench_timer_start();
  ;
/* Run kernel. */
  kernel_adi(tsteps,n, *X, *A, *B);
/* Stop and print timer. */
  polybench_timer_stop();
  ;
  polybench_timer_print();
  ;
/* Prevent dead-code elimination. All live-out data must be printed
   by the function call in argument. The condition is intentionally
   never true at runtime, but the compiler cannot prove it. */
  if (argc > 42 && !strcmp(argv[0],""))
    print_array(n, *X);
/* Be clean. */
  free(((void *)X));
  ;
  free(((void *)A));
  ;
  free(((void *)B));
  ;
  return 0;
}
|
diagsm_x_bsr_u_col.c | #include "alphasparse/opt.h"
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
// Diagonal triangular solve, BSR, unit diagonal, column-major dense RHS:
// with a unit diagonal the solve reduces to y = alpha * x elementwise
// (presumably — the operation follows from the "diagsm ... u" kernel name;
// confirm against the dispatch table). `columns` RHS columns are processed
// in parallel; ldx/ldy are the leading dimensions of x and y.
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_BSR *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
    const ALPHA_INT num_thread = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for (ALPHA_INT c = 0; c < columns; ++c)
    {
        // rows * block_size = number of scalar rows in the BSR matrix
        for (ALPHA_INT r = 0; r < A->rows * A->block_size; ++r)
        {
            // y[c,r] = alpha * x[c,r]
            alpha_mul(y[index2(c, r, ldy)] , alpha , x[index2(c, r, ldx)]);
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
GB_unop__identity_int32_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__(none))
// op(A') function: GB (_unop_tran__identity_int32_int32)
// C type: int32_t
// A type: int32_t
// cast: int32_t cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
#if 0
// Disabled: the int32 -> int32 identity "apply" is a no-op copy, so the
// generator emits no specialized kernel ("(none)" placeholder); a plain
// memcpy path is used instead. Only the transpose variant below is live.
GrB_Info GB (_unop_apply__(none))
(
    int32_t *Cx,                // Cx and Ax may be aliased
    const int32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int32_t aij = Ax [p] ;
            int32_t z = aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int32_t aij = Ax [p] ;
            int32_t z = aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = A' with the int32 identity op (a typed transpose, no value change);
// the loop nest comes from the shared transpose template, specialized via
// the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_int32_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,     // per-task workspaces from the caller
    const int64_t *restrict A_slice,   // how A is partitioned across tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
|
fc3.c | /* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include "fc3.h"
#include <stdlib.h>
static void rotate_delta_fc2s(double (*rot_delta_fc2s)[3][3], const long i_atom,
const long j_atom,
const double (*delta_fc2s)[3][3],
const double (*site_sym_cart)[3][3],
const long *rot_map_sym, const long num_atom,
const long num_site_sym, const long num_disp);
static void tensor2_rotation(double rot_tensor[3][3], const double tensor[3][3],
const double r[3][3]);
static void tensor3_rotation(double *rot_tensor, const double *tensor,
const double *rot_cartesian);
static double tensor3_rotation_elem(const double *tensor, const double *r,
const long pos);
static void copy_permutation_symmetry_fc3_elem(double *fc3,
const double fc3_elem[27],
const long a, const long b,
const long c,
const long num_atom);
static void set_permutation_symmetry_fc3_elem(double *fc3_elem,
const double *fc3, const long a,
const long b, const long c,
const long num_atom);
static void set_permutation_symmetry_compact_fc3(
double *fc3, const long p2s[], const long s2pp[], const long nsym_list[],
const long perms[], const long n_satom, const long n_patom);
static void transpose_compact_fc3_type01(double *fc3, const long p2s[],
const long s2pp[],
const long nsym_list[],
const long perms[], const long n_satom,
const long n_patom, const long t_type);
static void transpose_compact_fc3_type2(double *fc3, const long p2s[],
const long s2pp[],
const long nsym_list[],
const long perms[], const long n_satom,
const long n_patom);
/* Distribute third-order force constants from a computed first atom
   ("source") to a symmetry-equivalent first atom ("target"): for every
   second/third atom pair (i, j), rotate the 3x3x3 tensor stored at
   (source, atom_mapping[i], atom_mapping[j]) by rot_cart and write it to
   (target, i, j). fc3 is a flat array of num_atom^3 tensors of 27 doubles. */
void fc3_distribute_fc3(double *fc3, const long target, const long source,
                        const long *atom_mapping, const long num_atom,
                        const double *rot_cart) {
    long i, j, adrs_out, adrs_in;

    for (i = 0; i < num_atom; i++) {
        for (j = 0; j < num_atom; j++) {
            /* 27 = 3*3*3 Cartesian components per atom triplet */
            adrs_out = (num_atom * num_atom * target + num_atom * i + j) * 27;
            adrs_in = (num_atom * num_atom * source +
                       num_atom * atom_mapping[i] + atom_mapping[j]) *
                      27;
            tensor3_rotation(fc3 + adrs_out, fc3 + adrs_in, rot_cart);
        }
    }
}
/* Build fc3 from finite-difference delta-fc2 data: for each atom pair
   (i, j), rotate the delta_fc2 tensors over all site symmetries and
   displacements, then contract them with the pseudo-inverse displacement
   matrix inv_U (row k, length num_site_sym*num_disp) to obtain the
   3x3x3 tensor fc3[i*num_atom + j]. Temporary rotated tensors are
   heap-allocated once and reused for every pair.
   NOTE(review): the malloc result is not checked for NULL. */
void fc3_rotate_delta_fc2(double (*fc3)[3][3][3],
                          const double (*delta_fc2s)[3][3], const double *inv_U,
                          const double (*site_sym_cart)[3][3],
                          const long *rot_map_syms, const long num_atom,
                          const long num_site_sym, const long num_disp) {
    long i_atoms, i, j, k, l, m, n;
    double(*rot_delta_fc2s)[3][3];

    rot_delta_fc2s =
        (double(*)[3][3])malloc(sizeof(double[3][3]) * num_site_sym * num_disp);

    /* flat pair index: i = first atom, j = second atom */
    for (i_atoms = 0; i_atoms < num_atom * num_atom; i_atoms++) {
        i = i_atoms / num_atom;
        j = i_atoms % num_atom;
        rotate_delta_fc2s(rot_delta_fc2s, i, j, delta_fc2s, site_sym_cart,
                          rot_map_syms, num_atom, num_site_sym, num_disp);
        for (k = 0; k < 3; k++) {
            for (l = 0; l < 3; l++) {
                for (m = 0; m < 3; m++) {
                    fc3[i_atoms][k][l][m] = 0;
                    /* contract over all (site symmetry, displacement) rows */
                    for (n = 0; n < num_site_sym * num_disp; n++) {
                        fc3[i_atoms][k][l][m] +=
                            inv_U[k * num_site_sym * num_disp + n] *
                            rot_delta_fc2s[n][l][m];
                    }
                }
            }
        }
    }

    free(rot_delta_fc2s);
    rot_delta_fc2s = NULL;
}
void fc3_set_permutation_symmetry_fc3(double *fc3, const long num_atom) {
    /* Symmetrize fc3 in place under simultaneous permutation of its three
       atom indices and the paired Cartesian indices.  Only sorted triplets
       i <= j <= k are visited; the averaged 3x3x3 block is scattered back to
       all six permuted locations (distinct sorted triplets touch disjoint
       sets of blocks, which is what keeps the parallel loop race-free). */
    double fc3_elem[27];
    long i, j, k;
#ifdef _OPENMP
#pragma omp parallel for private(j, k, fc3_elem)
#endif
    for (i = 0; i < num_atom; i++) {
        for (j = i; j < num_atom; j++) {
            for (k = j; k < num_atom; k++) {
                /* Average the six permutations of block (i, j, k) ... */
                set_permutation_symmetry_fc3_elem(fc3_elem, fc3, i, j, k,
                                                  num_atom);
                /* ... and write the average back to all six locations. */
                copy_permutation_symmetry_fc3_elem(fc3, fc3_elem, i, j, k,
                                                   num_atom);
            }
        }
    }
}
void fc3_set_permutation_symmetry_compact_fc3(
    double *fc3, const long p2s[], const long s2pp[], const long nsym_list[],
    const long perms[], const long n_satom, const long n_patom) {
    /* Public entry point for symmetrizing a compact fc3 array
       (shape (n_patom, n_satom, n_satom, 3, 3, 3)); simply delegates to the
       static worker of the same name. */
    set_permutation_symmetry_compact_fc3(fc3, p2s, s2pp, nsym_list, perms,
                                         n_satom, n_patom);
}
void fc3_transpose_compact_fc3(double *fc3, const long p2s[], const long s2pp[],
                               const long nsym_list[], const long perms[],
                               const long n_satom, const long n_patom,
                               const long t_type) {
    /* Dispatch on the requested index permutation of the compact fc3:
       t_type=0: dim[0] <-> dim[1]
       t_type=1: dim[0] <-> dim[2]
       t_type=2: dim[1] <-> dim[2] */
    switch (t_type) {
        case 0:
        case 1:
            transpose_compact_fc3_type01(fc3, p2s, s2pp, nsym_list, perms,
                                         n_satom, n_patom, t_type);
            break;
        case 2:
            transpose_compact_fc3_type2(fc3, p2s, s2pp, nsym_list, perms,
                                        n_satom, n_patom);
            break;
        default:
            /* Any other t_type is a no-op, as in the original logic. */
            break;
    }
}
static void rotate_delta_fc2s(double (*rot_delta_fc2s)[3][3], const long i_atom,
                              const long j_atom,
                              const double (*delta_fc2s)[3][3],
                              const double (*site_sym_cart)[3][3],
                              const long *rot_map_sym, const long num_atom,
                              const long num_site_sym, const long num_disp) {
    /* For every (displacement, site-symmetry) pair, look up the delta-fc2 of
       the symmetry-mapped atom pair and rotate it by the corresponding
       Cartesian site-symmetry operation. */
    long disp, sym, mapped_i, mapped_j;

    for (disp = 0; disp < num_disp; disp++) {
        for (sym = 0; sym < num_site_sym; sym++) {
            mapped_i = rot_map_sym[sym * num_atom + i_atom];
            mapped_j = rot_map_sym[sym * num_atom + j_atom];
            tensor2_rotation(rot_delta_fc2s[disp * num_site_sym + sym],
                             delta_fc2s[disp * num_atom * num_atom +
                                        mapped_i * num_atom + mapped_j],
                             site_sym_cart[sym]);
        }
    }
}
static void tensor2_rotation(double rot_tensor[3][3], const double tensor[3][3],
                             const double r[3][3]) {
    /* Second-rank Cartesian tensor rotation, rot = R . T . R^T:
       rot_tensor[a][b] = sum_{c,d} r[a][c] * r[b][d] * tensor[c][d]. */
    long a, b, c, d;
    double acc;

    for (a = 0; a < 3; a++) {
        for (b = 0; b < 3; b++) {
            acc = 0.0;
            for (c = 0; c < 3; c++) {
                for (d = 0; d < 3; d++) {
                    acc += r[a][c] * r[b][d] * tensor[c][d];
                }
            }
            rot_tensor[a][b] = acc;
        }
    }
}
static void tensor3_rotation(double *rot_tensor, const double *tensor,
                             const double *rot_cartesian) {
    /* Rotate a flattened 3x3x3 tensor; every one of the 27 output
       components is computed independently from the whole input tensor. */
    long elem = 0;
    while (elem < 27) {
        rot_tensor[elem] = tensor3_rotation_elem(tensor, rot_cartesian, elem);
        elem++;
    }
}
static double tensor3_rotation_elem(const double *tensor, const double *r,
                                    const long pos) {
    /* Component (l, m, n) of the rotated third-rank tensor, where pos is the
       flat index l*9 + m*3 + n:
       sum_{i,j,k} r[l][i] * r[m][j] * r[n][k] * tensor[i][j][k]. */
    const long l = pos / 9;
    const long m = (pos / 3) % 3;
    const long n = pos % 3;
    long i, j, k;
    double acc = 0.0;

    for (i = 0; i < 3; i++) {
        for (j = 0; j < 3; j++) {
            for (k = 0; k < 3; k++) {
                acc += r[l * 3 + i] * r[m * 3 + j] * r[n * 3 + k] *
                       tensor[i * 9 + j * 3 + k];
            }
        }
    }
    return acc;
}
static void copy_permutation_symmetry_fc3_elem(double *fc3,
                                               const double fc3_elem[27],
                                               const long a, const long b,
                                               const long c,
                                               const long num_atom) {
    /* Scatter the symmetrized 3x3x3 block fc3_elem into all six
       atom-permuted locations (abc, acb, bac, bca, cab, cba) of fc3,
       permuting the Cartesian indices in step with the atom indices. */
    long i, j, k, e;
    const long nn27 = num_atom * num_atom * 27;
    const long n27 = num_atom * 27;

    for (e = 0; e < 27; e++) {
        i = e / 9;
        j = (e / 3) % 3;
        k = e % 3;
        fc3[a * nn27 + b * n27 + c * 27 + i * 9 + j * 3 + k] = fc3_elem[e];
        fc3[a * nn27 + c * n27 + b * 27 + i * 9 + k * 3 + j] = fc3_elem[e];
        fc3[b * nn27 + a * n27 + c * 27 + j * 9 + i * 3 + k] = fc3_elem[e];
        fc3[b * nn27 + c * n27 + a * 27 + j * 9 + k * 3 + i] = fc3_elem[e];
        fc3[c * nn27 + a * n27 + b * 27 + k * 9 + i * 3 + j] = fc3_elem[e];
        fc3[c * nn27 + b * n27 + a * 27 + k * 9 + j * 3 + i] = fc3_elem[e];
    }
}
static void set_permutation_symmetry_fc3_elem(double *fc3_elem,
                                              const double *fc3, const long a,
                                              const long b, const long c,
                                              const long num_atom) {
    /* Average the six simultaneous atom/Cartesian index permutations of the
       (a, b, c) 3x3x3 block of fc3 into fc3_elem. */
    long i, j, k, e;
    const long nn27 = num_atom * num_atom * 27;
    const long n27 = num_atom * 27;

    for (e = 0; e < 27; e++) {
        i = e / 9;
        j = (e / 3) % 3;
        k = e % 3;
        fc3_elem[e] =
            (fc3[a * nn27 + b * n27 + c * 27 + i * 9 + j * 3 + k] +
             fc3[a * nn27 + c * n27 + b * 27 + i * 9 + k * 3 + j] +
             fc3[b * nn27 + a * n27 + c * 27 + j * 9 + i * 3 + k] +
             fc3[b * nn27 + c * n27 + a * 27 + j * 9 + k * 3 + i] +
             fc3[c * nn27 + a * n27 + b * 27 + k * 9 + i * 3 + j] +
             fc3[c * nn27 + b * n27 + a * 27 + k * 9 + j * 3 + i]) /
            6;
    }
}
static void set_permutation_symmetry_compact_fc3(
    double *fc3, const long p2s[], const long s2pp[], const long nsym_list[],
    const long perms[], const long n_satom, const long n_patom) {
    /* Symmetrize a compact fc3 in place under atom-index permutation.
       The compact storage keeps only the first-atom index in the primitive
       cell, so each permuted block must be relocated through the
       supercell->primitive maps: p2s (primitive -> supercell atom), s2pp
       (supercell -> primitive atom), nsym_list (translation that brings an
       atom into the primitive cell) and perms (atom permutation table of
       each translation). */
    /* fc3 shape=(n_patom, n_satom, n_satom, 3, 3, 3) */
    /* 1D indexing: */
    /* i * n_satom * n_satom * 27 + j * n_satom * 27 + */
    /* k * 27 + l * 9 + m * 3 + n */
    long i, j, k, l, m, n, i_p, j_p, k_p;
    long done_any;
    long i_trans_j, k_trans_j, i_trans_k, j_trans_k;
    long adrs[6];
    double fc3_elem[3][3][3];
    char *done;
    done = NULL;
    /* One "visited" flag per (primitive atom, atom, atom) block so each
       orbit of six permuted blocks is averaged exactly once.
       NOTE(review): the malloc result is not checked for NULL. */
    done = (char *)malloc(sizeof(char) * n_patom * n_satom * n_satom);
    for (i = 0; i < n_patom * n_satom * n_satom; i++) {
        done[i] = 0;
    }
    for (i_p = 0; i_p < n_patom; i_p++) {
        i = p2s[i_p];
        for (j = 0; j < n_satom; j++) {
            j_p = s2pp[j];
            /* Image of atom i under the translation that maps j into the
               primitive cell (and analogous maps below). */
            i_trans_j = perms[nsym_list[j] * n_satom + i];
            for (k = 0; k < n_satom; k++) {
                k_p = s2pp[k];
                k_trans_j = perms[nsym_list[j] * n_satom + k];
                i_trans_k = perms[nsym_list[k] * n_satom + i];
                j_trans_k = perms[nsym_list[k] * n_satom + j];
                /* Block addresses of the six atom permutations, each with
                   its first index relocated to the primitive cell: */
                /* ijk, ikj, jik, jki, kij, kji */
                adrs[0] = i_p * n_satom * n_satom + j * n_satom + k;
                adrs[1] = i_p * n_satom * n_satom + k * n_satom + j;
                adrs[2] =
                    j_p * n_satom * n_satom + i_trans_j * n_satom + k_trans_j;
                adrs[3] =
                    j_p * n_satom * n_satom + k_trans_j * n_satom + i_trans_j;
                adrs[4] =
                    k_p * n_satom * n_satom + i_trans_k * n_satom + j_trans_k;
                adrs[5] =
                    k_p * n_satom * n_satom + j_trans_k * n_satom + i_trans_k;
                /* Skip the whole orbit if any member was already handled. */
                done_any = 0;
                for (l = 0; l < 6; l++) {
                    if (done[adrs[l]]) {
                        done_any = 1;
                        break;
                    }
                }
                if (done_any) {
                    continue;
                }
                /* Mark the orbit as done and convert block addresses to
                   element offsets (27 doubles per block). */
                for (l = 0; l < 6; l++) {
                    done[adrs[l]] = 1;
                    adrs[l] *= 27;
                }
                /* Average the six permuted blocks; the Cartesian indices
                   are permuted in step with the atom indices. */
                for (l = 0; l < 3; l++) {
                    for (m = 0; m < 3; m++) {
                        for (n = 0; n < 3; n++) {
                            fc3_elem[l][m][n] =
                                fc3[adrs[0] + l * 9 + m * 3 + n];
                            fc3_elem[l][m][n] +=
                                fc3[adrs[1] + l * 9 + n * 3 + m];
                            fc3_elem[l][m][n] +=
                                fc3[adrs[2] + m * 9 + l * 3 + n];
                            fc3_elem[l][m][n] +=
                                fc3[adrs[3] + m * 9 + n * 3 + l];
                            fc3_elem[l][m][n] +=
                                fc3[adrs[4] + n * 9 + l * 3 + m];
                            fc3_elem[l][m][n] +=
                                fc3[adrs[5] + n * 9 + m * 3 + l];
                            fc3_elem[l][m][n] /= 6;
                        }
                    }
                }
                /* Write the average back to all six locations. */
                for (l = 0; l < 3; l++) {
                    for (m = 0; m < 3; m++) {
                        for (n = 0; n < 3; n++) {
                            fc3[adrs[0] + l * 9 + m * 3 + n] =
                                fc3_elem[l][m][n];
                            fc3[adrs[1] + l * 9 + n * 3 + m] =
                                fc3_elem[l][m][n];
                            fc3[adrs[2] + m * 9 + l * 3 + n] =
                                fc3_elem[l][m][n];
                            fc3[adrs[3] + m * 9 + n * 3 + l] =
                                fc3_elem[l][m][n];
                            fc3[adrs[4] + n * 9 + l * 3 + m] =
                                fc3_elem[l][m][n];
                            fc3[adrs[5] + n * 9 + m * 3 + l] =
                                fc3_elem[l][m][n];
                        }
                    }
                }
            }
        }
    }
    free(done);
    done = NULL;
}
static void transpose_compact_fc3_type01(double *fc3, const long p2s[],
                                         const long s2pp[],
                                         const long nsym_list[],
                                         const long perms[], const long n_satom,
                                         const long n_patom,
                                         const long t_type) {
    /* In-place transpose of a compact fc3 that exchanges dim[0] with one of
       the other atom dimensions.  Because dim[0] is stored in primitive-cell
       indexing, the partner block must be relocated through the
       translation/permutation maps (see set_permutation_symmetry_compact_fc3
       for the meaning of p2s, s2pp, nsym_list and perms). */
    /* Three types of index permutations */
    /* t_type=0: dim[0] <-> dim[1] */
    /* t_type=1: dim[0] <-> dim[2] */
    /* t_type=2: dim[1] <-> dim[2] */
    long i, j, k, l, m, n, i_p, j_p, i_trans, k_trans;
    long adrs, adrs_t;
    double fc3_elem[3][3][3];
    char *done;
    done = NULL;
    /* One flag per (primitive atom, atom) pair so each pair and its
       transposed partner are swapped exactly once.
       NOTE(review): the malloc result is not checked for NULL. */
    done = (char *)malloc(sizeof(char) * n_satom * n_patom);
    for (i = 0; i < n_satom * n_patom; i++) {
        done[i] = 0;
    }
    for (i_p = 0; i_p < n_patom; i_p++) {
        i = p2s[i_p];
        for (j = 0; j < n_satom; j++) {
            j_p = s2pp[j];
            if (!done[i_p * n_satom + j]) {
                /* (j, i) -- nsym_list[j] --> (j', i') */
                /* nsym_list[j] translates j to j' where j' is in */
                /* primitive cell. The same translation sends i to i' */
                /* where i' is not necessarily to be in primitive cell. */
                /* Thus, i' = perms[nsym_list[j] * n_satom + i] */
                i_trans = perms[nsym_list[j] * n_satom + i];
                /* Mark both the pair and its transposed partner as done. */
                done[i_p * n_satom + j] = 1;
                done[j_p * n_satom + i_trans] = 1;
                for (k = 0; k < n_satom; k++) {
                    k_trans = perms[nsym_list[j] * n_satom + k];
                    switch (t_type) {
                        case 0:
                            /* Swap blocks (i_p, j, k) <-> (j', i', k') while
                               transposing Cartesian axes l <-> m. */
                            adrs = (i_p * n_satom * n_satom + j * n_satom + k) *
                                   27;
                            adrs_t = (j_p * n_satom * n_satom +
                                      i_trans * n_satom + k_trans) *
                                     27;
                            /* Save the source block before overwriting. */
                            for (l = 0; l < 3; l++) {
                                for (m = 0; m < 3; m++) {
                                    for (n = 0; n < 3; n++) {
                                        fc3_elem[l][m][n] =
                                            fc3[adrs + l * 9 + m * 3 + n];
                                    }
                                }
                            }
                            /* Guard against self-paired blocks (adrs ==
                               adrs_t), where the copy would clobber data. */
                            if (adrs != adrs_t) {
                                for (l = 0; l < 3; l++) {
                                    for (m = 0; m < 3; m++) {
                                        for (n = 0; n < 3; n++) {
                                            fc3[adrs + l * 9 + m * 3 + n] =
                                                fc3[adrs_t + m * 9 + l * 3 + n];
                                        }
                                    }
                                }
                            }
                            for (l = 0; l < 3; l++) {
                                for (m = 0; m < 3; m++) {
                                    for (n = 0; n < 3; n++) {
                                        fc3[adrs_t + m * 9 + l * 3 + n] =
                                            fc3_elem[l][m][n];
                                    }
                                }
                            }
                            break;
                        case 1:
                            /* Swap blocks (i_p, k, j) <-> (j', k', i') while
                               transposing Cartesian axes l <-> n. */
                            adrs = (i_p * n_satom * n_satom + k * n_satom + j) *
                                   27;
                            adrs_t = (j_p * n_satom * n_satom +
                                      k_trans * n_satom + i_trans) *
                                     27;
                            for (l = 0; l < 3; l++) {
                                for (m = 0; m < 3; m++) {
                                    for (n = 0; n < 3; n++) {
                                        fc3_elem[l][m][n] =
                                            fc3[adrs + l * 9 + m * 3 + n];
                                    }
                                }
                            }
                            if (adrs != adrs_t) {
                                for (l = 0; l < 3; l++) {
                                    for (m = 0; m < 3; m++) {
                                        for (n = 0; n < 3; n++) {
                                            fc3[adrs + l * 9 + m * 3 + n] =
                                                fc3[adrs_t + n * 9 + m * 3 + l];
                                        }
                                    }
                                }
                            }
                            for (l = 0; l < 3; l++) {
                                for (m = 0; m < 3; m++) {
                                    for (n = 0; n < 3; n++) {
                                        fc3[adrs_t + n * 9 + m * 3 + l] =
                                            fc3_elem[l][m][n];
                                    }
                                }
                            }
                            break;
                    } /* end switch */
                }
            }
        }
    }
    free(done);
    done = NULL;
}
static void transpose_compact_fc3_type2(double *fc3, const long p2s[],
const long s2pp[],
const long nsym_list[],
const long perms[], const long n_satom,
const long n_patom) {
long j, k, l, m, n, i_p;
long adrs, adrs_t;
double fc3_elem[3][3][3];
for (i_p = 0; i_p < n_patom; i_p++) {
for (j = 0; j < n_satom; j++) {
for (k = j; k < n_satom; k++) { /* k >= j */
adrs = (i_p * n_satom * n_satom + j * n_satom + k) * 27;
adrs_t = (i_p * n_satom * n_satom + k * n_satom + j) * 27;
for (l = 0; l < 3; l++) {
for (m = 0; m < 3; m++) {
for (n = 0; n < 3; n++) {
fc3_elem[l][m][n] = fc3[adrs + l * 9 + m * 3 + n];
}
}
}
if (k != j) {
for (l = 0; l < 3; l++) {
for (m = 0; m < 3; m++) {
for (n = 0; n < 3; n++) {
fc3[adrs + l * 9 + m * 3 + n] =
fc3[adrs_t + l * 9 + n * 3 + m];
}
}
}
}
for (l = 0; l < 3; l++) {
for (m = 0; m < 3; m++) {
for (n = 0; n < 3; n++) {
fc3[adrs_t + l * 9 + n * 3 + m] = fc3_elem[l][m][n];
}
}
}
}
}
}
}
|
DRB037-truedepseconddimension-orig-yes.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISOIEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Only the outmost loop can be parallelized in this program.
The inner loop has true dependence.
Data race pair: b[i][j]@63:7 vs. b[i][j-1]@63:15
*/
#include <stdlib.h>
#include <stdio.h>
double b[1000][1000];
int main(int argc, char * argv[])
{
    /* DataRaceBench kernel DRB037: the second loop nest carries a true
       dependence along j (b[i][j] reads b[i][j-1]).  The data race this
       benchmark documents is intentional -- do not "fix" the pragmas. */
    int i, j;
    int n = 1000, m = 1000;
    int _ret_val_0;
    /* Initialization nest: iterations are fully independent, so both loop
       levels carry parallelization pragmas. */
    #pragma cetus private(i, j)
    #pragma loop name main#0
    #pragma cetus parallel
    #pragma omp parallel for private(i, j)
    for (i=0; i<n; i ++ )
    {
        #pragma cetus private(j)
        #pragma loop name main#0#0
        #pragma cetus parallel
        #pragma omp parallel for private(j)
        for (j=0; j<m; j ++ )
        {
            b[i][j]=(i+j);
        }
    }
    /* Second nest: within each row, b[i][j] depends on b[i][j-1], so the
       inner j loop must stay sequential; only the outer loop is marked
       parallel here. */
    #pragma cetus private(i, j)
    #pragma loop name main#1
    #pragma cetus parallel
    #pragma omp parallel for private(i, j)
    for (i=0; i<n; i ++ )
    {
        #pragma cetus private(j)
        #pragma loop name main#1#0
        for (j=1; j<m; j ++ )
        {
            b[i][j]=b[i][j-1];
        }
    }
    printf("b[500][500]=%f\n", b[500][500]);
    _ret_val_0=0;
    return _ret_val_0;
}
|
sort.c | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <assert.h>
#include <math.h>
#include <time.h>
#include <ParTI.h>
#include "sptensor.h"
/*************************************************
* PRIVATE FUNCTIONS
*************************************************/
static void spt_QuickSortIndex(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r);
static void spt_QuickSortAtMode(sptSparseTensor *tsr, sptNnzIndex const l, sptNnzIndex const r, sptIndex const mode);
static void spt_QuickSortIndexRowBlock(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, const sptElementIndex sk_bits);
static void spt_QuickSortIndexExceptSingleMode(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, sptIndex * mode_order);
static void spt_QuickSortIndexMorton3D(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, const sptElementIndex sb_bits);
static void spt_QuickSortIndexMorton4D(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, const sptElementIndex sb_bits);
static int spt_SparseTensorCompareAtMode(const sptSparseTensor *tsr1, sptNnzIndex const ind1, const sptSparseTensor *tsr2, sptNnzIndex const ind2, sptIndex const mode);
static int spt_SparseTensorCompareIndicesRowBlock(
const sptSparseTensor *tsr1,
sptNnzIndex loc1,
const sptSparseTensor *tsr2,
sptNnzIndex loc2,
const sptElementIndex sk_bits);
static int spt_SparseTensorCompareIndicesMorton3D(
const sptSparseTensor *tsr1,
uint64_t loc1,
const sptSparseTensor *tsr2,
uint64_t loc2);
static int spt_SparseTensorCompareIndicesMorton4D(
const sptSparseTensor *tsr1,
uint64_t loc1,
const sptSparseTensor *tsr2,
uint64_t loc2);
/* Mode order: X -> Y -> Z, x indices are sorted, y and z are Morton order sorted. */
static const uint32_t morton256_z[256] =
{
0x00000000,
0x00000001, 0x00000008, 0x00000009, 0x00000040, 0x00000041, 0x00000048, 0x00000049, 0x00000200,
0x00000201, 0x00000208, 0x00000209, 0x00000240, 0x00000241, 0x00000248, 0x00000249, 0x00001000,
0x00001001, 0x00001008, 0x00001009, 0x00001040, 0x00001041, 0x00001048, 0x00001049, 0x00001200,
0x00001201, 0x00001208, 0x00001209, 0x00001240, 0x00001241, 0x00001248, 0x00001249, 0x00008000,
0x00008001, 0x00008008, 0x00008009, 0x00008040, 0x00008041, 0x00008048, 0x00008049, 0x00008200,
0x00008201, 0x00008208, 0x00008209, 0x00008240, 0x00008241, 0x00008248, 0x00008249, 0x00009000,
0x00009001, 0x00009008, 0x00009009, 0x00009040, 0x00009041, 0x00009048, 0x00009049, 0x00009200,
0x00009201, 0x00009208, 0x00009209, 0x00009240, 0x00009241, 0x00009248, 0x00009249, 0x00040000,
0x00040001, 0x00040008, 0x00040009, 0x00040040, 0x00040041, 0x00040048, 0x00040049, 0x00040200,
0x00040201, 0x00040208, 0x00040209, 0x00040240, 0x00040241, 0x00040248, 0x00040249, 0x00041000,
0x00041001, 0x00041008, 0x00041009, 0x00041040, 0x00041041, 0x00041048, 0x00041049, 0x00041200,
0x00041201, 0x00041208, 0x00041209, 0x00041240, 0x00041241, 0x00041248, 0x00041249, 0x00048000,
0x00048001, 0x00048008, 0x00048009, 0x00048040, 0x00048041, 0x00048048, 0x00048049, 0x00048200,
0x00048201, 0x00048208, 0x00048209, 0x00048240, 0x00048241, 0x00048248, 0x00048249, 0x00049000,
0x00049001, 0x00049008, 0x00049009, 0x00049040, 0x00049041, 0x00049048, 0x00049049, 0x00049200,
0x00049201, 0x00049208, 0x00049209, 0x00049240, 0x00049241, 0x00049248, 0x00049249, 0x00200000,
0x00200001, 0x00200008, 0x00200009, 0x00200040, 0x00200041, 0x00200048, 0x00200049, 0x00200200,
0x00200201, 0x00200208, 0x00200209, 0x00200240, 0x00200241, 0x00200248, 0x00200249, 0x00201000,
0x00201001, 0x00201008, 0x00201009, 0x00201040, 0x00201041, 0x00201048, 0x00201049, 0x00201200,
0x00201201, 0x00201208, 0x00201209, 0x00201240, 0x00201241, 0x00201248, 0x00201249, 0x00208000,
0x00208001, 0x00208008, 0x00208009, 0x00208040, 0x00208041, 0x00208048, 0x00208049, 0x00208200,
0x00208201, 0x00208208, 0x00208209, 0x00208240, 0x00208241, 0x00208248, 0x00208249, 0x00209000,
0x00209001, 0x00209008, 0x00209009, 0x00209040, 0x00209041, 0x00209048, 0x00209049, 0x00209200,
0x00209201, 0x00209208, 0x00209209, 0x00209240, 0x00209241, 0x00209248, 0x00209249, 0x00240000,
0x00240001, 0x00240008, 0x00240009, 0x00240040, 0x00240041, 0x00240048, 0x00240049, 0x00240200,
0x00240201, 0x00240208, 0x00240209, 0x00240240, 0x00240241, 0x00240248, 0x00240249, 0x00241000,
0x00241001, 0x00241008, 0x00241009, 0x00241040, 0x00241041, 0x00241048, 0x00241049, 0x00241200,
0x00241201, 0x00241208, 0x00241209, 0x00241240, 0x00241241, 0x00241248, 0x00241249, 0x00248000,
0x00248001, 0x00248008, 0x00248009, 0x00248040, 0x00248041, 0x00248048, 0x00248049, 0x00248200,
0x00248201, 0x00248208, 0x00248209, 0x00248240, 0x00248241, 0x00248248, 0x00248249, 0x00249000,
0x00249001, 0x00249008, 0x00249009, 0x00249040, 0x00249041, 0x00249048, 0x00249049, 0x00249200,
0x00249201, 0x00249208, 0x00249209, 0x00249240, 0x00249241, 0x00249248, 0x00249249
};
// pre-shifted table for Y coordinates (1 bit to the left)
static const uint32_t morton256_y[256] = {
0x00000000,
0x00000002, 0x00000010, 0x00000012, 0x00000080, 0x00000082, 0x00000090, 0x00000092, 0x00000400,
0x00000402, 0x00000410, 0x00000412, 0x00000480, 0x00000482, 0x00000490, 0x00000492, 0x00002000,
0x00002002, 0x00002010, 0x00002012, 0x00002080, 0x00002082, 0x00002090, 0x00002092, 0x00002400,
0x00002402, 0x00002410, 0x00002412, 0x00002480, 0x00002482, 0x00002490, 0x00002492, 0x00010000,
0x00010002, 0x00010010, 0x00010012, 0x00010080, 0x00010082, 0x00010090, 0x00010092, 0x00010400,
0x00010402, 0x00010410, 0x00010412, 0x00010480, 0x00010482, 0x00010490, 0x00010492, 0x00012000,
0x00012002, 0x00012010, 0x00012012, 0x00012080, 0x00012082, 0x00012090, 0x00012092, 0x00012400,
0x00012402, 0x00012410, 0x00012412, 0x00012480, 0x00012482, 0x00012490, 0x00012492, 0x00080000,
0x00080002, 0x00080010, 0x00080012, 0x00080080, 0x00080082, 0x00080090, 0x00080092, 0x00080400,
0x00080402, 0x00080410, 0x00080412, 0x00080480, 0x00080482, 0x00080490, 0x00080492, 0x00082000,
0x00082002, 0x00082010, 0x00082012, 0x00082080, 0x00082082, 0x00082090, 0x00082092, 0x00082400,
0x00082402, 0x00082410, 0x00082412, 0x00082480, 0x00082482, 0x00082490, 0x00082492, 0x00090000,
0x00090002, 0x00090010, 0x00090012, 0x00090080, 0x00090082, 0x00090090, 0x00090092, 0x00090400,
0x00090402, 0x00090410, 0x00090412, 0x00090480, 0x00090482, 0x00090490, 0x00090492, 0x00092000,
0x00092002, 0x00092010, 0x00092012, 0x00092080, 0x00092082, 0x00092090, 0x00092092, 0x00092400,
0x00092402, 0x00092410, 0x00092412, 0x00092480, 0x00092482, 0x00092490, 0x00092492, 0x00400000,
0x00400002, 0x00400010, 0x00400012, 0x00400080, 0x00400082, 0x00400090, 0x00400092, 0x00400400,
0x00400402, 0x00400410, 0x00400412, 0x00400480, 0x00400482, 0x00400490, 0x00400492, 0x00402000,
0x00402002, 0x00402010, 0x00402012, 0x00402080, 0x00402082, 0x00402090, 0x00402092, 0x00402400,
0x00402402, 0x00402410, 0x00402412, 0x00402480, 0x00402482, 0x00402490, 0x00402492, 0x00410000,
0x00410002, 0x00410010, 0x00410012, 0x00410080, 0x00410082, 0x00410090, 0x00410092, 0x00410400,
0x00410402, 0x00410410, 0x00410412, 0x00410480, 0x00410482, 0x00410490, 0x00410492, 0x00412000,
0x00412002, 0x00412010, 0x00412012, 0x00412080, 0x00412082, 0x00412090, 0x00412092, 0x00412400,
0x00412402, 0x00412410, 0x00412412, 0x00412480, 0x00412482, 0x00412490, 0x00412492, 0x00480000,
0x00480002, 0x00480010, 0x00480012, 0x00480080, 0x00480082, 0x00480090, 0x00480092, 0x00480400,
0x00480402, 0x00480410, 0x00480412, 0x00480480, 0x00480482, 0x00480490, 0x00480492, 0x00482000,
0x00482002, 0x00482010, 0x00482012, 0x00482080, 0x00482082, 0x00482090, 0x00482092, 0x00482400,
0x00482402, 0x00482410, 0x00482412, 0x00482480, 0x00482482, 0x00482490, 0x00482492, 0x00490000,
0x00490002, 0x00490010, 0x00490012, 0x00490080, 0x00490082, 0x00490090, 0x00490092, 0x00490400,
0x00490402, 0x00490410, 0x00490412, 0x00490480, 0x00490482, 0x00490490, 0x00490492, 0x00492000,
0x00492002, 0x00492010, 0x00492012, 0x00492080, 0x00492082, 0x00492090, 0x00492092, 0x00492400,
0x00492402, 0x00492410, 0x00492412, 0x00492480, 0x00492482, 0x00492490, 0x00492492
};
// Pre-shifted table for x (2 bits to the left)
static const uint32_t morton256_x[256] = {
0x00000000,
0x00000004, 0x00000020, 0x00000024, 0x00000100, 0x00000104, 0x00000120, 0x00000124, 0x00000800,
0x00000804, 0x00000820, 0x00000824, 0x00000900, 0x00000904, 0x00000920, 0x00000924, 0x00004000,
0x00004004, 0x00004020, 0x00004024, 0x00004100, 0x00004104, 0x00004120, 0x00004124, 0x00004800,
0x00004804, 0x00004820, 0x00004824, 0x00004900, 0x00004904, 0x00004920, 0x00004924, 0x00020000,
0x00020004, 0x00020020, 0x00020024, 0x00020100, 0x00020104, 0x00020120, 0x00020124, 0x00020800,
0x00020804, 0x00020820, 0x00020824, 0x00020900, 0x00020904, 0x00020920, 0x00020924, 0x00024000,
0x00024004, 0x00024020, 0x00024024, 0x00024100, 0x00024104, 0x00024120, 0x00024124, 0x00024800,
0x00024804, 0x00024820, 0x00024824, 0x00024900, 0x00024904, 0x00024920, 0x00024924, 0x00100000,
0x00100004, 0x00100020, 0x00100024, 0x00100100, 0x00100104, 0x00100120, 0x00100124, 0x00100800,
0x00100804, 0x00100820, 0x00100824, 0x00100900, 0x00100904, 0x00100920, 0x00100924, 0x00104000,
0x00104004, 0x00104020, 0x00104024, 0x00104100, 0x00104104, 0x00104120, 0x00104124, 0x00104800,
0x00104804, 0x00104820, 0x00104824, 0x00104900, 0x00104904, 0x00104920, 0x00104924, 0x00120000,
0x00120004, 0x00120020, 0x00120024, 0x00120100, 0x00120104, 0x00120120, 0x00120124, 0x00120800,
0x00120804, 0x00120820, 0x00120824, 0x00120900, 0x00120904, 0x00120920, 0x00120924, 0x00124000,
0x00124004, 0x00124020, 0x00124024, 0x00124100, 0x00124104, 0x00124120, 0x00124124, 0x00124800,
0x00124804, 0x00124820, 0x00124824, 0x00124900, 0x00124904, 0x00124920, 0x00124924, 0x00800000,
0x00800004, 0x00800020, 0x00800024, 0x00800100, 0x00800104, 0x00800120, 0x00800124, 0x00800800,
0x00800804, 0x00800820, 0x00800824, 0x00800900, 0x00800904, 0x00800920, 0x00800924, 0x00804000,
0x00804004, 0x00804020, 0x00804024, 0x00804100, 0x00804104, 0x00804120, 0x00804124, 0x00804800,
0x00804804, 0x00804820, 0x00804824, 0x00804900, 0x00804904, 0x00804920, 0x00804924, 0x00820000,
0x00820004, 0x00820020, 0x00820024, 0x00820100, 0x00820104, 0x00820120, 0x00820124, 0x00820800,
0x00820804, 0x00820820, 0x00820824, 0x00820900, 0x00820904, 0x00820920, 0x00820924, 0x00824000,
0x00824004, 0x00824020, 0x00824024, 0x00824100, 0x00824104, 0x00824120, 0x00824124, 0x00824800,
0x00824804, 0x00824820, 0x00824824, 0x00824900, 0x00824904, 0x00824920, 0x00824924, 0x00900000,
0x00900004, 0x00900020, 0x00900024, 0x00900100, 0x00900104, 0x00900120, 0x00900124, 0x00900800,
0x00900804, 0x00900820, 0x00900824, 0x00900900, 0x00900904, 0x00900920, 0x00900924, 0x00904000,
0x00904004, 0x00904020, 0x00904024, 0x00904100, 0x00904104, 0x00904120, 0x00904124, 0x00904800,
0x00904804, 0x00904820, 0x00904824, 0x00904900, 0x00904904, 0x00904920, 0x00904924, 0x00920000,
0x00920004, 0x00920020, 0x00920024, 0x00920100, 0x00920104, 0x00920120, 0x00920124, 0x00920800,
0x00920804, 0x00920820, 0x00920824, 0x00920900, 0x00920904, 0x00920920, 0x00920924, 0x00924000,
0x00924004, 0x00924020, 0x00924024, 0x00924100, 0x00924104, 0x00924120, 0x00924124, 0x00924800,
0x00924804, 0x00924820, 0x00924824, 0x00924900, 0x00924904, 0x00924920, 0x00924924
};
static inline void spt_SwapValues(sptSparseTensor *tsr, sptNnzIndex ind1, sptNnzIndex ind2) {
    /* Exchange two nonzero entries of the tensor: swap their index tuples in
       every mode, then swap the stored values. */
    for(sptIndex mode = 0; mode < tsr->nmodes; ++mode) {
        sptIndex tmp_ind = tsr->inds[mode].data[ind2];
        tsr->inds[mode].data[ind2] = tsr->inds[mode].data[ind1];
        tsr->inds[mode].data[ind1] = tmp_ind;
    }
    sptValue tmp_val = tsr->values.data[ind2];
    tsr->values.data[ind2] = tsr->values.data[ind1];
    tsr->values.data[ind1] = tmp_val;
}
/*************************************************
* PUBLIC FUNCTIONS
*************************************************/
/**
* Determine the best mode order. Sort order: [mode, (ordered by increasing dimension sizes)]
*
* @param[out] mode_order a pointer to the array to be filled,
* @param[in] mode mode to do product
* @param[in] ndims tensor dimension sizes
* @param[in] nmodes tensor order
*
*/
void sptGetBestModeOrder(
    sptIndex * mode_order,
    sptIndex const mode,
    sptIndex const * ndims,
    sptIndex const nmodes)
{
    /* Fill mode_order as [mode, remaining modes by increasing dimension
       size]: sort all modes by their dimension sizes, then rotate `mode`
       to the front. */
    sptKeyValuePair * dim_pairs = (sptKeyValuePair*)malloc(nmodes * sizeof(*dim_pairs));
    sptIndex m;
    for(m = 0; m < nmodes; ++m) {
        dim_pairs[m].key = m;
        dim_pairs[m].value = ndims[m];
    }
    /* Increasing sort by dimension size. */
    sptPairArraySort(dim_pairs, nmodes);
    for(m = 0; m < nmodes; ++m) {
        mode_order[m] = dim_pairs[m].key;
    }
    free(dim_pairs);
    /* Locate `mode`, then rotate it to position 0 (shifting the entries
       before it one slot to the right). */
    sptIndex pos = 0;
    for(m = 0; m < nmodes; ++m) {
        if(mode_order[m] == mode) {
            pos = m;
        }
    }
    if(pos > 0) {
        while(pos > 0) {
            mode_order[pos] = mode_order[pos - 1];
            --pos;
        }
        mode_order[0] = mode;
    }
}
/**
* Determine the worst mode order. Sort order: [(ordered by decreasing dimension sizes)]
*
* @param[out] mode_order a pointer to the array to be filled,
* @param[in] mode mode to do product
* @param[in] ndims tensor dimension sizes
* @param[in] nmodes tensor order
*
*/
void sptGetWorstModeOrder(
    sptIndex * mode_order,
    sptIndex const mode,
    sptIndex const * ndims,
    sptIndex const nmodes)
{
    /* Fill mode_order with the modes sorted by decreasing dimension size,
       then move `mode` to the last position. */
    sptKeyValuePair * sorted_ndims = (sptKeyValuePair*)malloc(nmodes * sizeof(*sorted_ndims));
    for(sptIndex m=0; m<nmodes; ++m) {
        sorted_ndims[m].key = m;
        sorted_ndims[m].value = ndims[m];
    }
    /* Increasingly sort, then read out in reverse for decreasing sizes. */
    sptPairArraySort(sorted_ndims, nmodes);
    for(sptIndex m=0; m<nmodes; ++m) {
        mode_order[m] = sorted_ndims[nmodes - 1 - m].key;
    }
    /* Find the location of mode */
    sptIndex mode_loc = 0;
    for(sptIndex m=0; m<nmodes; ++m) {
        if(mode_order[m] == mode) {
            mode_loc = m;
        }
    }
    /* Shift mode to mode_order[nmodes - 1].  The shift loop must stop at
       nmodes - 2: the previous bound (m < nmodes) read mode_order[m + 1]
       with m == nmodes - 1, i.e. one element past the end of the array. */
    if(mode_loc != nmodes - 1) {
        for(sptIndex m=mode_loc; m<nmodes - 1; ++m) {
            mode_order[m] = mode_order[m+1];
        }
        mode_order[nmodes - 1] = mode;
    }
    free(sorted_ndims);
}
/**
* Sort COO sparse tensor by Z-Morton order. (The same with "sptPreprocessSparseTensor" function in "convert.c" without setting kschr.)
* Kernels in Row-major order, blocks and elements are in Z-Morton order.
* @param tsr a pointer to a sparse tensor
* @return mode pointers
*/
int sptSparseTensorMixedOrder(
    sptSparseTensor *tsr,
    const sptElementIndex sb_bits,
    const sptElementIndex sk_bits,
    int const tk)
{
    /* Sort tsr into the "mixed" layout: kernels in row-major block order,
       then the blocks/elements inside each kernel in Z-Morton order.
       sb_bits/sk_bits are the log2 block/kernel sizes; tk is the thread
       count passed down to the sorting routines.  Always returns 0. */
    sptNnzIndex nnz = tsr->nnz;
    int result;
    /* Sort tsr in a Row-major Block order to get all kernels. Not use Morton-order for kernels: 1. better support for higher-order tensors by limiting kernel size, because Morton key bit <= 128; */
    sptSparseTensorSortIndexRowBlock(tsr, 1, 0, nnz, sk_bits, tk);
    /* Derive the kernel boundary pointers from the row-block-sorted data.
       NOTE(review): kptr's storage does not appear to be released before
       returning -- confirm whether a free/vector-destroy call is needed. */
    sptNnzIndexVector kptr;
    result = sptNewNnzIndexVector(&kptr, 0, 0);
    spt_CheckError(result, "HiSpTns New", NULL);
    result = sptSetKernelPointers(&kptr, tsr, sk_bits);
    spt_CheckError(result, "HiSpTns Preprocess", NULL);
    /* Sort blocks in each kernel in Morton-order */
    sptNnzIndex k_begin, k_end;
    /* Loop for all kernels, 0-kptr.len for OMP code */
    for(sptNnzIndex k=0; k<kptr.len - 1; ++k) {
        k_begin = kptr.data[k];
        k_end = kptr.data[k+1]; // exclusive
        /* Sort blocks in each kernel in Morton-order */
        sptSparseTensorSortIndexMorton(tsr, 1, k_begin, k_end, sb_bits, tk);
    }
    return 0;
}
/**
* Sort COO sparse tensor by plain blocked order for modes except mode-n. Blocks are in Row-major order.
* @param tsr a pointer to a sparse tensor
* @return mode pointers
*/
int sptSparseTensorSortPartialIndex(
    sptSparseTensor *tsr,
    sptIndex const * mode_order,
    const sptElementIndex sb_bits,
    int const tk)
{
    /* Sort tsr by the custom mode order, build per-slice nonzero pointers
       for mode_order[0], then row-block-sort the nonzeros inside each
       slice.  Always returns 0. */
    sptNnzIndex nnz = tsr->nnz;
    sptIndex * ndims = tsr->ndims;
    sptIndex const mode = mode_order[0];
    int result;
    sptSparseTensorSortIndexCustomOrder(tsr, mode_order, 1, tk);
    /* Build slice pointers: sptr.data[s] is the first nonzero of slice s.
       NOTE(review): sptr is never released, `result` from the append calls
       is not checked, and the slice loop below assumes one entry per index
       value of ndims[mode] -- empty slices would desynchronize sptr from
       the loop bound.  Confirm against callers. */
    sptNnzIndexVector sptr;
    result = sptNewNnzIndexVector(&sptr, 0, 0);
    spt_CheckError(result, "HiSpTns New", NULL);
    sptNnzIndex slice_nnz = 0;
    sptIndex pre_idx = tsr->inds[mode].data[0];
    result = sptAppendNnzIndexVector(&sptr, 0);
    for (sptNnzIndex z = 0; z < nnz; ++z ) {
        ++ slice_nnz;
        /* A new index value starts a new slice boundary. */
        if (tsr->inds[mode].data[z] > pre_idx ) {
            result = sptAppendNnzIndexVector(&sptr, slice_nnz-1);
            pre_idx = tsr->inds[mode].data[z];
        }
    }
    result = sptAppendNnzIndexVector(&sptr, nnz);
    /* NOTE(review): this dump to stdout looks like leftover debug output. */
    sptDumpNnzIndexVector(&sptr, stdout);
    sptNnzIndex s_begin, s_end;
    // Loop for slices
    for(sptNnzIndex s = 0; s < ndims[mode]; ++ s) {
        s_begin = sptr.data[s];
        s_end = sptr.data[s+1]; // exclusive
        /* Sort blocks in each kernel in plain row-order */
        sptSparseTensorSortIndexRowBlock(tsr, 1, s_begin, s_end, sb_bits, tk);
    }
    return 0;
}
/**
 * Randomly shuffle all nonzeros with an unbiased Fisher-Yates pass.
 *
 * @param[in] tsr tensor to be shuffled
 *
 * Fix: the old code called srand(z+1) on every iteration, so every run
 * produced the same fixed (and biased) permutation; the PRNG is now seeded
 * once from the wall clock, as sptGetRandomShuffledIndices already did.
 */
void sptGetRandomShuffleElements(sptSparseTensor *tsr) {
    sptNnzIndex const nnz = tsr->nnz;
    if(nnz < 2) {
        return; // nothing to shuffle
    }
    srand((unsigned int) time(NULL)); // seed once, not per element
    /* Fisher-Yates: swap each position with a uniformly chosen one in [0, z]. */
    for(sptNnzIndex z = nnz - 1; z > 0; --z) {
        sptNnzIndex new_loc = (sptNnzIndex) rand() % (z + 1);
        spt_SwapValues(tsr, z, new_loc);
    }
}
/**
 * Randomly shuffle all indices.
 *
 * @param[in] tsr tensor whose dimensions size the shuffles
 * @param[out] map_inds records the randomly generated mapping; map_inds[m]
 *             must hold ndims[m] entries and is permuted in place (Fisher-Yates)
 *
 * Fix: srand(m+i+1+time(NULL)) was called on every inner iteration; reseeding
 * per draw correlates consecutive rand() values and biases the shuffle.
 * The PRNG is now seeded exactly once.
 */
void sptGetRandomShuffledIndices(sptSparseTensor *tsr, sptIndex ** map_inds) {
    srand((unsigned int) time(NULL)); // seed once for all modes
    /* Get randomly renumbering indices: independent Fisher-Yates per mode. */
    for(sptIndex m = 0; m < tsr->nmodes; ++m) {
        sptIndex dim_len = tsr->ndims[m];
        for(sptIndex i = dim_len - 1; i > 0; --i) {
            sptIndex new_loc = (sptIndex) (rand() % (i+1));
            /* Swap i <-> new_loc */
            sptIndex tmp = map_inds[m][i];
            map_inds[m][i] = map_inds[m][new_loc];
            map_inds[m][new_loc] = tmp;
        }
    }
}
/**
 * Reorder the elements in a COO sparse tensor by Z-Morton order within [begin, end).
 * Resets tsr->sortorder to the identity permutation as a side effect.
 * @param tsr the sparse tensor to operate on
 * @param force sort even if sortorder is already the identity
 * @param begin first nonzero of the range to sort
 * @param end one past the last nonzero of the range
 * @param sb_bits log2 of the block size, forwarded to the sorters
 * @param tk number of OpenMP threads
 */
void sptSparseTensorSortIndexMorton(
    sptSparseTensor *tsr,
    int force,
    const sptNnzIndex begin,
    const sptNnzIndex end,
    const sptElementIndex sb_bits,
    int tk)
{
    int dirty = force;
    /* Restore the identity sort order, noting whether anything changed. */
    for(size_t mode = 0; mode < tsr->nmodes; ++mode) {
        if(tsr->sortorder[mode] != mode) {
            tsr->sortorder[mode] = mode;
            dirty = 1;
        }
    }
    if(!dirty) {
        return;
    }

    /* TODO: add support for other order tensors */
    switch(tsr->nmodes) {
    case 3:
        #pragma omp parallel num_threads(tk)
        #pragma omp single nowait
        spt_QuickSortIndexMorton3D(tsr, begin, end, sb_bits);
        break;
    case 4:
        #pragma omp parallel num_threads(tk)
        #pragma omp single nowait
        spt_QuickSortIndexMorton4D(tsr, begin, end, sb_bits);
        break;
    default:
        printf("No support for more than 4th-order tensors yet.\n");
    }
}
/**
 * Reorder the elements in a COO sparse tensor by row-major block order within [begin, end).
 * Resets tsr->sortorder to the identity permutation as a side effect.
 * @param tsr the sparse tensor to operate on
 * @param force sort even if sortorder is already the identity
 * @param begin first nonzero of the range to sort
 * @param end one past the last nonzero of the range
 * @param sk_bits log2 of the block size used for the block keys
 * @param tk number of OpenMP threads
 */
void sptSparseTensorSortIndexRowBlock(
    sptSparseTensor *tsr,
    int force,
    const sptNnzIndex begin,
    const sptNnzIndex end,
    const sptElementIndex sk_bits,
    int const tk)
{
    int dirty = force;
    for(size_t mode = 0; mode < tsr->nmodes; ++mode) {
        if(tsr->sortorder[mode] != mode) {
            tsr->sortorder[mode] = mode;
            dirty = 1;
        }
    }
    if(!dirty) {
        return;
    }
    /* The task-based quicksort needs a single thread to seed the task pool. */
    #pragma omp parallel num_threads(tk)
    #pragma omp single nowait
    spt_QuickSortIndexRowBlock(tsr, begin, end, sk_bits);
}
/**
 * Reorder the elements in a sparse tensor lexicographically, sorting all modes except one.
 * The excluded mode (last entry of mode_order) is NOT ordered.
 * Resets tsr->sortorder to the identity permutation as a side effect.
 * @param tsr the sparse tensor to operate on
 * @param force sort even if sortorder is already the identity
 * @param mode_order precedence of the modes; its last entry is skipped
 * @param tk number of OpenMP threads
 */
void sptSparseTensorSortIndexExceptSingleMode(sptSparseTensor *tsr, int force, sptIndex * mode_order, int const tk) {
    int dirty = force;
    for(sptIndex mode = 0; mode < tsr->nmodes; ++mode) {
        if(tsr->sortorder[mode] != mode) {
            tsr->sortorder[mode] = mode;
            dirty = 1;
        }
    }
    if(!dirty) {
        return;
    }
    #pragma omp parallel num_threads(tk)
    #pragma omp single nowait
    spt_QuickSortIndexExceptSingleMode(tsr, 0, tsr->nnz, mode_order);
}
/**
 * Reorder the elements in a sparse tensor lexicographically in a customized mode order.
 * @param tsr the sparse tensor to operate on
 * @param mode_order permutation of the modes giving the sort precedence
 * @param force sort even if tsr->sortorder already equals mode_order
 * @param tk number of OpenMP threads forwarded to the sort
 */
void sptSparseTensorSortIndexCustomOrder(sptSparseTensor *tsr, sptIndex const * mode_order, int force, int tk) {
    sptIndex const nmodes = tsr->nmodes;

    /* Fast path: already sorted in exactly this order. */
    if(!force && memcmp(tsr->sortorder, mode_order, nmodes * sizeof (sptIndex)) == 0) {
        return;
    }

    /* Build a shallow, mode-permuted view of tsr (pointers only, no data copy)
     * and sort that lexicographically; this sorts tsr in `mode_order`. */
    sptSparseTensor view;
    view.nmodes = nmodes;
    view.sortorder = tsr->sortorder;
    view.nnz = tsr->nnz;
    view.values = tsr->values;
    view.ndims = malloc(nmodes * sizeof view.ndims[0]);
    view.inds = malloc(nmodes * sizeof view.inds[0]);
    for(sptIndex m = 0; m < nmodes; ++m) {
        view.ndims[m] = tsr->ndims[mode_order[m]];
        view.inds[m] = tsr->inds[mode_order[m]];
    }

    sptSparseTensorSortIndex(&view, 1, tk);

    free(view.inds);
    free(view.ndims);

    /* Record the order the tensor is now sorted in. */
    for(sptIndex m = 0; m < nmodes; ++m) {
        tsr->sortorder[m] = mode_order[m];
    }
}
/**
 * Reorder the elements in a sparse tensor lexicographically (mode 0 first).
 * Resets tsr->sortorder to the identity permutation as a side effect.
 * @param tsr the sparse tensor to operate on
 * @param force sort even if sortorder is already the identity
 * @param tk number of OpenMP threads
 */
void sptSparseTensorSortIndex(sptSparseTensor *tsr, int force, int tk)
{
    int dirty = force;
    for(sptIndex mode = 0; mode < tsr->nmodes; ++mode) {
        if(tsr->sortorder[mode] != mode) {
            tsr->sortorder[mode] = mode;
            dirty = 1;
        }
    }
    if(!dirty) {
        return;
    }
    #pragma omp parallel num_threads(tk)
    #pragma omp single nowait
    spt_QuickSortIndex(tsr, 0, tsr->nnz);
}
/**
 * Reorder the elements in a sparse tensor lexicographically, but consider mode `mode` the last one.
 * Sets tsr->sortorder to (0, .., mode-1, mode+1, .., nmodes-1, mode) as a side effect.
 * @param tsr the sparse tensor to operate on
 * @param mode the mode to be considered the last
 * @param force sort even if sortorder already matches
 * @param tk number of OpenMP threads
 */
void sptSparseTensorSortIndexAtMode(sptSparseTensor *tsr, sptIndex const mode, int force, int tk) {
    int dirty = force;
    sptIndex pos;
    /* Modes below `mode` keep their own positions ... */
    for(pos = 0; pos < mode; ++pos) {
        if(tsr->sortorder[pos] != pos) {
            tsr->sortorder[pos] = pos;
            dirty = 1;
        }
    }
    /* ... modes above `mode` shift down one slot ... */
    for(pos = mode + 1; pos < tsr->nmodes; ++pos) {
        if(tsr->sortorder[pos-1] != pos) {
            tsr->sortorder[pos-1] = pos;
            dirty = 1;
        }
    }
    /* ... and `mode` itself goes last. */
    if(tsr->sortorder[tsr->nmodes-1] != mode) {
        tsr->sortorder[tsr->nmodes-1] = mode;
        dirty = 1;
    }
    if(!dirty) {
        return;
    }
    #pragma omp parallel num_threads(tk)
    #pragma omp single nowait
    spt_QuickSortAtMode(tsr, 0, tsr->nnz, mode);
}
/**
 * compare two indices from two identical or distinct sparse tensors lexicographically
 * @param tsr1 the first sparse tensor
 * @param loc1 the order of the element in the first sparse tensor whose index is to be compared
 * @param tsr2 the second sparse tensor
 * @param loc2 the order of the element in the second sparse tensor whose index is to be compared
 * @return -1 for less, 0 for equal, 1 for greater
 */
int spt_SparseTensorCompareIndices(const sptSparseTensor *tsr1, sptNnzIndex loc1, const sptSparseTensor *tsr2, sptNnzIndex loc2) {
    assert(tsr1->nmodes == tsr2->nmodes);
    /* First differing mode decides the order. */
    for(sptIndex m = 0; m < tsr1->nmodes; ++m) {
        sptIndex const a = tsr1->inds[m].data[loc1];
        sptIndex const b = tsr2->inds[m].data[loc2];
        if(a != b) {
            return a < b ? -1 : 1;
        }
    }
    return 0;
}
/*************************************************
* PRIVATE FUNCTIONS
*************************************************/
/**
 * compare two indices from two identical or distinct sparse tensors lexicographically,
 * with all modes other than `mode` compared first and `mode` used as the final tiebreaker
 * @param tsr1 the first sparse tensor
 * @param loc1 the order of the element in the first sparse tensor whose index is to be compared
 * @param tsr2 the second sparse tensor
 * @param loc2 the order of the element in the second sparse tensor whose index is to be compared
 * @param mode the mode compared last
 * @return -1 for less, 0 for equal, 1 for greater
 */
/*************************************************
* Comparison functions
*************************************************/
/* Lexicographic comparison with every mode except `mode` compared first,
 * then `mode` itself as the final tiebreaker. Returns -1 / 0 / 1. */
static int spt_SparseTensorCompareAtMode(const sptSparseTensor *tsr1, sptNnzIndex const ind1, const sptSparseTensor *tsr2, sptNnzIndex const ind2, sptIndex const mode) {
    assert(tsr1->nmodes == tsr2->nmodes);
    for(sptIndex m = 0; m < tsr1->nmodes; ++m) {
        if(m == mode) {
            continue; // deferred to the tiebreak below
        }
        sptIndex const a = tsr1->inds[m].data[ind1];
        sptIndex const b = tsr2->inds[m].data[ind2];
        if(a != b) {
            return a < b ? -1 : 1;
        }
    }
    /* All other modes equal: break the tie on `mode`. */
    sptIndex const last1 = tsr1->inds[mode].data[ind1];
    sptIndex const last2 = tsr2->inds[mode].data[ind2];
    if(last1 != last2) {
        return last1 < last2 ? -1 : 1;
    }
    return 0;
}
/* Lexicographic comparison over the first nmodes-1 entries of mode_order;
 * the mode in the last slot is deliberately ignored. Returns -1 / 0 / 1. */
int spt_SparseTensorCompareIndicesExceptSingleMode(const sptSparseTensor *tsr1, sptNnzIndex loc1, const sptSparseTensor *tsr2, sptNnzIndex loc2, sptIndex * mode_order) {
    assert(tsr1->nmodes == tsr2->nmodes);
    for(sptIndex i = 0; i + 1 < tsr1->nmodes; ++ i) {
        sptIndex const m = mode_order[i];
        sptIndex const a = tsr1->inds[m].data[loc1];
        sptIndex const b = tsr2->inds[m].data[loc2];
        if(a != b) {
            return a < b ? -1 : 1;
        }
    }
    return 0;
}
/**
 * compare two indices from two identical or distinct sparse tensors lexicographically, using block index as keywords.
 * @param tsr1 the first sparse tensor
 * @param loc1 the order of the element in the first sparse tensor whose index is to be compared
 * @param tsr2 the second sparse tensor
 * @param loc2 the order of the element in the second sparse tensor whose index is to be compared
 * @param sk_bits log2 of the block size; low sk_bits bits are ignored
 * @return -1 for less, 0 for equal, 1 for greater
 */
static int spt_SparseTensorCompareIndicesRowBlock(
    const sptSparseTensor *tsr1,
    sptNnzIndex loc1,
    const sptSparseTensor *tsr2,
    sptNnzIndex loc2,
    const sptElementIndex sk_bits)
{
    assert(tsr1->nmodes == tsr2->nmodes);
    for(sptIndex m = 0; m < tsr1->nmodes; ++m) {
        /* Dropping the low sk_bits maps an element index to its block index,
         * so elements inside the same block compare equal. */
        sptIndex const blk1 = tsr1->inds[m].data[loc1] >> sk_bits;
        sptIndex const blk2 = tsr2->inds[m].data[loc2] >> sk_bits;
        if(blk1 != blk2) {
            return blk1 < blk2 ? -1 : 1;
        }
    }
    return 0;
}
/**
 * compare two indices from two identical or distinct sparse tensors by Z-Morton order,
 * for 3-D tensors with 32-bit indices, via the morton256_{x,y,z} lookup tables.
 * @param tsr1 the first sparse tensor
 * @param loc1 the order of the element in the first sparse tensor whose index is to be compared
 * @param tsr2 the second sparse tensor
 * @param loc2 the order of the element in the second sparse tensor whose index is to be compared
 * @return -1 for less, 0 for equal, 1 for greater
 */
static int spt_SparseTensorCompareIndicesMorton3D(
    const sptSparseTensor *tsr1,
    uint64_t loc1,
    const sptSparseTensor *tsr2,
    uint64_t loc2)
{
    sptMortonIndex mkey1 = 0, mkey2 = 0;
    assert(tsr1->nmodes == tsr2->nmodes);
    /* Only support 3-D tensors, with 32-bit indices. */
    uint32_t x1 = tsr1->inds[0].data[loc1];
    uint32_t y1 = tsr1->inds[1].data[loc1];
    uint32_t z1 = tsr1->inds[2].data[loc1];
    uint32_t x2 = tsr2->inds[0].data[loc2];
    uint32_t y2 = tsr2->inds[1].data[loc2];
    uint32_t z2 = tsr2->inds[2].data[loc2];

    /* Each step interleaves one byte of x, y and z into a 24-bit chunk, so the
     * accumulated key must be shifted by exactly 24 per step.
     * Fix: the old code shifted cumulatively by 72/48/24, which pushed the
     * most-significant chunk to bit 144 -- past the top of the 128-bit key,
     * discarding it -- and left gaps, so coordinates >= 2^24 were mis-ordered. */
    mkey1 = morton256_z[(z1 >> 24) & 0xFF ] |
        morton256_y[(y1 >> 24) & 0xFF ] |
        morton256_x[(x1 >> 24) & 0xFF ];
    mkey1 = (mkey1 << 24) |
        morton256_z[(z1 >> 16) & 0xFF ] |
        morton256_y[(y1 >> 16) & 0xFF ] |
        morton256_x[(x1 >> 16) & 0xFF ];
    mkey1 = (mkey1 << 24) |
        morton256_z[(z1 >> 8) & 0xFF ] |
        morton256_y[(y1 >> 8) & 0xFF ] |
        morton256_x[(x1 >> 8) & 0xFF ];
    mkey1 = (mkey1 << 24) |
        morton256_z[(z1) & 0xFF ] |
        morton256_y[(y1) & 0xFF ] |
        morton256_x[(x1) & 0xFF ];

    mkey2 = morton256_z[(z2 >> 24) & 0xFF ] |
        morton256_y[(y2 >> 24) & 0xFF ] |
        morton256_x[(x2 >> 24) & 0xFF ];
    mkey2 = (mkey2 << 24) |
        morton256_z[(z2 >> 16) & 0xFF ] |
        morton256_y[(y2 >> 16) & 0xFF ] |
        morton256_x[(x2 >> 16) & 0xFF ];
    mkey2 = (mkey2 << 24) |
        morton256_z[(z2 >> 8) & 0xFF ] |
        morton256_y[(y2 >> 8) & 0xFF ] |
        morton256_x[(x2 >> 8) & 0xFF ];
    mkey2 = (mkey2 << 24) |
        morton256_z[(z2) & 0xFF ] |
        morton256_y[(y2) & 0xFF ] |
        morton256_x[(x2) & 0xFF ];

    if(mkey1 < mkey2) {
        return -1;
    } else if(mkey1 > mkey2) {
        return 1;
    } else {
        return 0;
    }
}
/**
* compare two indices from two identical or distinct sparse tensors lexicographically, using Z-Morton ordering recursively, freely support arbitrary tensor orders.
* @param tsr1 the first sparse tensor
* @param loc1 the order of the element in the first sparse tensor whose index is to be compared
* @param tsr2 the second sparse tensor
* @param loc2 the order of the element in the second sparse tensor whose index is to be compared
* @return -1 for less, 0 for equal, 1 for greater
*/
static int spt_SparseTensorCompareIndicesMorton4D(
const sptSparseTensor *tsr1,
uint64_t loc1,
const sptSparseTensor *tsr2,
uint64_t loc2)
{
sptMortonIndex mkey1, mkey2;
assert(tsr1->nmodes == tsr2->nmodes);
/* Only support 3-D tensors, with 32-bit indices. */
uint32_t x1 = tsr1->inds[0].data[loc1];
uint32_t y1 = tsr1->inds[1].data[loc1];
uint32_t z1 = tsr1->inds[2].data[loc1];
uint32_t w1 = tsr1->inds[3].data[loc1];
uint32_t x2 = tsr2->inds[0].data[loc2];
uint32_t y2 = tsr2->inds[1].data[loc2];
uint32_t z2 = tsr2->inds[2].data[loc2];
uint32_t w2 = tsr2->inds[3].data[loc2];
static const uint64_t MASKS_64[]={0x5555555555555555, 0x3333333333333333, 0x0F0F0F0F0F0F0F0F, 0x00FF00FF00FF00FF, 0x0000FFFF0000FFFF};
static const uint64_t SHIFTS_64[]= {1, 2, 4, 8, 16};
static sptMortonIndex MASKS_128[] = {
(sptMortonIndex)0x5555555555555555 << 64 | 0x5555555555555555,
(sptMortonIndex)0x3333333333333333 << 64 | 0x3333333333333333,
(sptMortonIndex)0x0F0F0F0F0F0F0F0F << 64 | 0x0F0F0F0F0F0F0F0F,
(sptMortonIndex)0x00FF00FF00FF00FF << 64 | 0x00FF00FF00FF00FF,
(sptMortonIndex)0x0000FFFF0000FFFF << 64 | 0x0000FFFF0000FFFF,
(sptMortonIndex)0x00000000FFFFFFFF << 64 | 0x00000000FFFFFFFF};
static const uint64_t SHIFTS_128[]= {1, 2, 4, 8, 16, 32};
// sptMortonIndex tmp_mask = MASKS_128[2];
// printf("tmp_mask: high: %"PRIX64 " ; low: %"PRIX64 " .\n", (uint64_t)(tmp_mask >> 64), (uint64_t)tmp_mask);
uint64_t tmp_64;
sptMortonIndex x, y, z, w;
/**** compute mkey1 ****/
/* compute correct x, 32bit -> 64bit first */
tmp_64 = x1;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct x, 64bit -> 128bit */
x = tmp_64;
x = (x | (x << SHIFTS_128[5])) & MASKS_128[5];
x = (x | (x << SHIFTS_128[4])) & MASKS_128[4];
x = (x | (x << SHIFTS_128[3])) & MASKS_128[3];
x = (x | (x << SHIFTS_128[2])) & MASKS_128[2];
x = (x | (x << SHIFTS_128[1])) & MASKS_128[1];
x = (x | (x << SHIFTS_128[0])) & MASKS_128[0];
/* compute correct y, 32bit -> 64bit first */
tmp_64 = y1;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct y, 64bit -> 128bit */
y = tmp_64;
y = (y | (y << SHIFTS_128[5])) & MASKS_128[5];
y = (y | (y << SHIFTS_128[4])) & MASKS_128[4];
y = (y | (y << SHIFTS_128[3])) & MASKS_128[3];
y = (y | (y << SHIFTS_128[2])) & MASKS_128[2];
y = (y | (y << SHIFTS_128[1])) & MASKS_128[1];
y = (y | (y << SHIFTS_128[0])) & MASKS_128[0];
/* compute correct z, 32bit -> 64bit first */
tmp_64 = z1;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct z, 64bit -> 128bit */
z = tmp_64;
z = (z | (z << SHIFTS_128[5])) & MASKS_128[5];
z = (z | (z << SHIFTS_128[4])) & MASKS_128[4];
z = (z | (z << SHIFTS_128[3])) & MASKS_128[3];
z = (z | (z << SHIFTS_128[2])) & MASKS_128[2];
z = (z | (z << SHIFTS_128[1])) & MASKS_128[1];
z = (z | (z << SHIFTS_128[0])) & MASKS_128[0];
/* compute correct w, 32bit -> 64bit first */
tmp_64 = w1;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct w, 64bit -> 128bit */
w = tmp_64;
w = (w | (w << SHIFTS_128[5])) & MASKS_128[5];
w = (w | (w << SHIFTS_128[4])) & MASKS_128[4];
w = (w | (w << SHIFTS_128[3])) & MASKS_128[3];
w = (w | (w << SHIFTS_128[2])) & MASKS_128[2];
w = (w | (w << SHIFTS_128[1])) & MASKS_128[1];
w = (w | (w << SHIFTS_128[0])) & MASKS_128[0];
// mkey1 = x | (y << 1) | (z << 2) | (w << 3);
mkey1 = w | (z << 1) | (y << 2) | (x << 3);
/**** compute mkey2 ****/
/* compute correct x, 32bit -> 64bit first */
tmp_64 = x2;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct x, 64bit -> 128bit */
x = tmp_64;
x = (x | (x << SHIFTS_128[5])) & MASKS_128[5];
x = (x | (x << SHIFTS_128[4])) & MASKS_128[4];
x = (x | (x << SHIFTS_128[3])) & MASKS_128[3];
x = (x | (x << SHIFTS_128[2])) & MASKS_128[2];
x = (x | (x << SHIFTS_128[1])) & MASKS_128[1];
x = (x | (x << SHIFTS_128[0])) & MASKS_128[0];
/* compute correct y, 32bit -> 64bit first */
tmp_64 = y2;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct y, 64bit -> 128bit */
y = tmp_64;
y = (y | (y << SHIFTS_128[5])) & MASKS_128[5];
y = (y | (y << SHIFTS_128[4])) & MASKS_128[4];
y = (y | (y << SHIFTS_128[3])) & MASKS_128[3];
y = (y | (y << SHIFTS_128[2])) & MASKS_128[2];
y = (y | (y << SHIFTS_128[1])) & MASKS_128[1];
y = (y | (y << SHIFTS_128[0])) & MASKS_128[0];
/* compute correct z, 32bit -> 64bit first */
tmp_64 = z2;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct z, 64bit -> 128bit */
z = tmp_64;
z = (z | (z << SHIFTS_128[5])) & MASKS_128[5];
z = (z | (z << SHIFTS_128[4])) & MASKS_128[4];
z = (z | (z << SHIFTS_128[3])) & MASKS_128[3];
z = (z | (z << SHIFTS_128[2])) & MASKS_128[2];
z = (z | (z << SHIFTS_128[1])) & MASKS_128[1];
z = (z | (z << SHIFTS_128[0])) & MASKS_128[0];
/* compute correct w, 32bit -> 64bit first */
tmp_64 = w2;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct w, 64bit -> 128bit */
w = tmp_64;
w = (w | (w << SHIFTS_128[5])) & MASKS_128[5];
w = (w | (w << SHIFTS_128[4])) & MASKS_128[4];
w = (w | (w << SHIFTS_128[3])) & MASKS_128[3];
w = (w | (w << SHIFTS_128[2])) & MASKS_128[2];
w = (w | (w << SHIFTS_128[1])) & MASKS_128[1];
w = (w | (w << SHIFTS_128[0])) & MASKS_128[0];
mkey2 = w | (z << 1) | (y << 2) | (x << 3);
if(mkey1 < mkey2) {
return -1;
} else if(mkey1 > mkey2) {
return 1;
} else {
return 0;
}
}
/*************************************************
* Quicksort functions
*************************************************/
/* Parallel quicksort of tsr's nonzeros over [l, r), ordered by
 * spt_SparseTensorCompareAtMode (mode `mode` compared last).
 * Partition scheme: the pivot is the middle ELEMENT, tracked by position `p`;
 * when a swap moves the pivot itself, `p` follows it so later comparisons
 * still reference the pivot value. The left half is sorted in an OpenMP task
 * (the caller must be inside a parallel/single region), the right half
 * recurses inline, and taskwait joins before returning. */
static void spt_QuickSortAtMode(sptSparseTensor *tsr, sptNnzIndex const l, sptNnzIndex const r, sptIndex const mode) {
    sptNnzIndex i, j, p;
    if(r-l < 2) {
        return; /* 0 or 1 element: already sorted */
    }
    p = (l+r) / 2; /* pivot position */
    for(i = l, j = r-1; ; ++i, --j) {
        /* advance i past elements strictly less than the pivot */
        while(spt_SparseTensorCompareAtMode(tsr, i, tsr, p, mode) < 0) {
            ++i;
        }
        /* retreat j past elements strictly greater than the pivot */
        while(spt_SparseTensorCompareAtMode(tsr, p, tsr, j, mode) < 0) {
            --j;
        }
        if(i >= j) {
            break; /* partition complete */
        }
        spt_SwapValues(tsr, i, j);
        /* keep p pointing at the pivot element if the swap moved it */
        if(i == p) {
            p = j;
        } else if(j == p) {
            p = i;
        }
    }
    #pragma omp task firstprivate(l,i) shared(tsr)
    {
        spt_QuickSortAtMode(tsr, l, i, mode);
    }
    spt_QuickSortAtMode(tsr, i, r, mode);
    #pragma omp taskwait
}
/* Parallel quicksort of tsr's nonzeros over [l, r) in 3-D Z-Morton order.
 * Same pivot-tracking partition and OpenMP task structure as the other
 * spt_QuickSort* routines. NOTE: sb_bits is only forwarded to the recursive
 * calls -- the 3-D Morton comparator does not take it. */
static void spt_QuickSortIndexMorton3D(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, const sptElementIndex sb_bits) {
    uint64_t i, j, p;
    if(r-l < 2) {
        return; /* 0 or 1 element: already sorted */
    }
    p = (l+r) / 2; /* pivot position, tracked across swaps */
    for(i = l, j = r-1; ; ++i, --j) {
        while(spt_SparseTensorCompareIndicesMorton3D(tsr, i, tsr, p) < 0) {
            ++i;
        }
        while(spt_SparseTensorCompareIndicesMorton3D(tsr, p, tsr, j) < 0) {
            --j;
        }
        if(i >= j) {
            break;
        }
        spt_SwapValues(tsr, i, j);
        /* follow the pivot element if the swap moved it */
        if(i == p) {
            p = j;
        } else if(j == p) {
            p = i;
        }
    }
    #pragma omp task firstprivate(l,i) shared(tsr)
    {
        spt_QuickSortIndexMorton3D(tsr, l, i, sb_bits);
    }
    spt_QuickSortIndexMorton3D(tsr, i, r, sb_bits);
    #pragma omp taskwait
}
/* Parallel quicksort of tsr's nonzeros over [l, r) in 4-D Z-Morton order.
 * Same pivot-tracking partition and OpenMP task structure as the other
 * spt_QuickSort* routines. NOTE: sb_bits is only forwarded to the recursive
 * calls -- the 4-D Morton comparator does not take it. */
static void spt_QuickSortIndexMorton4D(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, const sptElementIndex sb_bits) {
    uint64_t i, j, p;
    if(r-l < 2) {
        return; /* 0 or 1 element: already sorted */
    }
    p = (l+r) / 2; /* pivot position, tracked across swaps */
    for(i = l, j = r-1; ; ++i, --j) {
        while(spt_SparseTensorCompareIndicesMorton4D(tsr, i, tsr, p) < 0) {
            ++i;
        }
        while(spt_SparseTensorCompareIndicesMorton4D(tsr, p, tsr, j) < 0) {
            --j;
        }
        if(i >= j) {
            break;
        }
        spt_SwapValues(tsr, i, j);
        /* follow the pivot element if the swap moved it */
        if(i == p) {
            p = j;
        } else if(j == p) {
            p = i;
        }
    }
    #pragma omp task firstprivate(l,i) shared(tsr)
    {
        spt_QuickSortIndexMorton4D(tsr, l, i, sb_bits);
    }
    spt_QuickSortIndexMorton4D(tsr, i, r, sb_bits);
    #pragma omp taskwait
}
/* Parallel quicksort of tsr's nonzeros over [l, r) by row-major BLOCK index
 * (element indices right-shifted by sk_bits); elements within the same block
 * compare equal, so their relative order is unspecified. Same pivot-tracking
 * partition and OpenMP task structure as the other spt_QuickSort* routines. */
static void spt_QuickSortIndexRowBlock(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, const sptElementIndex sk_bits) {
    sptNnzIndex i, j, p;
    if(r-l < 2) {
        return; /* 0 or 1 element: already sorted */
    }
    p = (l+r) / 2; /* pivot position, tracked across swaps */
    for(i = l, j = r-1; ; ++i, --j) {
        while(spt_SparseTensorCompareIndicesRowBlock(tsr, i, tsr, p, sk_bits) < 0) {
            ++i;
        }
        while(spt_SparseTensorCompareIndicesRowBlock(tsr, p, tsr, j, sk_bits) < 0) {
            --j;
        }
        if(i >= j) {
            break;
        }
        spt_SwapValues(tsr, i, j);
        /* follow the pivot element if the swap moved it */
        if(i == p) {
            p = j;
        } else if(j == p) {
            p = i;
        }
    }
    #pragma omp task firstprivate(l,i) shared(tsr)
    {
        spt_QuickSortIndexRowBlock(tsr, l, i, sk_bits);
    }
    spt_QuickSortIndexRowBlock(tsr, i, r, sk_bits);
    #pragma omp taskwait
}
/* Parallel quicksort of tsr's nonzeros over [l, r), comparing the first
 * nmodes-1 modes of mode_order (the excluded mode, in the last slot, is
 * ignored by the comparator). Same pivot-tracking partition and OpenMP task
 * structure as the other spt_QuickSort* routines. */
static void spt_QuickSortIndexExceptSingleMode(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, sptIndex * mode_order)
{
    sptNnzIndex i, j, p;
    if(r-l < 2) {
        return; /* 0 or 1 element: already sorted */
    }
    p = (l+r) / 2; /* pivot position, tracked across swaps */
    for(i = l, j = r-1; ; ++i, --j) {
        while(spt_SparseTensorCompareIndicesExceptSingleMode(tsr, i, tsr, p, mode_order) < 0) {
            ++i;
        }
        while(spt_SparseTensorCompareIndicesExceptSingleMode(tsr, p, tsr, j, mode_order) < 0) {
            --j;
        }
        if(i >= j) {
            break;
        }
        spt_SwapValues(tsr, i, j);
        /* follow the pivot element if the swap moved it */
        if(i == p) {
            p = j;
        } else if(j == p) {
            p = i;
        }
    }
    #pragma omp task firstprivate(l,i) shared(tsr, mode_order)
    {
        spt_QuickSortIndexExceptSingleMode(tsr, l, i, mode_order);
    }
    spt_QuickSortIndexExceptSingleMode(tsr, i, r, mode_order);
    #pragma omp taskwait
}
/* Parallel lexicographic quicksort of tsr's nonzeros over [l, r).
 * Partition scheme: the pivot is the middle element, tracked by position `p`;
 * when a swap moves the pivot itself, `p` follows it so later comparisons
 * still reference the pivot value. The left half is sorted in an OpenMP task
 * (the caller must be inside a parallel/single region), the right half
 * recurses inline, and taskwait joins before returning. */
static void spt_QuickSortIndex(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r) {
    sptNnzIndex i, j, p;
    if(r-l < 2) {
        return; /* 0 or 1 element: already sorted */
    }
    p = (l+r) / 2; /* pivot position */
    for(i = l, j = r-1; ; ++i, --j) {
        /* advance i past elements strictly less than the pivot */
        while(spt_SparseTensorCompareIndices(tsr, i, tsr, p) < 0) {
            ++i;
        }
        /* retreat j past elements strictly greater than the pivot */
        while(spt_SparseTensorCompareIndices(tsr, p, tsr, j) < 0) {
            --j;
        }
        if(i >= j) {
            break; /* partition complete */
        }
        spt_SwapValues(tsr, i, j);
        /* keep p pointing at the pivot element if the swap moved it */
        if(i == p) {
            p = j;
        } else if(j == p) {
            p = i;
        }
    }
    #pragma omp task firstprivate(l,i) shared(tsr)
    {
        spt_QuickSortIndex(tsr, l, i);
    }
    spt_QuickSortIndex(tsr, i, r);
    #pragma omp taskwait
}
// GB_binop__bclr_int16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bclr_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__bclr_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__bclr_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__bclr_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bclr_int16)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bclr_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__bclr_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bclr_int16)
// C=scalar+B GB (_bind1st__bclr_int16)
// C=scalar+B' GB (_bind1st_tran__bclr_int16)
// C=A+scalar GB (_bind2nd__bclr_int16)
// C=A'+scalar GB (_bind2nd_tran__bclr_int16)
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = GB_BITCLR (aij, bij, int16_t, 16)
// NOTE: the macros below specialize the generic template files #included by
// the kernels further down for the BCLR (bit-clear) operator on int16.
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITCLR (x, y, int16_t, 16) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
// (any of the GxB_NO_* compile-time switches removes this specialized kernel)
#define GB_DISABLE \
(GxB_NO_BCLR || GxB_NO_INT16 || GxB_NO_BCLR_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Disabled (#if 0): per the comment below, this kernel is only generated for
// the listed accumulate operators, which do not include BCLR.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all 3 matrices are dense; cij = GB_BITCLR (aij, bij).
// Returns GrB_NO_VALUE when this specialized kernel is compiled out.
GrB_Info GB (_Cdense_ewise3_noaccum__bclr_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the loop itself lives in the template, specialized by the macros above
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix with the BCLR op.
// B_ek_slicing/B_ntasks/B_nthreads describe the parallel slicing of B.
GrB_Info GB (_Cdense_accumB__bclr_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix with the BCLR op.
GrB_Info GB (_Cdense_accumb__bclr_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: unreachable -- the inner block above always returns first
    // (harmless generator artifact; left as-is in this generated file)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Disabled (#if 0): no colscale (A*D) kernel is generated for BCLR.
#if 0
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Disabled (#if 0): no rowscale (D*B) kernel is generated for BCLR.
#if 0
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B with the BCLR op; the TaskList /
// C_to_* arrays describe the precomputed parallel schedule of C.
GrB_Info GB (_AaddB__bclr_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces declared here are released by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper,
// applying cij = GB_BITCLR (aij, bij) on the intersection pattern.
GrB_Info GB (_AemultB_08__bclr_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// BCLR is non-commutative with no flipped variant (GB_BINOP_FLIP is 1),
// so flipxy selects between fmult(x,y) and fmult(y,x) template instantiations.
GrB_Info GB (_AemultB_02__bclr_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult for C<M> = A.*B where the mask M is sparse/hyper and both A and
// B are bitmap/full; the work is expanded from GB_emult_04_template.c.
GrB_Info GB (_AemultB_04__bclr_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult for C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the result C is
// held in bitmap form; expanded from GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__bclr_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the BITCLR int16 operator with the scalar x bound
// to the first argument.  Bb is B->b when B is bitmap (NULL otherwise);
// entries not present in the bitmap are left untouched in Cx.
GrB_Info GB (_bind1st__bclr_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cz = (int16_t *) Cx_output ;
    int16_t *Bz = (int16_t *) Bx_input ;
    int16_t xscalar = (*((int16_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        if (GBB (Bb, k))
        {
            int16_t b_ij = GBX (Bz, k, false) ;
            Cz [k] = GB_BITCLR (xscalar, b_ij, int16_t, 16) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the BITCLR int16 operator with the scalar y bound
// to the second argument.  Ab is A->b when A is bitmap (NULL otherwise);
// entries not present in the bitmap are left untouched in Cx.
GrB_Info GB (_bind2nd__bclr_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cz = (int16_t *) Cx_output ;
    int16_t *Az = (int16_t *) Ax_input ;
    int16_t yscalar = (*((int16_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        if (GBB (Ab, k))
        {
            int16_t a_ij = GBX (Az, k, false) ;
            Cz [k] = GB_BITCLR (a_ij, yscalar, int16_t, 16) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// This GB_CAST_OP definition is consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITCLR (x, aij, int16_t, 16) ; \
}
// C = op (x, A'): transpose A and apply BITCLR with x bound first.
GrB_Info GB (_bind1st_tran__bclr_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// re-establish GB_ATYPE for any subsequent code (emitted by the code
// generator; a no-op here since the definition is unchanged)
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// This GB_CAST_OP definition is consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITCLR (aij, y, int16_t, 16) ; \
}
// C = op (A', y): transpose A and apply BITCLR with y bound second.
GrB_Info GB (_bind2nd_tran__bclr_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
cpu_ctc.h | #pragma once
#include <tuple>
#include <cmath>
#include <limits>
#include <algorithm>
#include <numeric>
#if !defined(CTC_DISABLE_OMP) && !defined(APPLE)
#include <omp.h>
#endif
#include "ctc_helper.h"
// CPU implementation of CTC (Connectionist Temporal Classification) loss and
// gradient.  ProbT is the probability/activation scalar type.  All scratch
// memory comes from the caller-supplied `workspace` pointer; this class
// allocates nothing itself.
template<typename ProbT>
class CpuCTC {
public:
// Noncopyable
// num_threads <= 0 means "use omp_get_max_threads()" when OpenMP is enabled.
CpuCTC(int alphabet_size, int minibatch, void* workspace, int num_threads,
int blank_label) :
alphabet_size_(alphabet_size), minibatch_(minibatch),
num_threads_(num_threads), workspace_(workspace),
blank_label_(blank_label) {
#if defined(CTC_DISABLE_OMP) || defined(APPLE)
#else
if (num_threads > 0) {
omp_set_num_threads(num_threads);
} else {
num_threads_ = omp_get_max_threads();
}
#endif
}
CpuCTC(const CpuCTC&) = delete;
CpuCTC& operator=(const CpuCTC&) = delete;
// Computes per-utterance negative log-likelihoods (costs) and gradients
// w.r.t. the activations.
ctcStatus_t cost_and_grad(const ProbT* const activations,
ProbT *grads,
ProbT* costs,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths);
// Forward-only scoring: computes costs without gradients.
ctcStatus_t score_forward(const ProbT* const activations,
ProbT* costs,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths);
private:
// Per-utterance scratch buffers carved out of the shared workspace.
class CpuCTC_metadata {
private:
int setup_labels(const int* const labels, int blank_label, int L, int S);
public:
CpuCTC_metadata(int L, int S, int T, int mb, int alphabet_size,
void* workspace, size_t bytes_used, int blank_label,
const int* const labels);
ProbT* alphas;          // forward variables, S x T
ProbT* betas;           // backward variables, one column of size S
int* labels_w_blanks;   // label sequence with blanks interleaved (size S)
int* e_inc;             // per-step increments for the `end` index window
int* s_inc;             // per-step increments for the `start` index window
ProbT* output;          // per-label accumulator of size alphabet_size
int repeats;            // number of adjacent repeated labels
};
int alphabet_size_; // Number of characters plus blank
int minibatch_;
int num_threads_;
int blank_label_;
void* workspace_;
// In-place numerically-stable softmax over the alphabet dimension.
void softmax(const ProbT* const activations, ProbT* probs,
const int* const input_lengths);
// Returns (cost, over_threshold) for one utterance; writes its gradient.
std::tuple<ProbT, bool>
cost_and_grad_kernel(ProbT *grad, const ProbT* const probs,
const int* const labels, int T, int L,
int mb, size_t bytes_used);
ProbT compute_alphas(const ProbT* probs, int repeats, int S, int T,
const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas);
ProbT compute_betas_and_grad(ProbT* grad, const ProbT* const probs,
ProbT log_partition, int repeats,
int S, int T, const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas,
ProbT* betas,
ProbT* output);
};
// Carves the per-utterance scratch buffers out of `workspace`, starting at
// offset `bytes_used`.  The carving order and element sizes here must agree
// byte-for-byte with the per-minibatch size computation in cost_and_grad /
// score_forward, otherwise buffers of adjacent utterances overlap.
template<typename ProbT>
CpuCTC<ProbT>::CpuCTC_metadata::CpuCTC_metadata(int L, int S, int T, int mb,
int alphabet_size,
void* workspace, size_t bytes_used,
int blank_label,
const int* const labels) {
alphas = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(ProbT) * S * T;
std::fill(alphas, alphas + S * T, ctc_helper::neg_inf<ProbT>());
betas = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(ProbT) * S;
std::fill(betas, betas + S, ctc_helper::neg_inf<ProbT>());
labels_w_blanks = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(int) * S;
e_inc = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(int) * S;
s_inc = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(int) * S;
output = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(ProbT) * alphabet_size;
repeats = setup_labels(labels, blank_label, L, S);
}
// Builds the blank-interleaved label sequence (blank, l1, blank, l2, ...,
// blank; length S = 2L+1) and the s_inc/e_inc tables that drive how far the
// active [start, end) window advances per time step in compute_alphas /
// compute_betas_and_grad.  A repeated label forces two single-step advances
// (the path must pass through the separating blank); distinct neighbors
// allow a two-step skip.  Returns the number of adjacent repeated labels.
template<typename ProbT>
int CpuCTC<ProbT>::CpuCTC_metadata::setup_labels(const int* const labels,
int blank_label, int L, int S) {
int e_counter = 0;
int s_counter = 0;
s_inc[s_counter++] = 1;
int repeats = 0;
for (int i = 1; i < L; ++i) {
if (labels[i-1] == labels[i]) {
s_inc[s_counter++] = 1;
s_inc[s_counter++] = 1;
e_inc[e_counter++] = 1;
e_inc[e_counter++] = 1;
++repeats;
}
else {
s_inc[s_counter++] = 2;
e_inc[e_counter++] = 2;
}
}
e_inc[e_counter++] = 1;
for (int i = 0; i < L; ++i) {
labels_w_blanks[2 * i] = blank_label;
labels_w_blanks[2 * i + 1] = labels[i];
}
labels_w_blanks[S - 1] = blank_label;
return repeats;
}
// Numerically-stable softmax over the alphabet dimension, one column per
// (utterance, time-step) pair.  Columns are time-major and interleaved
// across the minibatch: column (mb, t) starts at
// (mb + minibatch_ * t) * alphabet_size_.  Only the first input_lengths[mb]
// time steps of each utterance are processed.
template<typename ProbT>
void
CpuCTC<ProbT>::softmax(const ProbT* const activations, ProbT* probs,
                       const int* const input_lengths) {
#pragma omp parallel for
    for (int mb = 0; mb < minibatch_; ++mb) {
        for (int t = 0; t < input_lengths[mb]; ++t) {
            const int offset = (mb + minibatch_ * t) * alphabet_size_;
            const ProbT* col_in = activations + offset;
            ProbT* col_out = probs + offset;

            // Subtract the column maximum before exponentiating.
            ProbT col_max = -std::numeric_limits<ProbT>::infinity();
            for (int r = 0; r < alphabet_size_; ++r)
                col_max = std::max(col_max, col_in[r]);

            ProbT denom = ProbT(0.);
            for (int r = 0; r < alphabet_size_; ++r) {
                col_out[r] = std::exp(col_in[r] - col_max);
                denom += col_out[r];
            }

            for (int r = 0; r < alphabet_size_; ++r)
                col_out[r] /= denom;
        }
    }
}
// Computes the CTC cost (negative forward log-likelihood) and gradient for a
// single utterance.  Returns (cost, over_threshold), where over_threshold
// flags a forward/backward log-likelihood mismatch beyond
// ctc_helper::threshold (a numerical-sanity diagnostic).
template<typename ProbT>
std::tuple<ProbT, bool>
CpuCTC<ProbT>::cost_and_grad_kernel(ProbT *grad, const ProbT* const probs,
const int* const labels,
int T, int L, int mb, size_t bytes_used) {
const int S = 2*L + 1; // Number of labels with blanks
CpuCTC_metadata ctcm(L, S, T, mb, alphabet_size_, workspace_, bytes_used, blank_label_, labels);
bool over_threshold = false;
// An alignment is impossible if the padded label sequence cannot fit in T
// frames (each repeat needs an extra blank frame).
if (L + ctcm.repeats > T) {
return std::make_tuple(ProbT(0), over_threshold); // TODO, not right to return 0
}
ProbT llForward = compute_alphas(probs, ctcm.repeats, S, T, ctcm.e_inc,
ctcm.s_inc, ctcm.labels_w_blanks,
ctcm.alphas);
ProbT llBackward = compute_betas_and_grad(grad, probs, llForward, ctcm.repeats,
S, T, ctcm.e_inc, ctcm.s_inc,
ctcm.labels_w_blanks,
ctcm.alphas,
ctcm.betas,
ctcm.output);
ProbT diff = std::abs(llForward - llBackward);
if (diff > ctc_helper::threshold) {
over_threshold = true;
}
return std::make_tuple(-llForward, over_threshold);
}
// Computes forward probabilities
// Computes forward probabilities
// Standard CTC forward pass in log space over the (S, T) trellis.  The
// active window [start, end) is advanced each time step using the s_inc /
// e_inc tables built by setup_labels, so only states that can still reach
// the final column are visited.  Returns the total log-likelihood.
template<typename ProbT>
ProbT CpuCTC<ProbT>::compute_alphas(const ProbT* probs, int repeats, int S, int T,
const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas) {
int start = (((S /2) + repeats - T) < 0) ? 0 : 1,
end = S > 1 ? 2 : 1;
// initialize the first column of the trellis
for (int i = start; i < end; ++i) {
alphas[i] = std::log(probs[labels[i]]);
}
for(int t = 1; t < T; ++t) {
int remain = (S / 2) + repeats - (T - t);
if(remain >= 0)
start += s_inc[remain];
if(t <= (S / 2) + repeats)
end += e_inc[t - 1];
int startloop = start;
// idx1/idx2: current/previous trellis columns; idx3: probs column for t
int idx1 = t * S, idx2 = (t - 1) * S, idx3 = t * (alphabet_size_ * minibatch_);
if (start == 0) {
// topmost blank state can only be reached from itself
alphas[idx1] = alphas[idx2] + std::log(probs[blank_label_ + idx3]);
startloop += 1;
}
for(int i = startloop; i < end; ++i) {
ProbT prev_sum = ctc_helper::log_plus<ProbT>()(alphas[i + idx2], alphas[(i-1) + idx2]);
// Skip two if not on blank and not on repeat.
if (labels[i] != blank_label_ && i != 1 && labels[i] != labels[i-2])
prev_sum = ctc_helper::log_plus<ProbT>()(prev_sum, alphas[(i-2) + idx2]);
alphas[i + idx1] = prev_sum + std::log(probs[labels[i] + idx3]);
}
}
// log-sum over the admissible final states in the last column
ProbT loglike = ctc_helper::neg_inf<ProbT>();
for(int i = start; i < end; ++i) {
loglike = ctc_helper::log_plus<ProbT>()(loglike, alphas[i + (T - 1) * S]);
}
return loglike;
}
// Starting from T, we sweep backward over the alpha array computing one column
// of betas as we go. At each position we can update product alpha * beta and then
// sum into the gradient associated with each label.
// NOTE computes gradient w.r.t UNNORMALIZED final layer activations.
// Assumed passed in grads are already zeroed!
// Backward pass: sweeps right-to-left computing one column of betas at a
// time, folding alpha*beta (in log space) into per-label accumulators
// (`output`) and then into the gradient at each time step.  Returns the
// backward log-likelihood, which should match the forward one.
template<typename ProbT>
ProbT CpuCTC<ProbT>::compute_betas_and_grad(ProbT* grad, const ProbT* const probs,
ProbT log_partition, int repeats,
int S, int T, const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas,
ProbT* betas,
ProbT* output) {
int start = S > 1 ? (S - 2) : 0,
end = (T > (S / 2) + repeats) ? S : S-1;
std::fill(output, output + alphabet_size_, ctc_helper::neg_inf<ProbT>());
//set the starting values in the beta column at the very right edge
for (int i = start; i < end; ++i) {
betas[i] = std::log(probs[labels[i] + (T - 1) * (alphabet_size_ * minibatch_)]);
//compute alpha * beta in log space at this position in (S, T) space
alphas[i + (T - 1) * S] += betas[i];
//update the gradient associated with this label
//essentially performing a reduce-by-key in a sequential manner
output[labels[i]] =
ctc_helper::log_plus<ProbT>()(alphas[i + (T - 1) * S], output[labels[i]]);
}
//update the gradient wrt to each unique label
for (int i = 0; i < alphabet_size_; ++i) {
int idx3 = (T - 1) * alphabet_size_ * minibatch_ + i;
// guard against log(0) / -inf accumulators: fall back to probs alone
if (output[i] == 0.0 || output[i] == ctc_helper::neg_inf<ProbT>() ||
probs[idx3] == 0.0) {
grad[idx3] = probs[idx3];
} else {
grad[idx3] = probs[idx3] - std::exp(output[i] -
std::log(probs[idx3]) - log_partition);
}
}
//loop from the second to last column all the way to the left
for(int t = T - 2; t >= 0; --t) {
int remain = (S / 2) + repeats - (T - t);
if(remain >= -1)
start -= s_inc[remain + 1];
if(t < (S / 2) + repeats)
end -= e_inc[t];
int endloop = end == S ? end - 1 : end;
int idx1 = t * S, idx3 = t * (alphabet_size_ * minibatch_);
std::fill(output, output + alphabet_size_, ctc_helper::neg_inf<ProbT>());
for(int i = start; i < endloop; ++i) {
ProbT next_sum = ctc_helper::log_plus<ProbT>()(betas[i], betas[(i+1)]);
// Skip two if not on blank and not on repeat.
if (labels[i] != blank_label_ && i != (S-2) && labels[i] != labels[i+2]){
next_sum = ctc_helper::log_plus<ProbT>()(next_sum, betas[(i+2)]);
}
betas[i] = next_sum + std::log(probs[labels[i] + idx3]);
//compute alpha * beta in log space
alphas[i + idx1] += betas[i];
//update the gradient associated with this label
output[labels[i]] =
ctc_helper::log_plus<ProbT>()(alphas[i + idx1], output[labels[i]]);
}
// the bottom-most blank state is only reachable from itself
if (end == S) {
betas[(S-1)] = betas[(S-1)] + std::log(probs[blank_label_ + idx3]);
alphas[(S-1) + idx1] += betas[(S-1)];
output[labels[S-1]] =
ctc_helper::log_plus<ProbT>()(alphas[S-1 + idx1], output[labels[S-1]]);
}
//go over the unique labels and compute the final grad
// wrt to each one at this time step
for (int i = 0; i < alphabet_size_; ++i) {
if (output[i] == 0.0 || output[i] == ctc_helper::neg_inf<ProbT>() ||
probs[idx3] == 0.0) {
grad[idx3] = probs[idx3];
} else {
grad[idx3] = probs[idx3] - std::exp(output[i] -
std::log(probs[idx3]) - log_partition);
}
++idx3;
}
}
ProbT loglike = ctc_helper::neg_inf<ProbT>();
for(int i = start; i < end; ++i) {
loglike = ctc_helper::log_plus<ProbT>()(loglike, betas[i]);
}
return loglike;
}
// Computes costs (negative log-likelihoods) and gradients for the whole
// minibatch.  The workspace is laid out as: softmax'd probabilities up
// front, then one fixed-size scratch region per utterance.
//
// Fix: the per-minibatch buffer sizes were computed with sizeof(float),
// but CpuCTC_metadata carves the same buffers with sizeof(ProbT).  When
// ProbT is double, the float-based stride makes adjacent utterances'
// scratch regions overlap, corrupting alphas/betas.  Use sizeof(ProbT)
// so the stride matches the carving exactly (identical for ProbT=float).
template<typename ProbT>
ctcStatus_t
CpuCTC<ProbT>::cost_and_grad(const ProbT* const activations,
                             ProbT *grads,
                             ProbT *costs,
                             const int* const flat_labels,
                             const int* const label_lengths,
                             const int* const input_lengths) {
    if (activations == nullptr ||
        grads == nullptr ||
        costs == nullptr ||
        flat_labels == nullptr ||
        label_lengths == nullptr ||
        input_lengths == nullptr
       )
        return CTC_STATUS_INVALID_VALUE;

    ProbT* probs = static_cast<ProbT *>(workspace_);

    int maxT = *std::max_element(input_lengths, input_lengths + minibatch_);

    size_t bytes_used = sizeof(ProbT) * minibatch_ * alphabet_size_ * maxT;

    //per minibatch memory; must mirror CpuCTC_metadata's carving order/sizes
    size_t per_minibatch_bytes = 0;

    int maxL = *std::max_element(label_lengths, label_lengths + minibatch_);
    int maxS = 2 * maxL + 1;

    //output
    per_minibatch_bytes += sizeof(ProbT) * alphabet_size_;

    //alphas
    per_minibatch_bytes += sizeof(ProbT) * maxS * maxT;

    //betas
    per_minibatch_bytes += sizeof(ProbT) * maxS;

    //labels w/blanks, e_inc, s_inc
    per_minibatch_bytes += 3 * sizeof(int) * maxS;

    softmax(activations, probs, input_lengths);

#pragma omp parallel for
    for (int mb = 0; mb < minibatch_; ++mb) {
        const int T = input_lengths[mb]; // Length of utterance (time)
        const int L = label_lengths[mb]; // Number of labels in transcription
        bool mb_status;

        std::tie(costs[mb], mb_status) =
            cost_and_grad_kernel(grads + mb * alphabet_size_,
                                 probs + mb * alphabet_size_,
                                 flat_labels + std::accumulate(label_lengths, label_lengths + mb, 0),
                                 T, L, mb,
                                 bytes_used + mb * per_minibatch_bytes);
    }

    return CTC_STATUS_SUCCESS;
}
// Forward-only scoring for the whole minibatch: computes costs without
// gradients.  Workspace layout mirrors cost_and_grad.
//
// Fix: as in cost_and_grad, the per-minibatch buffer sizes were computed
// with sizeof(float) while CpuCTC_metadata carves them with sizeof(ProbT);
// use sizeof(ProbT) so the per-utterance stride matches the carving
// (identical behavior for ProbT=float).
template<typename ProbT>
ctcStatus_t CpuCTC<ProbT>::score_forward(const ProbT* const activations,
                                         ProbT* costs,
                                         const int* const flat_labels,
                                         const int* const label_lengths,
                                         const int* const input_lengths) {
    if (activations == nullptr ||
        costs == nullptr ||
        flat_labels == nullptr ||
        label_lengths == nullptr ||
        input_lengths == nullptr
       )
        return CTC_STATUS_INVALID_VALUE;

    ProbT* probs = static_cast<ProbT *>(workspace_);

    int maxT = *std::max_element(input_lengths, input_lengths + minibatch_);

    size_t bytes_used = sizeof(ProbT) * minibatch_ * alphabet_size_ * maxT;

    //per minibatch memory; must mirror CpuCTC_metadata's carving order/sizes
    size_t per_minibatch_bytes = 0;

    int maxL = *std::max_element(label_lengths, label_lengths + minibatch_);
    int maxS = 2 * maxL + 1;

    //output
    per_minibatch_bytes += sizeof(ProbT) * alphabet_size_;

    //alphas
    per_minibatch_bytes += sizeof(ProbT) * maxS * maxT;

    //betas
    per_minibatch_bytes += sizeof(ProbT) * maxS;

    //labels w/blanks, e_inc, s_inc
    per_minibatch_bytes += 3 * sizeof(int) * maxS;

    softmax(activations, probs, input_lengths);

#pragma omp parallel for
    for (int mb = 0; mb < minibatch_; ++mb) {
        const int T = input_lengths[mb]; // Length of utterance (time)
        const int L = label_lengths[mb]; // Number of labels in transcription
        const int S = 2*L + 1;           // Number of labels with blanks

        CpuCTC_metadata ctcm(L, S, T, mb, alphabet_size_, workspace_,
                             bytes_used + mb * per_minibatch_bytes, blank_label_,
                             flat_labels + std::accumulate(label_lengths, label_lengths + mb, 0));

        // No valid alignment exists if the padded labels cannot fit in T.
        if (L + ctcm.repeats > T)
            costs[mb] = ProbT(0);
        else {
            costs[mb] = -compute_alphas(probs + mb * alphabet_size_, ctcm.repeats, S, T,
                                        ctcm.e_inc, ctcm.s_inc, ctcm.labels_w_blanks,
                                        ctcm.alphas);
        }
    }

    return CTC_STATUS_SUCCESS;
}
|
GB_unop__identity_fc64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc64_fp64)
// op(A') function: GB (_unop_tran__identity_fc64_fp64)
// C type: GxB_FC64_t
// A type: double
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = identity (cast (Ax)): casts each double entry of A to GxB_FC64_t
// (real part aij, imaginary part 0) and stores it in C.
GrB_Info GB (_unop_apply__identity_fc64_fp64)
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
// A is sparse, hypersparse, or full: every entry is present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (cast (A')): transpose A while casting double entries to
// GxB_FC64_t; the work is expanded from GB_unop_transpose.c using the
// GB_CAST_OP macro defined above.
GrB_Info GB (_unop_tran__identity_fc64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
krb5pa-md5_fmt_plug.c | /*
* Kerberos 5 etype 23 "PA ENC TIMESTAMP" by magnum
*
* Previously called mskrb5 because I had the idea it was Micro$oft specific.
*
* Pcap file -> input file:
* 1. tshark -r capture.pcapng -T pdml > ~/capture.pdml
* 2. krbng2john.py ~/capture.pdml > krb5.in
* 3. Run john on krb5.in
*
* Legacy input format:
* user:$mskrb5$user$realm$checksum$timestamp
*
* New input format from krbpa2john.py (the above is still supported)
* user:$krb5pa$etype$user$realm$salt$timestamp+checksum
*
* user, realm and salt are unused in this format.
*
* This attacks a known-plaintext vulnerability in AS_REQ pre-auth packets. The
* known plaintext is a UTC timestamp in the format 20081120171510Z. Only if
* this indicate a match we decrypt the whole timestamp and calculate our own
* checksum to be really sure.
*
* The plaintext attack combined with re-using key setup was said to result in
* more than 60% speedup. This was confirmed using John the Ripper and variants
* of this code.
*
* http://www.ietf.org/rfc/rfc4757.txt
* http://www.securiteam.com/windowsntfocus/5BP0H0A6KM.html
*
* OMP is supported and scales very well now.
*
* This software is Copyright (c) 2011-2012 magnum, and it is hereby released
* to the general public under the following terms: Redistribution and use in
* source and binary forms, with or without modification, are permitted.
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_mskrb5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_mskrb5);
#else
#if AC_BUILT
#include "autoconfig.h"
#endif
#include <sys/types.h>
#include <sys/stat.h>
#if !AC_BUILT || HAVE_FCNTL_H
#include <fcntl.h>
#endif
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <ctype.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "misc.h"
#include "formats.h"
#include "options.h"
#include "common.h"
#include "unicode.h"
#include "md5.h"
#include "hmacmd5.h"
#include "md4.h"
#include "rc4.h"
#include "memdbg.h"
#define FORMAT_LABEL "krb5pa-md5"
#define FORMAT_NAME "Kerberos 5 AS-REQ Pre-Auth etype 23" /* md4 rc4-hmac-md5 */
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1000
#define PLAINTEXT_LENGTH 125
#define MAX_REALMLEN 64
#define MAX_USERLEN 64
#define MAX_SALTLEN 128
#define TIMESTAMP_SIZE 36
#define CHECKSUM_SIZE 16
#define KEY_SIZE 16
#define BINARY_SIZE CHECKSUM_SIZE
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct salt_t)
#define SALT_ALIGN 4
#define TOTAL_LENGTH (14 + 2 * (CHECKSUM_SIZE + TIMESTAMP_SIZE) + MAX_REALMLEN + MAX_USERLEN + MAX_SALTLEN)
// these may be altered in init() if running OMP
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define OMP_SCALE 1024
#define HEXCHARS "0123456789abcdefABCDEF"
// Second and third plaintext will be replaced in init() under come encodings
static struct fmt_tests tests[] = {
{"$krb5pa$23$user$realm$salt$afcbe07c32c3450b37d0f2516354570fe7d3e78f829e77cdc1718adf612156507181f7daeb03b6fbcfe91f8346f3c0ae7e8abfe5", "John"},
{"$mskrb5$john$JOHN.DOE.MS.COM$02E837D06B2AC76891F388D9CC36C67A$2A9785BF5036C45D3843490BF9C228E8C18653E10CE58D7F8EF119D2EF4F92B1803B1451", "fr2beesgr"},
{"$mskrb5$user1$EXAMPLE.COM$08b5adda3ab0add14291014f1d69d145$a28da154fa777a53e23059647682eee2eb6c1ada7fb5cad54e8255114270676a459bfe4a", "openwall"},
{"$mskrb5$hackme$EXAMPLE.NET$e3cdf70485f81a85f7b59a4c1d6910a3$6e2f6705551a76f84ec2c92a9dd0fef7b2c1d4ca35bf1b02423359a3ecaa19bdf07ed0da", "openwall@123"},
{"$mskrb5$$$98cd00b6f222d1d34e08fe0823196e0b$5937503ec29e3ce4e94a051632d0fff7b6781f93e3decf7dca707340239300d602932154", ""},
{"$mskrb5$$$F4085BA458B733D8092E6B348E3E3990$034ACFC70AFBA542690B8BC912FCD7FED6A848493A3FF0D7AF641A263B71DCC72902995D", "frank"},
{"$mskrb5$user$realm$eb03b6fbcfe91f8346f3c0ae7e8abfe5$afcbe07c32c3450b37d0f2516354570fe7d3e78f829e77cdc1718adf612156507181f7da", "John"},
{"$mskrb5$$$881c257ce5df7b11715a6a60436e075a$c80f4a5ec18e7c5f765fb9f00eda744a57483db500271369cf4752a67ca0e67f37c68402", "the"},
{"$mskrb5$$$ef012e13c8b32448241091f4e1fdc805$354931c919580d4939421075bcd50f2527d092d2abdbc0e739ea72929be087de644cef8a", "Ripper"},
{"$mskrb5$$$334ef74dad191b71c43efaa16aa79d88$34ebbad639b2b5a230b7ec1d821594ed6739303ae6798994e72bd13d5e0e32fdafb65413", "VeryveryveryloooooooongPassword"},
{NULL}
};
static struct salt_t {
ARCH_WORD_32 checksum[CHECKSUM_SIZE / sizeof(ARCH_WORD_32)];
unsigned char timestamp[TIMESTAMP_SIZE];
} *cur_salt;
static char (*saved_plain)[(PLAINTEXT_LENGTH+4)];
static int (*saved_len);
static ARCH_WORD_32 (*output)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static HMACMD5Context (*saved_ctx);
static int keys_prepared;
// Format init: sizes the per-candidate arrays (scaled up for OpenMP) and
// swaps in encoding-specific self-test vectors for the non-ASCII plaintexts.
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int n = MIN_KEYS_PER_CRYPT * omp_get_max_threads();
if (n < MIN_KEYS_PER_CRYPT)
n = MIN_KEYS_PER_CRYPT;
// min is the thread count; max is scaled by OMP_SCALE for throughput
self->params.min_keys_per_crypt = n;
n *= OMP_SCALE;
self->params.max_keys_per_crypt = n;
#endif
saved_plain = mem_calloc_tiny(sizeof(*saved_plain) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
saved_len = mem_calloc_tiny(sizeof(*saved_len) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
output = mem_calloc_tiny(sizeof(*output) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
saved_ctx = mem_calloc_tiny(sizeof(*saved_ctx) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
if (pers_opts.target_enc == UTF_8) {
tests[1].plaintext = "\xC3\xBC"; // German u-umlaut in UTF-8
tests[1].ciphertext = "$mskrb5$$$958db4ddb514a6cc8be1b1ccf82b0191$090408357a6f41852d17f3b4bb4634adfd388db1be64d3fe1a1d75ee4338d2a4aea387e5";
tests[2].plaintext = "\xC3\x9C\xC3\x9C"; // 2x uppercase of them
tests[2].ciphertext = "$mskrb5$$$057cd5cb706b3de18e059912b1f057e3$fe2e561bd4e42767e972835ea99f08582ba526e62a6a2b6f61364e30aca7c6631929d427";
} else {
// legacy codepages: only substitute when the codepage maps the bytes
if (CP_to_Unicode[0xfc] == 0x00fc) {
tests[1].plaintext = "\xFC"; // German u-umlaut in many ISO-8859-x
tests[1].ciphertext = "$mskrb5$$$958db4ddb514a6cc8be1b1ccf82b0191$090408357a6f41852d17f3b4bb4634adfd388db1be64d3fe1a1d75ee4338d2a4aea387e5";
}
if (CP_to_Unicode[0xdc] == 0x00dc) {
tests[2].plaintext = "\xDC\xDC"; // 2x uppercase of them
tests[2].ciphertext = "$mskrb5$$$057cd5cb706b3de18e059912b1f057e3$fe2e561bd4e42767e972835ea99f08582ba526e62a6a2b6f61364e30aca7c6631929d427";
}
}
}
// Extracts the salt (encrypted timestamp + checksum) from the last '$'-field
// of the canonical ciphertext produced by split().  Returns a pointer to a
// static struct, valid until the next call.
static void *salt(char *ciphertext)
{
static struct salt_t salt;
char *p;
int i;
p = strrchr(ciphertext, '$') + 1;
// first 2*TIMESTAMP_SIZE hex chars: the RC4-encrypted timestamp
for (i = 0; i < TIMESTAMP_SIZE; i++) {
salt.timestamp[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
// next 2*CHECKSUM_SIZE hex chars: the HMAC-MD5 checksum
for (i = 0; i < CHECKSUM_SIZE; i++) {
((unsigned char*)salt.checksum)[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return (void*)&salt;
}
// Installs the salt for subsequent crypt_all() calls.
static void set_salt(void *salt)
{
cur_salt = salt;
}
// Canonicalizes a hash to the "$krb5pa$23$$$$<timestamp><checksum>" form:
// legacy "$mskrb5$user$realm$checksum$timestamp" input has its last two
// fields swapped into timestamp-then-checksum order; the hex payload is
// lowercased.  Returns a pointer to a static buffer.
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
static char out[TOTAL_LENGTH + 1];
char *data;
if (!strncmp(ciphertext, "$mskrb5$", 8)) {
char in[TOTAL_LENGTH + 1];
char *c, *t;
strnzcpy(in, ciphertext, sizeof(in));
// t -> timestamp (last field), c -> checksum (second to last)
t = strrchr(in, '$'); *t++ = 0;
c = strrchr(in, '$'); *c++ = 0;
snprintf(out, sizeof(out), "$krb5pa$23$$$$%s%s", t, c);
} else {
char *tc;
tc = strrchr(ciphertext, '$');
snprintf(out, sizeof(out), "$krb5pa$23$$$$%s", ++tc);
}
// lowercase only the hex payload, not the tag
data = out + strlen(out) - 2 * (CHECKSUM_SIZE + TIMESTAMP_SIZE) - 1;
strlwr(data);
return out;
}
// Decodes the 16-byte HMAC-MD5 checksum (the part following the timestamp
// in the canonical last field) into a lazily-allocated static buffer.
static void *binary(char *ciphertext)
{
static unsigned char *binary;
char *p;
int i;
if (!binary) binary = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);
p = strrchr(ciphertext, '$') + 1;
// skip over the encrypted timestamp to reach the checksum hex
p += 2 * TIMESTAMP_SIZE;
for (i = 0; i < CHECKSUM_SIZE; i++) {
binary[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return (void*)binary;
}
// Validates both supported input formats, checking field lengths and that
// the hex payloads contain only hex digits.  Returns 1 if parsable.
static int valid(char *ciphertext, struct fmt_main *self)
{
char *data = ciphertext, *p;
// legacy format: $mskrb5$user$realm$checksum$timestamp
if (!strncmp(ciphertext, "$mskrb5$", 8)) {
data += 8;
// user field
p = strchr(data, '$');
if (!p || p - data > MAX_USERLEN)
return 0;
data = p + 1;
// realm field
p = strchr(data, '$');
if (!p || p - data > MAX_REALMLEN)
return 0;
data = p + 1;
// checksum
p = strchr(data, '$');
if (!p || p - data != 2 * CHECKSUM_SIZE ||
strspn(data, HEXCHARS) != p - data)
return 0;
data = p + 1;
// encrypted timestamp
p += strlen(data) + 1;
if (*p || p - data != TIMESTAMP_SIZE * 2 ||
strspn(data, HEXCHARS) != p - data)
return 0;
return 1;
// new format: $krb5pa$23$user$realm$salt$timestamp+checksum
} else if (!strncmp(ciphertext, "$krb5pa$23$", 11)) {
data += 11;
// user field
p = strchr(data, '$');
if (!p || p - data > MAX_USERLEN)
return 0;
data = p + 1;
// realm field
p = strchr(data, '$');
if (!p || p - data > MAX_REALMLEN)
return 0;
data = p + 1;
// salt field
p = strchr(data, '$');
if (!p || p - data > MAX_SALTLEN)
return 0;
data = p + 1;
// timestamp+checksum
p += strlen(data) + 1;
if (*p || p - data != (TIMESTAMP_SIZE + CHECKSUM_SIZE) * 2 ||
strspn(data, HEXCHARS) != p - data)
return 0;
return 1;
}
return 0;
}
// Caches the candidate password and its length; all hashing work is
// deferred to crypt_all(), which re-derives keys when keys_prepared is 0.
static void set_key(char *key, int index)
{
	const int len = strlen(key);

	saved_len[index] = len;
	memcpy(saved_plain[index], key, len + 1);
	keys_prepared = 0;
}
// Returns the candidate stored by set_key(); saved_plain holds a
// NUL-terminated copy, so it can be handed back directly.
static char *get_key(int index)
{
	return saved_plain[index];
}
// Computes the RC4-HMAC (etype 23) verifier for every cached candidate.
// Two phases: (1) salt-independent key derivation (NTLM hash K, then
// K1 = HMAC-MD5(K, 1)), cached in saved_ctx until set_key() invalidates it;
// (2) per-salt work: derive K3 from the checksum, decrypt the timestamp,
// and only on a known-plaintext match compute the full HMAC-MD5 checksum.
// NOTE: without _OPENMP the for-lines compile out and each braced body runs
// once with i == 0 (keys-per-crypt is 1 in that build).
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
const unsigned char one[] = { 1, 0, 0, 0 };
int i = 0;
if (!keys_prepared) {
#ifdef _OPENMP
#pragma omp parallel for
for (i = 0; i < count; i++)
#endif
{
int len;
unsigned char K[KEY_SIZE];
unsigned char K1[KEY_SIZE];
// K = MD4(UTF-16LE(password)), ordinary 16-byte NTLM hash
len = E_md4hash((unsigned char *) saved_plain[i], saved_len[i], K);
if (len <= 0)
((char*)(saved_plain[i]))[-len] = 0; // match truncation
// K1 = HMAC-MD5(K, 1)
// 1 is encoded as little endian in 4 bytes (0x01000000)
hmac_md5(K, (unsigned char *) &one, 4, K1);
// We do key setup of the next HMAC_MD5 here. rest in inner loop
hmac_md5_init_K16(K1, &saved_ctx[i]);
}
keys_prepared = 1;
}
#ifdef _OPENMP
#pragma omp parallel for
for (i = 0; i < count; i++)
#endif
{
unsigned char K3[KEY_SIZE], cleartext[TIMESTAMP_SIZE];
HMACMD5Context ctx;
// key set up with K1 is stored in saved_ctx[i]
// K3 = HMAC-MD5(K1, CHECKSUM)
memcpy(&ctx, &saved_ctx[i], sizeof(ctx));
hmac_md5_update((unsigned char*)cur_salt->checksum,
CHECKSUM_SIZE, &ctx);
hmac_md5_final(K3, &ctx);
// Decrypt part of the timestamp with the derived key K3
RC4_single(K3, KEY_SIZE, cur_salt->timestamp, 16, cleartext);
// Bail out unless we see known plaintext
// (the decrypted ASN.1 blob carries a "20xx..." UTC timestamp here)
if (cleartext[14] == '2' && cleartext[15] == '0') {
// Decrypt the rest of the timestamp
RC4_single(K3, KEY_SIZE, cur_salt->timestamp,
TIMESTAMP_SIZE, cleartext);
if (cleartext[28] == 'Z') {
// create checksum K2 = HMAC-MD5(K1, plaintext)
memcpy(&ctx, &saved_ctx[i], sizeof(ctx));
hmac_md5_update(cleartext, TIMESTAMP_SIZE, &ctx);
hmac_md5_final((unsigned char*)output[i], &ctx);
}
} else {
// mark as non-match so cmp_* functions reject this candidate
output[i][0] = 0;
}
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
#ifdef _OPENMP
for (index = 0; index < count; index++)
#endif
if (*(ARCH_WORD_32*)binary == output[index][0])
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
	// Full-width comparison of the stored binary against this
	// candidate's computed checksum.
	return memcmp(output[index], binary, BINARY_SIZE) == 0;
}
static int cmp_exact(char *source, int index)
{
	// cmp_one() already compares the full BINARY_SIZE bytes, so there
	// is nothing further to verify here.
	return 1;
}
// Partial-hash accessors: progressively wider low-bit masks of the
// first 32-bit word of the computed checksum, used by the cracker's
// hash tables.
static int get_hash_0(int index)
{
	return output[index][0] & 0xf;
}

static int get_hash_1(int index)
{
	return output[index][0] & 0xff;
}

static int get_hash_2(int index)
{
	return output[index][0] & 0xfff;
}

static int get_hash_3(int index)
{
	return output[index][0] & 0xffff;
}

static int get_hash_4(int index)
{
	return output[index][0] & 0xfffff;
}

static int get_hash_5(int index)
{
	return output[index][0] & 0xffffff;
}

static int get_hash_6(int index)
{
	return output[index][0] & 0x7ffffff;
}
static int salt_hash(void *salt)
{
	// Bucket salts by the first checksum byte.
	struct salt_t *s = (struct salt_t *) salt;

	return s->checksum[0] & (SALT_HASH_SIZE - 1);
}
// Format registration record. Field order is fixed by struct fmt_main;
// do not reorder the positional initializers below.
struct fmt_main fmt_mskrb5 = {
	{	// fmt_params
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		// Case-sensitive, 8-bit clean, split() unifies case, OpenMP
		// capable, Unicode/UTF-8 aware.
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_UNICODE | FMT_UTF8,
#if FMT_MAIN_VERSION > 11
		{ NULL },	// tunable cost names (none)
#endif
		tests	// built-in self-test vectors
	}, {	// fmt_methods
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		binary,
		salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },	// tunable cost value functions (none)
#endif
		fmt_default_source,
		{	// binary_hash[]: defaults pair with get_hash[] below
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{	// get_hash[]: partial-hash accessors over output[]
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
|
clange.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zlange.c, normal z -> c, Fri Sep 28 17:38:07 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
/***************************************************************************//**
*
* @ingroup plasma_lange
*
* Returns the norm of a general matrix as
*
* clange = ( max(abs(A(i,j))), NORM = PlasmaMaxNorm
* (
* ( norm1(A), NORM = PlasmaOneNorm
* (
* ( normI(A), NORM = PlasmaInfNorm
* (
* ( normF(A), NORM = PlasmaFrobeniusNorm
*
* where norm1 denotes the one norm of a matrix (maximum column sum),
* normI denotes the infinity norm of a matrix (maximum row sum) and
* normF denotes the Frobenius norm of a matrix (square root of sum
* of squares). Note that max(abs(A(i,j))) is not a consistent matrix
* norm.
*
*******************************************************************************
*
* @param[in] norm
* - PlasmaMaxNorm: max norm
* - PlasmaOneNorm: one norm
* - PlasmaInfNorm: infinity norm
* - PlasmaFrobeniusNorm: Frobenius norm
*
* @param[in] m
* The number of rows of the matrix A. m >= 0. When m = 0,
* the returned value is set to zero.
*
* @param[in] n
* The number of columns of the matrix A. n >= 0. When n = 0,
* the returned value is set to zero.
*
* @param[in] pA
* The m-by-n matrix A.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,m).
*
*******************************************************************************
*
* @retval float
* The specified norm of the general matrix A.
*
*******************************************************************************
*
* @sa plasma_omp_clange
* @sa plasma_clange
 * @sa plasma_dlange
 * @sa plasma_slange
*
******************************************************************************/
float plasma_clange(plasma_enum_t norm,
                    int m, int n,
                    plasma_complex32_t *pA, int lda)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((norm != PlasmaMaxNorm) && (norm != PlasmaOneNorm) &&
        (norm != PlasmaInfNorm) && (norm != PlasmaFrobeniusNorm)) {
        plasma_error("illegal value of norm");
        return -1;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -2;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -3;
    }
    if (lda < imax(1, m)) {
        // Fix: removed a leftover debug printf of lda here;
        // plasma_error() is the proper reporting channel.
        plasma_error("illegal value of lda");
        return -5;
    }

    // quick return
    if (imin(n, m) == 0)
        return 0.0;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_lange(plasma, PlasmaComplexFloat, m, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrix.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }

    // Allocate norm-specific workspace; the sizes match the contract
    // documented on plasma_omp_clange().
    float *work = NULL;
    switch (norm) {
    case PlasmaMaxNorm:
        work = (float*)malloc((size_t)A.mt*A.nt*sizeof(float));
        break;
    case PlasmaOneNorm:
        work = (float*)malloc(((size_t)A.mt*A.n+A.n)*sizeof(float));
        break;
    case PlasmaInfNorm:
        work = (float*)malloc(((size_t)A.nt*A.m+A.m)*sizeof(float));
        break;
    case PlasmaFrobeniusNorm:
        work = (float*)malloc((size_t)2*A.mt*A.nt*sizeof(float));
        break;
    }
    if (work == NULL) {
        plasma_error("malloc() failed");
        // Fix: release the tile descriptor before bailing out;
        // the original returned here and leaked A.
        plasma_desc_destroy(&A);
        return PlasmaErrorOutOfMemory;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);

    float value;

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_cge2desc(pA, lda, A, &sequence, &request);

        // Call tile async function.
        plasma_omp_clange(norm, A, work, &value, &sequence, &request);
    }
    // implicit synchronization

    free(work);

    // Free matrix in tile layout.
    plasma_desc_destroy(&A);

    // Return the norm.
    return value;
}
/***************************************************************************//**
*
* @ingroup plasma_lange
*
* Calculates the max, one, infinity or Frobenius norm of a general matrix.
* Non-blocking equivalent of plasma_clange(). May return before the
* computation is finished. Operates on matrices stored by tiles. All matrices
* are passed through descriptors. All dimensions are taken from the
* descriptors. Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] norm
* - PlasmaMaxNorm: Max norm
* - PlasmaOneNorm: One norm
* - PlasmaInfNorm: Infinity norm
* - PlasmaFrobeniusNorm: Frobenius norm
*
* @param[in] A
* The descriptor of matrix A.
*
* @param[out] work
* Workspace of size:
* - PlasmaMaxNorm: A.mt*A.nt
* - PlasmaOneNorm: A.mt*A.n + A.n
* - PlasmaInfNorm: A.nt*A.m + A.m
* - PlasmaFrobeniusNorm: 2*A.mt*A.nt
*
* @param[out] value
* The calculated value of the norm requested.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_clange
* @sa plasma_omp_clange
 * @sa plasma_omp_dlange
 * @sa plasma_omp_slange
*
******************************************************************************/
void plasma_omp_clange(plasma_enum_t norm, plasma_desc_t A,
                       float *work, float *value,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Fix: validate sequence and request FIRST. The original checked
    // them last, after several error paths had already passed the
    // possibly-NULL sequence/request into plasma_request_fail(),
    // which records the failure in those very objects.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }

    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((norm != PlasmaMaxNorm) && (norm != PlasmaOneNorm) &&
        (norm != PlasmaInfNorm) && (norm != PlasmaFrobeniusNorm)) {
        plasma_error("illegal value of norm");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid descriptor A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (imin(A.m, A.n) == 0) {
        *value = 0.0;
        return;
    }

    // Call the parallel function.
    plasma_pclange(norm, A, work, value, sequence, request);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.