mxnet_op.h
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file mxnet_op.h
* \brief
* \author Junyuan Xie
*/
#ifndef MXNET_OPERATOR_MXNET_OP_H_
#define MXNET_OPERATOR_MXNET_OP_H_
#include <dmlc/omp.h>
#include <mxnet/base.h>
#include <mxnet/engine.h>
#include <mxnet/op_attr_types.h>
#include <algorithm>
#include "./operator_tune.h"
#include "../engine/openmp.h"
#ifdef __CUDACC__
#include "../common/cuda_utils.h"
#endif // __CUDACC__
namespace mxnet {
namespace op {
namespace mxnet_op {
using namespace mshadow;
#ifdef __CUDA_ARCH__
__constant__ const float PI = 3.14159265358979323846;
#else
const float PI = 3.14159265358979323846;
using std::isnan;
#endif
template<typename xpu>
int get_num_threads(const int N);
#ifdef __CUDACC__
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
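/*
 * Illustrative sketch (not part of the original header): CUDA_KERNEL_LOOP expands to a
 * grid-stride loop, so a hypothetical elementwise kernel body would look like
 *
 *   __global__ void axpy_kernel(int n, float a, const float* x, float* y) {
 *     CUDA_KERNEL_LOOP(i, n) { y[i] += a * x[i]; }
 *   }
 */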
inline cudaDeviceProp cuda_get_device_prop() {
int device;
CUDA_CALL(cudaGetDevice(&device));
cudaDeviceProp deviceProp;
CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device));
return deviceProp;
}
/*!
* \brief Get the number of blocks for cuda kernel given N
*/
inline int cuda_get_num_blocks(const int N) {
using namespace mshadow::cuda;
return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
}
template<>
inline int get_num_threads<gpu>(const int N) {
using namespace mshadow::cuda;
return kBaseThreadNum * cuda_get_num_blocks(N);
}
#endif // __CUDACC__
template<>
inline int get_num_threads<cpu>(const int N) {
return engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
}
/*! \brief operator request type switch */
#define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \
switch (req) { \
case kNullOp: \
break; \
case kWriteInplace: \
case kWriteTo: \
{ \
const OpReqType ReqType = kWriteTo; \
{__VA_ARGS__} \
} \
break; \
case kAddTo: \
{ \
const OpReqType ReqType = kAddTo; \
{__VA_ARGS__} \
} \
break; \
default: \
break; \
}
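/*
 * Usage sketch (illustrative, with hypothetical names): the macro materializes the runtime
 * OpReqType as the compile-time constant ReqType, which can then be used as a template
 * argument, e.g.
 *
 *   MXNET_ASSIGN_REQ_SWITCH(req[0], ReqType, {
 *     Kernel<op_with_req<OP, ReqType>, xpu>::Launch(s, out_size, out_ptr, in_ptr);
 *   });
 */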
/*! \brief operator request type switch */
#define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...) \
switch (req) { \
case kNullOp: \
{ \
const OpReqType ReqType = kNullOp; \
{__VA_ARGS__} \
} \
break; \
case kWriteInplace: \
case kWriteTo: \
{ \
const OpReqType ReqType = kWriteTo; \
{__VA_ARGS__} \
} \
break; \
case kAddTo: \
{ \
const OpReqType ReqType = kAddTo; \
{__VA_ARGS__} \
} \
break; \
default: \
break; \
}
#define MXNET_NDIM_SWITCH(NDim, ndim, ...) \
if (NDim == 0) { \
} else if (NDim == 1) { \
const int ndim = 1; \
{__VA_ARGS__} \
} else if (NDim == 2) { \
const int ndim = 2; \
{__VA_ARGS__} \
} else if (NDim == 3) { \
const int ndim = 3; \
{__VA_ARGS__} \
} else if (NDim == 4) { \
const int ndim = 4; \
{__VA_ARGS__} \
} else if (NDim == 5) { \
const int ndim = 5; \
{__VA_ARGS__} \
} else { \
LOG(FATAL) << "ndim=" << NDim << "too large "; \
}
#define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kUint8: \
LOG(FATAL) << "This operation does not " \
"support int8 or uint8"; \
break; \
case mshadow::kInt8: \
LOG(FATAL) << "This operation does not " \
"support int8 or uint8"; \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
#define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
LOG(FATAL) << "This operation does not " \
"support float16"; \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt8: \
{ \
typedef int8_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
template <typename T>
struct AccType {
using type = T;
};
template <>
struct AccType<mshadow::half::half_t> {
using type = float;
};
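/*
 * Example (sketch): AccType widens the accumulation type only for float16, so a reduction
 * over half_t data can accumulate in float without losing precision:
 *
 *   typename AccType<DType>::type acc = 0;  // float when DType is half_t
 *   for (index_t i = 0; i < n; ++i)
 *     acc += static_cast<typename AccType<DType>::type>(data[i]);
 */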
#define MXNET_REAL_ACC_TYPE_SWITCH(type, DType, AType, ...)\
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
typedef double AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
typedef double AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
typedef float AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
typedef uint8_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types not uint8"; \
} \
break; \
case mshadow::kInt8: \
{ \
typedef int8_t DType; \
typedef int8_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types not int8"; \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
typedef int32_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types, not int32"; \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
typedef int64_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types, not int64"; \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
#define MXNET_ACC_TYPE_SWITCH(type, DType, AType, ...)\
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
typedef double AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
typedef double AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
typedef float AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
typedef uint32_t AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt8: \
{ \
typedef int8_t DType; \
typedef int32_t AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
typedef int64_t AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
typedef int64_t AType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
#define MXNET_INT_TYPE_SWITCH(type, DType, ...)\
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
LOG(FATAL) << "This operation only support " \
"integer types, not float32"; \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
LOG(FATAL) << "This operation only support " \
"integer types, not float64"; \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
LOG(FATAL) << "This operation only support " \
"integer types, not float16"; \
} \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt8: \
{ \
typedef int8_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
#define MXNET_LOAD_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Invalid loading enum type " << type; \
}
/*!
* \brief assign the val to out according
* to request in Kernel::Launch
* \param out the data to be assigned
* \param req the assignment request
* \param val the value to be assigned to out
* \tparam OType output type
* \tparam VType value type
*/
#define KERNEL_ASSIGN(out, req, val) \
{ \
switch (req) { \
case kNullOp: \
break; \
case kWriteTo: \
case kWriteInplace: \
(out) = (val); \
break; \
case kAddTo: \
(out) += (val); \
break; \
default: \
break; \
} \
}
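/*
 * Example (sketch): inside an OP::Map body,
 *
 *   KERNEL_ASSIGN(out[i], req, in[i] * in[i]);
 *
 * compiles to `out[i] = in[i] * in[i]` for kWriteTo/kWriteInplace, to
 * `out[i] += in[i] * in[i]` for kAddTo, and to a no-op for kNullOp.
 */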
#define MXNET_ADD_ALL_TYPES \
.add_enum("float32", mshadow::kFloat32) \
.add_enum("float64", mshadow::kFloat64) \
.add_enum("float16", mshadow::kFloat16) \
.add_enum("uint8", mshadow::kUint8) \
.add_enum("int8", mshadow::kInt8) \
.add_enum("int32", mshadow::kInt32) \
.add_enum("int64", mshadow::kInt64)
#define MXNET_ADD_ALL_TYPES_WITH_BOOL \
.add_enum("float32", mshadow::kFloat32) \
.add_enum("float64", mshadow::kFloat64) \
.add_enum("float16", mshadow::kFloat16) \
.add_enum("uint8", mshadow::kUint8) \
.add_enum("int8", mshadow::kInt8) \
.add_enum("int32", mshadow::kInt32) \
.add_enum("int64", mshadow::kInt64) \
.add_enum("bool", mshadow::kBool)
/* \brief Compute flattened index given coordinates and shape. */
template<int ndim>
MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
index_t ret = 0;
#pragma unroll
for (int i = 0; i < ndim; ++i) {
ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i];
}
return ret;
}
/* Compute coordinates from flattened index given shape */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) {
Shape<ndim> ret;
#pragma unroll
for (index_t i = ndim-1, j = idx; i >= 0; --i) {
auto tmp = j / shape[i];
ret[i] = j - tmp*shape[i];
j = tmp;
}
return ret;
}
/* Compute dot product of two vector */
template<int ndim>
MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
index_t ret = 0;
#pragma unroll
for (int i = 0; i < ndim; ++i) {
ret += coord[i] * stride[i];
}
return ret;
}
/* Combining unravel and dot */
template<int ndim>
MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape,
const Shape<ndim>& stride) {
index_t ret = 0;
#pragma unroll
for (index_t i = ndim-1, j = idx; i >= 0; --i) {
auto tmp = j / shape[i];
ret += (j - tmp*shape[i])*stride[i];
j = tmp;
}
return ret;
}
/* Calculate stride of each dim from shape */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
Shape<ndim> stride;
index_t cumprod = 1;
#pragma unroll
for (int i = ndim - 1; i >= 0; --i) {
stride[i] = (shape[i] > 1) ? cumprod : 0;
cumprod *= shape[i];
}
return stride;
}
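/*
 * Worked example (illustrative): for shape (2, 3, 4) and coord (1, 2, 3),
 * ravel(coord, shape) = (1*3 + 2)*4 + 3 = 23, and unravel(23, shape) recovers (1, 2, 3).
 * calc_stride(shape) yields (12, 4, 1), so dot(coord, stride) == 23 as well; size-1
 * dimensions get stride 0, which is what makes broadcast indexing work.
 */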
/* Increment coordinates and modify index */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
index_t* idx, const Shape<ndim>& stride) {
++(*coord)[ndim-1];
*idx += stride[ndim-1];
#pragma unroll
for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
(*coord)[i] -= shape[i];
++(*coord)[i-1];
*idx = *idx + stride[i-1] - shape[i] * stride[i];
}
}
/* Increment coordinates and modify index */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
index_t* idx1, const Shape<ndim>& stride1,
index_t* idx2, const Shape<ndim>& stride2) {
++(*coord)[ndim-1];
*idx1 += stride1[ndim-1];
*idx2 += stride2[ndim-1];
#pragma unroll
for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
(*coord)[i] -= shape[i];
++(*coord)[i-1];
*idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i];
*idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i];
}
}
/*!
* \brief Simple copy data from one blob to another
* \param to Destination blob
* \param from Source blob
*/
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) {
CHECK_EQ(from.Size(), to.Size());
CHECK_EQ(from.dev_mask(), to.dev_mask());
if (from.type_flag_ == mshadow::kBool || to.type_flag_ == mshadow::kBool) {
CHECK_EQ(from.type_flag_, to.type_flag_) << "Only supports copying between boolean ndarrays.";
mshadow::Copy(to.FlatTo1D<xpu, bool>(s), from.FlatTo1D<xpu, bool>(s), s);
return;
}
MSHADOW_TYPE_SWITCH(to.type_flag_, DType, {
if (to.type_flag_ == from.type_flag_) {
mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s);
} else {
MSHADOW_TYPE_SWITCH(from.type_flag_, SrcDType, {
to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
})
}
})
}
/*! \brief Binary op backward gradient OP wrapper */
template<typename GRAD_OP>
struct backward_grad {
/* \brief Backward calc with grad
* \param a - output grad
* \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies)
* \return input grad
*/
template<typename DType, typename ...Args>
MSHADOW_XINLINE static DType Map(DType a, Args... args) {
return DType(a * GRAD_OP::Map(args...));
}
};
/*! \brief Binary op backward gradient OP wrapper (tuned) */
template<typename GRAD_OP>
struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable {
using backward_grad<GRAD_OP>::Map;
};
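/*
 * Usage sketch (illustrative): for y = sigmoid(x) the backward pass computes
 * dx[i] = dy[i] * sigmoid_grad(y[i]), which this wrapper expresses as
 *
 *   Kernel<op_with_req<backward_grad_tuned<mshadow_op::sigmoid_grad>, ReqType>, xpu>
 *       ::Launch(s, n, dx_ptr, dy_ptr, y_ptr);
 *
 * assuming mshadow_op::sigmoid_grad from mshadow_op.h; the pointer names are hypothetical.
 */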
/*! \brief Select assignment operation based upon the req value
* Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch
*/
template<typename OP, int req>
struct op_with_req {
typedef OP Operation;
/*! \brief input is one tensor */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
}
/*! \brief inputs are two tensors */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *lhs, const DType *rhs) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
}
/*! \brief input is tensor and a scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
}
/*! \brief input is tensor and two scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in,
const DType value_1, const DType value_2) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2));
}
/*! \brief No inputs (ie fill to constant value) */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out) {
KERNEL_ASSIGN(out[i], req, OP::Map());
}
/*! \brief input is single scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(value));
}
/*! \brief inputs are two tensors and a scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out,
const DType *input_1, const DType *input_2, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value));
}
/*! \brief inputs are three tensors (ie backward grad with binary grad function) */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out,
const DType *input_1,
const DType *input_2,
const DType *input_3) {
KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i]));
}
};
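/*
 * Dispatch sketch (illustrative): the Map() overload is selected by the trailing launch
 * arguments, e.g.
 *
 *   // two input tensors   -> Map(i, out, lhs, rhs)
 *   Kernel<op_with_req<mshadow_op::plus, kWriteTo>, cpu>::Launch(s, n, out, lhs, rhs);
 *   // tensor and a scalar -> Map(i, out, in, value)
 *   Kernel<op_with_req<mshadow_op::plus, kWriteTo>, cpu>::Launch(s, n, out, in, DType(2));
 *
 * Pointer and size names above are placeholders, not part of this header.
 */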
template<typename OP, typename xpu>
struct Kernel;
/*!
* \brief CPU Kernel launcher
* \tparam OP Operator to launch
*/
template<typename OP>
struct Kernel<OP, cpu> {
/*!
* \brief Launch a generic CPU kernel.
* When using this for a new kernel op, add declaration and tuning objects to
* operator_tune.cc
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param args Varargs to eventually pass to the OP::Map() function
*/
template<typename ...Args>
inline static bool Launch(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (omp_threads < 2) {
for (size_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
OP::Map(i, args...);
}
}
#else
for (size_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
#endif
return true;
}
/*!
* \brief Launch a generic CPU kernel with dynamic schedule. This is recommended
* for irregular workloads such as spmv.
* When using this for a new kernel op, add declaration and tuning objects to
* operator_tune.cc
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param args Varargs to eventually pass to the OP::Map() function
*/
template<typename ...Args>
inline static bool LaunchDynamic(mshadow::Stream<cpu> *, const int64_t N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(false);
if (omp_threads < 2) {
for (int64_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
} else {
#pragma omp parallel for num_threads(omp_threads) schedule(dynamic)
for (int64_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
}
#else
for (int64_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
#endif
return true;
}
/*!
* \brief Launch CPU kernel which has OMP tuning data available.
* When using this for a new kernel op, add declaration and tuning objects to
* operator_tune.cc
* \tparam PRIMITIVE_OP The primitive operation to use for tuning
* \tparam DType Data type
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param dest Destination pointer (used to infer DType)
* \param args Varargs to eventually pass to the OP::Map() function
*/
template<typename PRIMITIVE_OP, typename DType, typename ...Args>
static void LaunchTuned(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP(
N, static_cast<size_t>(omp_threads))) {
for (size_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
OP::Map(i, args...);
}
}
#else
for (size_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
#endif
}
/*!
* \brief Launch custom-tuned kernel where each thread is set to
* operate on a contiguous partition
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions
*/
template<typename ...Args>
inline static void LaunchEx(mshadow::Stream<cpu> *s, const size_t N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (omp_threads < 2) {
OP::Map(0, N, args...);
} else {
const auto length = (N + omp_threads - 1) / omp_threads;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < static_cast<index_t>(N); i += length) {
OP::Map(i, i + length > N ? N - i : length, args...);
}
}
#else
OP::Map(0, N, args...);
#endif
}
/*!
* \brief Launch a tunable OP with implicitly-supplied data type
* \tparam DType Data type
* \tparam T OP type
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param s Stream (usually null for CPU)
* \param N Number of iterations
* \param args Varargs to eventually pass to the OP::Map() function
* \return Always true
*/
template<typename DType, typename T = OP, typename ...Args>
static MSHADOW_CINLINE
typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type
Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
LaunchTuned<T, DType>(s, N, dest, args...);
return true;
}
/*!
* \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req)
* \tparam DType Data type
* \tparam T Wrapper type
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param s Stream (usually null for CPU)
* \param N Number of iterations
* \param args Varargs to eventually pass to the OP::Map() function
* \return Always true
*/
template<typename DType, typename T = OP, typename ...Args>
static MSHADOW_CINLINE
typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type
Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
LaunchTuned<typename T::Operation, DType>(s, N, dest, args...);
return true;
}
};
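/*
 * Minimal OP sketch (illustrative, not part of this header): any struct exposing a static
 * Map(index_t, ...) can be launched directly,
 *
 *   struct scale_by_two {
 *     template<typename DType>
 *     MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) {
 *       out[i] = in[i] * DType(2);
 *     }
 *   };
 *   // Kernel<scale_by_two, cpu>::Launch(s, n, out_ptr, in_ptr);
 *
 * Deriving from `tunable` (and registering in operator_tune.cc) would route the launch
 * through LaunchTuned instead of the plain OpenMP loop.
 */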
#ifdef __CUDACC__
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
OP::Map(i, args...);
}
}
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel_ex(int N, Args... args) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
OP::Map(i, 1, args...);
}
}
template<typename OP>
struct Kernel<OP, gpu> {
/*! \brief Launch GPU kernel */
template<typename ...Args>
inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
if (0 == N) return;
using namespace mshadow::cuda;
int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
mxnet_generic_kernel<OP, Args...>
<<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
N, args...);
MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel);
}
template<typename ...Args>
inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... args) {
if (0 == N) return;
using namespace mshadow::cuda;
int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
mxnet_generic_kernel_ex<OP, Args...>
<<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
N, args...);
MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex);
}
};
#endif // __CUDACC__
/*!
* \brief Set to immediate scalar value kernel
* \tparam val Scalar immediate
*/
template<int val>
struct set_to_int : public tunable {
// mxnet_op version (when used directly with Kernel<>::Launch())
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out) {
out[i] = DType(val);
}
// mshadow_op version (when used with op_with_req<>)
MSHADOW_XINLINE static int Map() {
return val;
}
};
/*!
* \brief Special-case kernel shortcut for setting to zero and one
*/
using set_zero = set_to_int<0>;
using set_one = set_to_int<1>;
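/*
 * Example (sketch): Kernel<set_zero, xpu>::Launch(s, out.Size(), out.dptr<DType>()) fills a
 * tensor with zeros; wrapping it as op_with_req<set_to_int<0>, ReqType> makes the same fill
 * honour the write/add request type.
 */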
} // namespace mxnet_op
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_MXNET_OP_H_
Gemm_MT_Loop5_MRxNRKernel_ver3.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <immintrin.h>
#define alpha( i,j ) A[ (j)*ldA + (i) ] // map alpha( i,j ) to array A
#define beta( i,j ) B[ (j)*ldB + (i) ] // map beta( i,j ) to array B
#define gamma( i,j ) C[ (j)*ldC + (i) ] // map gamma( i,j ) to array C
#define min( x, y ) ( ( x ) < ( y ) ? x : y )
void LoopFive( int, int, int, double *, int, double *, int, double *, int );
void LoopFour( int, int, int, double *, int, double *, int, double *, int );
void LoopThree( int, int, int, double *, int, double *, double *, int );
void LoopTwo( int, int, int, double *, double *, double *, int );
void LoopOne( int, int, int, double *, double *, double *, int );
void Gemm_MRxNRKernel_Packed( int, double *, double *, double *, int );
void PackBlockA_MCxKC( int, int, double *, int, double * );
void PackPanelB_KCxNC( int, int, double *, int, double * );
double* Atilde = NULL;
double* Btilde = NULL;
void MyGemm( int m, int n, int k, double *A, int ldA,
double *B, int ldB, double *C, int ldC )
{
if ( m % MR != 0 || MC % MR != 0 ){
printf( "m and MC must be multiples of MR\n" );
exit( 0 );
}
if ( n % NR != 0 || NC % NR != 0 ){
printf( "n and NC must be multiples of NR\n" );
exit( 0 );
}
LoopFive( m, n, k, A, ldA, B, ldB, C, ldC );
}
void LoopFive( int m, int n, int k, double *A, int ldA,
double *B, int ldB, double *C, int ldC )
{
int max_threads = omp_get_max_threads();
int loadbalanced_part = (n/(NC*max_threads)) * NC * max_threads;
int remainder = n - loadbalanced_part;
Atilde = ( double * ) _mm_malloc( MC * KC * omp_get_max_threads() * sizeof( double ), 64 );
Btilde = ( double * ) _mm_malloc( KC * NC * omp_get_max_threads() * sizeof( double ), 64 );
#pragma omp parallel for
for ( int j=0; j< loadbalanced_part; j+=NC ) {
int jb = NC; /* every panel in the load-balanced part is a full NC-wide block */
LoopFour( m, jb, k, A, ldA, &beta( 0,j ), ldB, &gamma( 0,j ), ldC );
}
int remainder_per_thread = ((remainder / max_threads) / NR) * NR;
if (remainder_per_thread == 0) remainder_per_thread = NR;
#pragma omp parallel for
for (int j = loadbalanced_part; j < n; j += remainder_per_thread) {
int jb = min (remainder_per_thread, n-j );
LoopFour (m, jb, k , A, ldA, &beta(0, j ), ldB, &gamma(0, j ), ldC );
}
_mm_free(Atilde);
_mm_free(Btilde);
}
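/*
 * Worked example (illustrative, assumed values): with n = 2000, NC = 96, NR = 4 and
 * 4 OpenMP threads, loadbalanced_part = (2000 / (96*4)) * 96 * 4 = 1920 columns are
 * processed as full NC-wide panels in the first parallel loop; the remaining 80 columns
 * are split into chunks of remainder_per_thread = ((80/4)/4)*4 = 20 columns (rounded down
 * to a multiple of NR, but never below NR) in the second parallel loop.
 */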
void LoopFour( int m, int n, int k, double *A, int ldA, double *B, int ldB,
double *C, int ldC )
{
//double *Btilde = ( double * ) _mm_malloc( KC * NC * sizeof( double ), 64 );
for ( int p=0; p<k; p+=KC ) {
int pb = min( KC, k-p ); /* Last loop may not involve a full block */
PackPanelB_KCxNC( pb, n, &beta( p, 0 ), ldB, &Btilde[ NC * KC * omp_get_thread_num() ] );
LoopThree( m, n, pb, &alpha( 0, p ), ldA, &Btilde[ NC * KC * omp_get_thread_num() ], C, ldC );
}
//_mm_free( Btilde);
}
void LoopThree( int m, int n, int k, double *A, int ldA, double *Btilde, double *C, int ldC )
{
//double *Atilde = ( double * ) _mm_malloc( MC * KC * sizeof( double ), 64 );
for ( int i=0; i<m; i+=MC ) {
int ib = min( MC, m-i ); /* Last loop may not involve a full block */
PackBlockA_MCxKC( ib, k, &alpha( i, 0 ), ldA, &Atilde[ MC * KC * omp_get_thread_num() ] );
LoopTwo( ib, n, k, &Atilde[ MC * KC * omp_get_thread_num() ], Btilde, &gamma( i,0 ), ldC );
}
//_mm_free( Atilde);
}
void LoopTwo( int m, int n, int k, double *Atilde, double *Btilde, double *C, int ldC )
{
for ( int j=0; j<n; j+=NR ) {
int jb = min( NR, n-j );
LoopOne( m, jb, k, Atilde, &Btilde[ j*k ], &gamma( 0,j ), ldC );
}
}
void LoopOne( int m, int n, int k, double *Atilde, double *MicroPanelB, double *C, int ldC )
{
for ( int i=0; i<m; i+=MR ) {
int ib = min( MR, m-i );
Gemm_MRxNRKernel_Packed( k, &Atilde[ i*k ], MicroPanelB, &gamma( i,0 ), ldC );
}
}
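/*
 * Note on the packed offsets (sketch): Atilde holds an MC x KC block of A as consecutive
 * MR x k micro-panels, so micro-panel i starts at Atilde[i*k]; likewise Btilde holds a
 * KC x NC panel of B as NR-wide micro-panels starting at Btilde[j*k]. Each OpenMP thread
 * works in its own MC*KC (Atilde) and KC*NC (Btilde) slice, offset by omp_get_thread_num().
 */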
profile.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP RRRR OOO FFFFF IIIII L EEEEE %
% P P R R O O F I L E %
% PPPP RRRR O O FFF I L EEE %
% P R R O O F I L E %
% P R R OOO F IIIII LLLLL EEEEE %
% %
% %
% MagickCore Image Profile Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/configure.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/option-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/profile-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#if defined(MAGICKCORE_LCMS_DELEGATE)
#if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H)
#include <wchar.h>
#include <lcms/lcms2.h>
#else
#include <wchar.h>
#include "lcms2.h"
#endif
#endif
/*
Forward declarations
*/
static MagickBooleanType
SetImageProfileInternal(Image *,const char *,const StringInfo *,
const MagickBooleanType,ExceptionInfo *);
static void
WriteTo8BimProfile(Image *,const char*,const StringInfo *);
/*
Typedef declarations
*/
struct _ProfileInfo
{
char
*name;
size_t
length;
unsigned char
*info;
size_t
signature;
};
typedef struct _CMSExceptionInfo
{
Image
*image;
ExceptionInfo
*exception;
} CMSExceptionInfo;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageProfiles() clones one or more image profiles.
%
% The format of the CloneImageProfiles method is:
%
% MagickBooleanType CloneImageProfiles(Image *image,
% const Image *clone_image)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone_image: the clone image.
%
*/
MagickExport MagickBooleanType CloneImageProfiles(Image *image,
const Image *clone_image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(clone_image != (const Image *) NULL);
assert(clone_image->signature == MagickCoreSignature);
if (clone_image->profiles != (void *) NULL)
{
if (image->profiles != (void *) NULL)
DestroyImageProfiles(image);
image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles,
(void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo);
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e l e t e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeleteImageProfile() deletes a profile from the image by its name.
%
% The format of the DeleteImageProfile method is:
%
% MagickBooleanType DeleteImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return(MagickFalse);
WriteTo8BimProfile(image,name,(StringInfo *) NULL);
return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageProfiles() releases memory associated with an image profile map.
%
% The format of the DestroyProfiles method is:
%
% void DestroyImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImageProfiles(Image *image)
{
if (image->profiles != (SplayTreeInfo *) NULL)
image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageProfile() gets a profile associated with an image by name.
%
% The format of the GetImageProfile method is:
%
% const StringInfo *GetImageProfile(const Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport const StringInfo *GetImageProfile(const Image *image,
const char *name)
{
const StringInfo
*profile;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return((StringInfo *) NULL);
profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
image->profiles,name);
return(profile);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t N e x t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNextImageProfile() gets the next profile name for an image.
%
% The format of the GetNextImageProfile method is:
%
% char *GetNextImageProfile(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport char *GetNextImageProfile(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return((char *) NULL);
return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles));
}
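/*
  Illustrative iteration sketch (not part of this file); ResetImageProfileIterator() is the
  companion declared in profile.h, and the variable names here are examples only:

    const char *name;
    ResetImageProfileIterator(image);
    for (name=GetNextImageProfile(image); name != (const char *) NULL; )
    {
      const StringInfo *profile=GetImageProfile(image,name);
      (void) profile;
      name=GetNextImageProfile(image);
    }
*/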
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P r o f i l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ProfileImage() associates, applies, or removes an ICM, IPTC, or generic
% profile with, to, or from an image. If the profile is NULL, the named profile
% is removed; otherwise it is added or applied. Use a name of '*' and a profile
% of NULL to remove all profiles from the image.
%
% ICC and ICM profiles are handled as follows: If the image does not have
% an associated color profile, the one you provide is associated with the
% image and the image pixels are not transformed. Otherwise, the colorspace
% transform defined by the existing and new profiles is applied to the image
% pixels, and the new profile is associated with the image.
%
% The format of the ProfileImage method is:
%
% MagickBooleanType ProfileImage(Image *image,const char *name,
% const void *datum,const size_t length,const MagickBooleanType clone)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: Name of profile to add or remove: ICC, IPTC, or generic profile.
%
% o datum: the profile data.
%
% o length: the length of the profile.
%
% o clone: should be MagickFalse.
%
*/
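/*
  Illustrative usage sketch (not part of this file; assumes `icc` is a StringInfo holding an
  ICC profile read elsewhere):

    Add or apply an ICC profile:
      (void) ProfileImage(image,"icc",GetStringInfoDatum(icc),
        GetStringInfoLength(icc),exception);

    Remove all profiles:
      (void) ProfileImage(image,"*",(const void *) NULL,0,exception);
*/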
#if defined(MAGICKCORE_LCMS_DELEGATE)
static double **DestroyPixelThreadSet(double **pixels)
{
register ssize_t
i;
assert(pixels != (double **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (pixels[i] != (double *) NULL)
pixels[i]=(double *) RelinquishMagickMemory(pixels[i]);
pixels=(double **) RelinquishMagickMemory(pixels);
return(pixels);
}
static double **AcquirePixelThreadSet(const size_t columns,
const size_t channels)
{
double
**pixels;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixels=(double **) AcquireQuantumMemory(number_threads,sizeof(*pixels));
if (pixels == (double **) NULL)
return((double **) NULL);
(void) memset(pixels,0,number_threads*sizeof(*pixels));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixels[i]=(double *) AcquireQuantumMemory(columns,channels*
sizeof(**pixels));
if (pixels[i] == (double *) NULL)
return(DestroyPixelThreadSet(pixels));
}
return(pixels);
}
static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform)
{
register ssize_t
i;
assert(transform != (cmsHTRANSFORM *) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (transform[i] != (cmsHTRANSFORM) NULL)
cmsDeleteTransform(transform[i]);
transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform);
return(transform);
}
static cmsHTRANSFORM *AcquireTransformThreadSet(Image *image,
const cmsHPROFILE source_profile,const cmsUInt32Number source_type,
const cmsHPROFILE target_profile,const cmsUInt32Number target_type,
const int intent,const cmsUInt32Number flags)
{
cmsHTRANSFORM
*transform;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads,
sizeof(*transform));
if (transform == (cmsHTRANSFORM *) NULL)
return((cmsHTRANSFORM *) NULL);
(void) memset(transform,0,number_threads*sizeof(*transform));
for (i=0; i < (ssize_t) number_threads; i++)
{
transform[i]=cmsCreateTransformTHR((cmsContext) image,source_profile,
source_type,target_profile,target_type,intent,flags);
if (transform[i] == (cmsHTRANSFORM) NULL)
return(DestroyTransformThreadSet(transform));
}
return(transform);
}
#endif
#if defined(MAGICKCORE_LCMS_DELEGATE)
static void CMSExceptionHandler(cmsContext context,cmsUInt32Number severity,
const char *message)
{
CMSExceptionInfo
*cms_exception;
ExceptionInfo
*exception;
Image
*image;
cms_exception=(CMSExceptionInfo *) context;
if (cms_exception == (CMSExceptionInfo *) NULL)
return;
exception=cms_exception->exception;
if (exception == (ExceptionInfo *) NULL)
return;
image=cms_exception->image;
if (image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
"UnableToTransformColorspace","`%s'","unknown context");
return;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s",
severity,message != (char *) NULL ? message : "no message");
(void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
"UnableToTransformColorspace","`%s'",image->filename);
}
#endif
static MagickBooleanType SetsRGBImageProfile(Image *image,
ExceptionInfo *exception)
{
static unsigned char
sRGBProfile[] =
{
0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00,
0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20,
0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 0x00, 0x0f, 0x00, 0x3a,
0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00,
0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6,
0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99,
0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67,
0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70,
0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88,
0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c,
0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67,
0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x24,
0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14,
0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24,
0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14,
0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14,
0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14,
0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 0x00, 0x00, 0x14,
0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14,
0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36,
0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76,
0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77,
0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39,
0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c,
0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31,
0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75,
0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77,
0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20,
0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66,
0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61,
0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d,
0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52,
0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f,
0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20,
0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57,
0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65,
0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e,
0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20,
0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69,
0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74,
0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e,
0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e,
0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e,
0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47,
0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61,
0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43,
0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44,
0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63,
0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20,
0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00,
0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36,
0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36,
0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c,
0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2,
0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01,
0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d,
0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00,
0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa0,
0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87,
0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4,
0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19,
0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37,
0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54,
0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72,
0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90,
0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae,
0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb,
0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb,
0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d,
0x01, 0x13, 0x01, 0x19, 0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32,
0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59,
0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83,
0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1,
0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1,
0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14,
0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b,
0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84,
0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1,
0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00,
0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43,
0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a,
0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3,
0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20,
0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71,
0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4,
0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c,
0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77,
0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5,
0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37,
0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d,
0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07,
0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74,
0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5,
0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a,
0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2,
0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f,
0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf,
0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 0x3d, 0x0a, 0x54,
0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc,
0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69,
0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9,
0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e,
0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26,
0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3,
0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 0x49, 0x0e, 0x64,
0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09,
0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3,
0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61,
0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13,
0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9,
0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84,
0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43,
0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06,
0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce,
0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b,
0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c,
0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41,
0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b,
0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa,
0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd,
0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5,
0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2,
0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3,
0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99,
0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 0x6a, 0x1e, 0x94,
0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94,
0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98,
0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1,
0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf,
0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2,
0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda,
0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7,
0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18,
0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f,
0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b,
0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b,
0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1,
0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c,
0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c,
0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91,
0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb,
0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a,
0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f,
0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8,
0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37,
0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c,
0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05,
0x39, 0x42, 0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74,
0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8,
0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61,
0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0,
0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64,
0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 0x41, 0x6a, 0x41, 0xac, 0x41, 0xee,
0x42, 0x30, 0x42, 0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d,
0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12,
0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab,
0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b,
0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0,
0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a,
0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a,
0x4d, 0x93, 0x4d, 0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00,
0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb,
0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c,
0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42,
0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f,
0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0,
0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8,
0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95,
0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78,
0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61,
0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f,
0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43,
0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d,
0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d,
0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43,
0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f,
0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60,
0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78,
0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95,
0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8,
0x74, 0x14, 0x74, 0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1,
0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11,
0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46,
0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81,
0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2,
0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a,
0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57,
0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab,
0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04,
0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64,
0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca,
0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36,
0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8,
0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20,
0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f,
0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24,
0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf,
0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40,
0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8,
0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76,
0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a,
0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4,
0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75,
0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d,
0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea,
0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae,
0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79,
0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a,
0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 0xa7, 0xbc, 0x21,
0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff,
0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3,
0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce,
0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf,
0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7,
0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5,
0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 0x39, 0xd0, 0xba,
0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6,
0xd4, 0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8,
0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1,
0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10,
0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36,
0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63,
0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96,
0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0,
0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11,
0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58,
0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7,
0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb,
0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57,
0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba,
0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff
};
StringInfo
*profile;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (GetImageProfile(image,"icc") != (const StringInfo *) NULL)
return(MagickFalse);
profile=AcquireStringInfo(sizeof(sRGBProfile));
SetStringInfoDatum(profile,sRGBProfile);
status=SetImageProfile(image,"icc",profile,exception);
profile=DestroyStringInfo(profile);
return(status);
}
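/*
  Illustrative sketch (editorial, not part of the original MagickCore
  sources): a coder that detects EXIF colorspace hints but no ICC profile
  could fall back to the embedded sRGB profile above.  The image and
  exception variables are assumptions for the example only.

    if (GetImageProfile(image,"icc") == (const StringInfo *) NULL)
      (void) SetsRGBImageProfile(image,exception);
*/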
MagickExport MagickBooleanType ProfileImage(Image *image,const char *name,
const void *datum,const size_t length,ExceptionInfo *exception)
{
#define ProfileImageTag "Profile/Image"
#define ThrowProfileException(severity,tag,context) \
{ \
if (source_profile != (cmsHPROFILE) NULL) \
(void) cmsCloseProfile(source_profile); \
if (target_profile != (cmsHPROFILE) NULL) \
(void) cmsCloseProfile(target_profile); \
ThrowBinaryException(severity,tag,context); \
}
MagickBooleanType
status;
StringInfo
*profile;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(name != (const char *) NULL);
if ((datum == (const void *) NULL) || (length == 0))
{
char
*next;
/*
Delete image profile(s).
*/
ResetImageProfileIterator(image);
for (next=GetNextImageProfile(image); next != (const char *) NULL; )
{
if (IsOptionMember(next,name) != MagickFalse)
{
(void) DeleteImageProfile(image,next);
ResetImageProfileIterator(image);
}
next=GetNextImageProfile(image);
}
return(MagickTrue);
}
/*
Add an ICC, IPTC, or generic profile to the image.
*/
status=MagickTrue;
profile=AcquireStringInfo((size_t) length);
SetStringInfoDatum(profile,(unsigned char *) datum);
if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
status=SetImageProfile(image,name,profile,exception);
else
{
const StringInfo
*icc_profile;
icc_profile=GetImageProfile(image,"icc");
if ((icc_profile != (const StringInfo *) NULL) &&
(CompareStringInfo(icc_profile,profile) == 0))
{
const char
*value;
value=GetImageProperty(image,"exif:ColorSpace",exception);
(void) value;
if (LocaleCompare(value,"1") != 0)
(void) SetsRGBImageProfile(image,exception);
value=GetImageProperty(image,"exif:InteroperabilityIndex",exception);
if (LocaleCompare(value,"R98.") != 0)
(void) SetsRGBImageProfile(image,exception);
/* Future.
value=GetImageProperty(image,"exif:InteroperabilityIndex",exception);
if (LocaleCompare(value,"R03.") != 0)
(void) SetAdobeRGB1998ImageProfile(image,exception);
*/
icc_profile=GetImageProfile(image,"icc");
}
if ((icc_profile != (const StringInfo *) NULL) &&
(CompareStringInfo(icc_profile,profile) == 0))
{
profile=DestroyStringInfo(profile);
return(MagickTrue);
}
#if !defined(MAGICKCORE_LCMS_DELEGATE)
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
"'%s' (LCMS)",image->filename);
#else
{
cmsHPROFILE
source_profile;
CMSExceptionInfo
cms_exception;
/*
Transform pixel colors as defined by the color profiles.
*/
cmsSetLogErrorHandler(CMSExceptionHandler);
cms_exception.image=image;
cms_exception.exception=exception;
(void) cms_exception;
source_profile=cmsOpenProfileFromMemTHR((cmsContext) &cms_exception,
GetStringInfoDatum(profile),(cmsUInt32Number)
GetStringInfoLength(profile));
if (source_profile == (cmsHPROFILE) NULL)
ThrowBinaryException(ResourceLimitError,
"ColorspaceColorProfileMismatch",name);
if ((cmsGetDeviceClass(source_profile) != cmsSigLinkClass) &&
(icc_profile == (StringInfo *) NULL))
status=SetImageProfile(image,name,profile,exception);
else
{
CacheView
*image_view;
ColorspaceType
source_colorspace,
target_colorspace;
cmsColorSpaceSignature
signature;
cmsHPROFILE
target_profile;
cmsHTRANSFORM
*magick_restrict transform;
cmsUInt32Number
flags,
source_type,
target_type;
double
**magick_restrict source_pixels,
source_scale,
**magick_restrict target_pixels,
target_scale;
int
intent;
MagickOffsetType
progress;
size_t
source_channels,
target_channels;
ssize_t
y;
target_profile=(cmsHPROFILE) NULL;
if (icc_profile != (StringInfo *) NULL)
{
target_profile=source_profile;
source_profile=cmsOpenProfileFromMemTHR((cmsContext)
&cms_exception,GetStringInfoDatum(icc_profile),
(cmsUInt32Number) GetStringInfoLength(icc_profile));
if (source_profile == (cmsHPROFILE) NULL)
ThrowProfileException(ResourceLimitError,
"ColorspaceColorProfileMismatch",name);
}
source_scale=1.0;
source_channels=3;
switch (cmsGetColorSpace(source_profile))
{
case cmsSigCmykData:
{
source_colorspace=CMYKColorspace;
source_type=(cmsUInt32Number) TYPE_CMYK_DBL;
source_channels=4;
source_scale=100.0;
break;
}
case cmsSigGrayData:
{
source_colorspace=GRAYColorspace;
source_type=(cmsUInt32Number) TYPE_GRAY_DBL;
source_channels=1;
break;
}
case cmsSigLabData:
{
source_colorspace=LabColorspace;
source_type=(cmsUInt32Number) TYPE_Lab_DBL;
source_scale=100.0;
break;
}
case cmsSigRgbData:
{
source_colorspace=sRGBColorspace;
source_type=(cmsUInt32Number) TYPE_RGB_DBL;
break;
}
case cmsSigXYZData:
{
source_colorspace=XYZColorspace;
source_type=(cmsUInt32Number) TYPE_XYZ_DBL;
break;
}
default:
{
source_colorspace=UndefinedColorspace;
source_type=(cmsUInt32Number) TYPE_RGB_DBL;
break;
}
}
signature=cmsGetPCS(source_profile);
if (target_profile != (cmsHPROFILE) NULL)
signature=cmsGetColorSpace(target_profile);
target_scale=1.0;
target_channels=3;
switch (signature)
{
case cmsSigCmykData:
{
target_colorspace=CMYKColorspace;
target_type=(cmsUInt32Number) TYPE_CMYK_DBL;
target_channels=4;
target_scale=0.01;
break;
}
case cmsSigLabData:
{
target_colorspace=LabColorspace;
target_type=(cmsUInt32Number) TYPE_Lab_DBL;
target_scale=0.01;
break;
}
case cmsSigGrayData:
{
target_colorspace=GRAYColorspace;
target_type=(cmsUInt32Number) TYPE_GRAY_DBL;
target_channels=1;
break;
}
case cmsSigRgbData:
{
target_colorspace=sRGBColorspace;
target_type=(cmsUInt32Number) TYPE_RGB_DBL;
break;
}
case cmsSigXYZData:
{
target_colorspace=XYZColorspace;
target_type=(cmsUInt32Number) TYPE_XYZ_DBL;
break;
}
default:
{
target_colorspace=UndefinedColorspace;
target_type=(cmsUInt32Number) TYPE_RGB_DBL;
break;
}
}
if ((source_colorspace == UndefinedColorspace) ||
(target_colorspace == UndefinedColorspace))
ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
name);
switch (image->rendering_intent)
{
case AbsoluteIntent: intent=INTENT_ABSOLUTE_COLORIMETRIC; break;
case PerceptualIntent: intent=INTENT_PERCEPTUAL; break;
case RelativeIntent: intent=INTENT_RELATIVE_COLORIMETRIC; break;
case SaturationIntent: intent=INTENT_SATURATION; break;
default: intent=INTENT_PERCEPTUAL; break;
}
flags=cmsFLAGS_HIGHRESPRECALC;
#if defined(cmsFLAGS_BLACKPOINTCOMPENSATION)
if (image->black_point_compensation != MagickFalse)
flags|=cmsFLAGS_BLACKPOINTCOMPENSATION;
#endif
transform=AcquireTransformThreadSet(image,source_profile,
source_type,target_profile,target_type,intent,flags);
if (transform == (cmsHTRANSFORM *) NULL)
ThrowProfileException(ImageError,"UnableToCreateColorTransform",
name);
/*
Transform image as dictated by the source & target image profiles.
*/
source_pixels=AcquirePixelThreadSet(image->columns,source_channels);
target_pixels=AcquirePixelThreadSet(image->columns,target_channels);
if ((source_pixels == (double **) NULL) ||
(target_pixels == (double **) NULL))
{
transform=DestroyTransformThreadSet(transform);
ThrowProfileException(ResourceLimitError,
"MemoryAllocationFailed",image->filename);
}
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
{
target_pixels=DestroyPixelThreadSet(target_pixels);
source_pixels=DestroyPixelThreadSet(source_pixels);
transform=DestroyTransformThreadSet(transform);
if (source_profile != (cmsHPROFILE) NULL)
(void) cmsCloseProfile(source_profile);
if (target_profile != (cmsHPROFILE) NULL)
(void) cmsCloseProfile(target_profile);
return(MagickFalse);
}
if (target_colorspace == CMYKColorspace)
(void) SetImageColorspace(image,target_colorspace,exception);
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register double
*p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
p=source_pixels[id];
for (x=0; x < (ssize_t) image->columns; x++)
{
*p++=source_scale*QuantumScale*GetPixelRed(image,q);
if (source_channels > 1)
{
*p++=source_scale*QuantumScale*GetPixelGreen(image,q);
*p++=source_scale*QuantumScale*GetPixelBlue(image,q);
}
if (source_channels > 3)
*p++=source_scale*QuantumScale*GetPixelBlack(image,q);
q+=GetPixelChannels(image);
}
cmsDoTransform(transform[id],source_pixels[id],target_pixels[id],
(unsigned int) image->columns);
p=target_pixels[id];
q-=GetPixelChannels(image)*image->columns;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (target_channels == 1)
SetPixelGray(image,ClampToQuantum(target_scale*
QuantumRange*(*p)),q);
else
SetPixelRed(image,ClampToQuantum(target_scale*
QuantumRange*(*p)),q);
p++;
if (target_channels > 1)
{
SetPixelGreen(image,ClampToQuantum(target_scale*
QuantumRange*(*p)),q);
p++;
SetPixelBlue(image,ClampToQuantum(target_scale*
QuantumRange*(*p)),q);
p++;
}
if (target_channels > 3)
{
SetPixelBlack(image,ClampToQuantum(target_scale*
QuantumRange*(*p)),q);
p++;
}
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ProfileImage)
#endif
proceed=SetImageProgress(image,ProfileImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
(void) SetImageColorspace(image,target_colorspace,exception);
switch (signature)
{
case cmsSigRgbData:
{
image->type=image->alpha_trait == UndefinedPixelTrait ?
TrueColorType : TrueColorAlphaType;
break;
}
case cmsSigCmykData:
{
image->type=image->alpha_trait == UndefinedPixelTrait ?
ColorSeparationType : ColorSeparationAlphaType;
break;
}
case cmsSigGrayData:
{
image->type=image->alpha_trait == UndefinedPixelTrait ?
GrayscaleType : GrayscaleAlphaType;
break;
}
default:
break;
}
target_pixels=DestroyPixelThreadSet(target_pixels);
source_pixels=DestroyPixelThreadSet(source_pixels);
transform=DestroyTransformThreadSet(transform);
if ((status != MagickFalse) &&
(cmsGetDeviceClass(source_profile) != cmsSigLinkClass))
status=SetImageProfile(image,name,profile,exception);
if (target_profile != (cmsHPROFILE) NULL)
(void) cmsCloseProfile(target_profile);
}
(void) cmsCloseProfile(source_profile);
}
#endif
}
profile=DestroyStringInfo(profile);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m o v e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemoveImageProfile() removes a named profile from the image and returns its
% value.
%
% The format of the RemoveImageProfile method is:
%
% void *RemoveImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name)
{
StringInfo
*profile;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return((StringInfo *) NULL);
WriteTo8BimProfile(image,name,(StringInfo *) NULL);
profile=(StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *)
image->profiles,name);
return(profile);
}
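/*
  Illustrative sketch (editorial, not part of the original MagickCore
  sources): RemoveImageProfile() transfers ownership of the profile to the
  caller, so the returned StringInfo must eventually be destroyed.  The
  image variable is an assumption for the example only.

    StringInfo
      *icc_profile;

    icc_profile=RemoveImageProfile(image,"icc");
    if (icc_profile != (StringInfo *) NULL)
      icc_profile=DestroyStringInfo(icc_profile);
*/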
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t P r o f i l e I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImageProfileIterator() resets the image profile iterator. Use it in
% conjunction with GetNextImageProfile() to iterate over all the profiles
% associated with an image.
%
% The format of the ResetImageProfileIterator method is:
%
% ResetImageProfileIterator(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void ResetImageProfileIterator(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return;
ResetSplayTreeIterator((SplayTreeInfo *) image->profiles);
}
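/*
  Illustrative sketch (editorial, not part of the original MagickCore
  sources): enumerate every profile attached to an image with the iterator
  pair described above.  The image variable is an assumption for the
  example only.

    const char
      *name;

    ResetImageProfileIterator(image);
    for (name=GetNextImageProfile(image); name != (const char *) NULL; )
    {
      const StringInfo
        *profile;

      profile=GetImageProfile(image,name);
      if (profile != (const StringInfo *) NULL)
        (void) fprintf(stdout,"%s: %.20g bytes\n",name,(double)
          GetStringInfoLength(profile));
      name=GetNextImageProfile(image);
    }
*/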
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageProfile() adds a named profile to the image. If a profile with the
% same name already exists, it is replaced. This method differs from the
% ProfileImage() method in that it does not apply CMS color profiles.
%
% The format of the SetImageProfile method is:
%
% MagickBooleanType SetImageProfile(Image *image,const char *name,
% const StringInfo *profile)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name, for example icc, exif, and 8bim (8bim is the
% Photoshop wrapper for iptc profiles).
%
% o profile: A StringInfo structure that contains the named profile.
%
*/
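/*
  Illustrative sketch (editorial, not part of the original MagickCore
  sources): attach an XMP profile held in a caller-supplied buffer.  The
  image, xmp_datum, xmp_length, and exception variables are assumptions for
  the example only.

    StringInfo
      *xmp_profile;

    xmp_profile=AcquireStringInfo(xmp_length);
    SetStringInfoDatum(xmp_profile,(const unsigned char *) xmp_datum);
    (void) SetImageProfile(image,"xmp",xmp_profile,exception);
    xmp_profile=DestroyStringInfo(xmp_profile);
*/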
static void *DestroyProfile(void *profile)
{
return((void *) DestroyStringInfo((StringInfo *) profile));
}
static inline const unsigned char *ReadResourceByte(const unsigned char *p,
unsigned char *quantum)
{
*quantum=(*p++);
return(p);
}
static inline const unsigned char *ReadResourceLong(const unsigned char *p,
unsigned int *quantum)
{
*quantum=(unsigned int) (*p++) << 24;
*quantum|=(unsigned int) (*p++) << 16;
*quantum|=(unsigned int) (*p++) << 8;
*quantum|=(unsigned int) (*p++);
return(p);
}
static inline const unsigned char *ReadResourceShort(const unsigned char *p,
unsigned short *quantum)
{
*quantum=(unsigned short) (*p++) << 8;
*quantum|=(unsigned short) (*p++);
return(p);
}
static inline void WriteResourceLong(unsigned char *p,
const unsigned int quantum)
{
unsigned char
buffer[4];
buffer[0]=(unsigned char) (quantum >> 24);
buffer[1]=(unsigned char) (quantum >> 16);
buffer[2]=(unsigned char) (quantum >> 8);
buffer[3]=(unsigned char) quantum;
(void) memcpy(p,buffer,4);
}
static void WriteTo8BimProfile(Image *image,const char *name,
const StringInfo *profile)
{
const unsigned char
*datum,
*q;
register const unsigned char
*p;
size_t
length;
StringInfo
*profile_8bim;
ssize_t
count;
unsigned char
length_byte;
unsigned int
value;
unsigned short
id,
profile_id;
if (LocaleCompare(name,"icc") == 0)
profile_id=0x040f;
else
if (LocaleCompare(name,"iptc") == 0)
profile_id=0x0404;
else
if (LocaleCompare(name,"xmp") == 0)
profile_id=0x0424;
else
return;
profile_8bim=(StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
image->profiles,"8bim");
if (profile_8bim == (StringInfo *) NULL)
return;
datum=GetStringInfoDatum(profile_8bim);
length=GetStringInfoLength(profile_8bim);
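/*
  Each Photoshop resource record is laid out as the literal "8BIM", a
  2-byte resource id, a Pascal-style name (a length byte plus the name,
  padded to an even size), a 4-byte data length, and the data itself,
  also padded to an even size.  Walk the records until the requested
  profile id is found, then splice the replacement profile in place.
*/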
for (p=datum; p < (datum+length-16); )
{
q=p;
if (LocaleNCompare((char *) p,"8BIM",4) != 0)
break;
p+=4;
p=ReadResourceShort(p,&id);
p=ReadResourceByte(p,&length_byte);
p+=length_byte;
if (((length_byte+1) & 0x01) != 0)
p++;
if (p > (datum+length-4))
break;
p=ReadResourceLong(p,&value);
count=(ssize_t) value;
if ((count & 0x01) != 0)
count++;
if ((count < 0) || (p > (datum+length-count)) || (count > (ssize_t) length))
break;
if (id != profile_id)
p+=count;
else
{
size_t
extent,
offset;
ssize_t
extract_extent;
StringInfo
*extract_profile;
extract_extent=0;
extent=(datum+length)-(p+count);
if (profile == (StringInfo *) NULL)
{
offset=(q-datum);
extract_profile=AcquireStringInfo(offset+extent);
(void) memcpy(extract_profile->datum,datum,offset);
}
else
{
offset=(p-datum);
extract_extent=profile->length;
if ((extract_extent & 0x01) != 0)
extract_extent++;
extract_profile=AcquireStringInfo(offset+extract_extent+extent);
(void) memcpy(extract_profile->datum,datum,offset-4);
WriteResourceLong(extract_profile->datum+offset-4,(unsigned int)
profile->length);
(void) memcpy(extract_profile->datum+offset,
profile->datum,profile->length);
}
(void) memcpy(extract_profile->datum+offset+extract_extent,
p+count,extent);
(void) AddValueToSplayTree((SplayTreeInfo *) image->profiles,
ConstantString("8bim"),CloneStringInfo(extract_profile));
extract_profile=DestroyStringInfo(extract_profile);
break;
}
}
}
static void GetProfilesFromResourceBlock(Image *image,
const StringInfo *resource_block,ExceptionInfo *exception)
{
const unsigned char
*datum;
register const unsigned char
*p;
size_t
length;
ssize_t
count;
StringInfo
*profile;
unsigned char
length_byte;
unsigned int
value;
unsigned short
id;
datum=GetStringInfoDatum(resource_block);
length=GetStringInfoLength(resource_block);
for (p=datum; p < (datum+length-16); )
{
if (LocaleNCompare((char *) p,"8BIM",4) != 0)
break;
p+=4;
p=ReadResourceShort(p,&id);
p=ReadResourceByte(p,&length_byte);
p+=length_byte;
if (((length_byte+1) & 0x01) != 0)
p++;
if (p > (datum+length-4))
break;
p=ReadResourceLong(p,&value);
count=(ssize_t) value;
if ((p > (datum+length-count)) || (count > (ssize_t) length) || (count < 0))
break;
switch (id)
{
case 0x03ed:
{
unsigned int
resolution;
unsigned short
units;
/*
Resolution.
*/
if (count < 10)
break;
p=ReadResourceLong(p,&resolution);
image->resolution.x=((double) resolution)/65536.0;
p=ReadResourceShort(p,&units)+2;
p=ReadResourceLong(p,&resolution)+4;
image->resolution.y=((double) resolution)/65536.0;
/*
Values are always stored as pixels per inch.
*/
if ((ResolutionType) units != PixelsPerCentimeterResolution)
image->units=PixelsPerInchResolution;
else
{
image->units=PixelsPerCentimeterResolution;
image->resolution.x/=2.54;
image->resolution.y/=2.54;
}
break;
}
case 0x0404:
{
/*
IPTC Profile.
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"iptc",profile,MagickTrue,
exception);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
case 0x040c:
{
/*
Thumbnail.
*/
p+=count;
break;
}
case 0x040f:
{
/*
ICC Profile.
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"icc",profile,MagickTrue,
exception);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
case 0x0422:
{
/*
EXIF Profile.
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"exif",profile,MagickTrue,
exception);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
case 0x0424:
{
/*
XMP Profile.
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"xmp",profile,MagickTrue,
exception);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
default:
{
p+=count;
break;
}
}
if ((count & 0x01) != 0)
p++;
}
}
static MagickBooleanType SetImageProfileInternal(Image *image,const char *name,
const StringInfo *profile,const MagickBooleanType recursive,
ExceptionInfo *exception)
{
char
key[MagickPathExtent],
property[MagickPathExtent];
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
DestroyProfile);
(void) CopyMagickString(key,name,MagickPathExtent);
LocaleLower(key);
status=AddValueToSplayTree((SplayTreeInfo *) image->profiles,
ConstantString(key),CloneStringInfo(profile));
if (status != MagickFalse)
{
if (LocaleCompare(name,"8bim") == 0)
GetProfilesFromResourceBlock(image,profile,exception);
else
if (recursive == MagickFalse)
WriteTo8BimProfile(image,name,profile);
}
/*
Inject profile into image properties.
*/
(void) FormatLocaleString(property,MagickPathExtent,"%s:*",name);
(void) GetImageProperty(image,property,exception);
return(status);
}
MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name,
const StringInfo *profile,ExceptionInfo *exception)
{
return(SetImageProfileInternal(image,name,profile,MagickFalse,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageProfiles() synchronizes image properties with the image profiles.
% Currently only the EXIF and 8BIM resolution and units, and the EXIF
% orientation, are updated.
%
% The format of the SyncImageProfiles method is:
%
% MagickBooleanType SyncImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
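/*
  Illustrative sketch (editorial, not part of the original MagickCore
  sources): an image coder would typically call SyncImageProfiles() just
  before writing so that the 8BIM and EXIF payloads reflect the current
  image resolution, units, and orientation.  The image and status variables
  are assumptions for the example only.

    if (SyncImageProfiles(image) == MagickFalse)
      status=MagickFalse;
*/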
static inline int ReadProfileByte(unsigned char **p,size_t *length)
{
int
c;
if (*length < 1)
return(EOF);
c=(int) (*(*p)++);
(*length)--;
return(c);
}
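/*
  The helpers below assemble 16- and 32-bit values from the profile buffer
  in the requested byte order and reinterpret them as signed quantities
  through a union rather than an out-of-range cast.
*/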
static inline signed short ReadProfileShort(const EndianType endian,
unsigned char *buffer)
{
union
{
unsigned int
unsigned_value;
signed int
signed_value;
} quantum;
unsigned short
value;
if (endian == LSBEndian)
{
value=(unsigned short) buffer[1] << 8;
value|=(unsigned short) buffer[0];
quantum.unsigned_value=value & 0xffff;
return(quantum.signed_value);
}
value=(unsigned short) buffer[0] << 8;
value|=(unsigned short) buffer[1];
quantum.unsigned_value=value & 0xffff;
return(quantum.signed_value);
}
static inline signed int ReadProfileLong(const EndianType endian,
unsigned char *buffer)
{
union
{
unsigned int
unsigned_value;
signed int
signed_value;
} quantum;
unsigned int
value;
if (endian == LSBEndian)
{
value=(unsigned int) buffer[3] << 24;
value|=(unsigned int) buffer[2] << 16;
value|=(unsigned int) buffer[1] << 8;
value|=(unsigned int) buffer[0];
quantum.unsigned_value=value & 0xffffffff;
return(quantum.signed_value);
}
value=(unsigned int) buffer[0] << 24;
value|=(unsigned int) buffer[1] << 16;
value|=(unsigned int) buffer[2] << 8;
value|=(unsigned int) buffer[3];
quantum.unsigned_value=value & 0xffffffff;
return(quantum.signed_value);
}
static inline signed int ReadProfileMSBLong(unsigned char **p,size_t *length)
{
signed int
value;
if (*length < 4)
return(0);
value=ReadProfileLong(MSBEndian,*p);
(*length)-=4;
*p+=4;
return(value);
}
static inline signed short ReadProfileMSBShort(unsigned char **p,
size_t *length)
{
signed short
value;
if (*length < 2)
return(0);
value=ReadProfileShort(MSBEndian,*p);
(*length)-=2;
*p+=2;
return(value);
}
static inline void WriteProfileLong(const EndianType endian,
const size_t value,unsigned char *p)
{
unsigned char
buffer[4];
if (endian == LSBEndian)
{
buffer[0]=(unsigned char) value;
buffer[1]=(unsigned char) (value >> 8);
buffer[2]=(unsigned char) (value >> 16);
buffer[3]=(unsigned char) (value >> 24);
(void) memcpy(p,buffer,4);
return;
}
buffer[0]=(unsigned char) (value >> 24);
buffer[1]=(unsigned char) (value >> 16);
buffer[2]=(unsigned char) (value >> 8);
buffer[3]=(unsigned char) value;
(void) memcpy(p,buffer,4);
}
static void WriteProfileShort(const EndianType endian,
const unsigned short value,unsigned char *p)
{
unsigned char
buffer[2];
if (endian == LSBEndian)
{
buffer[0]=(unsigned char) value;
buffer[1]=(unsigned char) (value >> 8);
(void) memcpy(p,buffer,2);
return;
}
buffer[0]=(unsigned char) (value >> 8);
buffer[1]=(unsigned char) value;
(void) memcpy(p,buffer,2);
}
static MagickBooleanType Sync8BimProfile(Image *image,StringInfo *profile)
{
size_t
length;
ssize_t
count;
unsigned char
*p;
unsigned short
id;
length=GetStringInfoLength(profile);
p=GetStringInfoDatum(profile);
while (length != 0)
{
if (ReadProfileByte(&p,&length) != 0x38)
continue;
if (ReadProfileByte(&p,&length) != 0x42)
continue;
if (ReadProfileByte(&p,&length) != 0x49)
continue;
if (ReadProfileByte(&p,&length) != 0x4D)
continue;
if (length < 7)
return(MagickFalse);
id=ReadProfileMSBShort(&p,&length);
count=(ssize_t) ReadProfileByte(&p,&length);
if ((count >= (ssize_t) length) || (count < 0))
return(MagickFalse);
p+=count;
length-=count;
if ((*p & 0x01) == 0)
(void) ReadProfileByte(&p,&length);
count=(ssize_t) ReadProfileMSBLong(&p,&length);
if ((count > (ssize_t) length) || (count < 0))
return(MagickFalse);
if ((id == 0x3ED) && (count == 16))
{
if (image->units == PixelsPerCentimeterResolution)
WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x*2.54*
65536.0),p);
else
WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x*
65536.0),p);
WriteProfileShort(MSBEndian,(unsigned short) image->units,p+4);
if (image->units == PixelsPerCentimeterResolution)
WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y*2.54*
65536.0),p+8);
else
WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y*
65536.0),p+8);
WriteProfileShort(MSBEndian,(unsigned short) image->units,p+12);
}
p+=count;
length-=count;
}
return(MagickTrue);
}
MagickBooleanType SyncExifProfile(Image *image,StringInfo *profile)
{
#define MaxDirectoryStack 16
#define EXIF_DELIMITER "\n"
#define EXIF_NUM_FORMATS 12
#define TAG_EXIF_OFFSET 0x8769
#define TAG_INTEROP_OFFSET 0xa005
typedef struct _DirectoryInfo
{
unsigned char
*directory;
size_t
entry;
} DirectoryInfo;
DirectoryInfo
directory_stack[MaxDirectoryStack];
EndianType
endian;
size_t
entry,
length,
number_entries;
SplayTreeInfo
*exif_resources;
ssize_t
id,
level,
offset;
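/*
  Bytes per component for each TIFF/EXIF format code (1=BYTE, 2=ASCII,
  3=SHORT, 4=LONG, 5=RATIONAL, 6=SBYTE, 7=UNDEFINED, 8=SSHORT, 9=SLONG,
  10=SRATIONAL, 11=FLOAT, 12=DOUBLE); index 0 is unused.
*/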
static int
format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};
unsigned char
*directory,
*exif;
/*
Set EXIF resolution tag.
*/
length=GetStringInfoLength(profile);
exif=GetStringInfoDatum(profile);
if (length < 16)
return(MagickFalse);
id=(ssize_t) ReadProfileShort(LSBEndian,exif);
if ((id != 0x4949) && (id != 0x4D4D))
{
while (length != 0)
{
if (ReadProfileByte(&exif,&length) != 0x45)
continue;
if (ReadProfileByte(&exif,&length) != 0x78)
continue;
if (ReadProfileByte(&exif,&length) != 0x69)
continue;
if (ReadProfileByte(&exif,&length) != 0x66)
continue;
if (ReadProfileByte(&exif,&length) != 0x00)
continue;
if (ReadProfileByte(&exif,&length) != 0x00)
continue;
break;
}
if (length < 16)
return(MagickFalse);
id=(ssize_t) ReadProfileShort(LSBEndian,exif);
}
endian=LSBEndian;
if (id == 0x4949)
endian=LSBEndian;
else
if (id == 0x4D4D)
endian=MSBEndian;
else
return(MagickFalse);
if (ReadProfileShort(endian,exif+2) != 0x002a)
return(MagickFalse);
/*
  This is the offset to the first IFD: the TIFF header is the 2-byte
  byte-order mark, the 0x002a magic number, and then a 4-byte offset to
  the first image file directory.
*/
offset=(ssize_t) ReadProfileLong(endian,exif+4);
if ((offset < 0) || ((size_t) offset >= length))
return(MagickFalse);
directory=exif+offset;
level=0;
entry=0;
exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
(void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
do
{
if (level > 0)
{
level--;
directory=directory_stack[level].directory;
entry=directory_stack[level].entry;
}
if ((directory < exif) || (directory > (exif+length-2)))
break;
/*
Determine how many entries there are in the current IFD.
*/
number_entries=ReadProfileShort(endian,directory);
for ( ; entry < number_entries; entry++)
{
int
components;
register unsigned char
*p,
*q;
size_t
number_bytes;
ssize_t
format,
tag_value;
q=(unsigned char *) (directory+2+(12*entry));
if (q > (exif+length-12))
break; /* corrupt EXIF */
if (GetValueFromSplayTree(exif_resources,q) == q)
break;
(void) AddValueToSplayTree(exif_resources,q,q);
tag_value=(ssize_t) ReadProfileShort(endian,q);
format=(ssize_t) ReadProfileShort(endian,q+2);
if ((format < 0) || ((format-1) >= EXIF_NUM_FORMATS))
break;
components=(int) ReadProfileLong(endian,q+4);
if (components < 0)
break; /* corrupt EXIF */
number_bytes=(size_t) components*format_bytes[format];
if ((ssize_t) number_bytes < components)
break; /* prevent overflow */
if (number_bytes <= 4)
p=q+8;
else
{
/*
The directory entry contains an offset.
*/
offset=(ssize_t) ReadProfileLong(endian,q+8);
if ((offset < 0) || ((size_t) (offset+number_bytes) > length))
continue;
if (~length < number_bytes)
continue; /* prevent overflow */
p=(unsigned char *) (exif+offset);
}
switch (tag_value)
{
case 0x011a:
{
(void) WriteProfileLong(endian,(size_t) (image->resolution.x+0.5),p);
(void) WriteProfileLong(endian,1UL,p+4);
break;
}
case 0x011b:
{
(void) WriteProfileLong(endian,(size_t) (image->resolution.y+0.5),p);
(void) WriteProfileLong(endian,1UL,p+4);
break;
}
case 0x0112:
{
if (number_bytes == 4)
{
(void) WriteProfileLong(endian,(size_t) image->orientation,p);
break;
}
(void) WriteProfileShort(endian,(unsigned short) image->orientation,
p);
break;
}
case 0x0128:
{
if (number_bytes == 4)
{
(void) WriteProfileLong(endian,(size_t) (image->units+1),p);
break;
}
(void) WriteProfileShort(endian,(unsigned short) (image->units+1),p);
break;
}
default:
break;
}
if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET))
{
offset=(ssize_t) ReadProfileLong(endian,p);
if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
{
directory_stack[level].directory=directory;
entry++;
directory_stack[level].entry=entry;
level++;
directory_stack[level].directory=exif+offset;
directory_stack[level].entry=0;
level++;
if ((directory+2+(12*number_entries)) > (exif+length))
break;
offset=(ssize_t) ReadProfileLong(endian,directory+2+(12*
number_entries));
if ((offset != 0) && ((size_t) offset < length) &&
(level < (MaxDirectoryStack-2)))
{
directory_stack[level].directory=exif+offset;
directory_stack[level].entry=0;
level++;
}
}
break;
}
}
} while (level > 0);
exif_resources=DestroySplayTree(exif_resources);
return(MagickTrue);
}
MagickPrivate MagickBooleanType SyncImageProfiles(Image *image)
{
MagickBooleanType
status;
StringInfo
*profile;
status=MagickTrue;
profile=(StringInfo *) GetImageProfile(image,"8BIM");
if (profile != (StringInfo *) NULL)
if (Sync8BimProfile(image,profile) == MagickFalse)
status=MagickFalse;
profile=(StringInfo *) GetImageProfile(image,"EXIF");
if (profile != (StringInfo *) NULL)
if (SyncExifProfile(image,profile) == MagickFalse)
status=MagickFalse;
return(status);
}
|
sparselu-task-dep.c | /**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */
/**********************************************************************************************/
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <libgen.h>
#include "sparselu.h"
void sparselu_par_call_task_dep(float **BENCH, int matrix_size, int submatrix_size)
{
int ii, jj, kk;
#pragma omp parallel private(kk,ii,jj) shared(BENCH)
#pragma omp single /* nowait */
{
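/*
  Sparse LU factorization expressed as an OpenMP task graph: for each
  diagonal block kk, lu0 factors the diagonal block, fwd and bdiv update
  the blocks in row kk and column kk, and bmod updates the trailing
  submatrix.  The depend clauses on the BENCH block pointers let the
  runtime order the tasks instead of requiring a barrier per kk step.
*/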
/*#pragma omp task untied*/
for (kk=0; kk<matrix_size; kk++)
{
#pragma omp task firstprivate(kk) shared(BENCH) depend(inout: BENCH[kk*matrix_size+kk:submatrix_size*submatrix_size])
lu0(BENCH[kk*matrix_size+kk], submatrix_size);
for (jj=kk+1; jj<matrix_size; jj++)
if (BENCH[kk*matrix_size+jj] != NULL)
{
#pragma omp task firstprivate(kk, jj) shared(BENCH) depend(in: BENCH[kk*matrix_size+kk:submatrix_size*submatrix_size]) depend(inout: BENCH[kk*matrix_size+jj:submatrix_size*submatrix_size])
fwd(BENCH[kk*matrix_size+kk], BENCH[kk*matrix_size+jj], submatrix_size);
}
for (ii=kk+1; ii<matrix_size; ii++)
if (BENCH[ii*matrix_size+kk] != NULL)
{
#pragma omp task firstprivate(kk, ii) shared(BENCH) depend(in: BENCH[kk*matrix_size+kk:submatrix_size*submatrix_size]) depend(inout: BENCH[ii*matrix_size+kk:submatrix_size*submatrix_size])
bdiv (BENCH[kk*matrix_size+kk], BENCH[ii*matrix_size+kk], submatrix_size);
}
for (ii=kk+1; ii<matrix_size; ii++)
if (BENCH[ii*matrix_size+kk] != NULL)
for (jj=kk+1; jj<matrix_size; jj++)
if (BENCH[kk*matrix_size+jj] != NULL)
{
if (BENCH[ii*matrix_size+jj]==NULL) BENCH[ii*matrix_size+jj] = allocate_clean_block(submatrix_size);
#pragma omp task firstprivate(kk, jj, ii) shared(BENCH) \
depend(in: BENCH[ii*matrix_size+kk:submatrix_size*submatrix_size], BENCH[kk*matrix_size+jj:submatrix_size*submatrix_size]) \
depend(inout: BENCH[ii*matrix_size+jj:submatrix_size*submatrix_size])
bmod(BENCH[ii*matrix_size+kk], BENCH[kk*matrix_size+jj], BENCH[ii*matrix_size+jj], submatrix_size);
}
}
#pragma omp taskwait
}
}
|
zscale.c | #include "zscale.h"
#include "dzscal.h"
fint zscale_(const fnat m[static restrict 1], const fnat n[static restrict 1], double Ar[static restrict VDL], const fnat ldAr[static restrict 1], double Ai[static restrict VDL], const fnat ldAi[static restrict 1], const fint e[static restrict 1])
{
#ifndef NDEBUG
if (IS_NOT_VFPENV)
return -8;
if (*m & VDL_1)
return -1;
if (IS_NOT_ALIGNED(Ar))
return -3;
if (*ldAr < *m)
return -4;
if (*ldAr & VDL_1)
return -4;
if (IS_NOT_ALIGNED(Ai))
return -5;
if (*ldAi < *m)
return -6;
if (*ldAi & VDL_1)
return -6;
#endif /* !NDEBUG */
if (!*e)
return 0;
const double e_ = (double)*e;
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(m,n,Ar,ldAr,e_)
DZSCAL_LOOP(Ar,ldAr);
#pragma omp parallel for default(none) shared(m,n,Ai,ldAi,e_)
DZSCAL_LOOP(Ai,ldAi);
return 1;
#else /* !_OPENMP */
register const VD s = _mm512_set1_pd(e_);
DZSCAL_LOOP(Ar,ldAr);
DZSCAL_LOOP(Ai,ldAi);
return 0;
#endif /* ?_OPENMP */
}
|
gen_fffc.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_utilities.h"
#include "hypre_hopscotch_hash.h"
#include "_hypre_parcsr_mv.h"
#include "_hypre_lapack.h"
#include "_hypre_blas.h"
/* -----------------------------------------------------------------------------
* generate AFF or AFC
* ----------------------------------------------------------------------------- */
HYPRE_Int
hypre_ParCSRMatrixGenerateFFFC( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
HYPRE_BigInt *cpts_starts,
hypre_ParCSRMatrix *S,
hypre_ParCSRMatrix **A_FC_ptr,
hypre_ParCSRMatrix **A_FF_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
/* diag part of A */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
/* off-diag part of A */
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
/* diag part of S */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
/* off-diag part of S */
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
hypre_ParCSRMatrix *A_FC;
hypre_CSRMatrix *A_FC_diag, *A_FC_offd;
HYPRE_Int *A_FC_diag_i, *A_FC_diag_j, *A_FC_offd_i, *A_FC_offd_j=NULL;
HYPRE_Complex *A_FC_diag_data, *A_FC_offd_data=NULL;
HYPRE_Int num_cols_offd_A_FC;
HYPRE_BigInt *col_map_offd_A_FC = NULL;
hypre_ParCSRMatrix *A_FF;
hypre_CSRMatrix *A_FF_diag, *A_FF_offd;
HYPRE_Int *A_FF_diag_i, *A_FF_diag_j, *A_FF_offd_i, *A_FF_offd_j;
HYPRE_Complex *A_FF_diag_data, *A_FF_offd_data;
HYPRE_Int num_cols_offd_A_FF;
HYPRE_BigInt *col_map_offd_A_FF = NULL;
HYPRE_Int *fine_to_coarse;
HYPRE_Int *fine_to_fine;
HYPRE_Int *fine_to_coarse_offd = NULL;
HYPRE_Int *fine_to_fine_offd = NULL;
HYPRE_Int i, j, jj;
HYPRE_Int startc, index;
HYPRE_Int cpt, fpt, row;
HYPRE_Int *CF_marker_offd = NULL, *marker_offd=NULL;
HYPRE_Int *int_buf_data = NULL;
HYPRE_BigInt *big_convert;
HYPRE_BigInt *big_convert_offd = NULL;
HYPRE_BigInt *big_buf_data = NULL;
HYPRE_BigInt total_global_fpts, total_global_cpts, *fpts_starts;
HYPRE_Int my_id, num_procs, num_sends;
HYPRE_Int d_count_FF, d_count_FC, o_count_FF, o_count_FC;
HYPRE_Int n_Fpts;
HYPRE_Int *cpt_array, *fpt_array;
HYPRE_Int start, stop;
HYPRE_Int num_threads;
num_threads = hypre_NumThreads();
/* MPI size and rank*/
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
fine_to_fine = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
big_convert = hypre_CTAlloc(HYPRE_BigInt, n_fine, HYPRE_MEMORY_HOST);
cpt_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST);
fpt_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST);
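/*
  Two-pass thread decomposition: each thread first counts the C and F
  points in its contiguous slice of rows (cpt_array and fpt_array hold
  per-thread counts), thread 0 converts the counts into prefix sums, and
  each thread then numbers its own C and F points starting from its
  prefix-sum offset.
*/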
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,jj,start,stop,row,cpt,fpt,d_count_FC,d_count_FF,o_count_FC,o_count_FF)
#endif
{
HYPRE_Int my_thread_num = hypre_GetThreadNum();
start = (n_fine/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{
stop = n_fine;
}
else
{
stop = (n_fine/num_threads)*(my_thread_num+1);
}
for (i=start; i < stop; i++)
{
if (CF_marker[i] > 0)
{
cpt_array[my_thread_num+1]++;
}
else
{
fpt_array[my_thread_num+1]++;
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
for (i=1; i < num_threads; i++)
{
cpt_array[i+1] += cpt_array[i];
fpt_array[i+1] += fpt_array[i];
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
cpt = cpt_array[my_thread_num];
fpt = fpt_array[my_thread_num];
for (i=start; i < stop; i++)
{
if (CF_marker[i] > 0)
{
fine_to_coarse[i] = cpt++;
fine_to_fine[i] = -1;
}
else
{
fine_to_fine[i] = fpt++;
fine_to_coarse[i] = -1;
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
HYPRE_BigInt big_Fpts;
n_Fpts = fpt_array[num_threads];
big_Fpts = n_Fpts;
#ifdef HYPRE_NO_GLOBAL_PARTITION
fpts_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
hypre_MPI_Scan(&big_Fpts, fpts_starts+1, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
fpts_starts[0] = fpts_starts[1] - big_Fpts;
if (my_id == num_procs - 1)
{
total_global_fpts = fpts_starts[1];
total_global_cpts = cpts_starts[1];
}
hypre_MPI_Bcast(&total_global_fpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
fpts_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
hypre_MPI_Allgather(&big_Fpts, 1, HYPRE_MPI_BIG_INT, &fpts_starts[1], 1, HYPRE_MPI_BIG_INT, comm);
fpts_starts[0] = 0;
for (i = 2; i < num_procs+1; i++)
{
fpts_starts[i] += fpts_starts[i-1];
}
total_global_fpts = fpts_starts[num_procs];
total_global_cpts = cpts_starts[num_procs];
#endif
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
for (i=start; i < stop; i++)
{
if (CF_marker[i] > 0)
{
#ifdef HYPRE_NO_GLOBAL_PARTITION
big_convert[i] = (HYPRE_BigInt)fine_to_coarse[i] + cpts_starts[0];
#else
big_convert[i] = (HYPRE_BigInt)fine_to_coarse[i] + cpts_starts[my_id];
#endif
}
else
{
#ifdef HYPRE_NO_GLOBAL_PARTITION
big_convert[i] = (HYPRE_BigInt)fine_to_fine[i] + fpts_starts[0];
#else
big_convert[i] = (HYPRE_BigInt)fine_to_fine[i] + fpts_starts[my_id];
#endif
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
if (num_cols_A_offd)
{
CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
big_convert_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
fine_to_fine_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
}
index = 0;
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
big_buf_data = hypre_CTAlloc(HYPRE_BigInt, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
for (i = 0; i < num_sends; i++)
{
startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
int_buf_data[index] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
big_buf_data[index++] = big_convert[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data, big_convert_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
for (i = 0; i < n_fine; i++)
{
if (CF_marker[i] < 0)
{
for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++)
{
marker_offd[S_offd_j[j]] = 1;
}
}
}
num_cols_offd_A_FC = 0;
num_cols_offd_A_FF = 0;
if (num_cols_A_offd)
{
for (i=0; i < num_cols_A_offd; i++)
{
if (CF_marker_offd[i] > 0 && marker_offd[i] > 0)
{
fine_to_coarse_offd[i] = num_cols_offd_A_FC++;
fine_to_fine_offd[i] = -1;
}
else if (CF_marker_offd[i] < 0 && marker_offd[i] > 0)
{
fine_to_fine_offd[i] = num_cols_offd_A_FF++;
fine_to_coarse_offd[i] = -1;
}
}
col_map_offd_A_FF = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_A_FF, HYPRE_MEMORY_HOST);
col_map_offd_A_FC = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_A_FC, HYPRE_MEMORY_HOST);
cpt = 0;
fpt = 0;
for (i=0; i < num_cols_A_offd; i++)
{
if (CF_marker_offd[i] > 0 && marker_offd[i] > 0)
{
col_map_offd_A_FC[cpt++] = big_convert_offd[i];
}
else if (CF_marker_offd[i] < 0 && marker_offd[i] > 0)
{
col_map_offd_A_FF[fpt++] = big_convert_offd[i];
}
}
}
A_FF_diag_i = hypre_CTAlloc(HYPRE_Int,n_Fpts+1, memory_location_P);
A_FC_diag_i = hypre_CTAlloc(HYPRE_Int,n_Fpts+1, memory_location_P);
A_FF_offd_i = hypre_CTAlloc(HYPRE_Int,n_Fpts+1, memory_location_P);
A_FC_offd_i = hypre_CTAlloc(HYPRE_Int,n_Fpts+1, memory_location_P);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
d_count_FC = 0;
d_count_FF = 0;
o_count_FC = 0;
o_count_FF = 0;
row = fpt_array[my_thread_num];
for (i=start; i < stop; i++)
{
if (CF_marker[i] < 0)
{
row++;
d_count_FF++; /* account for diagonal element */
for (j = S_diag_i[i]; j < S_diag_i[i+1]; j++)
{
jj = S_diag_j[j];
if (CF_marker[jj] > 0)
d_count_FC++;
else
d_count_FF++;
}
A_FF_diag_i[row] = d_count_FF;
A_FC_diag_i[row] = d_count_FC;
for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++)
{
jj = S_offd_j[j];
if (CF_marker_offd[jj] > 0)
o_count_FC++;
else
o_count_FF++;
}
A_FF_offd_i[row] = o_count_FF;
A_FC_offd_i[row] = o_count_FC;
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
HYPRE_Int fpt2;
for (i=1; i<num_threads+1; i++)
{
fpt = fpt_array[i];
fpt2 = fpt_array[i-1];
if (fpt == fpt2)
{
continue;
}
A_FC_diag_i[fpt] += A_FC_diag_i[fpt2];
A_FF_diag_i[fpt] += A_FF_diag_i[fpt2];
A_FC_offd_i[fpt] += A_FC_offd_i[fpt2];
A_FF_offd_i[fpt] += A_FF_offd_i[fpt2];
}
row = fpt_array[num_threads];
d_count_FC = A_FC_diag_i[row];
d_count_FF = A_FF_diag_i[row];
o_count_FC = A_FC_offd_i[row];
o_count_FF = A_FF_offd_i[row];
A_FF_diag_j = hypre_CTAlloc(HYPRE_Int,d_count_FF, memory_location_P);
A_FC_diag_j = hypre_CTAlloc(HYPRE_Int,d_count_FC, memory_location_P);
A_FF_offd_j = hypre_CTAlloc(HYPRE_Int,o_count_FF, memory_location_P);
A_FC_offd_j = hypre_CTAlloc(HYPRE_Int,o_count_FC, memory_location_P);
A_FF_diag_data = hypre_CTAlloc(HYPRE_Real,d_count_FF, memory_location_P);
A_FC_diag_data = hypre_CTAlloc(HYPRE_Real,d_count_FC, memory_location_P);
A_FF_offd_data = hypre_CTAlloc(HYPRE_Real,o_count_FF, memory_location_P);
A_FC_offd_data = hypre_CTAlloc(HYPRE_Real,o_count_FC, memory_location_P);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
row = fpt_array[my_thread_num];
d_count_FC = A_FC_diag_i[row];
d_count_FF = A_FF_diag_i[row];
o_count_FC = A_FC_offd_i[row];
o_count_FF = A_FF_offd_i[row];
for (i=start; i < stop; i++)
{
if (CF_marker[i] < 0)
{
HYPRE_Int jS, jA;
row++;
jA = A_diag_i[i];
A_FF_diag_j[d_count_FF] = fine_to_fine[A_diag_j[jA]];
A_FF_diag_data[d_count_FF++] = A_diag_data[jA++];
for (j = S_diag_i[i]; j < S_diag_i[i+1]; j++)
{
jA = A_diag_i[i]+1;
jS = S_diag_j[j];
while (A_diag_j[jA] != jS) jA++;
if (CF_marker[S_diag_j[j]] > 0)
{
A_FC_diag_j[d_count_FC] = fine_to_coarse[A_diag_j[jA]];
A_FC_diag_data[d_count_FC++] = A_diag_data[jA++];
}
else
{
A_FF_diag_j[d_count_FF] = fine_to_fine[A_diag_j[jA]];
A_FF_diag_data[d_count_FF++] = A_diag_data[jA++];
}
}
A_FF_diag_i[row] = d_count_FF;
A_FC_diag_i[row] = d_count_FC;
for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++)
{
jA = A_offd_i[i];
jS = S_offd_j[j];
while (jS != A_offd_j[jA]) jA++;
if (CF_marker_offd[S_offd_j[j]] > 0)
{
A_FC_offd_j[o_count_FC] = fine_to_coarse_offd[A_offd_j[jA]];
A_FC_offd_data[o_count_FC++] = A_offd_data[jA++];
}
else
{
A_FF_offd_j[o_count_FF] = fine_to_fine_offd[A_offd_j[jA]];
A_FF_offd_data[o_count_FF++] = A_offd_data[jA++];
}
}
A_FF_offd_i[row] = o_count_FF;
A_FC_offd_i[row] = o_count_FC;
}
}
} /*end parallel region */
A_FC = hypre_ParCSRMatrixCreate(comm,
total_global_fpts,
total_global_cpts,
fpts_starts,
cpts_starts,
num_cols_offd_A_FC,
A_FC_diag_i[n_Fpts],
A_FC_offd_i[n_Fpts]);
A_FF = hypre_ParCSRMatrixCreate(comm,
total_global_fpts,
total_global_fpts,
fpts_starts,
fpts_starts,
num_cols_offd_A_FF,
A_FF_diag_i[n_Fpts],
A_FF_offd_i[n_Fpts]);
A_FC_diag = hypre_ParCSRMatrixDiag(A_FC);
hypre_CSRMatrixData(A_FC_diag) = A_FC_diag_data;
hypre_CSRMatrixI(A_FC_diag) = A_FC_diag_i;
hypre_CSRMatrixJ(A_FC_diag) = A_FC_diag_j;
A_FC_offd = hypre_ParCSRMatrixOffd(A_FC);
hypre_CSRMatrixData(A_FC_offd) = A_FC_offd_data;
hypre_CSRMatrixI(A_FC_offd) = A_FC_offd_i;
hypre_CSRMatrixJ(A_FC_offd) = A_FC_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(A_FC) = 1;
hypre_ParCSRMatrixOwnsColStarts(A_FC) = 0;
hypre_ParCSRMatrixColMapOffd(A_FC) = col_map_offd_A_FC;
hypre_CSRMatrixMemoryLocation(A_FC_diag) = memory_location_P;
hypre_CSRMatrixMemoryLocation(A_FC_offd) = memory_location_P;
A_FF_diag = hypre_ParCSRMatrixDiag(A_FF);
hypre_CSRMatrixData(A_FF_diag) = A_FF_diag_data;
hypre_CSRMatrixI(A_FF_diag) = A_FF_diag_i;
hypre_CSRMatrixJ(A_FF_diag) = A_FF_diag_j;
A_FF_offd = hypre_ParCSRMatrixOffd(A_FF);
hypre_CSRMatrixData(A_FF_offd) = A_FF_offd_data;
hypre_CSRMatrixI(A_FF_offd) = A_FF_offd_i;
hypre_CSRMatrixJ(A_FF_offd) = A_FF_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(A_FF) = 0;
hypre_ParCSRMatrixOwnsColStarts(A_FF) = 0;
hypre_ParCSRMatrixColMapOffd(A_FF) = col_map_offd_A_FF;
hypre_CSRMatrixMemoryLocation(A_FF_diag) = memory_location_P;
hypre_CSRMatrixMemoryLocation(A_FF_offd) = memory_location_P;
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_fine, HYPRE_MEMORY_HOST);
hypre_TFree(big_convert, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_fine_offd, HYPRE_MEMORY_HOST);
hypre_TFree(big_convert_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(big_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(cpt_array, HYPRE_MEMORY_HOST);
hypre_TFree(fpt_array, HYPRE_MEMORY_HOST);
*A_FC_ptr = A_FC;
*A_FF_ptr = A_FF;
return hypre_error_flag;
}
/* -----------------------------------------------------------------------------
* generate AFF, AFC, for 2 stage extended interpolation
* ----------------------------------------------------------------------------- */
HYPRE_Int
hypre_ParCSRMatrixGenerateFFFC3( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
HYPRE_BigInt *cpts_starts,
hypre_ParCSRMatrix *S,
hypre_ParCSRMatrix **A_FC_ptr,
hypre_ParCSRMatrix **A_FF_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
/* diag part of A */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
/* off-diag part of A */
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
/* diag part of S */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
/* off-diag part of S */
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
hypre_ParCSRMatrix *A_FC;
hypre_CSRMatrix *A_FC_diag, *A_FC_offd;
HYPRE_Int *A_FC_diag_i, *A_FC_diag_j, *A_FC_offd_i, *A_FC_offd_j=NULL;
HYPRE_Complex *A_FC_diag_data, *A_FC_offd_data=NULL;
HYPRE_Int num_cols_offd_A_FC;
HYPRE_BigInt *col_map_offd_A_FC = NULL;
hypre_ParCSRMatrix *A_FF;
hypre_CSRMatrix *A_FF_diag, *A_FF_offd;
HYPRE_Int *A_FF_diag_i, *A_FF_diag_j, *A_FF_offd_i, *A_FF_offd_j;
HYPRE_Complex *A_FF_diag_data, *A_FF_offd_data;
HYPRE_Int num_cols_offd_A_FF;
HYPRE_BigInt *col_map_offd_A_FF = NULL;
HYPRE_Int *fine_to_coarse;
HYPRE_Int *fine_to_fine;
HYPRE_Int *fine_to_coarse_offd = NULL;
HYPRE_Int *fine_to_fine_offd = NULL;
HYPRE_Int i, j, jj;
HYPRE_Int startc, index;
HYPRE_Int cpt, fpt, new_fpt, row, rowc;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *int_buf_data = NULL;
HYPRE_BigInt *big_convert;
HYPRE_BigInt *big_convert_offd = NULL;
HYPRE_BigInt *big_buf_data = NULL;
HYPRE_BigInt total_global_fpts, total_global_cpts, total_global_new_fpts;
HYPRE_BigInt *fpts_starts, *new_fpts_starts;
HYPRE_Int my_id, num_procs, num_sends;
HYPRE_Int d_count_FF, d_count_FC, o_count_FF, o_count_FC;
HYPRE_Int n_Fpts;
HYPRE_Int n_new_Fpts;
HYPRE_Int *cpt_array, *fpt_array, *new_fpt_array;
HYPRE_Int start, stop;
HYPRE_Int num_threads;
num_threads = hypre_NumThreads();
/* MPI size and rank*/
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
fine_to_fine = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
big_convert = hypre_CTAlloc(HYPRE_BigInt, n_fine, HYPRE_MEMORY_HOST);
cpt_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST);
fpt_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST);
new_fpt_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,jj,start,stop,row,rowc,cpt,new_fpt,fpt,d_count_FC,d_count_FF,o_count_FC,o_count_FF)
#endif
{
HYPRE_Int my_thread_num = hypre_GetThreadNum();
start = (n_fine/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{
stop = n_fine;
}
else
{
stop = (n_fine/num_threads)*(my_thread_num+1);
}
for (i=start; i < stop; i++)
{
if (CF_marker[i] > 0)
{
cpt_array[my_thread_num+1]++;
}
else if (CF_marker[i] == -2)
{
new_fpt_array[my_thread_num+1]++;
fpt_array[my_thread_num+1]++;
}
else
{
fpt_array[my_thread_num+1]++;
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
for (i=1; i < num_threads; i++)
{
cpt_array[i+1] += cpt_array[i];
fpt_array[i+1] += fpt_array[i];
new_fpt_array[i+1] += new_fpt_array[i];
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
cpt = cpt_array[my_thread_num];
fpt = fpt_array[my_thread_num];
for (i=start; i < stop; i++)
{
if (CF_marker[i] > 0)
{
fine_to_coarse[i] = cpt++;
fine_to_fine[i] = -1;
}
else
{
fine_to_fine[i] = fpt++;
fine_to_coarse[i] = -1;
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
HYPRE_BigInt big_Fpts, big_new_Fpts;
n_Fpts = fpt_array[num_threads];
n_new_Fpts = new_fpt_array[num_threads];
big_Fpts = n_Fpts;
big_new_Fpts = n_new_Fpts;
#ifdef HYPRE_NO_GLOBAL_PARTITION
fpts_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
new_fpts_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
hypre_MPI_Scan(&big_Fpts, fpts_starts+1, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
hypre_MPI_Scan(&big_new_Fpts, new_fpts_starts+1, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
fpts_starts[0] = fpts_starts[1] - big_Fpts;
new_fpts_starts[0] = new_fpts_starts[1] - big_new_Fpts;
if (my_id == num_procs - 1)
{
total_global_new_fpts = new_fpts_starts[1];
total_global_fpts = fpts_starts[1];
total_global_cpts = cpts_starts[1];
}
hypre_MPI_Bcast(&total_global_new_fpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
hypre_MPI_Bcast(&total_global_fpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
fpts_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
new_fpts_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
hypre_MPI_Allgather(&big_Fpts, 1, HYPRE_MPI_BIG_INT, &fpts_starts[1], 1, HYPRE_MPI_BIG_INT, comm);
hypre_MPI_Allgather(&big_new_Fpts, 1, HYPRE_MPI_BIG_INT, &new_fpts_starts[1], 1, HYPRE_MPI_BIG_INT, comm);
fpts_starts[0] = 0;
new_fpts_starts[0] = 0;
for (i = 2; i < num_procs+1; i++)
{
fpts_starts[i] += fpts_starts[i-1];
new_fpts_starts[i] += new_fpts_starts[i-1];
}
total_global_new_fpts = new_fpts_starts[num_procs];
total_global_fpts = fpts_starts[num_procs];
total_global_cpts = cpts_starts[num_procs];
#endif
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
for (i=start; i < stop; i++)
{
if (CF_marker[i] > 0)
{
#ifdef HYPRE_NO_GLOBAL_PARTITION
big_convert[i] = (HYPRE_BigInt)fine_to_coarse[i] + cpts_starts[0];
#else
big_convert[i] = (HYPRE_BigInt)fine_to_coarse[i] + cpts_starts[my_id];
#endif
}
else
{
#ifdef HYPRE_NO_GLOBAL_PARTITION
big_convert[i] = (HYPRE_BigInt)fine_to_fine[i] + fpts_starts[0];
#else
big_convert[i] = (HYPRE_BigInt)fine_to_fine[i] + fpts_starts[my_id];
#endif
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
if (num_cols_A_offd)
{
CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
big_convert_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
fine_to_fine_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
}
index = 0;
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
big_buf_data = hypre_CTAlloc(HYPRE_BigInt, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
for (i = 0; i < num_sends; i++)
{
startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
int_buf_data[index] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
big_buf_data[index++] = big_convert[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data, big_convert_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
num_cols_offd_A_FC = 0;
num_cols_offd_A_FF = 0;
if (num_cols_A_offd)
{
for (i=0; i < num_cols_A_offd; i++)
{
if (CF_marker_offd[i] > 0)
{
fine_to_coarse_offd[i] = num_cols_offd_A_FC++;
fine_to_fine_offd[i] = -1;
}
else
{
fine_to_fine_offd[i] = num_cols_offd_A_FF++;
fine_to_coarse_offd[i] = -1;
}
}
col_map_offd_A_FF = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_A_FF, HYPRE_MEMORY_HOST);
col_map_offd_A_FC = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_A_FC, HYPRE_MEMORY_HOST);
cpt = 0;
fpt = 0;
for (i=0; i < num_cols_A_offd; i++)
{
if (CF_marker_offd[i] > 0)
{
col_map_offd_A_FC[cpt++] = big_convert_offd[i];
}
else
{
col_map_offd_A_FF[fpt++] = big_convert_offd[i];
}
}
}
A_FF_diag_i = hypre_CTAlloc(HYPRE_Int,n_new_Fpts+1, memory_location_P);
A_FC_diag_i = hypre_CTAlloc(HYPRE_Int,n_Fpts+1, memory_location_P);
A_FF_offd_i = hypre_CTAlloc(HYPRE_Int,n_new_Fpts+1, memory_location_P);
A_FC_offd_i = hypre_CTAlloc(HYPRE_Int,n_Fpts+1, memory_location_P);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
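/* First pass over this thread's rows: count, per F row, how many entries go to the
   diag and offd parts of A_FF and A_FC; the running counts become the row pointers. */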
d_count_FC = 0;
d_count_FF = 0;
o_count_FC = 0;
o_count_FF = 0;
row = new_fpt_array[my_thread_num];
rowc = fpt_array[my_thread_num];
for (i=start; i < stop; i++)
{
if (CF_marker[i] == -2)
{
row++;
rowc++;
d_count_FF++; /* account for diagonal element */
for (j = S_diag_i[i]; j < S_diag_i[i+1]; j++)
{
jj = S_diag_j[j];
if (CF_marker[jj] > 0)
d_count_FC++;
else
d_count_FF++;
}
A_FF_diag_i[row] = d_count_FF;
A_FC_diag_i[rowc] = d_count_FC;
for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++)
{
jj = S_offd_j[j];
if (CF_marker_offd[jj] > 0)
o_count_FC++;
else
o_count_FF++;
}
A_FF_offd_i[row] = o_count_FF;
A_FC_offd_i[rowc] = o_count_FC;
}
else if (CF_marker[i] < 0)
{
rowc++;
for (j = S_diag_i[i]; j < S_diag_i[i+1]; j++)
{
jj = S_diag_j[j];
if (CF_marker[jj] > 0)
d_count_FC++;
}
A_FC_diag_i[rowc] = d_count_FC;
for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++)
{
jj = S_offd_j[j];
if (CF_marker_offd[jj] > 0)
o_count_FC++;
}
A_FC_offd_i[rowc] = o_count_FC;
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
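/* Thread 0 chains the per-thread row-pointer segments together (adding the previous
   thread's last count) and allocates the column-index and value arrays from the
   final totals. */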
if (my_thread_num == 0)
{
HYPRE_Int fpt2, new_fpt2;
for (i=1; i<num_threads+1; i++)
{
fpt = fpt_array[i];
new_fpt = new_fpt_array[i];
fpt2 = fpt_array[i-1];
new_fpt2 = new_fpt_array[i-1];
if (new_fpt != new_fpt2)
{
A_FF_diag_i[new_fpt] += A_FF_diag_i[new_fpt2];
A_FF_offd_i[new_fpt] += A_FF_offd_i[new_fpt2];
}
if (fpt != fpt2)
{
A_FC_diag_i[fpt] += A_FC_diag_i[fpt2];
A_FC_offd_i[fpt] += A_FC_offd_i[fpt2];
}
}
row = new_fpt_array[num_threads];
rowc = fpt_array[num_threads];
d_count_FC = A_FC_diag_i[rowc];
d_count_FF = A_FF_diag_i[row];
o_count_FC = A_FC_offd_i[rowc];
o_count_FF = A_FF_offd_i[row];
A_FF_diag_j = hypre_CTAlloc(HYPRE_Int,d_count_FF, memory_location_P);
A_FC_diag_j = hypre_CTAlloc(HYPRE_Int,d_count_FC, memory_location_P);
A_FF_offd_j = hypre_CTAlloc(HYPRE_Int,o_count_FF, memory_location_P);
A_FC_offd_j = hypre_CTAlloc(HYPRE_Int,o_count_FC, memory_location_P);
A_FF_diag_data = hypre_CTAlloc(HYPRE_Real,d_count_FF, memory_location_P);
A_FC_diag_data = hypre_CTAlloc(HYPRE_Real,d_count_FC, memory_location_P);
A_FF_offd_data = hypre_CTAlloc(HYPRE_Real,o_count_FF, memory_location_P);
A_FC_offd_data = hypre_CTAlloc(HYPRE_Real,o_count_FC, memory_location_P);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
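/* Second pass: for each strong connection in S, locate the matching entry in the
   corresponding row of A (the inner while loops scan the A row) and copy its column
   index and value into A_FF or A_FC; for rows marked -2 the diagonal of A is copied
   first. */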
row = new_fpt_array[my_thread_num];
rowc = fpt_array[my_thread_num];
d_count_FC = A_FC_diag_i[rowc];
d_count_FF = A_FF_diag_i[row];
o_count_FC = A_FC_offd_i[rowc];
o_count_FF = A_FF_offd_i[row];
for (i=start; i < stop; i++)
{
if (CF_marker[i] == -2)
{
HYPRE_Int jS, jA;
row++;
rowc++;
jA = A_diag_i[i];
A_FF_diag_j[d_count_FF] = fine_to_fine[A_diag_j[jA]];
A_FF_diag_data[d_count_FF++] = A_diag_data[jA++];
for (j = S_diag_i[i]; j < S_diag_i[i+1]; j++)
{
jA = A_diag_i[i]+1;
jS = S_diag_j[j];
while (A_diag_j[jA] != jS) jA++;
if (CF_marker[S_diag_j[j]] > 0)
{
A_FC_diag_j[d_count_FC] = fine_to_coarse[A_diag_j[jA]];
A_FC_diag_data[d_count_FC++] = A_diag_data[jA++];
}
else
{
A_FF_diag_j[d_count_FF] = fine_to_fine[A_diag_j[jA]];
A_FF_diag_data[d_count_FF++] = A_diag_data[jA++];
}
}
A_FF_diag_i[row] = d_count_FF;
A_FC_diag_i[rowc] = d_count_FC;
for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++)
{
jA = A_offd_i[i];
jS = S_offd_j[j];
while (jS != A_offd_j[jA]) jA++;
if (CF_marker_offd[S_offd_j[j]] > 0)
{
A_FC_offd_j[o_count_FC] = fine_to_coarse_offd[A_offd_j[jA]];
A_FC_offd_data[o_count_FC++] = A_offd_data[jA++];
}
else
{
A_FF_offd_j[o_count_FF] = fine_to_fine_offd[A_offd_j[jA]];
A_FF_offd_data[o_count_FF++] = A_offd_data[jA++];
}
}
A_FF_offd_i[row] = o_count_FF;
A_FC_offd_i[rowc] = o_count_FC;
}
else if (CF_marker[i] < 0)
{
HYPRE_Int jS, jA;
rowc++;
for (j = S_diag_i[i]; j < S_diag_i[i+1]; j++)
{
jA = A_diag_i[i]+1;
jS = S_diag_j[j];
while (A_diag_j[jA] != jS) jA++;
if (CF_marker[S_diag_j[j]] > 0)
{
A_FC_diag_j[d_count_FC] = fine_to_coarse[A_diag_j[jA]];
A_FC_diag_data[d_count_FC++] = A_diag_data[jA++];
}
}
A_FC_diag_i[rowc] = d_count_FC;
for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++)
{
jA = A_offd_i[i];
jS = S_offd_j[j];
while (jS != A_offd_j[jA]) jA++;
if (CF_marker_offd[S_offd_j[j]] > 0)
{
A_FC_offd_j[o_count_FC] = fine_to_coarse_offd[A_offd_j[jA]];
A_FC_offd_data[o_count_FC++] = A_offd_data[jA++];
}
}
A_FC_offd_i[rowc] = o_count_FC;
}
}
} /*end parallel region */
A_FC = hypre_ParCSRMatrixCreate(comm,
total_global_fpts,
total_global_cpts,
fpts_starts,
cpts_starts,
num_cols_offd_A_FC,
A_FC_diag_i[n_Fpts],
A_FC_offd_i[n_Fpts]);
A_FF = hypre_ParCSRMatrixCreate(comm,
total_global_new_fpts,
total_global_fpts,
new_fpts_starts,
fpts_starts,
num_cols_offd_A_FF,
A_FF_diag_i[n_new_Fpts],
A_FF_offd_i[n_new_Fpts]);
A_FC_diag = hypre_ParCSRMatrixDiag(A_FC);
hypre_CSRMatrixData(A_FC_diag) = A_FC_diag_data;
hypre_CSRMatrixI(A_FC_diag) = A_FC_diag_i;
hypre_CSRMatrixJ(A_FC_diag) = A_FC_diag_j;
A_FC_offd = hypre_ParCSRMatrixOffd(A_FC);
hypre_CSRMatrixData(A_FC_offd) = A_FC_offd_data;
hypre_CSRMatrixI(A_FC_offd) = A_FC_offd_i;
hypre_CSRMatrixJ(A_FC_offd) = A_FC_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(A_FC) = 1;
hypre_ParCSRMatrixOwnsColStarts(A_FC) = 0;
hypre_ParCSRMatrixColMapOffd(A_FC) = col_map_offd_A_FC;
hypre_CSRMatrixMemoryLocation(A_FC_diag) = memory_location_P;
hypre_CSRMatrixMemoryLocation(A_FC_offd) = memory_location_P;
A_FF_diag = hypre_ParCSRMatrixDiag(A_FF);
hypre_CSRMatrixData(A_FF_diag) = A_FF_diag_data;
hypre_CSRMatrixI(A_FF_diag) = A_FF_diag_i;
hypre_CSRMatrixJ(A_FF_diag) = A_FF_diag_j;
A_FF_offd = hypre_ParCSRMatrixOffd(A_FF);
hypre_CSRMatrixData(A_FF_offd) = A_FF_offd_data;
hypre_CSRMatrixI(A_FF_offd) = A_FF_offd_i;
hypre_CSRMatrixJ(A_FF_offd) = A_FF_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(A_FF) = 1;
hypre_ParCSRMatrixOwnsColStarts(A_FF) = 0;
hypre_ParCSRMatrixColMapOffd(A_FF) = col_map_offd_A_FF;
hypre_CSRMatrixMemoryLocation(A_FF_diag) = memory_location_P;
hypre_CSRMatrixMemoryLocation(A_FF_offd) = memory_location_P;
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_fine, HYPRE_MEMORY_HOST);
hypre_TFree(big_convert, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_fine_offd, HYPRE_MEMORY_HOST);
hypre_TFree(big_convert_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(big_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(cpt_array, HYPRE_MEMORY_HOST);
hypre_TFree(fpt_array, HYPRE_MEMORY_HOST);
hypre_TFree(new_fpt_array, HYPRE_MEMORY_HOST);
*A_FC_ptr = A_FC;
*A_FF_ptr = A_FF;
return hypre_error_flag;
}
/* -----------------------------------------------------------------------------
* generate AFF, AFC, and D_lambda for 2-stage extended+i(e) interpolation
* ----------------------------------------------------------------------------- */
HYPRE_Int
hypre_ParCSRMatrixGenerateFFFCD3( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
HYPRE_BigInt *cpts_starts,
hypre_ParCSRMatrix *S,
hypre_ParCSRMatrix **A_FC_ptr,
hypre_ParCSRMatrix **A_FF_ptr,
HYPRE_Real **D_lambda_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
/* diag part of A */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
/* off-diag part of A */
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
/* diag part of S */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
/* off-diag part of S */
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
HYPRE_Real *D_lambda;
hypre_ParCSRMatrix *A_FC;
hypre_CSRMatrix *A_FC_diag, *A_FC_offd;
HYPRE_Int *A_FC_diag_i, *A_FC_diag_j, *A_FC_offd_i, *A_FC_offd_j=NULL;
HYPRE_Complex *A_FC_diag_data, *A_FC_offd_data=NULL;
HYPRE_Int num_cols_offd_A_FC;
HYPRE_BigInt *col_map_offd_A_FC = NULL;
hypre_ParCSRMatrix *A_FF;
hypre_CSRMatrix *A_FF_diag, *A_FF_offd;
HYPRE_Int *A_FF_diag_i, *A_FF_diag_j, *A_FF_offd_i, *A_FF_offd_j;
HYPRE_Complex *A_FF_diag_data, *A_FF_offd_data;
HYPRE_Int num_cols_offd_A_FF;
HYPRE_BigInt *col_map_offd_A_FF = NULL;
HYPRE_Int *fine_to_coarse;
HYPRE_Int *fine_to_fine;
HYPRE_Int *fine_to_coarse_offd = NULL;
HYPRE_Int *fine_to_fine_offd = NULL;
HYPRE_Int i, j, jj;
HYPRE_Int startc, index;
HYPRE_Int cpt, fpt, new_fpt, row, rowc;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *int_buf_data = NULL;
HYPRE_BigInt *big_convert;
HYPRE_BigInt *big_convert_offd = NULL;
HYPRE_BigInt *big_buf_data = NULL;
HYPRE_BigInt total_global_fpts, total_global_cpts, total_global_new_fpts;
HYPRE_BigInt *fpts_starts, *new_fpts_starts;
HYPRE_Int my_id, num_procs, num_sends;
HYPRE_Int d_count_FF, d_count_FC, o_count_FF, o_count_FC;
HYPRE_Int n_Fpts;
HYPRE_Int n_new_Fpts;
HYPRE_Int *cpt_array, *fpt_array, *new_fpt_array;
HYPRE_Int start, stop;
HYPRE_Int num_threads;
num_threads = hypre_NumThreads();
/* MPI size and rank*/
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
fine_to_fine = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
big_convert = hypre_CTAlloc(HYPRE_BigInt, n_fine, HYPRE_MEMORY_HOST);
cpt_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST);
fpt_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST);
new_fpt_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,jj,start,stop,row,rowc,cpt,new_fpt,fpt,d_count_FC,d_count_FF,o_count_FC,o_count_FF)
#endif
{
HYPRE_Int my_thread_num = hypre_GetThreadNum();
start = (n_fine/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{
stop = n_fine;
}
else
{
stop = (n_fine/num_threads)*(my_thread_num+1);
}
for (i=start; i < stop; i++)
{
if (CF_marker[i] > 0)
{
cpt_array[my_thread_num+1]++;
}
else if (CF_marker[i] == -2)
{
new_fpt_array[my_thread_num+1]++;
fpt_array[my_thread_num+1]++;
}
else
{
fpt_array[my_thread_num+1]++;
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
for (i=1; i < num_threads; i++)
{
cpt_array[i+1] += cpt_array[i];
fpt_array[i+1] += fpt_array[i];
new_fpt_array[i+1] += new_fpt_array[i];
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
cpt = cpt_array[my_thread_num];
fpt = fpt_array[my_thread_num];
for (i=start; i < stop; i++)
{
if (CF_marker[i] > 0)
{
fine_to_coarse[i] = cpt++;
fine_to_fine[i] = -1;
}
else
{
fine_to_fine[i] = fpt++;
fine_to_coarse[i] = -1;
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
HYPRE_BigInt big_Fpts, big_new_Fpts;
n_Fpts = fpt_array[num_threads];
n_new_Fpts = new_fpt_array[num_threads];
big_Fpts = n_Fpts;
big_new_Fpts = n_new_Fpts;
#ifdef HYPRE_NO_GLOBAL_PARTITION
fpts_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
new_fpts_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
hypre_MPI_Scan(&big_Fpts, fpts_starts+1, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
hypre_MPI_Scan(&big_new_Fpts, new_fpts_starts+1, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
fpts_starts[0] = fpts_starts[1] - big_Fpts;
new_fpts_starts[0] = new_fpts_starts[1] - big_new_Fpts;
if (my_id == num_procs - 1)
{
total_global_new_fpts = new_fpts_starts[1];
total_global_fpts = fpts_starts[1];
total_global_cpts = cpts_starts[1];
}
hypre_MPI_Bcast(&total_global_new_fpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
hypre_MPI_Bcast(&total_global_fpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
fpts_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
new_fpts_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
hypre_MPI_Allgather(&big_Fpts, 1, HYPRE_MPI_BIG_INT, &fpts_starts[1], 1, HYPRE_MPI_BIG_INT, comm);
hypre_MPI_Allgather(&big_new_Fpts, 1, HYPRE_MPI_BIG_INT, &new_fpts_starts[1], 1, HYPRE_MPI_BIG_INT, comm);
fpts_starts[0] = 0;
new_fpts_starts[0] = 0;
for (i = 2; i < num_procs+1; i++)
{
fpts_starts[i] += fpts_starts[i-1];
new_fpts_starts[i] += new_fpts_starts[i-1];
}
total_global_new_fpts = new_fpts_starts[num_procs];
total_global_fpts = fpts_starts[num_procs];
total_global_cpts = cpts_starts[num_procs];
#endif
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
for (i=start; i < stop; i++)
{
if (CF_marker[i] > 0)
{
#ifdef HYPRE_NO_GLOBAL_PARTITION
big_convert[i] = (HYPRE_BigInt)fine_to_coarse[i] + cpts_starts[0];
#else
big_convert[i] = (HYPRE_BigInt)fine_to_coarse[i] + cpts_starts[my_id];
#endif
}
else
{
#ifdef HYPRE_NO_GLOBAL_PARTITION
big_convert[i] = (HYPRE_BigInt)fine_to_fine[i] + fpts_starts[0];
#else
big_convert[i] = (HYPRE_BigInt)fine_to_fine[i] + fpts_starts[my_id];
#endif
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
if (num_cols_A_offd)
{
CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
big_convert_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
fine_to_fine_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
}
index = 0;
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
big_buf_data = hypre_CTAlloc(HYPRE_BigInt, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
for (i = 0; i < num_sends; i++)
{
startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
int_buf_data[index] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
big_buf_data[index++] = big_convert[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data, big_convert_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
num_cols_offd_A_FC = 0;
num_cols_offd_A_FF = 0;
if (num_cols_A_offd)
{
for (i=0; i < num_cols_A_offd; i++)
{
if (CF_marker_offd[i] > 0)
{
fine_to_coarse_offd[i] = num_cols_offd_A_FC++;
fine_to_fine_offd[i] = -1;
}
else
{
fine_to_fine_offd[i] = num_cols_offd_A_FF++;
fine_to_coarse_offd[i] = -1;
}
}
col_map_offd_A_FF = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_A_FF, HYPRE_MEMORY_HOST);
col_map_offd_A_FC = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_A_FC, HYPRE_MEMORY_HOST);
cpt = 0;
fpt = 0;
for (i=0; i < num_cols_A_offd; i++)
{
if (CF_marker_offd[i] > 0)
{
col_map_offd_A_FC[cpt++] = big_convert_offd[i];
}
else
{
col_map_offd_A_FF[fpt++] = big_convert_offd[i];
}
}
}
A_FF_diag_i = hypre_CTAlloc(HYPRE_Int,n_new_Fpts+1, memory_location_P);
A_FC_diag_i = hypre_CTAlloc(HYPRE_Int,n_Fpts+1, memory_location_P);
A_FF_offd_i = hypre_CTAlloc(HYPRE_Int,n_new_Fpts+1, memory_location_P);
A_FC_offd_i = hypre_CTAlloc(HYPRE_Int,n_Fpts+1, memory_location_P);
D_lambda = hypre_CTAlloc(HYPRE_Real, n_Fpts, memory_location_P);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
d_count_FC = 0;
d_count_FF = 0;
o_count_FC = 0;
o_count_FF = 0;
row = new_fpt_array[my_thread_num];
rowc = fpt_array[my_thread_num];
for (i=start; i < stop; i++)
{
if (CF_marker[i] == -2)
{
row++;
rowc++;
d_count_FF++; /* account for diagonal element */
for (j = S_diag_i[i]; j < S_diag_i[i+1]; j++)
{
jj = S_diag_j[j];
if (CF_marker[jj] > 0)
d_count_FC++;
else
d_count_FF++;
}
A_FF_diag_i[row] = d_count_FF;
A_FC_diag_i[rowc] = d_count_FC;
for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++)
{
jj = S_offd_j[j];
if (CF_marker_offd[jj] > 0)
o_count_FC++;
else
o_count_FF++;
}
A_FF_offd_i[row] = o_count_FF;
A_FC_offd_i[rowc] = o_count_FC;
}
else if (CF_marker[i] < 0)
{
rowc++;
for (j = S_diag_i[i]; j < S_diag_i[i+1]; j++)
{
jj = S_diag_j[j];
if (CF_marker[jj] > 0)
d_count_FC++;
}
A_FC_diag_i[rowc] = d_count_FC;
for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++)
{
jj = S_offd_j[j];
if (CF_marker_offd[jj] > 0)
o_count_FC++;
}
A_FC_offd_i[rowc] = o_count_FC;
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
HYPRE_Int fpt2, new_fpt2;
for (i=1; i<num_threads+1; i++)
{
fpt = fpt_array[i];
new_fpt = new_fpt_array[i];
fpt2 = fpt_array[i-1];
new_fpt2 = new_fpt_array[i-1];
if (fpt != fpt2)
{
A_FC_diag_i[fpt] += A_FC_diag_i[fpt2];
A_FC_offd_i[fpt] += A_FC_offd_i[fpt2];
}
if (new_fpt != new_fpt2)
{
A_FF_diag_i[new_fpt] += A_FF_diag_i[new_fpt2];
A_FF_offd_i[new_fpt] += A_FF_offd_i[new_fpt2];
}
}
row = new_fpt_array[num_threads];
rowc = fpt_array[num_threads];
d_count_FC = A_FC_diag_i[rowc];
d_count_FF = A_FF_diag_i[row];
o_count_FC = A_FC_offd_i[rowc];
o_count_FF = A_FF_offd_i[row];
A_FF_diag_j = hypre_CTAlloc(HYPRE_Int,d_count_FF, memory_location_P);
A_FC_diag_j = hypre_CTAlloc(HYPRE_Int,d_count_FC, memory_location_P);
A_FF_offd_j = hypre_CTAlloc(HYPRE_Int,o_count_FF, memory_location_P);
A_FC_offd_j = hypre_CTAlloc(HYPRE_Int,o_count_FC, memory_location_P);
A_FF_diag_data = hypre_CTAlloc(HYPRE_Real,d_count_FF, memory_location_P);
A_FC_diag_data = hypre_CTAlloc(HYPRE_Real,d_count_FC, memory_location_P);
A_FF_offd_data = hypre_CTAlloc(HYPRE_Real,o_count_FF, memory_location_P);
A_FC_offd_data = hypre_CTAlloc(HYPRE_Real,o_count_FC, memory_location_P);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
row = new_fpt_array[my_thread_num];
rowc = fpt_array[my_thread_num];
d_count_FC = A_FC_diag_i[rowc];
d_count_FF = A_FF_diag_i[row];
o_count_FC = A_FC_offd_i[rowc];
o_count_FF = A_FF_offd_i[row];
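/* Second pass for the D3 variant: besides filling A_FF and A_FC, accumulate in
   D_lambda[rowc] the average of the A entries over the strong F-F connections of
   each F row. */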
for (i=start; i < stop; i++)
{
if (CF_marker[i] == -2)
{
HYPRE_Int jS, jA;
HYPRE_Real sum = 0;
row++;
jA = A_diag_i[i];
A_FF_diag_j[d_count_FF] = fine_to_fine[A_diag_j[jA]];
A_FF_diag_data[d_count_FF++] = A_diag_data[jA++];
for (j = S_diag_i[i]; j < S_diag_i[i+1]; j++)
{
jA = A_diag_i[i]+1;
jS = S_diag_j[j];
while (A_diag_j[jA] != jS) jA++;
if (CF_marker[S_diag_j[j]] > 0)
{
A_FC_diag_j[d_count_FC] = fine_to_coarse[A_diag_j[jA]];
A_FC_diag_data[d_count_FC++] = A_diag_data[jA++];
}
else
{
sum += 1;
D_lambda[rowc] += A_diag_data[jA];
A_FF_diag_j[d_count_FF] = fine_to_fine[A_diag_j[jA]];
A_FF_diag_data[d_count_FF++] = A_diag_data[jA++];
}
}
for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++)
{
jA = A_offd_i[i];
jS = S_offd_j[j];
while (jS != A_offd_j[jA]) jA++;
if (CF_marker_offd[S_offd_j[j]] > 0)
{
A_FC_offd_j[o_count_FC] = fine_to_coarse_offd[A_offd_j[jA]];
A_FC_offd_data[o_count_FC++] = A_offd_data[jA++];
}
else
{
sum += 1;
D_lambda[rowc] += A_offd_data[jA];
A_FF_offd_j[o_count_FF] = fine_to_fine_offd[A_offd_j[jA]];
A_FF_offd_data[o_count_FF++] = A_offd_data[jA++];
}
}
if (sum) D_lambda[rowc] = D_lambda[rowc]/sum;
rowc++;
A_FF_diag_i[row] = d_count_FF;
A_FC_diag_i[rowc] = d_count_FC;
A_FF_offd_i[row] = o_count_FF;
A_FC_offd_i[rowc] = o_count_FC;
}
else if (CF_marker[i] < 0)
{
HYPRE_Int jS, jA;
HYPRE_Real sum = 0;
for (j = S_diag_i[i]; j < S_diag_i[i+1]; j++)
{
jA = A_diag_i[i]+1;
jS = S_diag_j[j];
while (A_diag_j[jA] != jS) jA++;
if (CF_marker[S_diag_j[j]] > 0)
{
A_FC_diag_j[d_count_FC] = fine_to_coarse[A_diag_j[jA]];
A_FC_diag_data[d_count_FC++] = A_diag_data[jA++];
}
else
{
sum += 1;
D_lambda[rowc] += A_diag_data[jA];
}
}
for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++)
{
jA = A_offd_i[i];
jS = S_offd_j[j];
while (jS != A_offd_j[jA]) jA++;
if (CF_marker_offd[S_offd_j[j]] > 0)
{
A_FC_offd_j[o_count_FC] = fine_to_coarse_offd[A_offd_j[jA]];
A_FC_offd_data[o_count_FC++] = A_offd_data[jA++];
}
else
{
sum += 1;
D_lambda[rowc] += A_offd_data[jA];
}
}
if (sum) D_lambda[rowc] = D_lambda[rowc]/sum;
rowc++;
A_FC_diag_i[rowc] = d_count_FC;
A_FC_offd_i[rowc] = o_count_FC;
}
}
} /*end parallel region */
A_FC = hypre_ParCSRMatrixCreate(comm,
total_global_fpts,
total_global_cpts,
fpts_starts,
cpts_starts,
num_cols_offd_A_FC,
A_FC_diag_i[n_Fpts],
A_FC_offd_i[n_Fpts]);
A_FF = hypre_ParCSRMatrixCreate(comm,
total_global_new_fpts,
total_global_fpts,
new_fpts_starts,
fpts_starts,
num_cols_offd_A_FF,
A_FF_diag_i[n_new_Fpts],
A_FF_offd_i[n_new_Fpts]);
A_FC_diag = hypre_ParCSRMatrixDiag(A_FC);
hypre_CSRMatrixData(A_FC_diag) = A_FC_diag_data;
hypre_CSRMatrixI(A_FC_diag) = A_FC_diag_i;
hypre_CSRMatrixJ(A_FC_diag) = A_FC_diag_j;
A_FC_offd = hypre_ParCSRMatrixOffd(A_FC);
hypre_CSRMatrixData(A_FC_offd) = A_FC_offd_data;
hypre_CSRMatrixI(A_FC_offd) = A_FC_offd_i;
hypre_CSRMatrixJ(A_FC_offd) = A_FC_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(A_FC) = 1;
hypre_ParCSRMatrixOwnsColStarts(A_FC) = 0;
hypre_ParCSRMatrixColMapOffd(A_FC) = col_map_offd_A_FC;
hypre_CSRMatrixMemoryLocation(A_FC_diag) = memory_location_P;
hypre_CSRMatrixMemoryLocation(A_FC_offd) = memory_location_P;
A_FF_diag = hypre_ParCSRMatrixDiag(A_FF);
hypre_CSRMatrixData(A_FF_diag) = A_FF_diag_data;
hypre_CSRMatrixI(A_FF_diag) = A_FF_diag_i;
hypre_CSRMatrixJ(A_FF_diag) = A_FF_diag_j;
A_FF_offd = hypre_ParCSRMatrixOffd(A_FF);
hypre_CSRMatrixData(A_FF_offd) = A_FF_offd_data;
hypre_CSRMatrixI(A_FF_offd) = A_FF_offd_i;
hypre_CSRMatrixJ(A_FF_offd) = A_FF_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(A_FF) = 1;
hypre_ParCSRMatrixOwnsColStarts(A_FF) = 0;
hypre_ParCSRMatrixColMapOffd(A_FF) = col_map_offd_A_FF;
hypre_CSRMatrixMemoryLocation(A_FF_diag) = memory_location_P;
hypre_CSRMatrixMemoryLocation(A_FF_offd) = memory_location_P;
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_fine, HYPRE_MEMORY_HOST);
hypre_TFree(big_convert, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_fine_offd, HYPRE_MEMORY_HOST);
hypre_TFree(big_convert_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(big_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(cpt_array, HYPRE_MEMORY_HOST);
hypre_TFree(fpt_array, HYPRE_MEMORY_HOST);
hypre_TFree(new_fpt_array, HYPRE_MEMORY_HOST);
*A_FC_ptr = A_FC;
*A_FF_ptr = A_FF;
*D_lambda_ptr = D_lambda;
return hypre_error_flag;
}
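/*
 * Illustrative call sketch (hypothetical driver, not part of this file), assuming
 * A, S, CF_marker and cpts_starts come from the preceding strength/coarsening phase:
 *
 *   hypre_ParCSRMatrix *A_FC = NULL, *A_FF = NULL;
 *   HYPRE_Real *D_lambda = NULL;
 *   hypre_ParCSRMatrixGenerateFFFCD3(A, CF_marker, cpts_starts, S,
 *                                    &A_FC, &A_FF, &D_lambda);
 *   // ... build the two-stage interpolation from A_FF, A_FC and D_lambda,
 *   // then destroy the matrices and free D_lambda when no longer needed.
 */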
|
rhs.c | //-------------------------------------------------------------------------//
// //
// This benchmark is an OpenMP C version of the NPB LU code. This OpenMP //
// C version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the OpenMP Fortran versions in //
// "NPB3.3-OMP" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this OpenMP C version to //
// cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include "applu.incl"
#include "timers.h"
//---------------------------------------------------------------------
// compute the right hand sides
//---------------------------------------------------------------------
void rhs()
{
//---------------------------------------------------------------------
// local variables
//---------------------------------------------------------------------
int i, j, k, m;
double q;
double tmp, utmp[ISIZ3][6], rtmp[ISIZ3][5];
double u21, u31, u41;
double u21i, u31i, u41i, u51i;
double u21j, u31j, u41j, u51j;
double u21k, u31k, u41k, u51k;
double u21im1, u31im1, u41im1, u51im1;
double u21jm1, u31jm1, u41jm1, u51jm1;
double u21km1, u31km1, u41km1, u51km1;
if (timeron) timer_start(t_rhs);
#pragma omp parallel default(shared) private(i,j,k,m,q,flux,tmp,utmp,rtmp,\
u51im1,u41im1,u31im1,u21im1,u51i,u41i,u31i,u21i,u21, \
u51jm1,u41jm1,u31jm1,u21jm1,u51j,u41j,u31j,u21j,u31, \
u51km1,u41km1,u31km1,u21km1,u51k,u41k,u31k,u21k,u41)
{
#pragma omp for schedule(static)
for (k = 0; k < nz; k++) {
for (j = 0; j < ny; j++) {
for (i = 0; i < nx; i++) {
for (m = 0; m < 5; m++) {
rsd[k][j][i][m] = - frct[k][j][i][m];
}
tmp = 1.0 / u[k][j][i][0];
rho_i[k][j][i] = tmp;
qs[k][j][i] = 0.50 * ( u[k][j][i][1] * u[k][j][i][1]
+ u[k][j][i][2] * u[k][j][i][2]
+ u[k][j][i][3] * u[k][j][i][3] )
* tmp;
}
}
}
#pragma omp master
if (timeron) timer_start(t_rhsx);
//---------------------------------------------------------------------
// xi-direction flux differences
//---------------------------------------------------------------------
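// flux[] holds the inviscid xi-direction Euler flux at each point:
//   ( rho*u, rho*u^2 + p, rho*u*v, rho*u*w, (E + p)*u ),  with p = C2*(E - q)
// and q = qs = 0.5*rho*(u^2 + v^2 + w^2).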
#pragma omp for schedule(static) nowait
for (k = 1; k < nz - 1; k++) {
for (j = jst; j < jend; j++) {
for (i = 0; i < nx; i++) {
flux[i][0] = u[k][j][i][1];
u21 = u[k][j][i][1] * rho_i[k][j][i];
q = qs[k][j][i];
flux[i][1] = u[k][j][i][1] * u21 + C2 * ( u[k][j][i][4] - q );
flux[i][2] = u[k][j][i][2] * u21;
flux[i][3] = u[k][j][i][3] * u21;
flux[i][4] = ( C1 * u[k][j][i][4] - C2 * q ) * u21;
}
for (i = ist; i < iend; i++) {
for (m = 0; m < 5; m++) {
rsd[k][j][i][m] = rsd[k][j][i][m]
- tx2 * ( flux[i+1][m] - flux[i-1][m] );
}
}
for (i = ist; i < nx; i++) {
tmp = rho_i[k][j][i];
u21i = tmp * u[k][j][i][1];
u31i = tmp * u[k][j][i][2];
u41i = tmp * u[k][j][i][3];
u51i = tmp * u[k][j][i][4];
tmp = rho_i[k][j][i-1];
u21im1 = tmp * u[k][j][i-1][1];
u31im1 = tmp * u[k][j][i-1][2];
u41im1 = tmp * u[k][j][i-1][3];
u51im1 = tmp * u[k][j][i-1][4];
flux[i][1] = (4.0/3.0) * tx3 * (u21i-u21im1);
flux[i][2] = tx3 * ( u31i - u31im1 );
flux[i][3] = tx3 * ( u41i - u41im1 );
flux[i][4] = 0.50 * ( 1.0 - C1*C5 )
* tx3 * ( ( u21i*u21i + u31i*u31i + u41i*u41i )
- ( u21im1*u21im1 + u31im1*u31im1 + u41im1*u41im1 ) )
+ (1.0/6.0)
* tx3 * ( u21i*u21i - u21im1*u21im1 )
+ C1 * C5 * tx3 * ( u51i - u51im1 );
}
for (i = ist; i < iend; i++) {
rsd[k][j][i][0] = rsd[k][j][i][0]
+ dx1 * tx1 * ( u[k][j][i-1][0]
- 2.0 * u[k][j][i][0]
+ u[k][j][i+1][0] );
rsd[k][j][i][1] = rsd[k][j][i][1]
+ tx3 * C3 * C4 * ( flux[i+1][1] - flux[i][1] )
+ dx2 * tx1 * ( u[k][j][i-1][1]
- 2.0 * u[k][j][i][1]
+ u[k][j][i+1][1] );
rsd[k][j][i][2] = rsd[k][j][i][2]
+ tx3 * C3 * C4 * ( flux[i+1][2] - flux[i][2] )
+ dx3 * tx1 * ( u[k][j][i-1][2]
- 2.0 * u[k][j][i][2]
+ u[k][j][i+1][2] );
rsd[k][j][i][3] = rsd[k][j][i][3]
+ tx3 * C3 * C4 * ( flux[i+1][3] - flux[i][3] )
+ dx4 * tx1 * ( u[k][j][i-1][3]
- 2.0 * u[k][j][i][3]
+ u[k][j][i+1][3] );
rsd[k][j][i][4] = rsd[k][j][i][4]
+ tx3 * C3 * C4 * ( flux[i+1][4] - flux[i][4] )
+ dx5 * tx1 * ( u[k][j][i-1][4]
- 2.0 * u[k][j][i][4]
+ u[k][j][i+1][4] );
}
//---------------------------------------------------------------------
// Fourth-order dissipation
//---------------------------------------------------------------------
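// Interior points use the five-point fourth-difference stencil
//   rsd_i -= dssp * ( u_{i-2} - 4*u_{i-1} + 6*u_i - 4*u_{i+1} + u_{i+2} ),
// while the two points next to each boundary use the one-sided variants below.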
for (m = 0; m < 5; m++) {
rsd[k][j][1][m] = rsd[k][j][1][m]
- dssp * ( + 5.0 * u[k][j][1][m]
- 4.0 * u[k][j][2][m]
+ u[k][j][3][m] );
rsd[k][j][2][m] = rsd[k][j][2][m]
- dssp * ( - 4.0 * u[k][j][1][m]
+ 6.0 * u[k][j][2][m]
- 4.0 * u[k][j][3][m]
+ u[k][j][4][m] );
}
for (i = 3; i < nx - 3; i++) {
for (m = 0; m < 5; m++) {
rsd[k][j][i][m] = rsd[k][j][i][m]
- dssp * ( u[k][j][i-2][m]
- 4.0 * u[k][j][i-1][m]
+ 6.0 * u[k][j][i][m]
- 4.0 * u[k][j][i+1][m]
+ u[k][j][i+2][m] );
}
}
for (m = 0; m < 5; m++) {
rsd[k][j][nx-3][m] = rsd[k][j][nx-3][m]
- dssp * ( u[k][j][nx-5][m]
- 4.0 * u[k][j][nx-4][m]
+ 6.0 * u[k][j][nx-3][m]
- 4.0 * u[k][j][nx-2][m] );
rsd[k][j][nx-2][m] = rsd[k][j][nx-2][m]
- dssp * ( u[k][j][nx-4][m]
- 4.0 * u[k][j][nx-3][m]
+ 5.0 * u[k][j][nx-2][m] );
}
}
}
#pragma omp master
{
if (timeron) timer_stop(t_rhsx);
if (timeron) timer_start(t_rhsy);
}
//---------------------------------------------------------------------
// eta-direction flux differences
//---------------------------------------------------------------------
#pragma omp for schedule(static)
for (k = 1; k < nz - 1; k++) {
for (i = ist; i < iend; i++) {
for (j = 0; j < ny; j++) {
flux[j][0] = u[k][j][i][2];
u31 = u[k][j][i][2] * rho_i[k][j][i];
q = qs[k][j][i];
flux[j][1] = u[k][j][i][1] * u31;
flux[j][2] = u[k][j][i][2] * u31 + C2 * (u[k][j][i][4]-q);
flux[j][3] = u[k][j][i][3] * u31;
flux[j][4] = ( C1 * u[k][j][i][4] - C2 * q ) * u31;
}
for (j = jst; j < jend; j++) {
for (m = 0; m < 5; m++) {
rsd[k][j][i][m] = rsd[k][j][i][m]
- ty2 * ( flux[j+1][m] - flux[j-1][m] );
}
}
for (j = jst; j < ny; j++) {
tmp = rho_i[k][j][i];
u21j = tmp * u[k][j][i][1];
u31j = tmp * u[k][j][i][2];
u41j = tmp * u[k][j][i][3];
u51j = tmp * u[k][j][i][4];
tmp = rho_i[k][j-1][i];
u21jm1 = tmp * u[k][j-1][i][1];
u31jm1 = tmp * u[k][j-1][i][2];
u41jm1 = tmp * u[k][j-1][i][3];
u51jm1 = tmp * u[k][j-1][i][4];
flux[j][1] = ty3 * ( u21j - u21jm1 );
flux[j][2] = (4.0/3.0) * ty3 * (u31j-u31jm1);
flux[j][3] = ty3 * ( u41j - u41jm1 );
flux[j][4] = 0.50 * ( 1.0 - C1*C5 )
* ty3 * ( ( u21j*u21j + u31j*u31j + u41j*u41j )
- ( u21jm1*u21jm1 + u31jm1*u31jm1 + u41jm1*u41jm1 ) )
+ (1.0/6.0)
* ty3 * ( u31j*u31j - u31jm1*u31jm1 )
+ C1 * C5 * ty3 * ( u51j - u51jm1 );
}
for (j = jst; j < jend; j++) {
rsd[k][j][i][0] = rsd[k][j][i][0]
+ dy1 * ty1 * ( u[k][j-1][i][0]
- 2.0 * u[k][j][i][0]
+ u[k][j+1][i][0] );
rsd[k][j][i][1] = rsd[k][j][i][1]
+ ty3 * C3 * C4 * ( flux[j+1][1] - flux[j][1] )
+ dy2 * ty1 * ( u[k][j-1][i][1]
- 2.0 * u[k][j][i][1]
+ u[k][j+1][i][1] );
rsd[k][j][i][2] = rsd[k][j][i][2]
+ ty3 * C3 * C4 * ( flux[j+1][2] - flux[j][2] )
+ dy3 * ty1 * ( u[k][j-1][i][2]
- 2.0 * u[k][j][i][2]
+ u[k][j+1][i][2] );
rsd[k][j][i][3] = rsd[k][j][i][3]
+ ty3 * C3 * C4 * ( flux[j+1][3] - flux[j][3] )
+ dy4 * ty1 * ( u[k][j-1][i][3]
- 2.0 * u[k][j][i][3]
+ u[k][j+1][i][3] );
rsd[k][j][i][4] = rsd[k][j][i][4]
+ ty3 * C3 * C4 * ( flux[j+1][4] - flux[j][4] )
+ dy5 * ty1 * ( u[k][j-1][i][4]
- 2.0 * u[k][j][i][4]
+ u[k][j+1][i][4] );
}
}
//---------------------------------------------------------------------
// fourth-order dissipation
//---------------------------------------------------------------------
for (i = ist; i < iend; i++) {
for (m = 0; m < 5; m++) {
rsd[k][1][i][m] = rsd[k][1][i][m]
- dssp * ( + 5.0 * u[k][1][i][m]
- 4.0 * u[k][2][i][m]
+ u[k][3][i][m] );
rsd[k][2][i][m] = rsd[k][2][i][m]
- dssp * ( - 4.0 * u[k][1][i][m]
+ 6.0 * u[k][2][i][m]
- 4.0 * u[k][3][i][m]
+ u[k][4][i][m] );
}
}
for (j = 3; j < ny - 3; j++) {
for (i = ist; i < iend; i++) {
for (m = 0; m < 5; m++) {
rsd[k][j][i][m] = rsd[k][j][i][m]
- dssp * ( u[k][j-2][i][m]
- 4.0 * u[k][j-1][i][m]
+ 6.0 * u[k][j][i][m]
- 4.0 * u[k][j+1][i][m]
+ u[k][j+2][i][m] );
}
}
}
for (i = ist; i < iend; i++) {
for (m = 0; m < 5; m++) {
rsd[k][ny-3][i][m] = rsd[k][ny-3][i][m]
- dssp * ( u[k][ny-5][i][m]
- 4.0 * u[k][ny-4][i][m]
+ 6.0 * u[k][ny-3][i][m]
- 4.0 * u[k][ny-2][i][m] );
rsd[k][ny-2][i][m] = rsd[k][ny-2][i][m]
- dssp * ( u[k][ny-4][i][m]
- 4.0 * u[k][ny-3][i][m]
+ 5.0 * u[k][ny-2][i][m] );
}
}
}
#pragma omp master
{
if (timeron) timer_stop(t_rhsy);
if (timeron) timer_start(t_rhsz);
}
//---------------------------------------------------------------------
// zeta-direction flux differences
//---------------------------------------------------------------------
#pragma omp for schedule(static) nowait
for (j = jst; j < jend; j++) {
for (i = ist; i < iend; i++) {
for (k = 0; k < nz; k++) {
utmp[k][0] = u[k][j][i][0];
utmp[k][1] = u[k][j][i][1];
utmp[k][2] = u[k][j][i][2];
utmp[k][3] = u[k][j][i][3];
utmp[k][4] = u[k][j][i][4];
utmp[k][5] = rho_i[k][j][i];
}
for (k = 0; k < nz; k++) {
flux[k][0] = utmp[k][3];
u41 = utmp[k][3] * utmp[k][5];
q = qs[k][j][i];
flux[k][1] = utmp[k][1] * u41;
flux[k][2] = utmp[k][2] * u41;
flux[k][3] = utmp[k][3] * u41 + C2 * (utmp[k][4]-q);
flux[k][4] = ( C1 * utmp[k][4] - C2 * q ) * u41;
}
for (k = 1; k < nz - 1; k++) {
for (m = 0; m < 5; m++) {
rtmp[k][m] = rsd[k][j][i][m]
- tz2 * ( flux[k+1][m] - flux[k-1][m] );
}
}
for (k = 1; k < nz; k++) {
tmp = utmp[k][5];
u21k = tmp * utmp[k][1];
u31k = tmp * utmp[k][2];
u41k = tmp * utmp[k][3];
u51k = tmp * utmp[k][4];
tmp = utmp[k-1][5];
u21km1 = tmp * utmp[k-1][1];
u31km1 = tmp * utmp[k-1][2];
u41km1 = tmp * utmp[k-1][3];
u51km1 = tmp * utmp[k-1][4];
flux[k][1] = tz3 * ( u21k - u21km1 );
flux[k][2] = tz3 * ( u31k - u31km1 );
flux[k][3] = (4.0/3.0) * tz3 * (u41k-u41km1);
flux[k][4] = 0.50 * ( 1.0 - C1*C5 )
* tz3 * ( ( u21k*u21k + u31k*u31k + u41k*u41k )
- ( u21km1*u21km1 + u31km1*u31km1 + u41km1*u41km1 ) )
+ (1.0/6.0)
* tz3 * ( u41k*u41k - u41km1*u41km1 )
+ C1 * C5 * tz3 * ( u51k - u51km1 );
}
for (k = 1; k < nz - 1; k++) {
rtmp[k][0] = rtmp[k][0]
+ dz1 * tz1 * ( utmp[k-1][0]
- 2.0 * utmp[k][0]
+ utmp[k+1][0] );
rtmp[k][1] = rtmp[k][1]
+ tz3 * C3 * C4 * ( flux[k+1][1] - flux[k][1] )
+ dz2 * tz1 * ( utmp[k-1][1]
- 2.0 * utmp[k][1]
+ utmp[k+1][1] );
rtmp[k][2] = rtmp[k][2]
+ tz3 * C3 * C4 * ( flux[k+1][2] - flux[k][2] )
+ dz3 * tz1 * ( utmp[k-1][2]
- 2.0 * utmp[k][2]
+ utmp[k+1][2] );
rtmp[k][3] = rtmp[k][3]
+ tz3 * C3 * C4 * ( flux[k+1][3] - flux[k][3] )
+ dz4 * tz1 * ( utmp[k-1][3]
- 2.0 * utmp[k][3]
+ utmp[k+1][3] );
rtmp[k][4] = rtmp[k][4]
+ tz3 * C3 * C4 * ( flux[k+1][4] - flux[k][4] )
+ dz5 * tz1 * ( utmp[k-1][4]
- 2.0 * utmp[k][4]
+ utmp[k+1][4] );
}
//---------------------------------------------------------------------
// fourth-order dissipation
//---------------------------------------------------------------------
for (m = 0; m < 5; m++) {
rsd[1][j][i][m] = rtmp[1][m]
- dssp * ( + 5.0 * utmp[1][m]
- 4.0 * utmp[2][m]
+ utmp[3][m] );
rsd[2][j][i][m] = rtmp[2][m]
- dssp * ( - 4.0 * utmp[1][m]
+ 6.0 * utmp[2][m]
- 4.0 * utmp[3][m]
+ utmp[4][m] );
}
for (k = 3; k < nz - 3; k++) {
for (m = 0; m < 5; m++) {
rsd[k][j][i][m] = rtmp[k][m]
- dssp * ( utmp[k-2][m]
- 4.0 * utmp[k-1][m]
+ 6.0 * utmp[k][m]
- 4.0 * utmp[k+1][m]
+ utmp[k+2][m] );
}
}
for (m = 0; m < 5; m++) {
rsd[nz-3][j][i][m] = rtmp[nz-3][m]
- dssp * ( utmp[nz-5][m]
- 4.0 * utmp[nz-4][m]
+ 6.0 * utmp[nz-3][m]
- 4.0 * utmp[nz-2][m] );
rsd[nz-2][j][i][m] = rtmp[nz-2][m]
- dssp * ( utmp[nz-4][m]
- 4.0 * utmp[nz-3][m]
+ 5.0 * utmp[nz-2][m] );
}
}
}
} //end parallel
if (timeron) timer_stop(t_rhsz);
if (timeron) timer_stop(t_rhs);
}
|
GB_unaryop__minv_uint32_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint32_int16
// op(A') function: GB_tran__minv_uint32_int16
// C type: uint32_t
// A type: int16_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 32)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 32) ;
// casting
#define GB_CASTING(z, aij) \
uint32_t z = (uint32_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT32 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_uint32_int16
(
uint32_t *Cx, // Cx and Ax may be aliased
int16_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_uint32_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
memcpy-tutorial.c | // -----------------------------------------------------------------------------
//
// "CAPIPrecis"
//
// -----------------------------------------------------------------------------
// Copyright (c) 2014-2019 All rights reserved
// -----------------------------------------------------------------------------
// Author : Abdullah Mughrabi
// Email : atmughra@ncsu.edu||atmughrabi@gmail.com
// File : memcpy.c
// Create : 2019-09-28 14:41:30
// Revise : 2019-11-29 11:17:40
// Editor : Abdullah Mughrabi
// -----------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include <omp.h>
#include "mt19937.h"
#include "timer.h"
#include "myMalloc.h"
#include "config.h"
#include "memcpy-tutorial.h"
struct DataArraysTut *newDataArraysTut(struct Arguments *arguments){
struct DataArraysTut *dataArraysTut = (struct DataArraysTut *) my_malloc(sizeof(struct DataArraysTut));
dataArraysTut->size = arguments->size;
dataArraysTut->array_send = (uint32_t *) my_malloc(sizeof(uint32_t)* (dataArraysTut->size));
dataArraysTut->array_receive = (uint32_t *) my_malloc(sizeof(uint32_t)* (dataArraysTut->size));
return dataArraysTut;
}
void freeDataArraysTut(struct DataArraysTut *dataArraysTut){
if(dataArraysTut){
if(dataArraysTut->array_send)
free(dataArraysTut->array_send);
if(dataArraysTut->array_receive)
free(dataArraysTut->array_receive);
free(dataArraysTut);
}
}
void initializeDataArraysTut(struct DataArraysTut *dataArraysTut){
uint64_t i;
#pragma omp parallel for
for(i = 0; i < dataArraysTut->size; i++)
{
dataArraysTut->array_send[i] = i;
dataArraysTut->array_receive[i] = 0;
}
}
void copyDataArraysTut(struct DataArraysTut *dataArraysTut, struct Arguments *arguments){
uint64_t i;
#pragma omp parallel for
for(i = 0; i < dataArraysTut->size; i++)
{
//generate READ_CL_NA array_send[i] // read engine
//generate WRITE_CL array_receive[i] // write engine
dataArraysTut->array_receive[i] = dataArraysTut->array_send[i];
}
}
uint64_t compareDataArraysTut(struct DataArraysTut *dataArraysTut){
uint64_t mismatch = 0;
uint64_t i;
// each thread counts mismatches privately; reduction(+) sums them into the result
#pragma omp parallel for shared(dataArraysTut) reduction(+: mismatch)
for(i = 0; i < dataArraysTut->size; i++)
{
if(dataArraysTut->array_receive[i] != dataArraysTut->array_send[i]){
// printf("[%llu] %u != %u\n", i, dataArraysTut->array_receive[i], dataArraysTut->array_send[i]);
mismatch++;
}
}
return mismatch;
} |
GB_binop__plus_fp32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__plus_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__plus_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__plus_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__plus_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_fp32)
// A*D function (colscale): GB (_AxD__plus_fp32)
// D*A function (rowscale): GB (_DxB__plus_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__plus_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__plus_fp32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_fp32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_fp32)
// C=scalar+B GB (_bind1st__plus_fp32)
// C=scalar+B' GB (_bind1st_tran__plus_fp32)
// C=A+scalar GB (_bind2nd__plus_fp32)
// C=A'+scalar GB (_bind2nd_tran__plus_fp32)
// C type: float
// A type: float
// A pattern? 0
// B type: float
// B pattern? 0
// BinaryOp: cij = (aij + bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x + y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_FP32 || GxB_NO_PLUS_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__plus_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__plus_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__plus_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__plus_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__plus_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__plus_fp32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__plus_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
float alpha_scalar ;
float beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((float *) alpha_scalar_in)) ;
beta_scalar = (*((float *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__plus_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__plus_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__plus_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__plus_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__plus_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
Cx [p] = (x + bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__plus_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
Cx [p] = (aij + y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x + aij) ; \
}
GrB_Info GB (_bind1st_tran__plus_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij + y) ; \
}
GrB_Info GB (_bind2nd_tran__plus_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__abs_int16_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int16_uint16
// op(A') function: GB_tran__abs_int16_uint16
// C type: int16_t
// A type: uint16_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, x) \
int16_t z = (int16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT16 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_int16_uint16
(
int16_t *restrict Cx,
const uint16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_int16_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
convolution_bf16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void convolution_bf16s(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_bf16, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int maxk = kernel_w * kernel_h;
// kernel offsets
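// space_ofs[k] is the flattened input offset (in elements) of the k-th kernel
// tap relative to the first tap, with dilation applied; `gap` jumps to the
// start of the next kernel row within an input plane of width w.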
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
const float* bias_data_ptr = bias_data;
// num_output
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
unsigned short* outptr = top_blob.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
float sum = 0.f;
if (bias_data_ptr)
{
sum = bias_data_ptr[p];
}
const unsigned short* kptr = (const unsigned short*)weight_data_bf16 + maxk * channels * p;
// channels
for (int q = 0; q < channels; q++)
{
const Mat m = bottom_blob.channel(q);
const unsigned short* sptr = m.row<unsigned short>(i * stride_h) + j * stride_w;
for (int k = 0; k < maxk; k++)
{
float val = bfloat16_to_float32(sptr[space_ofs[k]]);
float wt = bfloat16_to_float32(kptr[k]);
sum += val * wt;
}
kptr += maxk;
}
sum = activation_ss(sum, activation_type, activation_params);
outptr[j] = float32_to_bfloat16(sum);
}
outptr += outw;
}
}
}
|
par_grid_list.c | #include "par_grid_list.h"
int main(int argc, char* argv[]){
char* file; /**< Input data file name */
int generations = 0; /**< Number of generations to process */
int cube_size = 0; /**< Size of the 3D space */
GraphNode*** graph; /**< Graph representation - 2D array of lists */
List* update; /**< Contains the information of nodes that might change state */
/* Iterator variables */
int g, i, j;
GraphNode* g_it = NULL;
Node* it = NULL;
/* Lock variables */
omp_lock_t list_lock;
omp_lock_t** graph_lock;
parseArgs(argc, argv, &file, &generations);
/* Create an empty list, with size 0 */
update = listCreate();
graph = parseFile(file, update, &cube_size);
/* Initialize lock variables */
omp_init_lock(&list_lock);
graph_lock = (omp_lock_t**)malloc(cube_size * sizeof(omp_lock_t*));
for(i = 0; i < cube_size; i++){
graph_lock[i] = (omp_lock_t*) malloc(cube_size * sizeof(omp_lock_t));
for(j = 0; j < cube_size; j++){
omp_init_lock(&(graph_lock[i][j]));
}
}
double start = omp_get_wtime(); // Start Timer
for(g = 1; g <= generations; g++){
/* Convert list to vector */
i = 0;
int size = update->size;
Node** vector = (Node**) malloc(sizeof(Node*) * size);
for (it = listFirst(update); it != NULL; it = it->next){
vector[i++] = it;
}
Node** proccessed;
/* For each live node, inform its neighbors */
#pragma omp parallel
{
#pragma omp for
for (i = 0; i < size; i++){
//printf("Neighbours processing by thread: %d\n", omp_get_thread_num());
visitNeighbours(graph, graph_lock, cube_size, update, &list_lock, vector[i]->x, vector[i]->y, vector[i]->z);
}
/* Convert list to vector */
#pragma omp single
{
i = 0;
size = update->size;
proccessed = (Node**) malloc(sizeof(Node*) * size);
for (it = listFirst(update); it != NULL; it = it->next){
proccessed[i++] = it;
}
}
/* Update graph and update set */
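/* Rules applied below: a live cell stays alive with 2 to 4 live neighbours,
otherwise it is removed; a dead candidate becomes alive with exactly 2 or 3
live neighbours, otherwise it is dropped from the update set. */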
#pragma omp for
for (i = 0; i < size; i++){
//printf("Update graph processing by thread: %d\n", omp_get_thread_num());
Node* it = proccessed[i];
unsigned char live_neighbours = it->ptr->neighbours;
it->ptr->neighbours = 0;
if(it->ptr->state == ALIVE){
if(live_neighbours < 2 || live_neighbours > 4){
graphNodeRemove(&(graph[it->x][it->y]), it->z, &(graph_lock[it->x][it->y]));
it->x = REMOVE;
}
}else{
if(live_neighbours == 2 || live_neighbours == 3){
it->ptr->state = ALIVE;
}
else{
graphNodeRemove(&(graph[it->x][it->y]), it->z, &(graph_lock[it->x][it->y]));
it->x = REMOVE;
}
}
}
}
/* Clean dead cells from the set */
listCleanup(update);
free(proccessed);
free(vector);
}
double end = omp_get_wtime(); // Stop Timer
/* Print the final set of live cells */
printAndSortActive(graph, cube_size);
time_print("%f\n", end - start);
/* Free resources */
freeGraph(graph, cube_size);
listDelete(update);
omp_destroy_lock(&list_lock);
for(i = 0; i < cube_size; i++){
for(j=0; j<cube_size; j++){
omp_destroy_lock(&(graph_lock[i][j]));
}
}
free(file);
return 0;
}
/**************************************************************************/
void visitNeighbours(GraphNode*** graph, omp_lock_t** graph_lock, int cube_size,
List* list, omp_lock_t* list_lock,
coordinate x, coordinate y, coordinate z){
GraphNode* ptr;
coordinate x1, x2, y1, y2, z1, z2;
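/* Neighbour coordinates wrap around the cube edges (toroidal boundaries) */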
x1 = (x+1)%cube_size; x2 = (x-1) < 0 ? (cube_size-1) : (x-1);
y1 = (y+1)%cube_size; y2 = (y-1) < 0 ? (cube_size-1) : (y-1);
z1 = (z+1)%cube_size; z2 = (z-1) < 0 ? (cube_size-1) : (z-1);
/* If a cell is visited for the first time, add it to the update list, for fast access */
if(graphNodeAddNeighbour(&(graph[x1][y]), z, &ptr, &graph_lock[x1][y])){
listInsertLock(list, x1, y, z, ptr, list_lock);
}
if(graphNodeAddNeighbour(&(graph[x2][y]), z, &ptr, &graph_lock[x2][y])){
listInsertLock(list, x2, y, z, ptr, list_lock);
}
if(graphNodeAddNeighbour(&(graph[x][y1]), z, &ptr, &graph_lock[x][y1])){
listInsertLock(list, x, y1, z, ptr, list_lock);
}
if(graphNodeAddNeighbour(&(graph[x][y2]), z, &ptr, &graph_lock[x][y2])){
listInsertLock(list, x, y2, z, ptr, list_lock);
}
if(graphNodeAddNeighbour(&(graph[x][y]), z1, &ptr, &graph_lock[x][y])){
listInsertLock(list, x, y, z1, ptr, list_lock);
}
if(graphNodeAddNeighbour(&(graph[x][y]), z2, &ptr, &graph_lock[x][y])){
listInsertLock(list, x, y, z2, ptr, list_lock);
}
}
/**************************************************************************/
GraphNode*** initGraph(int size){
int i,j;
GraphNode*** graph = (GraphNode***) malloc(sizeof(GraphNode**) * size);
for (i = 0; i < size; i++){
graph[i] = (GraphNode**) malloc(sizeof(GraphNode*) * size);
for (j = 0; j < size; j++){
graph[i][j] = NULL;
}
}
return graph;
}
/**************************************************************************/
void freeGraph(GraphNode*** graph, int size){
int i, j;
if (graph != NULL){
for (i = 0; i < size; i++){
for (j = 0; j < size; j++){
graphNodeDelete(graph[i][j]);
}
free(graph[i]);
}
free(graph);
}
}
/**************************************************************************/
void printAndSortActive(GraphNode*** graph, int cube_size){
int x,y;
GraphNode* it;
for (x = 0; x < cube_size; ++x){
for (y = 0; y < cube_size; ++y){
/* Sort the list by ascending coordinate z */
graphNodeSort(&(graph[x][y]));
for (it = graph[x][y]; it != NULL; it = it->next){
/* At the end of each generation, the graph is guaranteed to only have live cells */
out_print("%d %d %d\n", x, y, it->z);
}
}
}
}
/**************************************************************************/
void parseArgs(int argc, char* argv[], char** file, int* generations){
if (argc == 3){
char* file_name = malloc(sizeof(char) * (strlen(argv[1]) + 1));
strcpy(file_name, argv[1]);
*file = file_name;
*generations = atoi(argv[2]);
if (*generations > 0 && file_name != NULL)
return;
}
printf("Usage: %s [data_file.in] [number_generations]", argv[0]);
exit(EXIT_FAILURE);
}
/**************************************************************************/
GraphNode*** parseFile(char* file, List* list, int* cube_size){
int first = 0;
char line[BUFFER_SIZE];
int x, y, z;
FILE* fp = fopen(file, "r");
if(fp == NULL){
err_print("Please input a valid file name");
exit(EXIT_FAILURE);
}
GraphNode*** graph;
while(fgets(line, sizeof(line), fp)){
if(!first){
if(sscanf(line, "%d\n", cube_size) == 1){
first = 1;
graph = initGraph(*cube_size);
}
}else{
if(sscanf(line, "%d %d %d\n", &x, &y, &z) == 3){
/* Insert live nodes in the graph and the update set */
graph[x][y] = graphNodeInsert(graph[x][y], z, ALIVE);
listInsert(list, x, y, z, (GraphNode*) (graph[x][y]));
}
}
}
fclose(fp);
return graph;
} |
matrix-puzzle.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
void clear_stream(FILE *in)
{
int ch;
clearerr(in);
do
ch = getc(in);
while (ch != '\n' && ch != EOF);
clearerr(in);
}
int main(int argc, char *argv[])
{
int rows, cols;
int i, j, r;
// accept user input with validation
printf("# of rows: ");
fflush(stdout);
while (scanf("%d", &rows) != 1) {
clear_stream(stdin);
printf("Invalid integer!\n # of rows: ");
fflush(stdout);
}
// -------------------------
printf("# of columns: ");
fflush(stdout);
while (scanf("%d", &cols) != 1) {
clear_stream(stdin);
printf("Invalid integer!\n # of columns: ");
fflush(stdout);
}
/* make 2D dynamic array */
char** puzzle = (char**) malloc (rows * sizeof(char*));
for (i=0; i < rows; i++)
puzzle[i] = (char*) malloc(cols * sizeof(char));
srand(time(NULL));
for (i=0; i < rows; i++)
for (j=0; j < cols; j++)
{
r = rand() % 2;
if (r == 0)
puzzle[i][j] = 'O';
else
puzzle[i][j] = 'X';
}
/* print the puzzle array */
for (i=0; i < rows; i++)
{
for (j=0; j < cols; j++)
printf("%c ", puzzle[i][j]);
printf("\n");
}
// Parallel region in OpenMP: the puzzle is in shared memory but the loop
// counters are private; work is distributed statically between threads
#pragma omp parallel shared(puzzle) private(i, j)
{
#pragma omp for schedule(static)
for (i=0; i < rows; i++)
{
for (j=1; j < cols-1; j++)
{
// if there's O surrounded by Xs on left and right sides, change it to X
if (puzzle[i][j-1] == 'X' && puzzle[i][j+1] == 'X' && puzzle[i][j] == 'O')
puzzle[i][j] = 'X';
// for debugging only
// printf ("IN thread %d puzzle[%d][%d] = %c\n", omp_get_thread_num(), i, j, puzzle[i][j]);
}
}
} /* end of parallel region */
/* print the puzzle's solution */
printf("\n");
for (i=0; i < rows; i++)
{
for (j=0; j < cols; j++)
printf("%c ", puzzle[i][j]);
printf("\n");
}
}
|
simd_utils_avx_int32.h | /*
* Project : SIMD_Utils
* Version : 0.2.2
* Author : JishinMaster
* Licence : BSD-2
*/
#pragma once
#include <stdint.h>
#include "immintrin.h"
#ifdef __AVX2__
static inline void add256s(int32_t *src1, int32_t *src2, int32_t *dst, int len)
{
int stop_len = len / AVX_LEN_INT32;
stop_len *= AVX_LEN_INT32;
if (areAligned3((uintptr_t) (src1), (uintptr_t) (src2), (uintptr_t) (dst), AVX_LEN_BYTES)) {
for (int i = 0; i < stop_len; i += AVX_LEN_INT32) {
_mm256_store_si256((__m256i *) (dst + i), _mm256_add_epi32(_mm256_load_si256((__m256i *) (src1 + i)),
_mm256_load_si256((__m256i *) (src2 + i))));
}
} else {
for (int i = 0; i < stop_len; i += AVX_LEN_INT32) {
_mm256_storeu_si256((__m256i *) (dst + i), _mm256_add_epi32(_mm256_loadu_si256((__m256i *) (src1 + i)),
_mm256_loadu_si256((__m256i *) (src2 + i))));
}
}
for (int i = stop_len; i < len; i++) {
dst[i] = src1[i] + src2[i];
}
}
#if 0
//Work in progress
static inline void mul256s(int32_t *src1, int32_t *src2, int32_t *dst, int len)
{
int stop_len = len / AVX_LEN_INT32;
stop_len *= AVX_LEN_INT32;
if (areAligned3((uintptr_t)(src1), (uintptr_t)(src2), (uintptr_t)(dst), AVX_LEN_BYTES)) {
for (int i = 0; i < stop_len; i += AVX_LEN_INT32) {
_mm256_store_si256((__m256i *) (dst + i), _mm256_mullo_epi32(_mm256_load_si256((__m256i *) (src1 + i)), _mm256_load_si256((__m256i *) (src2 + i))));
}
} else {
for (int i = 0; i < stop_len; i += AVX_LEN_INT32) {
_mm256_storeu_si256((__m256i *) (dst + i), _mm256_mullo_epi32(_mm256_loadu_si256((__m256i *) (src1 + i)), _mm256_loadu_si256((__m256i *) (src2 + i))));
}
}
for (int i = stop_len; i < len; i++) {
dst[i] = src1[i] * src2[i];
}
}
#endif
static inline void sub256s(int32_t *src1, int32_t *src2, int32_t *dst, int len)
{
int stop_len = len / AVX_LEN_INT32;
stop_len *= AVX_LEN_INT32;
if (areAligned3((uintptr_t) (src1), (uintptr_t) (src2), (uintptr_t) (dst), AVX_LEN_BYTES)) {
for (int i = 0; i < stop_len; i += AVX_LEN_INT32) {
_mm256_store_si256((__m256i *) (dst + i), _mm256_sub_epi32(_mm256_load_si256((__m256i *) (src1 + i)),
_mm256_load_si256((__m256i *) (src2 + i))));
}
} else {
for (int i = 0; i < stop_len; i += AVX_LEN_INT32) {
_mm256_storeu_si256((__m256i *) (dst + i), _mm256_sub_epi32(_mm256_loadu_si256((__m256i *) (src1 + i)),
_mm256_loadu_si256((__m256i *) (src2 + i))));
}
}
for (int i = stop_len; i < len; i++) {
dst[i] = src1[i] - src2[i];
}
}
static inline void addc256s(int32_t *src, int32_t value, int32_t *dst, int len)
{
int stop_len = len / AVX_LEN_INT32;
stop_len *= AVX_LEN_INT32;
const v8si tmp = _mm256_set1_epi32(value);
if (areAligned2((uintptr_t) (src), (uintptr_t) (dst), AVX_LEN_BYTES)) {
for (int i = 0; i < stop_len; i += AVX_LEN_INT32) {
_mm256_store_si256((__m256i *) (dst + i), _mm256_add_epi32(tmp, _mm256_load_si256((__m256i *) (src + i))));
}
} else {
for (int i = 0; i < stop_len; i += AVX_LEN_INT32) {
_mm256_storeu_si256((__m256i *) (dst + i), _mm256_add_epi32(tmp, _mm256_loadu_si256((__m256i *) (src + i))));
}
}
for (int i = stop_len; i < len; i++) {
dst[i] = src[i] + value;
}
}
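// vectorSlope256s fills dst[i] = offset + slope * i. Two ramp vectors
// (curVal, curVal2) cover 16 consecutive outputs per iteration and are both
// advanced by 16 * slope between iterations; the scalar tail handles the rest.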
static inline void vectorSlope256s(int *dst, int len, int offset, int slope)
{
v8si coef = _mm256_set_epi32(7 * slope, 6 * slope, 5 * slope, 4 * slope, 3 * slope, 2 * slope, slope, 0);
v8si slope16_vec = _mm256_set1_epi32(16 * slope);
v8si curVal = _mm256_add_epi32(_mm256_set1_epi32(offset), coef);
v8si curVal2 = _mm256_add_epi32(_mm256_set1_epi32(offset), coef);
curVal2 = _mm256_add_epi32(curVal2, _mm256_set1_epi32(8 * slope));
int stop_len = len / (2 * AVX_LEN_INT32);
stop_len *= (2 * AVX_LEN_INT32);
if (((uintptr_t) (const void *) (dst) % AVX_LEN_BYTES) == 0) {
_mm256_store_si256((__m256i *) (dst + 0), curVal);
_mm256_store_si256((__m256i *) (dst + AVX_LEN_INT32), curVal2);
} else {
_mm256_storeu_si256((__m256i *) (dst + 0), curVal);
_mm256_storeu_si256((__m256i *) (dst + AVX_LEN_INT32), curVal2);
}
if (((uintptr_t) (const void *) (dst) % AVX_LEN_BYTES) == 0) {
for (int i = 2 * AVX_LEN_INT32; i < stop_len; i += 2 * AVX_LEN_INT32) {
curVal = _mm256_add_epi32(curVal, slope16_vec);
_mm256_store_si256((__m256i *) (dst + i), curVal);
curVal2 = _mm256_add_epi32(curVal2, slope16_vec);
_mm256_store_si256((__m256i *) (dst + i + AVX_LEN_INT32), curVal2);
}
} else {
for (int i = 2 * AVX_LEN_INT32; i < stop_len; i += 2 * AVX_LEN_INT32) {
curVal = _mm256_add_epi32(curVal, slope16_vec);
_mm256_storeu_si256((__m256i *) (dst + i), curVal);
curVal2 = _mm256_add_epi32(curVal2, slope16_vec);
_mm256_storeu_si256((__m256i *) (dst + i + AVX_LEN_INT32), curVal2);
}
}
for (int i = stop_len; i < len; i++) {
dst[i] = offset + slope * i;
}
}
// Experimental
static inline void copy256s(int32_t *src, int32_t *dst, int len)
{
int stop_len = len / AVX_LEN_INT32;
stop_len *= AVX_LEN_INT32;
#ifdef OMP
#pragma omp parallel for schedule(auto)
#endif
for (int i = 0; i < stop_len; i += AVX_LEN_INT32) {
_mm256_store_si256((__m256i *) (dst + i), _mm256_load_si256((__m256i *) (src + i)));
}
for (int i = stop_len; i < len; i++) {
dst[i] = src[i];
}
}
static inline void copy256s_2(int32_t *src, int32_t *dst, int len)
{
int stop_len = len / (2 * AVX_LEN_INT32);
stop_len *= (2 * AVX_LEN_INT32);
#ifdef OMP
#pragma omp parallel for schedule(auto)
#endif
for (int i = 0; i < stop_len; i += 2 * AVX_LEN_INT32) {
__m256i tmp1 = _mm256_load_si256((__m256i *) (src + i));
__m256i tmp2 = _mm256_load_si256((__m256i *) (src + i + AVX_LEN_INT32));
_mm256_store_si256((__m256i *) (dst + i), tmp1);
_mm256_store_si256((__m256i *) (dst + i + AVX_LEN_INT32), tmp2);
}
for (int i = stop_len; i < len; i++) {
dst[i] = src[i];
}
}
static inline void fast_copy256s(int32_t *src, int32_t *dst, int len)
{
int stop_len = len / AVX_LEN_INT32;
stop_len *= AVX_LEN_INT32;
#ifdef OMP
#pragma omp parallel for schedule(auto)
#endif
for (int i = 0; i < stop_len; i += AVX_LEN_INT32) {
_mm256_stream_si256((__m256i *) (dst + i), _mm256_stream_load_si256((__m256i *) (src + i)));
}
_mm_mfence();
for (int i = stop_len; i < len; i++) {
dst[i] = src[i];
}
}
static inline void fast_copy256s_2(int32_t *src, int32_t *dst, int len)
{
int stop_len = len / (2 * AVX_LEN_INT32);
stop_len *= (2 * AVX_LEN_INT32);
#ifdef OMP
#pragma omp parallel for schedule(auto)
#endif
for (int i = 0; i < stop_len; i += 2 * AVX_LEN_INT32) {
__m256i tmp1 = _mm256_stream_load_si256((__m256i *) (src + i));
__m256i tmp2 = _mm256_stream_load_si256((__m256i *) (src + i + AVX_LEN_INT32));
_mm256_stream_si256((__m256i *) (dst + i), tmp1);
_mm256_stream_si256((__m256i *) (dst + i + AVX_LEN_INT32), tmp2);
}
_mm_mfence();
for (int i = stop_len; i < len; i++) {
dst[i] = src[i];
}
}
static inline void fast_copy256s_4(int32_t *src, int32_t *dst, int len)
{
int stop_len = len / (4 * AVX_LEN_INT32);
stop_len *= (4 * AVX_LEN_INT32);
#ifdef OMP
#pragma omp parallel for schedule(auto)
#endif
for (int i = 0; i < stop_len; i += 4 * AVX_LEN_INT32) {
__m256i tmp1 = _mm256_stream_load_si256((__m256i *) (src + i));
__m256i tmp2 = _mm256_stream_load_si256((__m256i *) (src + i + AVX_LEN_INT32));
__m256i tmp3 = _mm256_stream_load_si256((__m256i *) (src + i + 2 * AVX_LEN_INT32));
__m256i tmp4 = _mm256_stream_load_si256((__m256i *) (src + i + 3 * AVX_LEN_INT32));
_mm256_stream_si256((__m256i *) (dst + i), tmp1);
_mm256_stream_si256((__m256i *) (dst + i + AVX_LEN_INT32), tmp2);
_mm256_stream_si256((__m256i *) (dst + i + 2 * AVX_LEN_INT32), tmp3);
_mm256_stream_si256((__m256i *) (dst + i + 3 * AVX_LEN_INT32), tmp4);
}
_mm_mfence();
for (int i = stop_len; i < len; i++) {
dst[i] = src[i];
}
}
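// Branchless per-lane |a - b| helpers: a greater-than mask selects (a - b)
// where a > b and (b - a) elsewhere, then the two halves are OR-ed together.
// Results can wrap if the true difference does not fit in the signed element type.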
static inline __m256i _mm256_absdiff_epi16(__m256i a, __m256i b)
{
__m256i cmp, difab, difba;
cmp = _mm256_cmpgt_epi16(a, b);
difab = _mm256_sub_epi16(a, b);
difba = _mm256_sub_epi16(b, a);
difab = _mm256_and_si256(cmp, difab);
difba = _mm256_andnot_si256(cmp, difba);
return _mm256_or_si256(difab, difba);
}
static inline __m256i _mm256_absdiff_epi32(__m256i a, __m256i b)
{
__m256i cmp, difab, difba;
cmp = _mm256_cmpgt_epi32(a, b);
difab = _mm256_sub_epi32(a, b);
difba = _mm256_sub_epi32(b, a);
difab = _mm256_and_si256(cmp, difab);
difba = _mm256_andnot_si256(cmp, difba);
return _mm256_or_si256(difab, difba);
}
static inline __m256i _mm256_absdiff_epi8(__m256i a, __m256i b)
{
__m256i cmp, difab, difba;
cmp = _mm256_cmpgt_epi8(a, b);
difab = _mm256_sub_epi8(a, b);
difba = _mm256_sub_epi8(b, a);
difab = _mm256_and_si256(cmp, difab);
difba = _mm256_andnot_si256(cmp, difba);
return _mm256_or_si256(difab, difba);
}
static inline void absdiff16s_256s(int16_t *src1, int16_t *src2, int16_t *dst, int len)
{
int stop_len = len / AVX_LEN_INT16;
stop_len *= AVX_LEN_INT16;
if (areAligned3((uintptr_t) (src1), (uintptr_t) (src2), (uintptr_t) (dst), AVX_LEN_BYTES)) {
for (int i = 0; i < stop_len; i += AVX_LEN_INT16) {
__m256i a = _mm256_load_si256((__m256i *) (src1 + i));
__m256i b = _mm256_load_si256((__m256i *) (src2 + i));
_mm256_store_si256((__m256i *) (dst + i), _mm256_absdiff_epi16(a, b));
}
} else {
for (int i = 0; i < stop_len; i += AVX_LEN_INT16) {
__m256i a = _mm256_loadu_si256((__m256i *) (src1 + i));
__m256i b = _mm256_loadu_si256((__m256i *) (src2 + i));
_mm256_storeu_si256((__m256i *) (dst + i), _mm256_absdiff_epi16(a, b));
}
}
for (int i = stop_len; i < len; i++) {
dst[i] = abs(src1[i] - src2[i]);
}
}
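// Power spectrum of interleaved re/im int16 samples: _mm256_madd_epi16
// multiplies adjacent 16-bit pairs and adds each pair, yielding one
// re*re + im*im int32 result per complex input.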
static inline void powerspect16s_256s_interleaved(complex16s_t *src, int32_t *dst, int len)
{
int stop_len = len / AVX_LEN_INT32;
stop_len *= AVX_LEN_INT32;
int j = 0;
if (areAligned2((uintptr_t) (src), (uintptr_t) (dst), AVX_LEN_BYTES)) {
for (int i = 0; i < stop_len; i += AVX_LEN_INT32) {
__m256i reim = _mm256_load_si256((__m256i *) ((const int16_t *) src + j));
// print8i(reim); printf("\n");
_mm256_store_si256((__m256i *) (dst + i), _mm256_madd_epi16(reim, reim));
j += AVX_LEN_INT16;
}
} else {
for (int i = 0; i < stop_len; i += AVX_LEN_INT32) {
__m256i reim = _mm256_loadu_si256((__m256i *) ((const int16_t *) src + j));
_mm256_storeu_si256((__m256i *) (dst + i), _mm256_madd_epi16(reim, reim));
j += AVX_LEN_INT16;
}
}
for (int i = stop_len; i < len; i++) {
dst[i] = (int32_t) src[i].re * (int32_t) src[i].re + (int32_t) src[i].im * (int32_t) src[i].im;
}
}
#endif
|
GB_binop__bclr_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bclr_uint32
// A.*B function (eWiseMult): GB_AemultB__bclr_uint32
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bclr_uint32
// C+=b function (dense accum): GB_Cdense_accumb__bclr_uint32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bclr_uint32
// C=scalar+B GB_bind1st__bclr_uint32
// C=scalar+B' GB_bind1st_tran__bclr_uint32
// C=A+scalar GB_bind2nd__bclr_uint32
// C=A'+scalar GB_bind2nd_tran__bclr_uint32
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = GB_BITCLR (aij, bij, uint32_t, 32)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_BITCLR (x, y, uint32_t, 32) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BCLR || GxB_NO_UINT32 || GxB_NO_BCLR_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__bclr_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__bclr_uint32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__bclr_uint32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__bclr_uint32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__bclr_uint32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__bclr_uint32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t bij = Bx [p] ;
Cx [p] = GB_BITCLR (x, bij, uint32_t, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__bclr_uint32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t aij = Ax [p] ;
Cx [p] = GB_BITCLR (aij, y, uint32_t, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = GB_BITCLR (x, aij, uint32_t, 32) ; \
}
GrB_Info GB_bind1st_tran__bclr_uint32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = GB_BITCLR (aij, y, uint32_t, 32) ; \
}
GrB_Info GB_bind2nd_tran__bclr_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
md5test.c | /***
* How to compile:
* GCC 4.6.3: mpicc md5test.c -o md5test -fopenmp -lcrypto -lssl -std=c99
* GCC 6.3.0: mpicc md5test.c -o md5test -fopenmp -lcrypto -lssl
*
* At most 5 nodes
* - 1 master
* - The master only sends a message to the slaves saying "find this line and tell me whether you found it or not".
* - Therefore each slave will underuse the machine it is running on.
* - 5 slaves (6 books per slave)
* - It cannot be 4 slaves because there are 30 books.
* - That would result in 7.5 books per slave, which is not possible.
**/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <assert.h>
#include <openssl/md5.h>
#include <mpi.h>
#include <omp.h>
#define NUMBER_OF_BOOKS 30
#define NUMBER_OF_SLAVES 5
#define NUMBER_OF_BOOKS_BY_SLAVE 6
#define NUMBER_OF_THREADS 8
#define STOP_WHEN_FOUND 1
MPI_Status status; // structure that holds the return status
typedef struct Line1 {
char* str;
unsigned char* md5;
} Line;
typedef struct Book1 {
int number;
size_t lines_len;
Line* lines;
} Book;
char* file_to_str(char* filepath, char* filename) {
// https://stackoverflow.com/questions/3747086/reading-the-whole-text-file-into-a-char-array-in-c
FILE* fp = fopen(filepath, "rb");
long lSize;
char* buffer;
if (!fp) {
perror(filename);
exit(1);
}
fseek(fp, 0L, SEEK_END);
lSize = ftell(fp);
rewind(fp);
/* allocate memory for entire content */
buffer = calloc(1, lSize + 1);
if (!buffer) {
fclose(fp);
fputs("memory alloc fails", stderr);
exit(1);
}
/* copy the file into the buffer */
if (1 != fread(buffer, lSize, 1, fp)) {
fclose(fp);
free(buffer);
fputs("entire read fails", stderr);
exit(1);
}
fclose(fp);
return buffer;
}
void md5_print(unsigned char* result) {
int i;
for (i = 0; i < MD5_DIGEST_LENGTH; i++)
printf("%02x", *(result + i));
printf("\n");
}
unsigned char* str_to_md5(char* str) {
unsigned char* result = (unsigned char*) malloc(MD5_DIGEST_LENGTH*sizeof(unsigned char));
MD5((const unsigned char*) str, strlen(str), result);
return result;
}
char** str_split(char* a_str, const char a_delim, size_t* len) {
// https://stackoverflow.com/questions/9210528/split-string-with-delimiters-in-c
char** result = 0;
size_t count = 0;
char* tmp = a_str;
char* last_comma = 0;
char delim[2];
delim[0] = a_delim;
delim[1] = 0;
/* Count how many elements will be extracted. */
while (*tmp) {
if (a_delim == *tmp) {
count++;
last_comma = tmp;
}
tmp++;
}
/* Add space for trailing token. */
count += last_comma < (a_str + strlen(a_str) - 1);
/* Add space for terminating null string so caller
knows where the list of returned strings ends. */
count++;
result = malloc(sizeof(char*) * count);
if (result) {
size_t idx = 0;
*len = 0;
char* token = strtok(a_str, delim);
while (token) {
*(result + idx++) = strdup(token);
if (strlen(token) > 1)
(*len)++;
token = strtok(0, delim);
}
*(result + idx) = 0;
}
return result;
}
char* load_book_i(int i) {
char filepath[25];
char filename[8];
sprintf(filepath, "plain_text_books/%d.txt", i);
sprintf(filename, "%d.txt", i);
char* whole_text = file_to_str(filepath, filename);
return whole_text;
}
void books_print(Book* books) {
int i;
for (i = 0; i < NUMBER_OF_BOOKS; i++) {
printf("Book %d; total lines %lu\n", books[i].number, books[i].lines_len);
int j;
for (j = 0; j < books[i].lines_len; j++) {
Line* line = &(books[i].lines[j]);
printf("Book %d; line %d of %lu: %s\n", books[i].number, j+1, books[i].lines_len, line->str);
md5_print(line->md5);
}
}
}
bool md5_equals(unsigned char* md5_a, unsigned char* md5_b) {
int i;
for (i = 0; i < MD5_DIGEST_LENGTH; i++)
if (*(md5_a + i) != *(md5_b + i))
return false;
return true;
}
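/* Search the slave's local books for a line with the given MD5 digest.
 * Books are scanned in parallel (one book per iteration, dynamic schedule);
 * book_number starts at -1 and is bumped with an atomic add when a match is
 * found, which also lets the line loops bail out early when STOP_WHEN_FOUND is 1. */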
int find_line_in_book(Book* books, unsigned char* md5) {
int i;
int book_number = -1;
#pragma omp parallel shared(md5, book_number)
#pragma omp for schedule (dynamic)
for (i = 0; i < NUMBER_OF_BOOKS_BY_SLAVE; i++) {
int j;
for (j = 0; j < books[i].lines_len; j++) {
Line* line = &(books[i].lines[j]);
if (book_number != -1) {
if (STOP_WHEN_FOUND == 1) {
break;
} else {
continue;
}
} else if (md5_equals(md5, line->md5)) {
#pragma omp atomic
book_number += books[i].number;
}
}
}
return book_number;
}
int find_line_in_books(Book* books, char* line_str) {
unsigned char* md5 = str_to_md5(line_str);
int book_number = 0;
int i;
for (i = 1; i <= NUMBER_OF_SLAVES; i++) {
MPI_Send(md5, MD5_DIGEST_LENGTH, MPI_UNSIGNED_CHAR, i, 1, MPI_COMM_WORLD);
}
free(md5);
for (i = 1; i <= NUMBER_OF_SLAVES; i++) {
int found = 0;
MPI_Recv(&found, 1, MPI_INT, i, 1, MPI_COMM_WORLD, &status);
book_number = found > 0 ? found : 0;
if (book_number > 0) {
//printf("\nEscravo[%d]: encontrou no livro %d", i, found);
} else {
//printf("\nEscravo[%d]: não encontrou", i);
}
}
return book_number;
}
void find_all_lines_in_books(Book* books) {
int i;
for (i = 0; i < NUMBER_OF_BOOKS; i++) {
printf("\rBook %d of %d", i+1, NUMBER_OF_BOOKS);
fflush(stdout);
int j;
for (j = 0; j < books[i].lines_len; j++) {
Line* line = &(books[i].lines[j]);
char* line_str = line->str;
int book_number = find_line_in_books(books, line_str);
}
}
printf("\n");
}
size_t number_of_lines_in_books(Book* books) {
size_t all_lines_len = 0;
int i;
for (i = 0; i < NUMBER_OF_BOOKS; i++) {
all_lines_len += books[i].lines_len;
}
return all_lines_len;
}
void free_books(Book* books, int len) {
int i;
for (i = 0; i < len; i++) {
int j;
for (j = 0; j < books[i].lines_len; j++) {
Line* line = &(books[i].lines[j]);
char* line_str = line->str;
unsigned char* md5 = line->md5;
free(md5);
free(line_str);
}
Line* lines = &(books[i].lines);
// free(lines); // TODO: Freeing all lines structs not working
}
}
void update_book(Book* book, char* whole_text) {
size_t len;
char** lines = str_split(whole_text, '\n', &len);
book->lines_len = len;
book->lines = (Line*) malloc(len*sizeof(Line));
if (lines) {
int count = 0;
int j;
for (j = 0; *(lines + j); j++) {
if (strlen(*(lines + j)) > 1) {
Line* line = &(book->lines[count++]);
line->str = *(lines + j);
line->md5 = str_to_md5(*(lines + j));
}
}
}
}
int main(int argc, char** argv) {
int my_rank; // Identifier (rank) of this process
int proc_n; // Number of processes launched by the user on the command line (np)
MPI_Init(&argc , &argv); // initializes MPI; all the parallel code is below
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // gets the rank of the current process
MPI_Comm_size(MPI_COMM_WORLD, &proc_n); // gets the total number of processes
if (proc_n != NUMBER_OF_SLAVES + 1) {
printf("The number of processes (%d) must be equal to the number of slaves (%d) + 1 (the master)!\n", proc_n, NUMBER_OF_SLAVES);
MPI_Finalize();
return EXIT_FAILURE;
}
if (my_rank == 0) { // master role
Book books[NUMBER_OF_BOOKS];
int slave;
int i = 1;
for (slave = 1; slave <= NUMBER_OF_SLAVES; slave++) {
int cont;
for (cont = 1; cont <= NUMBER_OF_BOOKS_BY_SLAVE; cont++) {
books[i - 1].number = i;
char* whole_text = load_book_i(i);
size_t len = strlen(whole_text);
MPI_Send(&i, 1, MPI_INT, slave, 1, MPI_COMM_WORLD);
MPI_Send(&len, 1, MPI_UNSIGNED_LONG, slave, 1, MPI_COMM_WORLD);
MPI_Send(whole_text, len, MPI_CHAR, slave, 1, MPI_COMM_WORLD);
update_book(&(books[i-1]), whole_text);
i++;
free(whole_text);
}
}
size_t all_lines_len = number_of_lines_in_books(books);
for (slave = 1; slave <= NUMBER_OF_SLAVES; slave++) {
MPI_Send(&all_lines_len, 1, MPI_UNSIGNED_LONG, slave, 1, MPI_COMM_WORLD);
}
// books_print(&books);
double starttime = MPI_Wtime();
find_all_lines_in_books(&books);
double stoptime = MPI_Wtime();
double executiontime = stoptime - starttime;
printf("Execution time: %.2f s\n", executiontime);
free_books(&books, NUMBER_OF_BOOKS);
} else { // slave role
omp_set_num_threads(NUMBER_OF_THREADS);
Book books[NUMBER_OF_BOOKS_BY_SLAVE];
int i;
for (i = 1; i <= NUMBER_OF_BOOKS_BY_SLAVE; i++) {
int book_number;
MPI_Recv(&book_number, 1, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
size_t len;
MPI_Recv(&len, 1, MPI_UNSIGNED_LONG, 0, 1, MPI_COMM_WORLD, &status);
char* whole_text = (char*) malloc((1+len)*sizeof(char));
*(whole_text + len) = '\0';
MPI_Recv(whole_text, len, MPI_CHAR, 0, 1, MPI_COMM_WORLD, &status);
books[i-1].number = book_number;
update_book(&(books[i-1]), whole_text);
free(whole_text);
}
size_t all_lines_len;
MPI_Recv(&all_lines_len, 1, MPI_UNSIGNED_LONG, 0, 1, MPI_COMM_WORLD, &status);
size_t request_number;
for (request_number = 0; request_number < all_lines_len; request_number++) {
unsigned char md5[MD5_DIGEST_LENGTH];
MPI_Recv(&md5, MD5_DIGEST_LENGTH, MPI_UNSIGNED_CHAR, 0, 1, MPI_COMM_WORLD, &status);
int found = find_line_in_book(&books, &md5);
MPI_Send(&found, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
}
free_books(&books, NUMBER_OF_BOOKS_BY_SLAVE);
}
MPI_Finalize();
return EXIT_SUCCESS;
}
|
FG_vector.h | #ifndef __FG_VECTOR_H__
#define __FG_VECTOR_H__
/*
* Copyright 2014 Open Connectome Project (http://openconnecto.me)
* Written by Da Zheng (zhengda1936@gmail.com)
*
* This file is part of FlashGraph.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <memory>
#include <set>
#include <fstream>
#include "graph_engine.h"
#include "stat.h"
/**
* \brief FlashGraph vector that provides several parallelized methods
* when compared to an STL-vector. <br>
* **NOTE**: Not an STL-compatible data structure. This vector is also
* ideally used with numeric data types. <br>
* Methods marked with the keyword **parallel** are parallelized implementations.
*/
template<class T>
class FG_vector
{
// TODO I might need to split the vector into partitions.
std::vector<T> eles;
FG_vector(graph_engine::ptr graph) {
eles.resize(graph->get_num_vertices());
}
FG_vector(size_t size) {
eles.resize(size);
}
public:
typedef typename std::shared_ptr<FG_vector<T> > ptr; /** Smart pointer for object access */
/**
* \brief Create a vector whose length is the same as the number of vertices
* in the graph. An object of this
* class should be created using this or the `create(size_t size)`
* method.
* \param graph A shared pointer to a graph engine object. This is generally
* the graph for which you are creating the vector.
*/
static ptr create(graph_engine::ptr graph) {
return ptr(new FG_vector<T>(graph));
}
/**
* \brief Create a vector of the specified length. An object of this
* class should be created using this or the `create(graph_engine::ptr graph)`
* method.
* \param size The length of the vector you desire.
*/
static ptr create(size_t size) {
return ptr(new FG_vector<T>(size));
}
/**
* \brief Initialize the vector with a single value as specified by parameter 1.
*
* \param v The initialization parameter for the vector data.
* **parallel**
*/
void init(T v) {
#pragma omp parallel for
for (size_t i = 0; i < eles.size(); i++)
eles[i] = v;
}
/**
* \brief Equivalent to += operator. Element by element
* addition of one `FG_vector` to another.
* \param other An `FG_vector` smart pointer object.
* **parallel**
*
*/
void plus_eq(FG_vector<T>::ptr other) {
assert(get_size() == other->get_size());
#pragma omp parallel for
for (size_t i = 0; i < get_size(); i++) {
eles[i] += other->get(i);
}
}
/**
* \brief Assign a value `num` many times to the vector.
* \param num The number of elements to assign.
* \param val The value a user wants to assign to vector positions.
*/
void assign(size_t num, T val) {
eles.assign(num, val);
}
/**
* \brief Make a shallow copy of the vector.
* \param other An `FG_vector` smart pointer.
* **parallel**
*/
void shallow_copy(FG_vector<T>::ptr other) {
assert(this->get_size() == other->get_size());
#pragma omp parallel for
for (size_t i = 0; i < get_size(); i++) {
this->eles[i] = other->eles[i];
}
}
template<class T1>
void copy_to(T1 *arr, size_t size) {
size_t num = std::min(size, eles.size());
for (size_t i = 0; i < num; i++)
arr[i] = eles[i];
}
/**
* \brief Check for equality between two `FG_vector`s element by
* element.
* \param other An `FG_vector` smart pointer.
*/
// TODO DM: Make parallel / smarter
bool eq_all(FG_vector<T>::ptr other) {
return std::equal(this->eles.begin(), this->eles.end(), other->eles.begin());
}
void init_rand(long max, unsigned int seed = 0) {
if (seed > 0)
srandom(seed);
if (max >= std::numeric_limits<T>::max())
max = std::numeric_limits<T>::max();
#pragma omp parallel for
for (size_t i = 0; i < eles.size(); i++)
eles[i] = random() % max;
}
/**
* \brief Populate an [STL set](http://www.cplusplus.com/reference/set/set/)
* with the unique elements in the vector. All duplicates are ignored.
*
* \param set The *empty* STL set that will be populated with unique vector members.
*
*/
void unique(std::set<T> &set) const {
// TODO we need a parallel implementation.
assert(set.empty()); // FIXME: `new` a shared/unique ptr & remove param
BOOST_FOREACH(T v, eles) {
set.insert(v);
}
}
/**
* \brief Count the number of unique items in the vector using a
* count map.
* \param map An *empty* `count_map` object that is used to count
* the number of unique elements in the vector.
*
*/
void count_unique(count_map<T> &map) const {
// TODO we need a parallel implementation.
assert(map.get_size() == 0); // FIXME: `new` a shared/unique ptr & remove param
BOOST_FOREACH(T v, eles) {
map.add(v);
}
}
/**
* \brief Get the number of elements contained in the vector.
*
* \return The number of elements in the vector
*/
size_t get_size() const {
return eles.size();
}
/**
* \brief Get a pointer to the memory array used internally by
* the vector to store its owned elements.
* \return A pointer to the underlying data memory array.
*
*/
T *get_data() {
return eles.data();
}
/**
* \brief Const method to get a pointer to the memory array
* used internally by the vector to store its owned elements.
* \return A const pointer to the underlying data memory array.
*
*
*/
const T*get_data() const {
return eles.data();
}
/**
* \brief Compute the [dot product](http://en.wikipedia.org/wiki/Dot_product)
* of two FG vectors. <br>
* **parallel**
*
* \return A value of data type `T` value that is the dot product.
*/
T dot_product(const FG_vector<T> &other) const {
assert(this->get_size() == other.get_size());
T ret = 0;
#pragma omp parallel for reduction(+:ret)
for (size_t i = 0; i < get_size(); i++)
ret += get(i) * other.get(i);
return ret;
}
/**
* \brief Compute the
* [L2 Norm](http://en.wikipedia.org/wiki/Norm_(mathematics)#Euclidean_norm)
* (also known as Euclidean distance) of a vector. <br>
* **parallel**
*
* \return An object of type `T` with the value of the L2 norm.
*/
T norm2() const {
T ret = 0;
#pragma omp parallel for reduction(+:ret)
for (size_t i = 0; i < get_size(); i++)
ret += get(i) * get(i);
return sqrt(ret);
}
/**
* \brief Compute the
* [L1 Norm](http://en.wikipedia.org/wiki/Norm_(mathematics)#Taxicab_norm_or_Manhattan_norm)
* (also Taxicab norm) of an FG_vector. <br>
* **parallel**
*
* \return An object of type `T` with the L1 norm.
*/
T norm1() const {
T ret = 0;
#pragma omp parallel for reduction(+:ret)
for (size_t i = 0; i < get_size(); i++)
ret += fabs(get(i));
return ret;
}
/**
* \brief Compute the sum of all elements in the vector. <br>
* If the type is integer, the sum can overflow.
* **parallel**
* \return The sum of all items in the vector.
*/
T sum() const {
return sum<T>();
}
/**
* \brief Compute the sum of all elements in the vector. <br>
* This sum() allows users to specify the type of the result, so users
* can avoid integer overflow.
* **parallel**
* \return The sum of all items in the vector.
*/
template<class ResType>
ResType sum() const {
struct identity_func {
ResType operator()(T v) {
return v;
}
};
return aggregate<identity_func, ResType>(identity_func());
}
template<class Func, class ResType>
ResType aggregate(Func func) const {
ResType ret = 0;
#pragma omp parallel for reduction(+:ret)
for (size_t i = 0; i < get_size(); i++)
ret += func(eles[i]);
return ret;
}
/**
* \brief Find the maximal value in the vector and return its value.
* \return The maximal value in the vector.
*/
T max() const {
return max_val_loc().first;
}
/**
* \brief Find the maximal value in the vector and return its value
* and its location.
* \return A pair that contains the maximal value and its location
* in the vector.
*/
std::pair<T, off_t> max_val_loc() const {
T ret = std::numeric_limits<T>::min();
off_t idx = 0;
for (size_t i = 0; i < get_size(); i++) {
if (ret < get(i)) {
ret = get(i);
idx = i;
}
}
return std::pair<T, off_t>(ret, idx);
}
void max_val_locs(size_t num, std::vector<std::pair<T, off_t> > &pairs) const {
typedef std::pair<T, off_t> val_loc_t;
struct comp_val {
bool operator()(const val_loc_t &v1, const val_loc_t &v2) {
return v1.first > v2.first;
}
};
std::priority_queue<val_loc_t, std::vector<val_loc_t>, comp_val> queue;
for (size_t i = 0; i < get_size(); i++) {
T val = get(i);
queue.push(val_loc_t(val, i));
if (queue.size() > num)
queue.pop();
}
while (!queue.empty()) {
val_loc_t pair = queue.top();
queue.pop();
pairs.push_back(pair);
}
}
/**
* \brief Find the minimal value in the vector and
* return its value.
* \return The minimal value in the vector.
*/
T min() const {
T ret = std::numeric_limits<T>::max();
for (size_t i = 0; i < get_size(); i++)
ret = std::min(get(i), ret);
return ret;
}
/**
* \brief Find the index with the minimal value in the vector and
* return *the index*.
* \return The minimal index value in the vector.
*/
size_t argmin() {
typename std::vector<T>::iterator res = std::min_element(eles.begin(), eles.end());
size_t ret = std::distance(eles.begin(), res);
return ret;
}
/**
* \brief Serial element-wise print of the vector.
* **Not intended for very large vectors**
*/
void print() {
std::cout << "[";
for (vsize_t i=0; i < get_size(); i++) {
std::cout << " " << get(i);
}
std::cout << " ]\n\n";
}
/**
* \brief Write the space separated vector to file.
* \param fn The file name you wish written to file.
*/
void to_file(std::string fn) {
std::ofstream f;
f.open(fn);
for (vsize_t i=0; i < get_size(); i++) {
f << get(i) << " ";
}
f.close();
}
/**
* \brief In place division of vector by a single value.
* \param v The value by which you want the array divided.
* **parallel**
*/
void div_by_in_place(T v) {
#pragma omp parallel for
for (size_t i = 0; i < get_size(); i++)
eles[i] /= v;
}
/**
* \brief element-wise merge with another vector and store the result
* in this vector.
* \param vec The vector that you want to merge with.
* \param func The operator that you want to perform on each pair of
* elements.
*/
template<class MergeFunc, class VecType>
void merge_in_place(typename FG_vector<VecType>::ptr vec, MergeFunc func) {
assert(this->get_size() == vec->get_size());
#pragma omp parallel for
for (size_t i = 0; i < get_size(); i++)
eles[i] = func(eles[i], vec->get(i));
}
/**
* \brief In place element-wise addition by another vector.
* \param vec The vector by which you want to add to this vector.
* **parallel**
*/
void add_in_place(FG_vector<T>::ptr vec) {
struct add_func {
T operator()(const T &v1, const T &v2) {
return v1 + v2;
}
};
merge_in_place<add_func, T>(vec, add_func());
}
/**
* \brief In place subtraction of the vector by another vector.
* \param vec The vector by which you want the array to be subtracted.
* **parallel**
*/
void subtract_in_place(const FG_vector<T>::ptr &vec) {
struct sub_func {
T operator()(const T &v1, const T &v2) {
return v1 - v2;
}
};
merge_in_place<sub_func, T>(vec, sub_func());
}
/**
* \brief Normalize vector using an Lx form.
* **parallel**
*/
void normalize(int type) {
T norm;
switch(type) {
case 2:
norm = norm2();
break;
case 1:
norm = norm1();
break;
default:
ABORT_MSG("normalize on wrong type");
}
div_by_in_place(norm);
}
/**
* \brief Apply a function to every element in an FG_vector.
*
* \param func A user-defined function.
* \param output The FG_vector that you want to apply the function to.
*
* **parallel**
*/
template<class ApplyFunc>
void apply(ApplyFunc func, FG_vector<T> &output) {
#pragma omp parallel for
for (size_t i = 0; i < get_size(); i++)
output.set(i, func(eles[i]));
}
// TODO these interfaces assume shared memory.
/**
* Set a value of an index in the vector.
*
* **NOTE:** This function assumes a shared memory environment.
* \param id The index where value is being set.
* \param v The value that the index will be set to.
*/
void set(vertex_id_t id, const T &v) {
eles[id] = v;
}
/**
* \brief Const get the value of a particular index.
* \param id The index of the vector from where you want a value.
* \return The value requested by param 1
*
*/
const T &get(vertex_id_t id) const {
return eles[id];
}
/**
* \brief Non-const get the value of a particular index.
* \param id The index of the vector from where you want a value.
* \return The value requested by param 1
*
*/
T &get(vertex_id_t id) {
return eles[id];
}
log_histogram log_hist(int power) const {
T max_v = max();
int num_buckets = ceil(log(max_v) / log(power));
log_histogram hist(std::max(num_buckets, 1));
for (size_t i = 0; i < get_size(); i++) {
hist.add_value(eles[i]);
}
return hist;
}
};
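/*
 * A minimal usage sketch, assuming an already constructed graph_engine::ptr
 * named `graph` (names here are illustrative only):
 *
 *   FG_vector<double>::ptr v = FG_vector<double>::create(graph);
 *   v->init(1.0);                  // parallel fill with 1.0
 *   double s = v->sum<double>();   // parallel reduction over all elements
 *   v->normalize(2);               // divide in place by the L2 norm
 */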
/**
* \brief Apply a user defined function to multiple FG_vectors.
* **parallel**
* \param inputs A vector of FG_vectors that are the inputs.
* \param output An FG_vector that holds the output.
* \param apply The user-defined function that will be applied to all vectors.
*/
template<class T, class ApplyFunc>
void multi_vec_apply(const std::vector<typename FG_vector<T>::ptr> &inputs,
typename FG_vector<T>::ptr output, ApplyFunc apply)
{
for (size_t i = 0; i < inputs.size(); i++)
assert(output->get_size() == inputs[i]->get_size());
#pragma omp parallel for
for (size_t i = 0; i < output->get_size(); i++)
output->set(i, apply(i, inputs));
}
#endif
|
matmul.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#define M 2048
#define N 2048
#define K 2048
/*#define M 1000
#define N 1000
#define K 1000*/
#define alpha 1
#define beta 1
double A[M][K];
double B[K][N];
double C[M][N];
//double newB[N][K]; //added line for transposed array B
#define IF_TIME(foo) foo;
void init_array()
{
int i, j;
for (i=0; i<N; i++) {
for (j=0; j<N; j++) {
A[i][j] = (i + j);
B[i][j] = (double)(i*j);
C[i][j] = 0.0;
//newB[i][j]=0.0;
}
}
}
void print_array()
{
int i, j;
for (i=0; i<N; i++) {
for (j=0; j<N; j++) {
fprintf(stdout, "%lf ", C[i][j]);
if (j%80 == 79) fprintf(stdout, "\n");
}
fprintf(stdout, "\n");
}
}
double rtclock()
{
struct timezone Tzp;
struct timeval Tp;
int stat;
stat = gettimeofday (&Tp, &Tzp);
if (stat != 0) printf("Error return from gettimeofday: %d",stat);
return(Tp.tv_sec + Tp.tv_usec*1.0e-6);
}
double t_start, t_end;
int main()
{
int i, j, k;
register double s;
init_array();
IF_TIME(t_start = rtclock());
/* Code to be optimized - start */
/*
//transposing matrix B
for(i=0;i<K;i++){
for(j=0;j<N;j++){
newB[j][i]=B[i][j];
}
}
*/
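/* Loop order i-k-j: A[i][k] is reused across the whole j loop while B[k][j]
and C[i][j] are accessed contiguously in memory, which is cache-friendly for
these row-major arrays; the outer i loop is parallelized below. */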
//Add the pragma line here
#pragma omp parallel for private(j,k)
for(i=0; i<M; i++)
for(k=0; k<K; k++)
for(j=0; j<N; j++)
C[i][j] = beta*C[i][j] + alpha*A[i][k] * B[k][j];
/* Code to be optimized - end */
IF_TIME(t_end = rtclock());
IF_TIME(fprintf(stderr, "%0.6lfs\n", t_end - t_start));
if (fopen(".test", "r")) {
print_array();
}
return 0;
}
|
detector.c | #include "darknet.h"
#include <stdio.h>
#ifdef WIN32
#include "unistd\dirent.h"
#else
#include <dirent.h>
#endif
#ifdef WIN32
#include "unistd\unistd.h"
#else
#include <unistd.h>
#endif
#include <sys/stat.h>
#define class temp
struct stat st;
static int coco_ids[] = {1,2,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,67,70,72,73,74,75,76,77,78,79,80,81,82,84,85,86,87,88,89,90};
void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{
list *options = read_data_cfg(datacfg);
char *train_images = option_find_str(options, "train", "data/train.list");
char *backup_directory = option_find_str(options, "backup", "/backup/");
srand(time(0));
char *base = basecfg(cfgfile);
printf("%s\n", base);
float avg_loss = -1;
network **nets = (network**)calloc(ngpus, sizeof(network*));
srand(time(0));
int seed = rand();
int i;
for(i = 0; i < ngpus; ++i){
srand(seed);
#ifdef GPU
if(gpu_index >= 0){
opencl_set_device(i);
}
#endif
nets[i] = load_network(cfgfile, weightfile, clear);
nets[i]->learning_rate *= ngpus;
}
srand(time(0));
network *net = nets[0];
int imgs = net->batch * net->subdivisions * ngpus;
#ifndef BENCHMARK
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
#endif
data train, buffer;
layer l = net->layers[net->n - 1];
int classes = l.classes;
float jitter = l.jitter;
list *plist = get_paths(train_images);
//int N = plist->size;
char **paths = (char **)list_to_array(plist);
load_args args = get_base_args(net);
args.coords = l.coords;
args.paths = paths;
args.n = imgs;
args.m = plist->size;
args.classes = classes;
args.jitter = jitter;
args.num_boxes = l.max_boxes;
args.d = &buffer;
args.type = DETECTION_DATA;
//args.type = INSTANCE_DATA;
args.threads = 64;
pthread_t load_thread = load_data(args);
#ifdef LOSS_ONLY
double time=what_time_is_it_now();
#else
double time;
#endif
int count = 0;
if(count == 0) {
#ifdef GPU
if (gpu_index >= 0) {
if (ngpus != 1) sync_nets(nets, ngpus, 0);
}
#endif
char buff[256];
sprintf(buff, "%s/%s.start.conv.weights", backup_directory, base);
save_weights(net, buff);
}
int max_size = ((net->w + net->h)/2);
//while(i*imgs < N*120){
while(get_current_batch(net) < net->max_batches){
if(l.random && count++%10 == 0){
#if !defined(BENCHMARK) && !defined(LOSS_ONLY)
printf("Resizing\n");
#endif
int dim = max_size - ((rand() % 8) * 32);
#ifdef BENCHMARK
dim = 608;
#endif
if (get_current_batch(net)+200 > net->max_batches) dim = max_size;
if (net->w < dim || net->h < dim) dim = max_size;
#if !defined(BENCHMARK) && !defined(LOSS_ONLY)
printf("%d\n", dim);
#endif
args.w = dim;
args.h = dim;
pthread_join(load_thread, 0);
train = buffer;
free_data(train);
load_thread = load_data(args);
#pragma omp parallel for
for(i = 0; i < ngpus; ++i){
resize_network(nets[i], dim, dim);
}
net = nets[0];
}
#ifndef LOSS_ONLY
time=what_time_is_it_now();
#endif
pthread_join(load_thread, 0);
train = buffer;
load_thread = load_data(args);
/*
int k;
for(k = 0; k < l.max_boxes; ++k){
box b = float_to_box(train.y.vals[10] + 1 + k*5);
if(!b.x) break;
printf("loaded: %f %f %f %f\n", b.x, b.y, b.w, b.h);
}
*/
/*
int zz;
for(zz = 0; zz < train.X.cols; ++zz){
image im = float_to_image(net->w, net->h, 3, train.X.vals[zz]);
int k;
for(k = 0; k < l.max_boxes; ++k){
box b = float_to_box(train.y.vals[zz] + k*5, 1);
printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
draw_bbox(im, b, 1, 1,0,0);
}
show_image(im, "truth11");
cvWaitKey(0);
save_image(im, "truth11");
}
*/
#ifndef LOSS_ONLY
printf("Loaded: %lf seconds\n", what_time_is_it_now()-time);
#endif
#ifndef LOSS_ONLY
time=what_time_is_it_now();
#endif
float loss = 0;
#ifdef GPU
if (gpu_index >= 0) {
if (ngpus == 1) {
loss = train_network(net, train);
} else {
loss = train_networks(nets, ngpus, train, 4);
}
}
else {
loss = train_network(net, train);
}
#else
loss = train_network(net, train);
#endif
if (avg_loss < 0) avg_loss = loss;
avg_loss = avg_loss*.9 + loss*.1;
i = get_current_batch(net);
#ifdef LOSS_ONLY
printf("%lf\t%f\n", what_time_is_it_now()-time, loss);
#else
printf("%ld: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), what_time_is_it_now()-time, i*imgs);
#endif
#ifdef GPU
if (loss != loss && gpu_index >= 0) {
opencl_deinit(gpusg, ngpusg);
}
#endif
if(loss != loss) { printf("NaN LOSS detected! Not possible to continue!\n"); exit(-7); } /* loss != loss is true only for NaN */
if(i%100==0){
#ifdef GPU
if (gpu_index >= 0) {
if (ngpus != 1) sync_nets(nets, ngpus, 0);
}
#endif
char buff[256];
sprintf(buff, "%s/%s.backup", backup_directory, base);
save_weights(net, buff);
}
if(i%10000==0 || (i < 1000 && i%100 == 0)){
#ifdef GPU
if (gpu_index >= 0) {
if (ngpus != 1) sync_nets(nets, ngpus, 0);
}
#endif
char buff[256];
sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
save_weights(net, buff);
}
free_data(train);
#ifdef GPU_STATS
opencl_dump_mem_stat();
#endif
#ifdef BENCHMARK
break;
#endif
}
#ifdef GPU
if (gpu_index >= 0) {
if (ngpus != 1) sync_nets(nets, ngpus, 0);
}
#endif
char buff[256];
sprintf(buff, "%s/%s_final.weights", backup_directory, base);
save_weights(net, buff);
free(paths);
free(plist);
free(base);
free(nets);
free(options);
}
static int get_coco_image_id(char *filename)
{
char *p = strrchr(filename, '/');
char *c = strrchr(filename, '_');
if(c) p = c;
return atoi(p+1);
}
static void print_cocos(FILE *fp, char *image_path, detection *dets, int num_boxes, int classes, int w, int h)
{
int i, j;
int image_id = get_coco_image_id(image_path);
for(i = 0; i < num_boxes; ++i){
float xmin = dets[i].bbox.x - dets[i].bbox.w/2.;
float xmax = dets[i].bbox.x + dets[i].bbox.w/2.;
float ymin = dets[i].bbox.y - dets[i].bbox.h/2.;
float ymax = dets[i].bbox.y + dets[i].bbox.h/2.;
if (xmin < 0) xmin = 0;
if (ymin < 0) ymin = 0;
if (xmax > w) xmax = w;
if (ymax > h) ymax = h;
float bx = xmin;
float by = ymin;
float bw = xmax - xmin;
float bh = ymax - ymin;
for(j = 0; j < classes; ++j){
if (dets[i].prob[j]) fprintf(fp, "{\"image_id\":%d, \"category_id\":%d, \"bbox\":[%f, %f, %f, %f], \"score\":%f},\n", image_id, coco_ids[j], bx, by, bw, bh, dets[i].prob[j]);
}
}
}
void print_detector_detections(FILE **fps, char *id, detection *dets, int total, int classes, int w, int h)
{
int i, j;
for(i = 0; i < total; ++i){
float xmin = dets[i].bbox.x - dets[i].bbox.w/2. + 1;
float xmax = dets[i].bbox.x + dets[i].bbox.w/2. + 1;
float ymin = dets[i].bbox.y - dets[i].bbox.h/2. + 1;
float ymax = dets[i].bbox.y + dets[i].bbox.h/2. + 1;
if (xmin < 1) xmin = 1;
if (ymin < 1) ymin = 1;
if (xmax > w) xmax = w;
if (ymax > h) ymax = h;
for(j = 0; j < classes; ++j){
if (dets[i].prob[j]) fprintf(fps[j], "%s %f %f %f %f %f\n", id, dets[i].prob[j],
xmin, ymin, xmax, ymax);
}
}
}
void print_imagenet_detections(FILE *fp, int id, detection *dets, int total, int classes, int w, int h)
{
int i, j;
for(i = 0; i < total; ++i){
float xmin = dets[i].bbox.x - dets[i].bbox.w/2.;
float xmax = dets[i].bbox.x + dets[i].bbox.w/2.;
float ymin = dets[i].bbox.y - dets[i].bbox.h/2.;
float ymax = dets[i].bbox.y + dets[i].bbox.h/2.;
if (xmin < 0) xmin = 0;
if (ymin < 0) ymin = 0;
if (xmax > w) xmax = w;
if (ymax > h) ymax = h;
for(j = 0; j < classes; ++j){
int class = j;
if (dets[i].prob[class]) fprintf(fp, "%d %d %f %f %f %f %f\n", id, j+1, dets[i].prob[class],
xmin, ymin, xmax, ymax);
}
}
}
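/*
 * A small illustrative helper (not in the original sources): the corner/clamping
 * math shared by print_cocos and print_imagenet_detections above, converting a
 * darknet box given as (center x, center y, width, height) into clamped
 * (xmin, ymin, xmax, ymax) corners. print_detector_detections additionally
 * shifts the result to 1-based coordinates.
 */
static void box_to_corners(box b, int w, int h,
                           float *xmin, float *ymin, float *xmax, float *ymax)
{
    *xmin = b.x - b.w/2.;
    *xmax = b.x + b.w/2.;
    *ymin = b.y - b.h/2.;
    *ymax = b.y + b.h/2.;
    if (*xmin < 0) *xmin = 0;
    if (*ymin < 0) *ymin = 0;
    if (*xmax > w) *xmax = w;
    if (*ymax > h) *ymax = h;
}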
void validate_detector_flip(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
int j;
list *options = read_data_cfg(datacfg);
char *valid_images = option_find_str(options, "valid", "data/train.list");
char *name_list = option_find_str(options, "names", "data/names.list");
char *prefix = option_find_str(options, "results", "results");
char **names = get_labels(name_list);
char *mapf = option_find_str(options, "map", 0);
int *map = 0;
if (mapf) map = read_map(mapf);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 2);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
list *plist = get_paths(valid_images);
char **paths = (char **)list_to_array(plist);
layer l = net->layers[net->n-1];
int classes = l.classes;
char buff[1024];
char *type = option_find_str(options, "eval", "voc");
FILE *fp = 0;
FILE **fps = 0;
int coco = 0;
int imagenet = 0;
if(0==strcmp(type, "coco")){
if(!outfile) outfile = "coco_results";
snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
fp = fopen(buff, "w");
fprintf(fp, "[\n");
coco = 1;
} else if(0==strcmp(type, "imagenet")){
if(!outfile) outfile = "imagenet-detection";
snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
fp = fopen(buff, "w");
imagenet = 1;
classes = 200;
} else {
if(!outfile) outfile = "comp4_det_test_";
fps = (FILE**)calloc(classes, sizeof(FILE *));
for(j = 0; j < classes; ++j){
snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
fps[j] = fopen(buff, "w");
}
}
int m = plist->size;
int i=0;
int t;
float thresh = .005;
float nms = .45;
int nthreads = 4;
image *val = (image*)calloc(nthreads, sizeof(image));
image *val_resized = (image*)calloc(nthreads, sizeof(image));
image *buf = (image*)calloc(nthreads, sizeof(image));
image *buf_resized = (image*)calloc(nthreads, sizeof(image));
pthread_t *thr = (pthread_t*)calloc(nthreads, sizeof(pthread_t));
image input = make_image(net->w, net->h, net->c*2);
load_args args = {0};
args.w = net->w;
args.h = net->h;
//args.type = IMAGE_DATA;
args.type = LETTERBOX_DATA;
for(t = 0; t < nthreads; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
double start = what_time_is_it_now();
for(i = nthreads; i < m+nthreads; i += nthreads){
fprintf(stderr, "%d\n", i);
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
pthread_join(thr[t], 0);
val[t] = buf[t];
val_resized[t] = buf_resized[t];
}
for(t = 0; t < nthreads && i+t < m; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
char *path = paths[i+t-nthreads];
char *id = basecfg(path);
copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data, 1);
flip_image(val_resized[t]);
copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data + net->w*net->h*net->c, 1);
network_predict(net, input.data);
int w = val[t].w;
int h = val[t].h;
int num = 0;
int nboxes = 0;
detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &num);
if (nms) {
if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms);
else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms);
}
if (coco){
print_cocos(fp, path, dets, num, classes, w, h);
} else if (imagenet){
print_imagenet_detections(fp, i+t-nthreads+1, dets, num, classes, w, h);
} else {
print_detector_detections(fps, id, dets, num, classes, w, h);
}
free_detections(dets, num);
free(id);
free_image(val[t]);
free_image(val_resized[t]);
}
}
for(j = 0; j < classes; ++j){
if(fps) fclose(fps[j]);
}
if(coco){
fseek(fp, -2, SEEK_CUR);
fprintf(fp, "\n]\n");
fclose(fp);
}
fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
int j;
list *options = read_data_cfg(datacfg);
char *valid_images = option_find_str(options, "valid", "data/train.list");
char *name_list = option_find_str(options, "names", "data/names.list");
char *prefix = option_find_str(options, "results", "results");
char **names = get_labels(name_list);
char *mapf = option_find_str(options, "map", 0);
int *map = 0;
if (mapf) map = read_map(mapf);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
list *plist = get_paths(valid_images);
char **paths = (char **)list_to_array(plist);
layer l = net->layers[net->n-1];
int classes = l.classes;
char buff[1024];
char *type = option_find_str(options, "eval", "voc");
FILE *fp = 0;
FILE **fps = 0;
int coco = 0;
int imagenet = 0;
if(0==strcmp(type, "coco")){
if(!outfile) outfile = "coco_results";
snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
fp = fopen(buff, "w");
fprintf(fp, "[\n");
coco = 1;
} else if(0==strcmp(type, "imagenet")){
if(!outfile) outfile = "imagenet-detection";
snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
fp = fopen(buff, "w");
imagenet = 1;
classes = 200;
} else {
if(!outfile) outfile = "comp4_det_test_";
fps = (FILE**)calloc(classes, sizeof(FILE *));
for(j = 0; j < classes; ++j){
snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
fps[j] = fopen(buff, "w");
}
}
int m = plist->size;
int i=0;
int t;
float thresh = .005;
float nms = .45;
int nthreads = 4;
image *val = (image*)calloc(nthreads, sizeof(image));
image *val_resized = (image*)calloc(nthreads, sizeof(image));
image *buf = (image*)calloc(nthreads, sizeof(image));
image *buf_resized = (image*)calloc(nthreads, sizeof(image));
pthread_t *thr = (pthread_t*)calloc(nthreads, sizeof(pthread_t));
load_args args = {0};
args.w = net->w;
args.h = net->h;
//args.type = IMAGE_DATA;
args.type = LETTERBOX_DATA;
for(t = 0; t < nthreads; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
double start = what_time_is_it_now();
for(i = nthreads; i < m+nthreads; i += nthreads){
fprintf(stderr, "%d\n", i);
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
pthread_join(thr[t], 0);
val[t] = buf[t];
val_resized[t] = buf_resized[t];
}
for(t = 0; t < nthreads && i+t < m; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
char *path = paths[i+t-nthreads];
char *id = basecfg(path);
float *X = val_resized[t].data;
network_predict(net, X);
int w = val[t].w;
int h = val[t].h;
int nboxes = 0;
detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &nboxes);
if (nms) {
if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms);
else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms);
}
if (coco){
print_cocos(fp, path, dets, nboxes, classes, w, h);
} else if (imagenet){
print_imagenet_detections(fp, i+t-nthreads+1, dets, nboxes, classes, w, h);
} else {
print_detector_detections(fps, id, dets, nboxes, classes, w, h);
}
free_detections(dets, nboxes);
free(id);
free_image(val[t]);
free_image(val_resized[t]);
}
}
for(j = 0; j < classes; ++j){
if(fps) fclose(fps[j]);
}
if(coco){
fseek(fp, -2, SEEK_CUR);
fprintf(fp, "\n]\n");
fclose(fp);
}
fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
void validate_detector_recall(char *datacfg, char *cfgfile, char *weightfile)
{
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
//list *plist = get_paths("data/coco_val_5k.list");
list *options = read_data_cfg(datacfg);
char *test_images = option_find_str(options, "test", "data/test.list");
list *plist = get_paths(test_images);
char **paths = (char **)list_to_array(plist);
layer l = net->layers[net->n-1];
int j, k;
int m = plist->size;
int i=0;
float thresh = .001;
float iou_thresh = .5;
float nms = .4;
int total = 0;
int correct = 0;
int proposals = 0;
float avg_iou = 0;
for(i = 0; i < m; ++i){
char *path = paths[i];
image orig = load_image_color(path, 0, 0);
image sized = resize_image(orig, net->w, net->h);
char *id = basecfg(path);
network_predict(net, sized.data);
int nboxes = 0;
detection *dets = get_network_boxes(net, sized.w, sized.h, thresh, .5, 0, 1, &nboxes);
if (nms) {
if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms);
else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms);
}
char labelpath[4096];
find_replace(path, "images", "labels", labelpath);
find_replace(labelpath, "JPEGImages", "labels", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
int num_labels = 0;
box_label *truth = read_boxes(labelpath, &num_labels);
for(k = 0; k < nboxes; ++k){
if(dets[k].objectness > thresh){
++proposals;
}
}
for (j = 0; j < num_labels; ++j) {
++total;
box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h};
float best_iou = 0;
for(k = 0; k < l.w*l.h*l.n; ++k){
float iou = box_iou(dets[k].bbox, t);
if(dets[k].objectness > thresh && iou > best_iou){
best_iou = iou;
}
}
avg_iou += best_iou;
if(best_iou > iou_thresh){
++correct;
}
}
fprintf(stderr, "%5d %5d %5d\tRPs/Img: %.2f\tIOU: %.2f%%\tRecall:%.2f%%\n", i, correct, total, (float)proposals/(i+1), avg_iou*100/total, 100.*correct/total);
free(id);
free_image(orig);
free_image(sized);
}
}
void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filename, float thresh, float hier_thresh, char *outfile, int fullscreen)
{
list *options = read_data_cfg(datacfg);
char *name_list = option_find_str(options, "names", "data/names.list");
char **names = get_labels(name_list);
image **alphabet = load_alphabet();
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
double time;
char buff[256];
char *input = buff;
float nms=.45;
while(1){
if(filename){
strncpy(input, filename, 256);
} else {
printf("Enter Image Path: ");
fflush(stdout);
input = fgets(input, 256, stdin);
if(!input) return;
strtok(input, "\n");
}
image im = load_image_color(input,0,0);
int resize = im.w != net->w || im.h != net->h;
image sized = resize ? letterbox_image(im, net->w, net->h) : im;
//image sized = resize_image(im, net->w, net->h);
//image sized2 = resize_max(im, net->w);
//image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h);
//resize_network(net, sized.w, sized.h);
layer l = net->layers[net->n-1];
float *X = sized.data;
time=what_time_is_it_now();
if (l.type == DETECTION || l.type == REGION || l.type == YOLO) {
network_predict(net, X);
}
if (l.type == YOLO4) {
network_predict_y4(net, X);
}
printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now()-time);
int nboxes = 0;
detection *dets = 0;
if (l.type == DETECTION || l.type == REGION || l.type == YOLO) {
dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
}
if (l.type == YOLO4) {
dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
}
//printf("%d\n", nboxes);
if (nms) {
if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms);
else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms);
}
if (l.type == DETECTION || l.type == REGION || l.type == YOLO) {
draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes, 0);
}
if (l.type == YOLO4) {
draw_detections_v3(im, dets, nboxes, thresh, names, alphabet, l.classes, 0);
}
free_detections(dets, nboxes);
if(outfile){
save_image(im, outfile);
}
else{
save_image(im, "predictions");
#ifdef OPENCV
show_image(im, "predictions", 0);
#endif
}
free_image(im);
if (resize) free_image(sized);
if (filename) break;
}
}
int exists(const char *fname, const char* ext)
{
FILE *file;
if (strstr(fname, ext) && (file = fopen(fname, "r")))
{
fclose(file);
return 1;
}
return 0;
}
int empty(char *dirname) {
int n = 0;
struct dirent *d;
DIR *dir = opendir(dirname);
if (dir == NULL) // not a dir or doesn't exist
return 1;
while ((d = readdir(dir)) != NULL) {
if(++n > 2) /* more than the "." and ".." entries: directory is not empty */
break;
}
closedir(dir);
if (n <= 2) //dir empty
return 1;
else
return 0;
}
void test_ddetector(char *datacfg, char *cfgfile, char *weightfile, char *in_dir, float thresh, float hier_thresh, char *out_dir)
{
list *options = read_data_cfg(datacfg);
char *name_list = option_find_str(options, "names", "data/names.list");
char **names = get_labels(name_list);
image **alphabet = load_alphabet();
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
double time;
char buff[256];
char *input = buff;
float nms=.45;
char fname[256];
char ffname[1024];
char ffoname[1024];
struct dirent *de = NULL;
while(1) {
while (empty(in_dir)) {
usleep(100);
}
DIR *dr = opendir(in_dir);
while ((de = readdir(dr)) != NULL) {
printf("%s\n", de->d_name);
strcpy(fname, de->d_name);
strcpy(ffname, in_dir);
strcat(ffname, "/");
strcat(ffname, fname);
if (!exists(ffname, ".jpg")) continue;
if (1) {
strcpy(ffoname, out_dir);
strcat(ffoname, "/");
strcat(ffoname, fname);
int len = strlen(ffoname) - 4;
ffoname[len] = '\0';
strncpy(input, ffname, 256);
} else {
printf("Enter Image Path: ");
fflush(stdout);
input = fgets(input, 256, stdin);
if (!input) continue;
strtok(input, "\n");
}
off_t size = 0;
off_t offs = 0;
/* poll the file size until it stops changing, so a partially written image is not read */
do {
offs = size;
stat(input, &st);
size = st.st_size;
if (offs != size) usleep(10); else break;
} while (1);
image im = load_image_color(input, 0, 0);
int resize = im.w != net->w || im.h != net->h;
image sized = resize ? letterbox_image(im, net->w, net->h) : im;
//image sized = resize_image(im, net->w, net->h);
//image sized2 = resize_max(im, net->w);
//image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h);
//resize_network(net, sized.w, sized.h);
layer l = net->layers[net->n - 1];
float *X = sized.data;
time = what_time_is_it_now();
network_predict(net, X);
printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now() - time);
int nboxes = 0;
detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
//printf("%d\n", nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) {
if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms);
else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms);
}
draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes, 0);
free_detections(dets, nboxes);
free_image(im);
if (resize) free_image(sized);
// if (filename) break;
remove(input);
}
closedir(dr);
}
}
/*
void censor_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
float nms = .45;
while(1){
image in = get_image_from_stream_cv(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
float *X = in_s.data;
network_predict(net, X);
int nboxes = 0;
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 0, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) {
if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms);
else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms);
}
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int left = b.x-b.w/2.;
int top = b.y-b.h/2.;
censor_image(in, left, top, b.w, b.h);
}
}
show_image(in, base);
cvWaitKey(10);
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream_cv(cap);
free_image(in);
}
}
#endif
}
void extract_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
int count = 0;
float nms = .45;
while(1){
image in = get_image_from_stream_cv(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
show_image(in, base);
int nboxes = 0;
float *X = in_s.data;
network_predict(net, X);
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 1, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) {
if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms);
else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms);
}
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int size = b.w*in.w > b.h*in.h ? b.w*in.w : b.h*in.h;
int dx = b.x*in.w-size/2.;
int dy = b.y*in.h-size/2.;
image bim = crop_image(in, dx, dy, size, size);
char buff[2048];
sprintf(buff, "results/extract/%07d", count);
++count;
save_image(bim, buff);
free_image(bim);
}
}
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream_cv(cap);
free_image(in);
}
}
#endif
}
*/
/*
void network_detect(network *net, image im, float thresh, float hier_thresh, float nms, detection *dets)
{
network_predict_image(net, im);
layer l = net->layers[net->n-1];
int nboxes = num_boxes(net);
fill_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 0, dets);
if (nms) {
if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms);
else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms);
}
}
*/
void run_detector(int argc, char **argv)
{
char *prefix = find_char_arg(argc, argv, "-prefix", 0);
float thresh = find_float_arg(argc, argv, "-thresh", .5);
float hier_thresh = find_float_arg(argc, argv, "-hier", .5);
int cam_index = find_int_arg(argc, argv, "-c", 0);
int frame_skip = find_int_arg(argc, argv, "-s", 0);
int avg = find_int_arg(argc, argv, "-avg", 3);
if(argc < 4){
fprintf(stderr, "usage: %s %s [train/test/valid] [data cfg] [net cfg] [weights (optional)]\n", argv[0], argv[1]);
return;
}
char *gpu_list = find_char_arg(argc, argv, "-gpus", 0);
char *outfile = find_char_arg(argc, argv, "-out", 0);
int *gpus = 0;
int gpu = 0;
int ngpus = 0;
if(gpu_list){
printf("%s\n", gpu_list);
int len = strlen(gpu_list);
ngpus = 1;
int i;
for(i = 0; i < len; ++i){
if (gpu_list[i] == ',') ++ngpus;
}
gpus = (int*)calloc(ngpus, sizeof(int));
for(i = 0; i < ngpus; ++i){
gpus[i] = atoi(gpu_list);
gpu_list = strchr(gpu_list, ',')+1;
}
} else {
gpu = gpu_index;
gpus = &gpu;
ngpus = 1;
}
int clear = find_arg(argc, argv, "-clear");
int fullscreen = find_arg(argc, argv, "-fullscreen");
int width = find_int_arg(argc, argv, "-w", 0);
int height = find_int_arg(argc, argv, "-h", 0);
int fps = find_int_arg(argc, argv, "-fps", 0);
//int class = find_int_arg(argc, argv, "-class", 0);
char *datacfg = argv[3];
char *cfg = argv[4];
char *weights = (argc > 5) ? argv[5] : 0;
char *filename = (argc > 6) ? argv[6]: 0;
if(0==strcmp(argv[2], "test")) test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh, outfile, fullscreen);
else if(0==strcmp(argv[2], "train")) train_detector(datacfg, cfg, weights, gpus, ngpus, clear);
else if(0==strcmp(argv[2], "valid")) validate_detector(datacfg, cfg, weights, outfile);
else if(0==strcmp(argv[2], "valid2")) validate_detector_flip(datacfg, cfg, weights, outfile);
else if(0==strcmp(argv[2], "recall")) validate_detector_recall(datacfg, cfg, weights);
else if(0==strcmp(argv[2], "demo")) {
list *options = read_data_cfg(datacfg);
int classes = option_find_int(options, "classes", 20);
char *name_list = option_find_str(options, "names", "data/names.list");
char **names = get_labels(name_list);
demo(cfg, weights, thresh, cam_index, filename, names, classes, frame_skip, prefix, avg, hier_thresh, width, height, fps, fullscreen);
}
//else if(0==strcmp(argv[2], "extract")) extract_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
//else if(0==strcmp(argv[2], "censor")) censor_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
}
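/*
 * Example invocations (file names are illustrative; substitute your own data cfg,
 * network cfg and weights):
 *   ./darknet detector train cfg/coco.data cfg/yolov3.cfg yolov3.weights -gpus 0,1
 *   ./darknet detector test  cfg/coco.data cfg/yolov3.cfg yolov3.weights data/dog.jpg -thresh 0.5
 *   ./darknet detector valid cfg/coco.data cfg/yolov3.cfg yolov3.weights -out results
 * argv[3] is the data cfg, argv[4] the network cfg, argv[5] the weights and
 * argv[6] an optional image path.
 */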
#undef class |
ParFriends.h | /****************************************************************/
/* Parallel Combinatorial BLAS Library (for Graph Computations) */
/* version 1.6 -------------------------------------------------*/
/* date: 6/15/2017 ---------------------------------------------*/
/* authors: Ariful Azad, Aydin Buluc --------------------------*/
/****************************************************************/
/*
Copyright (c) 2010-2017, The Regents of the University of California
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#ifndef _PAR_FRIENDS_H_
#define _PAR_FRIENDS_H_
#include "mpi.h"
#include <iostream>
#include <cstdarg>
#include "SpParMat.h"
#include "SpParMat3D.h"
#include "SpParHelper.h"
#include "MPIType.h"
#include "Friends.h"
#include "OptBuf.h"
#include "mtSpGEMM.h"
#include "MultiwayMerge.h"
#include <unistd.h>
#include <type_traits>
namespace combblas {
template <class IT, class NT, class DER>
class SpParMat;
/*************************************************************************************************/
/**************************** FRIEND FUNCTIONS FOR PARALLEL CLASSES ******************************/
/*************************************************************************************************/
/**
** Concatenate all the FullyDistVec<IT,NT> objects into a single one
**/
template <typename IT, typename NT>
FullyDistVec<IT,NT> Concatenate ( std::vector< FullyDistVec<IT,NT> > & vecs)
{
if(vecs.size() < 1)
{
SpParHelper::Print("Warning: Nothing to concatenate, returning empty ");
return FullyDistVec<IT,NT>();
}
else if (vecs.size() < 2)
{
return vecs[0];	// a single input: return it directly (vecs[1] would be out of bounds here)
}
else
{
typename std::vector< FullyDistVec<IT,NT> >::iterator it = vecs.begin();
std::shared_ptr<CommGrid> commGridPtr = it->getcommgrid();
MPI_Comm World = commGridPtr->GetWorld();
IT nglen = it->TotalLength(); // new global length
IT cumloclen = it->MyLocLength(); // existing cumulative local lengths
++it;
for(; it != vecs.end(); ++it)
{
if(*(commGridPtr) != *(it->getcommgrid()))
{
SpParHelper::Print("Grids are not comparable for FullyDistVec<IT,NT> Concatenate\n");
MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
}
nglen += it->TotalLength();
cumloclen += it->MyLocLength();
}
FullyDistVec<IT,NT> ConCat (commGridPtr, nglen, NT());
int nprocs = commGridPtr->GetSize();
std::vector< std::vector< NT > > data(nprocs);
std::vector< std::vector< IT > > inds(nprocs);
IT gloffset = 0;
for(it = vecs.begin(); it != vecs.end(); ++it)
{
IT loclen = it->LocArrSize();
for(IT i=0; i < loclen; ++i)
{
IT locind;
IT loffset = it->LengthUntil();
int owner = ConCat.Owner(gloffset+loffset+i, locind);
data[owner].push_back(it->arr[i]);
inds[owner].push_back(locind);
}
gloffset += it->TotalLength();
}
int * sendcnt = new int[nprocs];
int * sdispls = new int[nprocs];
for(int i=0; i<nprocs; ++i)
sendcnt[i] = (int) data[i].size();
int * rdispls = new int[nprocs];
int * recvcnt = new int[nprocs];
MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, World); // share the request counts
sdispls[0] = 0;
rdispls[0] = 0;
for(int i=0; i<nprocs-1; ++i)
{
sdispls[i+1] = sdispls[i] + sendcnt[i];
rdispls[i+1] = rdispls[i] + recvcnt[i];
}
IT totrecv = std::accumulate(recvcnt,recvcnt+nprocs,static_cast<IT>(0));
NT * senddatabuf = new NT[cumloclen];
for(int i=0; i<nprocs; ++i)
{
std::copy(data[i].begin(), data[i].end(), senddatabuf+sdispls[i]);
std::vector<NT>().swap(data[i]); // delete data vectors
}
NT * recvdatabuf = new NT[totrecv];
MPI_Alltoallv(senddatabuf, sendcnt, sdispls, MPIType<NT>(), recvdatabuf, recvcnt, rdispls, MPIType<NT>(), World); // send data
delete [] senddatabuf;
IT * sendindsbuf = new IT[cumloclen];
for(int i=0; i<nprocs; ++i)
{
std::copy(inds[i].begin(), inds[i].end(), sendindsbuf+sdispls[i]);
std::vector<IT>().swap(inds[i]); // delete inds vectors
}
IT * recvindsbuf = new IT[totrecv];
MPI_Alltoallv(sendindsbuf, sendcnt, sdispls, MPIType<IT>(), recvindsbuf, recvcnt, rdispls, MPIType<IT>(), World); // send new inds
DeleteAll(sendindsbuf, sendcnt, sdispls);
for(int i=0; i<nprocs; ++i)
{
for(int j = rdispls[i]; j < rdispls[i] + recvcnt[i]; ++j)
{
ConCat.arr[recvindsbuf[j]] = recvdatabuf[j];
}
}
DeleteAll(recvindsbuf, recvcnt, rdispls);
return ConCat;
}
}
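/*
 * A brief usage sketch (illustrative only): concatenating two distributed vectors
 * of global lengths 5 and 3 yields one FullyDistVec of global length 8 whose first
 * five entries come from the first vector. `grid` is an existing CommGrid.
 */
inline FullyDistVec<int64_t,double> ConcatenateExample(std::shared_ptr<CommGrid> grid)
{
	std::vector< FullyDistVec<int64_t,double> > pieces;
	pieces.push_back(FullyDistVec<int64_t,double>(grid, 5, 1.0));
	pieces.push_back(FullyDistVec<int64_t,double>(grid, 3, 2.0));
	return Concatenate(pieces);	// TotalLength() == 8
}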
template <typename MATRIXA, typename MATRIXB>
bool CheckSpGEMMCompliance(const MATRIXA & A, const MATRIXB & B)
{
if(A.getncol() != B.getnrow())
{
std::ostringstream outs;
outs << "Can not multiply, dimensions do not match"<< std::endl;
outs << A.getncol() << " != " << B.getnrow() << std::endl;
SpParHelper::Print(outs.str());
MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
return false;
}
if((void*) &A == (void*) &B)
{
std::ostringstream outs;
outs << "Can not multiply, inputs alias (make a temporary copy of one of them first)"<< std::endl;
SpParHelper::Print(outs.str());
MPI_Abort(MPI_COMM_WORLD, MATRIXALIAS);
return false;
}
return true;
}
// Combined logic for prune, recovery, and select
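/*
 * In outline (as implemented below), for every column of A:
 *   1. entries <= hardThreshold are pruned;
 *   2. recovery: if the pruned column has fewer than recoverNum nonzeros and its
 *      sum is below recoverPct, the per-column threshold is lowered (via Kselect)
 *      so that the top-recoverNum entries of the original column survive;
 *   3. selection: remaining columns with more than selectNum nonzeros keep only
 *      their top-selectNum entries; if that leaves a column too sparse or too
 *      light, recovery is attempted once more;
 *   4. a final PruneColumn applies the accumulated per-column thresholds in place.
 */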
template <typename IT, typename NT, typename DER>
void MCLPruneRecoverySelect(SpParMat<IT,NT,DER> & A, NT hardThreshold, IT selectNum, IT recoverNum, NT recoverPct, int kselectVersion)
{
int myrank;
MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
#ifdef TIMING
double t0, t1;
#endif
// Prune and create a new pruned matrix
SpParMat<IT,NT,DER> PrunedA = A.Prune(std::bind2nd(std::less_equal<NT>(), hardThreshold), false);
// column-wise statistics of the pruned matrix
FullyDistVec<IT,NT> colSums = PrunedA.Reduce(Column, std::plus<NT>(), 0.0);
FullyDistVec<IT,NT> nnzPerColumnUnpruned = A.Reduce(Column, std::plus<NT>(), 0.0, [](NT val){return 1.0;});
FullyDistVec<IT,NT> nnzPerColumn = PrunedA.Reduce(Column, std::plus<NT>(), 0.0, [](NT val){return 1.0;});
//FullyDistVec<IT,NT> pruneCols(A.getcommgrid(), A.getncol(), hardThreshold);
FullyDistVec<IT,NT> pruneCols(nnzPerColumn);
pruneCols = hardThreshold;
PrunedA.FreeMemory();
FullyDistSpVec<IT,NT> recoverCols(nnzPerColumn, std::bind2nd(std::less<NT>(), recoverNum));
// recover only when the unpruned column has more nonzeros than the pruned column
recoverCols = EWiseApply<NT>(recoverCols, nnzPerColumnUnpruned,
[](NT spval, NT dval){return spval;},
[](NT spval, NT dval){return dval > spval;},
false, NT());
recoverCols = recoverPct;
// columns with nnz < r AND sum < recoverPct (pct)
recoverCols = EWiseApply<NT>(recoverCols, colSums,
[](NT spval, NT dval){return spval;},
[](NT spval, NT dval){return dval < spval;},
false, NT());
IT nrecover = recoverCols.getnnz();
if(nrecover > 0)
{
#ifdef TIMING
t0=MPI_Wtime();
#endif
A.Kselect(recoverCols, recoverNum, kselectVersion);
#ifdef TIMING
t1=MPI_Wtime();
mcl_kselecttime += (t1-t0);
#endif
pruneCols.Set(recoverCols);
#ifdef COMBBLAS_DEBUG
std::ostringstream outs;
outs << "Number of columns needing recovery: " << nrecover << std::endl;
SpParHelper::Print(outs.str());
#endif
}
if(selectNum>0)
{
// remaining columns will be up for selection
FullyDistSpVec<IT,NT> selectCols = EWiseApply<NT>(recoverCols, colSums,
[](NT spval, NT dval){return spval;},
[](NT spval, NT dval){return spval==-1;},
true, static_cast<NT>(-1));
selectCols = selectNum;
selectCols = EWiseApply<NT>(selectCols, nnzPerColumn,
[](NT spval, NT dval){return spval;},
[](NT spval, NT dval){return dval > spval;},
false, NT());
IT nselect = selectCols.getnnz();
if(nselect > 0 )
{
#ifdef TIMING
t0=MPI_Wtime();
#endif
A.Kselect(selectCols, selectNum, kselectVersion); // PrunedA would also work
#ifdef TIMING
t1=MPI_Wtime();
mcl_kselecttime += (t1-t0);
#endif
pruneCols.Set(selectCols);
#ifdef COMBBLAS_DEBUG
std::ostringstream outs;
outs << "Number of columns needing selection: " << nselect << std::endl;
SpParHelper::Print(outs.str());
#endif
#ifdef TIMING
t0=MPI_Wtime();
#endif
SpParMat<IT,NT,DER> selectedA = A.PruneColumn(pruneCols, std::less<NT>(), false);
#ifdef TIMING
t1=MPI_Wtime();
mcl_prunecolumntime += (t1-t0);
#endif
if(recoverNum>0 ) // recovery can be attempted after selection
{
FullyDistVec<IT,NT> nnzPerColumn1 = selectedA.Reduce(Column, std::plus<NT>(), 0.0, [](NT val){return 1.0;});
FullyDistVec<IT,NT> colSums1 = selectedA.Reduce(Column, std::plus<NT>(), 0.0);
selectedA.FreeMemory();
// selected columns with nnz < recoverNum (r)
selectCols = recoverNum;
selectCols = EWiseApply<NT>(selectCols, nnzPerColumn1,
[](NT spval, NT dval){return spval;},
[](NT spval, NT dval){return dval < spval;},
false, NT());
// selected columns with sum < recoverPct (pct)
selectCols = recoverPct;
selectCols = EWiseApply<NT>(selectCols, colSums1,
[](NT spval, NT dval){return spval;},
[](NT spval, NT dval){return dval < spval;},
false, NT());
IT n_recovery_after_select = selectCols.getnnz();
if(n_recovery_after_select>0)
{
// mclExpandVector2 does it on the original vector
// mclExpandVector1 does it on the pruned vector
#ifdef TIMING
t0=MPI_Wtime();
#endif
A.Kselect(selectCols, recoverNum, kselectVersion); // Kselect on PrunedA might give different result
#ifdef TIMING
t1=MPI_Wtime();
mcl_kselecttime += (t1-t0);
#endif
pruneCols.Set(selectCols);
#ifdef COMBBLAS_DEBUG
std::ostringstream outs1;
outs1 << "Number of columns needing recovery after selection: " << n_recovery_after_select << std::endl;
SpParHelper::Print(outs1.str());
#endif
}
}
}
}
// final prune
#ifdef TIMING
t0=MPI_Wtime();
#endif
A.PruneColumn(pruneCols, std::less<NT>(), true);
#ifdef TIMING
t1=MPI_Wtime();
mcl_prunecolumntime += (t1-t0);
#endif
// Add loops for empty columns
if(recoverNum<=0 ) // if recoverNum>0, recovery would have added nonzeros in empty columns
{
FullyDistVec<IT,NT> nnzPerColumnA = A.Reduce(Column, std::plus<NT>(), 0.0, [](NT val){return 1.0;});
FullyDistSpVec<IT,NT> emptyColumns(nnzPerColumnA, std::bind2nd(std::equal_to<NT>(), 0.0));
emptyColumns = 1.00;
//Ariful: We need a selective AddLoops function with a sparse vector
//A.AddLoops(emptyColumns);
}
}
template <typename SR, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
IU EstimateFLOP
(SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool clearA = false, bool clearB = false)
{
int myrank;
MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication
std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy);
IU C_m = A.spSeq->getnrow();
IU C_n = B.spSeq->getncol();
//const_cast< UDERB* >(B.spSeq)->Transpose(); // do not transpose for column-by-column multiplication
IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, stages);
IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages);
SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld());
SpParHelper::GetSetSizes( *(B.spSeq), BRecvSizes, (B.commGrid)->GetColWorld());
// Remotely fetched matrices are stored as pointers
UDERA * ARecv;
UDERB * BRecv;
IU local_flops = 0;
int Aself = (A.commGrid)->GetRankInProcRow();
int Bself = (B.commGrid)->GetRankInProcCol();
for(int i = 0; i < stages; ++i)
{
std::vector<IU> ess;
if(i == Aself)
{
ARecv = A.spSeq; // shallow-copy
}
else
{
ess.resize(UDERA::esscount);
for(int j=0; j< UDERA::esscount; ++j)
{
ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row
}
ARecv = new UDERA(); // first, create the object
}
SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i); // then, receive its elements
ess.clear();
if(i == Bself)
{
BRecv = B.spSeq; // shallow-copy
}
else
{
ess.resize(UDERB::esscount);
for(int j=0; j< UDERB::esscount; ++j)
{
ess[j] = BRecvSizes[j][i];
}
BRecv = new UDERB();
}
SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i); // then, receive its elements
local_flops += EstimateLocalFLOP<SR>
(*ARecv, *BRecv, // parameters themselves
i != Aself, // 'delete A' condition
i != Bself); // 'delete B' condition
}
if(clearA && A.spSeq != NULL) {
delete A.spSeq;
A.spSeq = NULL;
}
if(clearB && B.spSeq != NULL) {
delete B.spSeq;
B.spSeq = NULL;
}
SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);
//if(!clearB)
// const_cast< UDERB* >(B.spSeq)->Transpose(); // transpose back to original
IU global_flops = 0;
MPI_Allreduce(&local_flops, &global_flops, 1, MPI_LONG_LONG_INT, MPI_SUM, A.getcommgrid()->GetWorld());
return global_flops;
}
/**
* Broadcasts A multiple times (#phases) in order to save storage in the output
* Only uses 1/phases of C memory if the threshold/max limits are proper
*/
template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat<IU,NUO,UDERO> MemEfficientSpGEMM (SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B,
int phases, NUO hardThreshold, IU selectNum, IU recoverNum, NUO recoverPct, int kselectVersion, int64_t perProcessMemory)
{
typedef typename UDERA::LocalIT LIA;
typedef typename UDERB::LocalIT LIB;
typedef typename UDERO::LocalIT LIC;
int myrank;
MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
if(A.getncol() != B.getnrow())
{
std::ostringstream outs;
outs << "Can not multiply, dimensions do not match"<< std::endl;
outs << A.getncol() << " != " << B.getnrow() << std::endl;
SpParHelper::Print(outs.str());
MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
return SpParMat< IU,NUO,UDERO >();
}
if(phases <1 || phases >= A.getncol())
{
SpParHelper::Print("MemEfficientSpGEMM: The value of phases is too small or large. Resetting to 1.\n");
phases = 1;
}
int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication
std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy);
double t0, t1, t2, t3, t4, t5;
#ifdef TIMING
MPI_Barrier(A.getcommgrid()->GetWorld());
t0 = MPI_Wtime();
#endif
if(perProcessMemory>0) // estimate the number of phases permitted by memory
{
int p;
MPI_Comm World = GridC->GetWorld();
MPI_Comm_size(World,&p);
int64_t perNNZMem_in = sizeof(IU)*2 + sizeof(NU1);
int64_t perNNZMem_out = sizeof(IU)*2 + sizeof(NUO);
// max nnz(A) in a process
int64_t lannz = A.getlocalnnz();
int64_t gannz;
MPI_Allreduce(&lannz, &gannz, 1, MPIType<int64_t>(), MPI_MAX, World);
int64_t inputMem = gannz * perNNZMem_in * 4; // for four copies (two for SUMMA)
// max nnz(A^2) stored by SUMMA in a process
int64_t asquareNNZ = EstPerProcessNnzSUMMA(A,B, false);
int64_t asquareMem = asquareNNZ * perNNZMem_out * 2; // an extra copy in multiway merge and in selection/recovery step
// estimate kselect memory
int64_t d = ceil( (asquareNNZ * sqrt(p))/ B.getlocalcols() ); // average nnz per column in A^2 (it is an overestimate because asquareNNZ is estimated based on unmerged matrices)
// this is equivalent to (asquareNNZ * p) / B.getcol()
int64_t k = std::min(int64_t(std::max(selectNum, recoverNum)), d );
int64_t kselectmem = B.getlocalcols() * k * 8 * 3;
// estimate output memory
int64_t outputNNZ = (B.getlocalcols() * k)/sqrt(p);
int64_t outputMem = outputNNZ * perNNZMem_in * 2;
//inputMem + outputMem + asquareMem/phases + kselectmem/phases < memory
int64_t remainingMem = perProcessMemory*1000000000 - inputMem - outputMem;
if(remainingMem > 0)
{
phases = 1 + (asquareMem+kselectmem) / remainingMem;
}
if(myrank==0)
{
if(remainingMem < 0)
{
std::cout << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n Warning: input and output memory requirements are greater than per-process available memory. Keeping phases at the value supplied at the command line. The program may run out of memory and crash! \n !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" << std::endl;
}
#ifdef SHOW_MEMORY_USAGE
int64_t maxMemory = kselectmem/phases + inputMem + outputMem + asquareMem / phases;
if(maxMemory>1000000000)
std::cout << "phases: " << phases << ": per process memory: " << perProcessMemory << " GB asquareMem: " << asquareMem/1000000000.00 << " GB" << " inputMem: " << inputMem/1000000000.00 << " GB" << " outputMem: " << outputMem/1000000000.00 << " GB" << " kselectmem: " << kselectmem/1000000000.00 << " GB" << std::endl;
else
std::cout << "phases: " << phases << ": per process memory: " << perProcessMemory << " GB asquareMem: " << asquareMem/1000000.00 << " MB" << " inputMem: " << inputMem/1000000.00 << " MB" << " outputMem: " << outputMem/1000000.00 << " MB" << " kselectmem: " << kselectmem/1000000.00 << " MB" << std::endl;
#endif
}
}
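/*
 * Worked example of the phase estimate above (illustrative numbers only): with
 * perProcessMemory = 4 (GB), inputMem = 1 GB and outputMem = 0.5 GB,
 * remainingMem = 4 - 1 - 0.5 = 2.5 GB; with asquareMem = 6 GB and
 * kselectmem = 1 GB, phases = 1 + (6 + 1) / 2.5 = 1 + 2 = 3 (integer division),
 * i.e. B is split column-wise into three pieces and A is broadcast three times.
 */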
if(myrank == 0){
fprintf(stderr, "[MemEfficientSpGEMM] Running with phase: %d\n", phases);
}
#ifdef TIMING
MPI_Barrier(A.getcommgrid()->GetWorld());
t1 = MPI_Wtime();
mcl_symbolictime += (t1-t0);
#endif
LIA C_m = A.spSeq->getnrow();
LIB C_n = B.spSeq->getncol();
std::vector< UDERB > PiecesOfB;
UDERB CopyB = *(B.spSeq); // we allow alias matrices as input because of this local copy
CopyB.ColSplit(phases, PiecesOfB); // CopyB's memory is destroyed at this point
MPI_Barrier(GridC->GetWorld());
LIA ** ARecvSizes = SpHelper::allocate2D<LIA>(UDERA::esscount, stages);
LIB ** BRecvSizes = SpHelper::allocate2D<LIB>(UDERB::esscount, stages);
static_assert(std::is_same<LIA, LIB>::value, "local index types for both input matrices should be the same");
static_assert(std::is_same<LIA, LIC>::value, "local index types for input and output matrices should be the same");
SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld());
// Remotely fetched matrices are stored as pointers
UDERA * ARecv;
UDERB * BRecv;
std::vector< UDERO > toconcatenate;
int Aself = (A.commGrid)->GetRankInProcRow();
int Bself = (B.commGrid)->GetRankInProcCol();
for(int dbg = 0; dbg < 1; dbg++){
for(int p = 0; p< phases; ++p)
{
SpParHelper::GetSetSizes( PiecesOfB[p], BRecvSizes, (B.commGrid)->GetColWorld());
std::vector< SpTuples<LIC,NUO> *> tomerge;
for(int i = 0; i < stages; ++i)
{
std::vector<LIA> ess;
if(i == Aself) ARecv = A.spSeq; // shallow-copy
else
{
ess.resize(UDERA::esscount);
for(int j=0; j< UDERA::esscount; ++j)
ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row
ARecv = new UDERA(); // first, create the object
}
#ifdef TIMING
MPI_Barrier(A.getcommgrid()->GetWorld());
t0 = MPI_Wtime();
#endif
SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i); // then, receive its elements
#ifdef TIMING
MPI_Barrier(A.getcommgrid()->GetWorld());
t1 = MPI_Wtime();
mcl_Abcasttime += (t1-t0);
#endif
ess.clear();
if(i == Bself) BRecv = &(PiecesOfB[p]); // shallow-copy
else
{
ess.resize(UDERB::esscount);
for(int j=0; j< UDERB::esscount; ++j)
ess[j] = BRecvSizes[j][i];
BRecv = new UDERB();
}
#ifdef TIMING
MPI_Barrier(A.getcommgrid()->GetWorld());
double t2=MPI_Wtime();
#endif
SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i); // then, receive its elements
#ifdef TIMING
MPI_Barrier(A.getcommgrid()->GetWorld());
double t3=MPI_Wtime();
mcl_Bbcasttime += (t3-t2);
#endif
#ifdef TIMING
MPI_Barrier(A.getcommgrid()->GetWorld());
double t4=MPI_Wtime();
#endif
double vm_usage, resident_set;
SpTuples<LIC,NUO> * C_cont = LocalHybridSpGEMM<SR, NUO>(*ARecv, *BRecv,i != Aself, i != Bself);
#ifdef TIMING
MPI_Barrier(A.getcommgrid()->GetWorld());
double t5=MPI_Wtime();
mcl_localspgemmtime += (t5-t4);
#endif
if(!C_cont->isZero())
tomerge.push_back(C_cont);
else
delete C_cont;
} // all stages executed
#ifdef SHOW_MEMORY_USAGE
int64_t gcnnz_unmerged, lcnnz_unmerged = 0;
for(size_t i = 0; i < tomerge.size(); ++i)
{
lcnnz_unmerged += tomerge[i]->getnnz();
}
MPI_Allreduce(&lcnnz_unmerged, &gcnnz_unmerged, 1, MPIType<int64_t>(), MPI_MAX, MPI_COMM_WORLD);
int64_t summa_memory = gcnnz_unmerged*20;//(gannz*2 + phase_nnz + gcnnz_unmerged + gannz + gannz/phases) * 20; // last two for broadcasts
if(myrank==0)
{
if(summa_memory>1000000000)
std::cout << p+1 << ". unmerged: " << summa_memory/1000000000.00 << "GB " ;
else
std::cout << p+1 << ". unmerged: " << summa_memory/1000000.00 << " MB " ;
}
#endif
#ifdef TIMING
MPI_Barrier(A.getcommgrid()->GetWorld());
double t6=MPI_Wtime();
#endif
//UDERO OnePieceOfC(MergeAll<SR>(tomerge, C_m, PiecesOfB[p].getncol(),true), false);
// TODO: MultiwayMerge can directly return UDERO in order to avoid the extra copy
SpTuples<LIC,NUO> * OnePieceOfC_tuples = MultiwayMerge<SR>(tomerge, C_m, PiecesOfB[p].getncol(),true);
#ifdef SHOW_MEMORY_USAGE
int64_t gcnnz_merged, lcnnz_merged ;
lcnnz_merged = OnePieceOfC_tuples->getnnz();
MPI_Allreduce(&lcnnz_merged, &gcnnz_merged, 1, MPIType<int64_t>(), MPI_MAX, MPI_COMM_WORLD);
// TODO: we can remove gcnnz_merged memory here because we don't need to concatenate anymore
int64_t merge_memory = gcnnz_merged*2*20;//(gannz*2 + phase_nnz + gcnnz_unmerged + gcnnz_merged*2) * 20;
if(myrank==0)
{
if(merge_memory>1000000000)
std::cout << " merged: " << merge_memory/1000000000.00 << "GB " ;
else
std::cout << " merged: " << merge_memory/1000000.00 << " MB " ;
}
#endif
#ifdef TIMING
MPI_Barrier(A.getcommgrid()->GetWorld());
double t7=MPI_Wtime();
mcl_multiwaymergetime += (t7-t6);
#endif
UDERO * OnePieceOfC = new UDERO(* OnePieceOfC_tuples, false);
delete OnePieceOfC_tuples;
SpParMat<IU,NUO,UDERO> OnePieceOfC_mat(OnePieceOfC, GridC);
MCLPruneRecoverySelect(OnePieceOfC_mat, hardThreshold, selectNum, recoverNum, recoverPct, kselectVersion);
//mcl_nnzc += OnePieceOfC_mat.getnnz();
#ifdef SHOW_MEMORY_USAGE
int64_t gcnnz_pruned, lcnnz_pruned ;
lcnnz_pruned = OnePieceOfC_mat.getlocalnnz();
MPI_Allreduce(&lcnnz_pruned, &gcnnz_pruned, 1, MPIType<int64_t>(), MPI_MAX, MPI_COMM_WORLD);
// TODO: we can remove gcnnz_merged memory here because we don't need to concatenate anymore
int64_t prune_memory = gcnnz_pruned*2*20;//(gannz*2 + phase_nnz + gcnnz_pruned*2) * 20 + kselectmem; // 3 extra copies of OnePieceOfC_mat, we can make it one extra copy!
//phase_nnz += gcnnz_pruned;
if(myrank==0)
{
if(prune_memory>1000000000)
std::cout << "Prune: " << prune_memory/1000000000.00 << "GB " << std::endl ;
else
std::cout << "Prune: " << prune_memory/1000000.00 << " MB " << std::endl ;
}
#endif
// ABAB: Change this to accept pointers to objects
if(dbg == 0) {
toconcatenate.push_back(OnePieceOfC_mat.seq());
}
}
//double vm_usage, resident_set;
//process_mem_usage(vm_usage, resident_set);
//if(myrank == 0) fprintf(stderr, "VmSize after %dth all phase: %lf %lf\n", dbg+1, vm_usage, resident_set);
}
UDERO * C = new UDERO(0,C_m, C_n,0);
C->ColConcatenate(toconcatenate); // ABAB: Change this to accept a vector of pointers to pointers to DER objects
SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);
return SpParMat<IU,NUO,UDERO> (C, GridC);
}
template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
int CalculateNumberOfPhases (SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B,
NUO hardThreshold, IU selectNum, IU recoverNum, NUO recoverPct, int kselectVersion, int64_t perProcessMemory){
int phases;
typedef typename UDERA::LocalIT LIA;
typedef typename UDERB::LocalIT LIB;
typedef typename UDERO::LocalIT LIC;
int myrank;
MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication
std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy);
double t0, t1, t2, t3, t4, t5;
int p;
MPI_Comm World = GridC->GetWorld();
MPI_Comm_size(World,&p);
int64_t perNNZMem_in = sizeof(IU)*2 + sizeof(NU1);
int64_t perNNZMem_out = sizeof(IU)*2 + sizeof(NUO);
// max nnz(A) in a process
int64_t lannz = A.getlocalnnz();
int64_t gannz;
MPI_Allreduce(&lannz, &gannz, 1, MPIType<int64_t>(), MPI_MAX, World);
int64_t inputMem = gannz * perNNZMem_in * 4; // for four copies (two for SUMMA)
// max nnz(A^2) stored by SUMMA in a process
int64_t asquareNNZ = EstPerProcessNnzSUMMA(A,B, false);
int64_t asquareMem = asquareNNZ * perNNZMem_out * 2; // an extra copy in multiway merge and in selection/recovery step
// estimate kselect memory
int64_t d = ceil( (asquareNNZ * sqrt(p))/ B.getlocalcols() ); // average nnz per column in A^2 (it is an overestimate because asquareNNZ is estimated based on unmerged matrices)
// this is equivalent to (asquareNNZ * p) / B.getcol()
int64_t k = std::min(int64_t(std::max(selectNum, recoverNum)), d );
int64_t kselectmem = B.getlocalcols() * k * 8 * 3;
// estimate output memory
int64_t outputNNZ = (B.getlocalcols() * d)/sqrt(p);
//int64_t outputNNZ = (B.getlocalcols() * k)/sqrt(p); // if kselect is used
int64_t outputMem = outputNNZ * perNNZMem_in * 2;
//inputMem + outputMem + asquareMem/phases + kselectmem/phases < memory
//int64_t remainingMem = perProcessMemory*1000000000 - inputMem - outputMem;
int64_t remainingMem = perProcessMemory*1000000000 - inputMem; // if each phase result is discarded
//if(remainingMem > 0)
//{
//phases = 1 + (asquareMem+kselectmem) / remainingMem;
//}
phases = 1 + asquareMem / remainingMem;
return phases;
}
/**
* Parallel C = A*B routine that uses a double buffered broadcasting scheme
* @pre { Input matrices, A and B, should not alias }
* Most memory efficient version available. Total stages: 2*sqrt(p)
* Memory requirement during first sqrt(p) stages: <= (3/2)*(nnz(A)+nnz(B))+(1/2)*nnz(C)
* Memory requirement during second sqrt(p) stages: <= nnz(A)+nnz(B)+nnz(C)
* Final memory requirement: nnz(C) if clearA and clearB are true
**/
template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat<IU,NUO,UDERO> Mult_AnXBn_DoubleBuff
(SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool clearA = false, bool clearB = false )
{
if(!CheckSpGEMMCompliance(A,B) )
{
return SpParMat< IU,NUO,UDERO >();
}
typedef typename UDERA::LocalIT LIA;
typedef typename UDERB::LocalIT LIB;
typedef typename UDERO::LocalIT LIC;
static_assert(std::is_same<LIA, LIB>::value, "local index types for both input matrices should be the same");
static_assert(std::is_same<LIA, LIC>::value, "local index types for input and output matrices should be the same");
int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication
std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy);
LIA C_m = A.spSeq->getnrow();
LIB C_n = B.spSeq->getncol();
UDERA * A1seq = new UDERA();
UDERA * A2seq = new UDERA();
UDERB * B1seq = new UDERB();
UDERB * B2seq = new UDERB();
(A.spSeq)->Split( *A1seq, *A2seq);
const_cast< UDERB* >(B.spSeq)->Transpose();
(B.spSeq)->Split( *B1seq, *B2seq);
// Transpose back for the column-by-column algorithm
const_cast< UDERB* >(B1seq)->Transpose();
const_cast< UDERB* >(B2seq)->Transpose();
LIA ** ARecvSizes = SpHelper::allocate2D<LIA>(UDERA::esscount, stages);
LIB ** BRecvSizes = SpHelper::allocate2D<LIB>(UDERB::esscount, stages);
SpParHelper::GetSetSizes( *A1seq, ARecvSizes, (A.commGrid)->GetRowWorld());
SpParHelper::GetSetSizes( *B1seq, BRecvSizes, (B.commGrid)->GetColWorld());
// Remotely fetched matrices are stored as pointers
UDERA * ARecv;
UDERB * BRecv;
std::vector< SpTuples<LIC,NUO> *> tomerge;
int Aself = (A.commGrid)->GetRankInProcRow();
int Bself = (B.commGrid)->GetRankInProcCol();
for(int i = 0; i < stages; ++i)
{
std::vector<LIA> ess;
if(i == Aself)
{
ARecv = A1seq; // shallow-copy
}
else
{
ess.resize(UDERA::esscount);
for(int j=0; j< UDERA::esscount; ++j)
{
ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row
}
ARecv = new UDERA(); // first, create the object
}
SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i); // then, receive its elements
ess.clear();
if(i == Bself)
{
BRecv = B1seq; // shallow-copy
}
else
{
ess.resize(UDERB::esscount);
for(int j=0; j< UDERB::esscount; ++j)
{
ess[j] = BRecvSizes[j][i];
}
BRecv = new UDERB();
}
SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i); // then, receive its elements
// before activating this remove transposing B1seq
/*
SpTuples<LIC,NUO> * C_cont = MultiplyReturnTuples<SR, NUO>
(*ARecv, *BRecv, // parameters themselves
false, true, // transpose information (B is transposed)
i != Aself, // 'delete A' condition
i != Bself); // 'delete B' condition
*/
SpTuples<LIC,NUO> * C_cont = LocalHybridSpGEMM<SR, NUO>
(*ARecv, *BRecv, // parameters themselves
i != Aself, // 'delete A' condition
i != Bself); // 'delete B' condition
if(!C_cont->isZero())
tomerge.push_back(C_cont);
else
delete C_cont;
}
if(clearA) delete A1seq;
if(clearB) delete B1seq;
// Set the new dimensions
SpParHelper::GetSetSizes( *A2seq, ARecvSizes, (A.commGrid)->GetRowWorld());
SpParHelper::GetSetSizes( *B2seq, BRecvSizes, (B.commGrid)->GetColWorld());
// Start the second round
for(int i = 0; i < stages; ++i)
{
std::vector<LIA> ess;
if(i == Aself)
{
ARecv = A2seq; // shallow-copy
}
else
{
ess.resize(UDERA::esscount);
for(int j=0; j< UDERA::esscount; ++j)
{
ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row
}
ARecv = new UDERA(); // first, create the object
}
SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i); // then, receive its elements
ess.clear();
if(i == Bself)
{
BRecv = B2seq; // shallow-copy
}
else
{
ess.resize(UDERB::esscount);
for(int j=0; j< UDERB::esscount; ++j)
{
ess[j] = BRecvSizes[j][i];
}
BRecv = new UDERB();
}
SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i); // then, receive its elements
// Before re-enabling the commented-out call below, remove the transposition of B2seq above
/*
SpTuples<LIC,NUO> * C_cont = MultiplyReturnTuples<SR, NUO>
(*ARecv, *BRecv, // parameters themselves
false, true, // transpose information (B is transposed)
i != Aself, // 'delete A' condition
i != Bself); // 'delete B' condition
*/
SpTuples<LIC,NUO> * C_cont = LocalHybridSpGEMM<SR, NUO>
(*ARecv, *BRecv, // parameters themselves
i != Aself, // 'delete A' condition
i != Bself); // 'delete B' condition
if(!C_cont->isZero())
tomerge.push_back(C_cont);
else
delete C_cont;
}
SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);
if(clearA)
{
delete A2seq;
delete A.spSeq;
A.spSeq = NULL;
}
else
{
(A.spSeq)->Merge(*A1seq, *A2seq);
delete A1seq;
delete A2seq;
}
if(clearB)
{
delete B2seq;
delete B.spSeq;
B.spSeq = NULL;
}
else
{
B1seq->Transpose();
B2seq->Transpose();
(B.spSeq)->Merge(*B1seq, *B2seq);
delete B1seq;
delete B2seq;
const_cast< UDERB* >(B.spSeq)->Transpose(); // transpose back to original
}
UDERO * C = new UDERO(MergeAll<SR>(tomerge, C_m, C_n,true), false);
return SpParMat<IU,NUO,UDERO> (C, GridC); // return the result object
}
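/*
 * Usage sketch (illustration only, not part of the library): a minimal call to
 * Mult_AnXBn_DoubleBuff, assuming the usual CombBLAS SpDCCols storage and the
 * PlusTimesSRing semiring are available; the typedef names below are ours.
 *
 *   typedef SpDCCols<int64_t, double> DER;
 *   typedef SpParMat<int64_t, double, DER> PSpMat;
 *   // A and B must be distributed over the same (square) process grid
 *   PSpMat C = Mult_AnXBn_DoubleBuff<PlusTimesSRing<double, double>, double, DER>(A, B);
 *
 * Compared to Mult_AnXBn_Synch below, this variant splits each local matrix in
 * two and runs two SUMMA rounds, roughly halving the peak memory of the
 * broadcast buffers at the cost of twice as many (smaller) broadcasts.
 */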
void process_mem_usage(double& vm_usage, double& resident_set)
{
using std::ios_base;
using std::ifstream;
using std::string;
vm_usage = 0.0;
resident_set = 0.0;
// 'file' stat seems to give the most reliable results
//
ifstream stat_stream("/proc/self/stat",ios_base::in);
// dummy vars for leading entries in stat that we don't care about
//
string pid, comm, state, ppid, pgrp, session, tty_nr;
string tpgid, flags, minflt, cminflt, majflt, cmajflt;
string utime, stime, cutime, cstime, priority, nice;
string O, itrealvalue, starttime;
// the two fields we want
//
unsigned long vsize;
long rss;
stat_stream >> pid >> comm >> state >> ppid >> pgrp >> session >> tty_nr
>> tpgid >> flags >> minflt >> cminflt >> majflt >> cmajflt
>> utime >> stime >> cutime >> cstime >> priority >> nice
>> O >> itrealvalue >> starttime >> vsize >> rss; // don't care about the rest
stat_stream.close();
long page_size_kb = sysconf(_SC_PAGE_SIZE) / 1024; // in case x86-64 is configured to use 2MB pages
//vm_usage = vsize / (1024.0 * 1024 * 1024);
//resident_set = rss * page_size_kb/(1024 * 1024.0);
vm_usage = vsize / (1024.0); // virtual memory size in KB (vsize from /proc is in bytes)
resident_set = rss * page_size_kb; // resident set size in KB (rss from /proc is in pages)
double max_vm_usage;
double max_resident_set;
MPI_Allreduce(&vm_usage, &max_vm_usage, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
MPI_Allreduce(&resident_set, &max_resident_set, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
vm_usage = max_vm_usage;
resident_set = max_resident_set;
}
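/*
 * Usage sketch (illustration only): process_mem_usage is Linux-only (it parses
 * /proc/self/stat) and reports the *maximum* virtual-memory and resident-set
 * sizes over all ranks in MPI_COMM_WORLD, both in KB, so every rank receives
 * the same numbers. Printing on rank 0 is an arbitrary choice for the example.
 *
 *   double vm_kb, rss_kb;
 *   process_mem_usage(vm_kb, rss_kb);
 *   int myrank; MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
 *   if(myrank == 0)
 *       std::cout << "Peak RSS across ranks: " << rss_kb / 1024.0 << " MB" << std::endl;
 */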
/**
* Parallel C = A*B routine that uses only MPI-1 features
* Relies on a simple blocking broadcast at each SUMMA stage
* @pre { Input matrices, A and B, should not alias }
**/
template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat<IU, NUO, UDERO> Mult_AnXBn_Synch
(SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool clearA = false, bool clearB = false )
{
int myrank;
MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
if(!CheckSpGEMMCompliance(A,B) )
{
return SpParMat< IU,NUO,UDERO >();
}
int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication
std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy);
IU C_m = A.spSeq->getnrow();
IU C_n = B.spSeq->getncol();
//const_cast< UDERB* >(B.spSeq)->Transpose(); // do not transpose for column-by-column multiplication
IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, stages);
IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages);
SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld());
SpParHelper::GetSetSizes( *(B.spSeq), BRecvSizes, (B.commGrid)->GetColWorld());
// Remotely fetched matrices are stored as pointers
UDERA * ARecv;
UDERB * BRecv;
std::vector< SpTuples<IU,NUO> *> tomerge;
int Aself = (A.commGrid)->GetRankInProcRow();
int Bself = (B.commGrid)->GetRankInProcCol();
double Abcast_time = 0;
double Bbcast_time = 0;
double Local_multiplication_time = 0;
for(int i = 0; i < stages; ++i)
{
std::vector<IU> ess;
if(i == Aself)
{
ARecv = A.spSeq; // shallow-copy
}
else
{
ess.resize(UDERA::esscount);
for(int j=0; j< UDERA::esscount; ++j)
{
ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row
}
ARecv = new UDERA(); // first, create the object
}
#ifdef TIMING
MPI_Barrier(A.getcommgrid()->GetWorld());
double t0 = MPI_Wtime();
#endif
SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i); // then, receive its elements
#ifdef TIMING
MPI_Barrier(A.getcommgrid()->GetWorld());
double t1 = MPI_Wtime();
mcl3d_Abcasttime += (t1-t0);
Abcast_time += (t1-t0);
#endif
ess.clear();
if(i == Bself)
{
BRecv = B.spSeq; // shallow-copy
}
else
{
ess.resize(UDERB::esscount);
for(int j=0; j< UDERB::esscount; ++j)
{
ess[j] = BRecvSizes[j][i];
}
BRecv = new UDERB();
}
#ifdef TIMING
MPI_Barrier(A.getcommgrid()->GetWorld());
double t2 = MPI_Wtime();
#endif
SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i); // then, receive its elements
#ifdef TIMING
MPI_Barrier(A.getcommgrid()->GetWorld());
double t3 = MPI_Wtime();
mcl3d_Bbcasttime += (t3-t2);
Bbcast_time += (t3-t2);
#endif
#ifdef TIMING
MPI_Barrier(A.getcommgrid()->GetWorld());
double t4 = MPI_Wtime();
#endif
SpTuples<IU,NUO> * C_cont = LocalHybridSpGEMM<SR, NUO>
(*ARecv, *BRecv, // parameters themselves
i != Aself, // 'delete A' condition
i != Bself); // 'delete B' condition
#ifdef TIMING
MPI_Barrier(A.getcommgrid()->GetWorld());
double t5 = MPI_Wtime();
mcl3d_localspgemmtime += (t5-t4);
Local_multiplication_time += (t5-t4);
#endif
if(!C_cont->isZero())
tomerge.push_back(C_cont);
#ifdef COMBBLAS_DEBUG
std::ostringstream outs;
outs << i << "th SUMMA iteration"<< std::endl;
SpParHelper::Print(outs.str());
#endif
}
if(clearA && A.spSeq != NULL)
{
delete A.spSeq;
A.spSeq = NULL;
}
if(clearB && B.spSeq != NULL)
{
delete B.spSeq;
B.spSeq = NULL;
}
SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);
//UDERO * C = new UDERO(MergeAll<SR>(tomerge, C_m, C_n,true), false);
// First get the result in SpTuples, then convert to UDER
// the last parameter to MergeAll deletes tomerge arrays
#ifdef TIMING
MPI_Barrier(A.getcommgrid()->GetWorld());
double t0 = MPI_Wtime();
#endif
SpTuples<IU,NUO> * C_tuples = MultiwayMerge<SR>(tomerge, C_m, C_n,true);
#ifdef TIMING
MPI_Barrier(A.getcommgrid()->GetWorld());
double t1 = MPI_Wtime();
mcl3d_SUMMAmergetime += (t1-t0);
#endif
UDERO * C = new UDERO(*C_tuples, false);
delete C_tuples;
//if(!clearB)
// const_cast< UDERB* >(B.spSeq)->Transpose(); // transpose back to original
#ifdef TIMING
if(myrank == 0){
fprintf(stderr, "[Mult_AnXBn_Synch]\t Abcast_time: %lf\n", Abcast_time);
fprintf(stderr, "[Mult_AnXBn_Synch]\t Bbcast_time: %lf\n", Bbcast_time);
fprintf(stderr, "[Mult_AnXBn_Synch]\t Local_multiplication_time: %lf\n", Local_multiplication_time);
fprintf(stderr, "[Mult_AnXBn_Synch]\t SUMMA Merge time: %lf\n", (t1-t0));
}
#endif
return SpParMat<IU,NUO,UDERO> (C, GridC); // return the result object
}
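/*
 * Usage sketch (illustration only), under the same assumed typedefs as in the
 * DoubleBuff example above:
 *
 *   typedef SpDCCols<int64_t, double> DER;
 *   SpParMat<int64_t, double, DER> C =
 *       Mult_AnXBn_Synch<PlusTimesSRing<double, double>, double, DER>(A, B, false, false);
 *
 * Passing clearA/clearB = true lets the routine free the local pieces of the
 * inputs once they are no longer needed, which lowers peak memory when A and B
 * are not reused afterwards.
 */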
template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat<IU, NUO, UDERO> Mult_AnXBn_Overlap
(SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool clearA = false, bool clearB = false )
{
int myrank;
MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
if(!CheckSpGEMMCompliance(A,B) )
{
return SpParMat< IU,NUO,UDERO >();
}
int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication
std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy);
IU C_m = A.spSeq->getnrow();
IU C_n = B.spSeq->getncol();
//const_cast< UDERB* >(B.spSeq)->Transpose(); // do not transpose for column-by-column multiplication
IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, stages);
IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages);
SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld());
SpParHelper::GetSetSizes( *(B.spSeq), BRecvSizes, (B.commGrid)->GetColWorld());
// Remotely fetched matrices are stored as pointers
UDERA ** ARecv = new UDERA* [stages];
UDERB ** BRecv = new UDERB* [stages];
Arr<IU,NU1> Aarrinfo = A.seqptr()->GetArrays();
Arr<IU,NU2> Barrinfo = B.seqptr()->GetArrays();
std::vector< std::vector<MPI_Request> > ABCastIndarrayReq;
std::vector< std::vector<MPI_Request> > ABCastNumarrayReq;
std::vector< std::vector<MPI_Request> > BBCastIndarrayReq;
std::vector< std::vector<MPI_Request> > BBCastNumarrayReq;
for(int i = 0; i < stages; i++){
ABCastIndarrayReq.push_back( std::vector<MPI_Request>(Aarrinfo.indarrs.size(), MPI_REQUEST_NULL) );
ABCastNumarrayReq.push_back( std::vector<MPI_Request>(Aarrinfo.numarrs.size(), MPI_REQUEST_NULL) );
BBCastIndarrayReq.push_back( std::vector<MPI_Request>(Barrinfo.indarrs.size(), MPI_REQUEST_NULL) );
BBCastNumarrayReq.push_back( std::vector<MPI_Request>(Barrinfo.numarrs.size(), MPI_REQUEST_NULL) );
}
int Aself = (A.commGrid)->GetRankInProcRow();
int Bself = (B.commGrid)->GetRankInProcCol();
std::vector< SpTuples<IU,NUO> *> tomerge;
for(int i = 0; i < stages; ++i){
std::vector<IU> ess;
if(i == Aself) ARecv[i] = A.spSeq; // shallow-copy
else{
ess.resize(UDERA::esscount);
for(int j=0; j< UDERA::esscount; ++j) ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row
ARecv[i] = new UDERA(); // first, create the object
}
SpParHelper::IBCastMatrix(GridC->GetRowWorld(), *(ARecv[i]), ess, i, ABCastIndarrayReq[i], ABCastNumarrayReq[i]); // then, receive its elements
ess.clear();
if(i == Bself) BRecv[i] = B.spSeq; // shallow-copy
else{
ess.resize(UDERB::esscount);
for(int j=0; j< UDERB::esscount; ++j) ess[j] = BRecvSizes[j][i];
BRecv[i] = new UDERB();
}
SpParHelper::IBCastMatrix(GridC->GetColWorld(), *(BRecv[i]), ess, i, BBCastIndarrayReq[i], BBCastNumarrayReq[i]); // then, receive its elements
if(i > 0){
MPI_Waitall(ABCastIndarrayReq[i-1].size(), ABCastIndarrayReq[i-1].data(), MPI_STATUSES_IGNORE);
MPI_Waitall(ABCastNumarrayReq[i-1].size(), ABCastNumarrayReq[i-1].data(), MPI_STATUSES_IGNORE);
MPI_Waitall(BBCastIndarrayReq[i-1].size(), BBCastIndarrayReq[i-1].data(), MPI_STATUSES_IGNORE);
MPI_Waitall(BBCastNumarrayReq[i-1].size(), BBCastNumarrayReq[i-1].data(), MPI_STATUSES_IGNORE);
SpTuples<IU,NUO> * C_cont = LocalHybridSpGEMM<SR, NUO>
(*(ARecv[i-1]), *(BRecv[i-1]), // parameters themselves
i-1 != Aself, // 'delete A' condition
i-1 != Bself); // 'delete B' condition
if(!C_cont->isZero()) tomerge.push_back(C_cont);
SpTuples<IU,NUO> * C_tuples = MultiwayMerge<SR>(tomerge, C_m, C_n,true);
std::vector< SpTuples<IU,NUO> *>().swap(tomerge);
tomerge.push_back(C_tuples);
}
#ifdef COMBBLAS_DEBUG
std::ostringstream outs;
outs << i << "th SUMMA iteration"<< std::endl;
SpParHelper::Print(outs.str());
#endif
}
MPI_Waitall(ABCastIndarrayReq[stages-1].size(), ABCastIndarrayReq[stages-1].data(), MPI_STATUSES_IGNORE);
MPI_Waitall(ABCastNumarrayReq[stages-1].size(), ABCastNumarrayReq[stages-1].data(), MPI_STATUSES_IGNORE);
MPI_Waitall(BBCastIndarrayReq[stages-1].size(), BBCastIndarrayReq[stages-1].data(), MPI_STATUSES_IGNORE);
MPI_Waitall(BBCastNumarrayReq[stages-1].size(), BBCastNumarrayReq[stages-1].data(), MPI_STATUSES_IGNORE);
SpTuples<IU,NUO> * C_cont = LocalHybridSpGEMM<SR, NUO>
(*(ARecv[stages-1]), *(BRecv[stages-1]), // parameters themselves
stages-1 != Aself, // 'delete A' condition
stages-1 != Bself); // 'delete B' condition
if(!C_cont->isZero()) tomerge.push_back(C_cont);
if(clearA && A.spSeq != NULL) {
delete A.spSeq;
A.spSeq = NULL;
}
if(clearB && B.spSeq != NULL) {
delete B.spSeq;
B.spSeq = NULL;
}
delete [] ARecv;
delete [] BRecv;
SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);
// the last parameter to MergeAll deletes tomerge arrays
SpTuples<IU,NUO> * C_tuples = MultiwayMerge<SR>(tomerge, C_m, C_n,true);
std::vector< SpTuples<IU,NUO> *>().swap(tomerge);
UDERO * C = new UDERO(*C_tuples, false);
delete C_tuples;
//if(!clearB)
// const_cast< UDERB* >(B.spSeq)->Transpose(); // transpose back to original
return SpParMat<IU,NUO,UDERO> (C, GridC); // return the result object
}
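/*
 * Editorial note: Mult_AnXBn_Overlap has the same interface as Mult_AnXBn_Synch,
 * but it posts the nonblocking broadcasts (IBCastMatrix) of stage i before
 * multiplying the pieces received in stage i-1, so communication of the next
 * stage overlaps with local SpGEMM, and it merges partial results every stage
 * to keep the tomerge list small. A sketch of a call, under the same assumed
 * typedefs as above:
 *
 *   SpParMat<int64_t, double, DER> C =
 *       Mult_AnXBn_Overlap<PlusTimesSRing<double, double>, double, DER>(A, B);
 */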
/**
* Estimate the maximum number of nonzeros any single process needs to store across all SUMMA stages, before the final reduction
* @pre { Input matrices, A and B, should not alias }
**/
template <typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
int64_t EstPerProcessNnzSUMMA(SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool hashEstimate)
{
typedef typename UDERA::LocalIT LIA;
typedef typename UDERB::LocalIT LIB;
static_assert(std::is_same<LIA, LIB>::value, "local index types for both input matrices should be the same");
double t0, t1;
int64_t nnzC_SUMMA = 0;
if(A.getncol() != B.getnrow())
{
std::ostringstream outs;
outs << "Can not multiply, dimensions does not match"<< std::endl;
outs << A.getncol() << " != " << B.getnrow() << std::endl;
SpParHelper::Print(outs.str());
MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
return nnzC_SUMMA;
}
int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication
std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy);
MPI_Barrier(GridC->GetWorld());
LIA ** ARecvSizes = SpHelper::allocate2D<LIA>(UDERA::esscount, stages);
LIB ** BRecvSizes = SpHelper::allocate2D<LIB>(UDERB::esscount, stages);
SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld());
SpParHelper::GetSetSizes( *(B.spSeq), BRecvSizes, (B.commGrid)->GetColWorld());
// Remotely fetched matrices are stored as pointers
UDERA * ARecv;
UDERB * BRecv;
int Aself = (A.commGrid)->GetRankInProcRow();
int Bself = (B.commGrid)->GetRankInProcCol();
for(int i = 0; i < stages; ++i)
{
std::vector<LIA> ess;
if(i == Aself)
{
ARecv = A.spSeq; // shallow-copy
}
else
{
ess.resize(UDERA::esscount);
for(int j=0; j< UDERA::esscount; ++j)
{
ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row
}
ARecv = new UDERA(); // first, create the object
}
#ifdef TIMING
t0 = MPI_Wtime();
#endif
SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i); // then, receive its elements
#ifdef TIMING
t1 = MPI_Wtime();
sym_Abcasttime += t1-t0;
#endif
ess.clear();
if(i == Bself)
{
BRecv = B.spSeq; // shallow-copy
}
else
{
ess.resize(UDERB::esscount);
for(int j=0; j< UDERB::esscount; ++j)
{
ess[j] = BRecvSizes[j][i];
}
BRecv = new UDERB();
}
#ifdef TIMING
MPI_Barrier(GridC->GetWorld());
t0 = MPI_Wtime();
#endif
SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i); // then, receive its elements
#ifdef TIMING
t1 = MPI_Wtime();
sym_Bbcasttime += t1-t0;
#endif
// no need to keep entries of colnnzC in larger precision
// because colnnzC is of length nzc and estimates nnzs per column
// @OGUZ-EDIT Using hash spgemm for estimation
//LIB * colnnzC = estimateNNZ(*ARecv, *BRecv);
#ifdef TIMING
t0 = MPI_Wtime();
#endif
LIB* flopC = estimateFLOP(*ARecv, *BRecv);
#ifdef TIMING
t1 = MPI_Wtime();
sym_estimatefloptime += t1-t0;
#endif
#ifdef TIMING
t0 = MPI_Wtime();
#endif
LIB* colnnzC = estimateNNZ_Hash(*ARecv, *BRecv, flopC);
#ifdef TIMING
t1 = MPI_Wtime();
sym_estimatennztime += t1-t0;
#endif
LIB nzc = BRecv->GetDCSC()->nzc;
int64_t nnzC_stage = 0;
#ifdef TIMING
int64_t stage_proc_flop = 0;
#ifdef THREADED
#pragma omp parallel for reduction (+:stage_proc_flop)
#endif
for (LIB k=0; k<nzc; k++)
{
stage_proc_flop = stage_proc_flop + flopC[k];
}
mcl3d_proc_flop += stage_proc_flop;
#endif
if (flopC) delete [] flopC;
#ifdef TIMING
t0 = MPI_Wtime();
#endif
#ifdef THREADED
#pragma omp parallel for reduction (+:nnzC_stage)
#endif
for (LIB k=0; k<nzc; k++)
{
nnzC_stage = nnzC_stage + colnnzC[k];
}
nnzC_SUMMA += nnzC_stage;
#ifdef TIMING
t1 = MPI_Wtime();
sym_SUMMAnnzreductiontime += t1-t0;
#endif
if(colnnzC) delete [] colnnzC;
// sampling-based estimation (comment out the estimation above and
// uncomment the lines below to use it)
// int64_t nnzC_stage = estimateNNZ_sampling(*ARecv, *BRecv);
// nnzC_SUMMA += nnzC_stage;
// delete received data
if(i != Aself)
delete ARecv;
if(i != Bself)
delete BRecv;
}
SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);
int64_t nnzC_SUMMA_max = 0;
MPI_Allreduce(&nnzC_SUMMA, &nnzC_SUMMA_max, 1, MPIType<int64_t>(), MPI_MAX, GridC->GetWorld());
return nnzC_SUMMA_max;
}
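/*
 * Usage sketch (illustration only): estimate the per-process memory needed for
 * C = A*B before actually multiplying, e.g. to choose between the SUMMA
 * variants above. The returned value is the maximum, over all processes, of
 * the summed column-nnz estimates across all SUMMA stages.
 *
 *   int64_t nnz_per_proc = EstPerProcessNnzSUMMA(A, B, true);
 *   // rough upper bound on local intermediate storage: nnz_per_proc tuples
 */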
template <typename MATRIX, typename VECTOR>
void CheckSpMVCompliance(const MATRIX & A, const VECTOR & x)
{
if(A.getncol() != x.TotalLength())
{
std::ostringstream outs;
outs << "Can not multiply, dimensions does not match"<< std::endl;
outs << A.getncol() << " != " << x.TotalLength() << std::endl;
SpParHelper::Print(outs.str());
MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
}
if(! ( *(A.getcommgrid()) == *(x.getcommgrid())) )
{
std::cout << "Grids are not comparable for SpMV" << std::endl;
MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
}
}
template <typename SR, typename IU, typename NUM, typename UDER>
FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote> SpMV
(const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue, OptBuf<int32_t, typename promote_trait<NUM,IU>::T_promote > & optbuf);
template <typename SR, typename IU, typename NUM, typename UDER>
FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote> SpMV
(const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue)
{
typedef typename promote_trait<NUM,IU>::T_promote T_promote;
OptBuf<int32_t, T_promote > optbuf = OptBuf<int32_t, T_promote >();
return SpMV<SR>(A, x, indexisvalue, optbuf);
}
/**
* Step 1 of the sparse SpMV algorithm
* @param[in,out] trxlocnz, lenuntil, trxinds, trxnums { set or allocated }
* @param[in] indexisvalue
**/
template<typename IU, typename NV>
void TransposeVector(MPI_Comm & World, const FullyDistSpVec<IU,NV> & x, int32_t & trxlocnz, IU & lenuntil, int32_t * & trxinds, NV * & trxnums, bool indexisvalue)
{
int32_t xlocnz = (int32_t) x.getlocnnz();
int32_t roffst = (int32_t) x.RowLenUntil(); // since trxinds is int32_t
int32_t roffset;
IU luntil = x.LengthUntil();
int diagneigh = x.commGrid->GetComplementRank();
MPI_Status status;
MPI_Sendrecv(&roffst, 1, MPIType<int32_t>(), diagneigh, TROST, &roffset, 1, MPIType<int32_t>(), diagneigh, TROST, World, &status);
MPI_Sendrecv(&xlocnz, 1, MPIType<int32_t>(), diagneigh, TRNNZ, &trxlocnz, 1, MPIType<int32_t>(), diagneigh, TRNNZ, World, &status);
MPI_Sendrecv(&luntil, 1, MPIType<IU>(), diagneigh, TRLUT, &lenuntil, 1, MPIType<IU>(), diagneigh, TRLUT, World, &status);
// ABAB: Important observation is that local indices (given by x.ind) are 32-bit addressable
// Copy them to 32-bit integers and transfer those to save 50% of off-node bandwidth
trxinds = new int32_t[trxlocnz];
int32_t * temp_xind = new int32_t[xlocnz];
#ifdef THREADED
#pragma omp parallel for
#endif
for(int i=0; i< xlocnz; ++i)
temp_xind[i] = (int32_t) x.ind[i];
MPI_Sendrecv(temp_xind, xlocnz, MPIType<int32_t>(), diagneigh, TRI, trxinds, trxlocnz, MPIType<int32_t>(), diagneigh, TRI, World, &status);
delete [] temp_xind;
if(!indexisvalue)
{
trxnums = new NV[trxlocnz];
MPI_Sendrecv(const_cast<NV*>(SpHelper::p2a(x.num)), xlocnz, MPIType<NV>(), diagneigh, TRX, trxnums, trxlocnz, MPIType<NV>(), diagneigh, TRX, World, &status);
}
std::transform(trxinds, trxinds+trxlocnz, trxinds, [roffset](int32_t ind) { return ind + roffset; }); // fullydist indexing (p pieces) -> matrix indexing (sqrt(p) pieces)
}
/**
* Step 2 of the sparse SpMV algorithm
* @param[in,out] trxinds, trxnums { deallocated }
* @param[in,out] indacc, numacc { allocated }
* @param[in,out] accnz { set }
* @param[in] trxlocnz, lenuntil, indexisvalue
**/
template<typename IU, typename NV>
void AllGatherVector(MPI_Comm & ColWorld, int trxlocnz, IU lenuntil, int32_t * & trxinds, NV * & trxnums,
int32_t * & indacc, NV * & numacc, int & accnz, bool indexisvalue)
{
int colneighs, colrank;
MPI_Comm_size(ColWorld, &colneighs);
MPI_Comm_rank(ColWorld, &colrank);
int * colnz = new int[colneighs];
colnz[colrank] = trxlocnz;
MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, colnz, 1, MPI_INT, ColWorld);
int * dpls = new int[colneighs](); // displacements (zero-initialized)
std::partial_sum(colnz, colnz+colneighs-1, dpls+1);
accnz = std::accumulate(colnz, colnz+colneighs, 0);
indacc = new int32_t[accnz];
numacc = new NV[accnz];
// ABAB: Future issues here, colnz is of type int (MPI limitation)
// What if the aggregate vector size along the processor row/column is not 32-bit addressable?
// This will happen when n/sqrt(p) > 2^31
// Currently we can solve a small problem (scale 32) with 4096 processors
// For a medium problem (scale 35), we'll need 32K processors, which gives sqrt(p) ~ 180
// 2^35 / 180 ~ 2^29 / 3, which is not an issue!
#ifdef TIMING
double t0=MPI_Wtime();
#endif
MPI_Allgatherv(trxinds, trxlocnz, MPIType<int32_t>(), indacc, colnz, dpls, MPIType<int32_t>(), ColWorld);
delete [] trxinds;
if(indexisvalue)
{
IU lenuntilcol;
if(colrank == 0) lenuntilcol = lenuntil;
MPI_Bcast(&lenuntilcol, 1, MPIType<IU>(), 0, ColWorld);
for(int i=0; i< accnz; ++i) // fill numerical values from indices
{
numacc[i] = indacc[i] + lenuntilcol;
}
}
else
{
MPI_Allgatherv(trxnums, trxlocnz, MPIType<NV>(), numacc, colnz, dpls, MPIType<NV>(), ColWorld);
delete [] trxnums;
}
#ifdef TIMING
double t1=MPI_Wtime();
cblas_allgathertime += (t1-t0);
#endif
DeleteAll(colnz,dpls);
}
/**
* Step 3 of the sparse SpMV algorithm, with the semiring
* @param[in,out] optbuf {scratch space for all-to-all (fold) communication}
* @param[in,out] indacc, numacc {index and values of the input vector, deleted upon exit}
* @param[in,out] sendindbuf, sendnumbuf {index and values of the output vector, created}
**/
template<typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
void LocalSpMV(const SpParMat<IU,NUM,UDER> & A, int rowneighs, OptBuf<int32_t, OVT > & optbuf, int32_t * & indacc, IVT * & numacc,
int32_t * & sendindbuf, OVT * & sendnumbuf, int * & sdispls, int * sendcnt, int accnz, bool indexisvalue, PreAllocatedSPA<OVT> & SPA)
{
if(optbuf.totmax > 0) // graph500 optimization enabled
{
if(A.spSeq->getnsplit() > 0)
{
// optbuf.{inds/nums/dspls} and sendcnt are all pre-allocated and only filled by dcsc_gespmv_threaded
generic_gespmv_threaded_setbuffers<SR> (*(A.spSeq), indacc, numacc, accnz, optbuf.inds, optbuf.nums, sendcnt, optbuf.dspls, rowneighs);
}
else
{
generic_gespmv<SR> (*(A.spSeq), indacc, numacc, accnz, optbuf.inds, optbuf.nums, sendcnt, optbuf.dspls, rowneighs, indexisvalue);
}
DeleteAll(indacc,numacc);
}
else
{
if(A.spSeq->getnsplit() > 0)
{
// sendindbuf/sendnumbuf/sdispls are all allocated and filled by dcsc_gespmv_threaded
int totalsent = generic_gespmv_threaded<SR> (*(A.spSeq), indacc, numacc, accnz, sendindbuf, sendnumbuf, sdispls, rowneighs, SPA);
DeleteAll(indacc, numacc);
for(int i=0; i<rowneighs-1; ++i)
sendcnt[i] = sdispls[i+1] - sdispls[i];
sendcnt[rowneighs-1] = totalsent - sdispls[rowneighs-1];
}
else
{
// default SpMSpV
std::vector< int32_t > indy;
std::vector< OVT > numy;
generic_gespmv<SR>(*(A.spSeq), indacc, numacc, accnz, indy, numy, SPA);
DeleteAll(indacc, numacc);
int32_t bufsize = indy.size(); // as compact as possible
sendindbuf = new int32_t[bufsize];
sendnumbuf = new OVT[bufsize];
int32_t perproc = A.getlocalrows() / rowneighs;
int k = 0; // index to buffer
for(int i=0; i<rowneighs; ++i)
{
int32_t end_this = (i==rowneighs-1) ? A.getlocalrows(): (i+1)*perproc;
while(k < bufsize && indy[k] < end_this)
{
sendindbuf[k] = indy[k] - i*perproc;
sendnumbuf[k] = numy[k];
++sendcnt[i];
++k;
}
}
sdispls = new int[rowneighs]();
std::partial_sum(sendcnt, sendcnt+rowneighs-1, sdispls+1);
}
}
}
// non threaded
template <typename SR, typename IU, typename OVT>
void MergeContributions(int* listSizes, std::vector<int32_t *> & indsvec, std::vector<OVT *> & numsvec, std::vector<IU>& mergedind, std::vector<OVT>& mergednum)
{
int nlists = indsvec.size();
// This condition is also checked in the caller SpMV function;
// it is kept here for completeness.
if(nlists == 1)
{
// simply copy data
int veclen = listSizes[0];
mergedind.resize(veclen);
mergednum.resize(veclen);
for(int i=0; i<veclen; i++)
{
mergedind[i] = indsvec[0][i];
mergednum[i] = numsvec[0][i];
}
return;
}
int32_t hsize = 0;
int32_t inf = std::numeric_limits<int32_t>::min();
int32_t sup = std::numeric_limits<int32_t>::max();
KNHeap< int32_t, int32_t > sHeap(sup, inf);
int * processed = new int[nlists]();
for(int i=0; i<nlists; ++i)
{
if(listSizes[i] > 0)
{
// key, list_id
sHeap.insert(indsvec[i][0], i);
++hsize;
}
}
int32_t key, locv;
if(hsize > 0)
{
sHeap.deleteMin(&key, &locv);
mergedind.push_back( static_cast<IU>(key));
mergednum.push_back(numsvec[locv][0]); // nothing is processed yet
if( (++(processed[locv])) < listSizes[locv] )
sHeap.insert(indsvec[locv][processed[locv]], locv);
else
--hsize;
}
while(hsize > 0)
{
sHeap.deleteMin(&key, &locv);
if(mergedind.back() == static_cast<IU>(key))
{
mergednum.back() = SR::add(mergednum.back(), numsvec[locv][processed[locv]]);
// ABAB: Benchmark actually allows us to be non-deterministic in terms of parent selection
// We can just skip this addition operator (if it's a max/min select)
}
else
{
mergedind.push_back(static_cast<IU>(key));
mergednum.push_back(numsvec[locv][processed[locv]]);
}
if( (++(processed[locv])) < listSizes[locv] )
sHeap.insert(indsvec[locv][processed[locv]], locv);
else
--hsize;
}
DeleteAll(processed);
}
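/*
 * Worked example (editorial): merging two sorted (index, value) lists with the
 * heap above, where SR::add resolves duplicate indices.
 *   list 0: (1, a1) (4, a4)
 *   list 1: (1, b1) (2, b2)
 * produces
 *   mergedind = {1, 2, 4}
 *   mergednum = {SR::add(a1, b1), b2, a4}
 * Each input list is consumed exactly once, so the cost is
 * O(total length * log(nlists)).
 */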
template <typename SR, typename IU, typename OVT>
void MergeContributions_threaded(int * & listSizes, std::vector<int32_t *> & indsvec, std::vector<OVT *> & numsvec, std::vector<IU> & mergedind, std::vector<OVT> & mergednum, IU maxindex)
{
int nlists = indsvec.size();
// This condition is also checked in the caller SpMV function;
// it is kept here for completeness.
if(nlists == 1)
{
// simply copy data
int veclen = listSizes[0];
mergedind.resize(veclen);
mergednum.resize(veclen);
#ifdef THREADED
#pragma omp parallel for
#endif
for(int i=0; i<veclen; i++)
{
mergedind[i] = indsvec[0][i];
mergednum[i] = numsvec[0][i];
}
return;
}
int nthreads=1;
#ifdef THREADED
#pragma omp parallel
{
nthreads = omp_get_num_threads();
}
#endif
int nsplits = 4*nthreads; // oversplit for load balance
nsplits = std::min(nsplits, (int)maxindex);
std::vector< std::vector<int32_t> > splitters(nlists);
for(int k=0; k< nlists; k++)
{
splitters[k].resize(nsplits+1);
splitters[k][0] = static_cast<int32_t>(0);
#pragma omp parallel for
for(int i=1; i< nsplits; i++)
{
IU cur_idx = i * (maxindex/nsplits);
auto it = std::lower_bound (indsvec[k], indsvec[k] + listSizes[k], cur_idx);
splitters[k][i] = (int32_t) (it - indsvec[k]);
}
splitters[k][nsplits] = listSizes[k];
}
// ------ perform merge in parallel ------
std::vector<std::vector<IU>> indsBuf(nsplits);
std::vector<std::vector<OVT>> numsBuf(nsplits);
//TODO: allocate these vectors here before calling MergeContributions
#pragma omp parallel for schedule(dynamic)
for(int i=0; i< nsplits; i++)
{
std::vector<int32_t *> tIndsVec(nlists);
std::vector<OVT *> tNumsVec(nlists);
std::vector<int> tLengths(nlists);
for(int j=0; j< nlists; ++j)
{
tIndsVec[j] = indsvec[j] + splitters[j][i];
tNumsVec[j] = numsvec[j] + splitters[j][i];
tLengths[j]= splitters[j][i+1] - splitters[j][i];
}
MergeContributions<SR>(tLengths.data(), tIndsVec, tNumsVec, indsBuf[i], numsBuf[i]);
}
// ------ concatenate merged tuples processed by threads ------
std::vector<IU> tdisp(nsplits+1);
tdisp[0] = 0;
for(int i=0; i<nsplits; ++i)
{
tdisp[i+1] = tdisp[i] + indsBuf[i].size();
}
mergedind.resize(tdisp[nsplits]);
mergednum.resize(tdisp[nsplits]);
#pragma omp parallel for schedule(dynamic)
for(int i=0; i< nsplits; i++)
{
std::copy(indsBuf[i].data() , indsBuf[i].data() + indsBuf[i].size(), mergedind.data() + tdisp[i]);
std::copy(numsBuf[i].data() , numsBuf[i].data() + numsBuf[i].size(), mergednum.data() + tdisp[i]);
}
}
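/*
 * Editorial note on the splitters above: for every input list k, splitters[k][i]
 * is found with std::lower_bound at the index boundary i*(maxindex/nsplits), so
 * split i of every list covers the same global index range and the nsplits
 * sub-merges are independent. For example, with maxindex = 100 and nsplits = 4,
 * split 2 of every list holds exactly the entries whose indices fall in [50, 75),
 * wherever those entries start within that list.
 */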
/**
* This version is the most flexible sparse matrix X sparse vector [Used in KDT]
* It accepts different types for the matrix (NUM), the input vector (IVT) and the output vector (OVT)
* without relying on automatic type promotion
* Input (x) and output (y) vectors can be ALIASED because y is not written until the algorithm is done with x.
*/
template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y,
bool indexisvalue, OptBuf<int32_t, OVT > & optbuf, PreAllocatedSPA<OVT> & SPA)
{
CheckSpMVCompliance(A,x);
optbuf.MarkEmpty();
y.glen = A.getnrow(); // in case it is not set already
MPI_Comm World = x.commGrid->GetWorld();
MPI_Comm ColWorld = x.commGrid->GetColWorld();
MPI_Comm RowWorld = x.commGrid->GetRowWorld();
int accnz;
int32_t trxlocnz;
IU lenuntil;
int32_t *trxinds, *indacc;
IVT *trxnums, *numacc;
#ifdef TIMING
double t0=MPI_Wtime();
#endif
TransposeVector(World, x, trxlocnz, lenuntil, trxinds, trxnums, indexisvalue);
#ifdef TIMING
double t1=MPI_Wtime();
cblas_transvectime += (t1-t0);
#endif
if(x.commGrid->GetGridRows() > 1)
{
AllGatherVector(ColWorld, trxlocnz, lenuntil, trxinds, trxnums, indacc, numacc, accnz, indexisvalue); // trxindS/trxnums deallocated, indacc/numacc allocated, accnz set
}
else
{
accnz = trxlocnz;
indacc = trxinds; // aliasing ptr
numacc = trxnums; // aliasing ptr
}
int rowneighs;
MPI_Comm_size(RowWorld, &rowneighs);
int * sendcnt = new int[rowneighs]();
int32_t * sendindbuf;
OVT * sendnumbuf;
int * sdispls;
#ifdef TIMING
double t2=MPI_Wtime();
#endif
LocalSpMV<SR>(A, rowneighs, optbuf, indacc, numacc, sendindbuf, sendnumbuf, sdispls, sendcnt, accnz, indexisvalue, SPA); // indacc/numacc deallocated, sendindbuf/sendnumbuf/sdispls allocated
#ifdef TIMING
double t3=MPI_Wtime();
cblas_localspmvtime += (t3-t2);
#endif
if(x.commGrid->GetGridCols() == 1)
{
y.ind.resize(sendcnt[0]);
y.num.resize(sendcnt[0]);
if(optbuf.totmax > 0 ) // graph500 optimization enabled
{
#ifdef THREADED
#pragma omp parallel for
#endif
for(int i=0; i<sendcnt[0]; i++)
{
y.ind[i] = optbuf.inds[i];
y.num[i] = optbuf.nums[i];
}
}
else
{
#ifdef THREADED
#pragma omp parallel for
#endif
for(int i=0; i<sendcnt[0]; i++)
{
y.ind[i] = sendindbuf[i];
y.num[i] = sendnumbuf[i];
}
DeleteAll(sendindbuf, sendnumbuf,sdispls);
}
delete [] sendcnt;
return;
}
int * rdispls = new int[rowneighs];
int * recvcnt = new int[rowneighs];
MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, RowWorld); // share the request counts
// receive displacements are exact whereas send displacements have slack
rdispls[0] = 0;
for(int i=0; i<rowneighs-1; ++i)
{
rdispls[i+1] = rdispls[i] + recvcnt[i];
}
int totrecv = std::accumulate(recvcnt,recvcnt+rowneighs,0);
int32_t * recvindbuf = new int32_t[totrecv];
OVT * recvnumbuf = new OVT[totrecv];
#ifdef TIMING
double t4=MPI_Wtime();
#endif
if(optbuf.totmax > 0 ) // graph500 optimization enabled
{
MPI_Alltoallv(optbuf.inds, sendcnt, optbuf.dspls, MPIType<int32_t>(), recvindbuf, recvcnt, rdispls, MPIType<int32_t>(), RowWorld);
MPI_Alltoallv(optbuf.nums, sendcnt, optbuf.dspls, MPIType<OVT>(), recvnumbuf, recvcnt, rdispls, MPIType<OVT>(), RowWorld);
delete [] sendcnt;
}
else
{
MPI_Alltoallv(sendindbuf, sendcnt, sdispls, MPIType<int32_t>(), recvindbuf, recvcnt, rdispls, MPIType<int32_t>(), RowWorld);
MPI_Alltoallv(sendnumbuf, sendcnt, sdispls, MPIType<OVT>(), recvnumbuf, recvcnt, rdispls, MPIType<OVT>(), RowWorld);
DeleteAll(sendindbuf, sendnumbuf, sendcnt, sdispls);
}
#ifdef TIMING
double t5=MPI_Wtime();
cblas_alltoalltime += (t5-t4);
#endif
#ifdef TIMING
double t6=MPI_Wtime();
#endif
//MergeContributions<SR>(y,recvcnt, rdispls, recvindbuf, recvnumbuf, rowneighs);
// free memory of y, in case it was aliased
std::vector<IU>().swap(y.ind);
std::vector<OVT>().swap(y.num);
std::vector<int32_t *> indsvec(rowneighs);
std::vector<OVT *> numsvec(rowneighs);
#ifdef THREADED
#pragma omp parallel for
#endif
for(int i=0; i<rowneighs; i++)
{
indsvec[i] = recvindbuf+rdispls[i];
numsvec[i] = recvnumbuf+rdispls[i];
}
#ifdef THREADED
MergeContributions_threaded<SR>(recvcnt, indsvec, numsvec, y.ind, y.num, y.MyLocLength());
#else
MergeContributions<SR>(recvcnt, indsvec, numsvec, y.ind, y.num);
#endif
DeleteAll(recvcnt, rdispls,recvindbuf, recvnumbuf);
#ifdef TIMING
double t7=MPI_Wtime();
cblas_mergeconttime += (t7-t6);
#endif
}
template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y, bool indexisvalue, PreAllocatedSPA<OVT> & SPA)
{
OptBuf< int32_t, OVT > optbuf = OptBuf< int32_t,OVT >();
SpMV<SR>(A, x, y, indexisvalue, optbuf, SPA);
}
template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y, bool indexisvalue)
{
OptBuf< int32_t, OVT > optbuf = OptBuf< int32_t,OVT >();
PreAllocatedSPA<OVT> SPA;
SpMV<SR>(A, x, y, indexisvalue, optbuf, SPA);
}
template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y, bool indexisvalue, OptBuf<int32_t, OVT > & optbuf)
{
PreAllocatedSPA<OVT> SPA;
SpMV<SR>(A, x, y, indexisvalue, optbuf, SPA);
}
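/*
 * Usage sketch (illustration only): sparse-matrix sparse-vector product over
 * the (+, *) semiring, with the output vector allocated by the caller. The
 * concrete types are assumptions for the example; any CombBLAS semiring class
 * can be used as SR.
 *
 *   typedef PlusTimesSRing<double, double> PTDD;
 *   FullyDistSpVec<int64_t, double> x(A.getcommgrid(), A.getncol());
 *   FullyDistSpVec<int64_t, double> y(A.getcommgrid(), A.getnrow());
 *   // ... fill x ...
 *   SpMV<PTDD>(A, x, y, false);   // y = A*x; x and y may even alias
 */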
/**
* Automatic type promotion is ONLY done here; all the callee functions (in Friends.h and below) are instantiated with the promoted type.
* If indexisvalue = true, then we do not need to transfer values for x (this happens in BFS iterations with boolean matrices and integer rhs vectors)
**/
template <typename SR, typename IU, typename NUM, typename UDER>
FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote> SpMV
(const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue, OptBuf<int32_t, typename promote_trait<NUM,IU>::T_promote > & optbuf)
{
typedef typename promote_trait<NUM,IU>::T_promote T_promote;
FullyDistSpVec<IU, T_promote> y ( x.getcommgrid(), A.getnrow()); // identity doesn't matter for sparse vectors
SpMV<SR>(A, x, y, indexisvalue, optbuf);
return y;
}
/**
* Parallel dense SpMV
**/
template <typename SR, typename IU, typename NUM, typename NUV, typename UDER>
FullyDistVec<IU,typename promote_trait<NUM,NUV>::T_promote> SpMV
(const SpParMat<IU,NUM,UDER> & A, const FullyDistVec<IU,NUV> & x )
{
typedef typename promote_trait<NUM,NUV>::T_promote T_promote;
CheckSpMVCompliance(A, x);
MPI_Comm World = x.commGrid->GetWorld();
MPI_Comm ColWorld = x.commGrid->GetColWorld();
MPI_Comm RowWorld = x.commGrid->GetRowWorld();
int xsize = (int) x.LocArrSize();
int trxsize = 0;
int diagneigh = x.commGrid->GetComplementRank();
MPI_Status status;
MPI_Sendrecv(&xsize, 1, MPI_INT, diagneigh, TRX, &trxsize, 1, MPI_INT, diagneigh, TRX, World, &status);
NUV * trxnums = new NUV[trxsize];
MPI_Sendrecv(const_cast<NUV*>(SpHelper::p2a(x.arr)), xsize, MPIType<NUV>(), diagneigh, TRX, trxnums, trxsize, MPIType<NUV>(), diagneigh, TRX, World, &status);
int colneighs, colrank;
MPI_Comm_size(ColWorld, &colneighs);
MPI_Comm_rank(ColWorld, &colrank);
int * colsize = new int[colneighs];
colsize[colrank] = trxsize;
MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, colsize, 1, MPI_INT, ColWorld);
int * dpls = new int[colneighs](); // displacements (zero-initialized)
std::partial_sum(colsize, colsize+colneighs-1, dpls+1);
int accsize = std::accumulate(colsize, colsize+colneighs, 0);
NUV * numacc = new NUV[accsize];
MPI_Allgatherv(trxnums, trxsize, MPIType<NUV>(), numacc, colsize, dpls, MPIType<NUV>(), ColWorld);
delete [] trxnums;
// serial SpMV with dense vector
T_promote id = SR::id();
IU ysize = A.getlocalrows();
T_promote * localy = new T_promote[ysize];
std::fill_n(localy, ysize, id);
#ifdef THREADED
dcsc_gespmv_threaded<SR>(*(A.spSeq), numacc, localy);
#else
dcsc_gespmv<SR>(*(A.spSeq), numacc, localy);
#endif
DeleteAll(numacc,colsize, dpls);
// FullyDistVec<IT,NT>(shared_ptr<CommGrid> grid, IT globallen, NT initval, NT id)
FullyDistVec<IU, T_promote> y ( x.commGrid, A.getnrow(), id);
int rowneighs;
MPI_Comm_size(RowWorld, &rowneighs);
IU begptr, endptr;
for(int i=0; i< rowneighs; ++i)
{
begptr = y.RowLenUntil(i);
if(i == rowneighs-1)
{
endptr = ysize;
}
else
{
endptr = y.RowLenUntil(i+1);
}
MPI_Reduce(localy+begptr, SpHelper::p2a(y.arr), endptr-begptr, MPIType<T_promote>(), SR::mpi_op(), i, RowWorld);
}
delete [] localy;
return y;
}
/**
* \TODO: Old version that is no longer considered optimal
* Kept for legacy purposes
* To be removed when other functionals are fully tested.
**/
template <typename SR, typename IU, typename NUM, typename NUV, typename UDER>
FullyDistSpVec<IU,typename promote_trait<NUM,NUV>::T_promote> SpMV
(const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,NUV> & x)
{
typedef typename promote_trait<NUM,NUV>::T_promote T_promote;
CheckSpMVCompliance(A, x);
MPI_Comm World = x.commGrid->GetWorld();
MPI_Comm ColWorld = x.commGrid->GetColWorld();
MPI_Comm RowWorld = x.commGrid->GetRowWorld();
int xlocnz = (int) x.getlocnnz();
int trxlocnz = 0;
int roffst = x.RowLenUntil();
int offset;
int diagneigh = x.commGrid->GetComplementRank();
MPI_Status status;
MPI_Sendrecv(&xlocnz, 1, MPI_INT, diagneigh, TRX, &trxlocnz, 1, MPI_INT, diagneigh, TRX, World, &status);
MPI_Sendrecv(&roffst, 1, MPI_INT, diagneigh, TROST, &offset, 1, MPI_INT, diagneigh, TROST, World, &status);
IU * trxinds = new IU[trxlocnz];
NUV * trxnums = new NUV[trxlocnz];
MPI_Sendrecv(const_cast<IU*>(SpHelper::p2a(x.ind)), xlocnz, MPIType<IU>(), diagneigh, TRX, trxinds, trxlocnz, MPIType<IU>(), diagneigh, TRX, World, &status);
MPI_Sendrecv(const_cast<NUV*>(SpHelper::p2a(x.num)), xlocnz, MPIType<NUV>(), diagneigh, TRX, trxnums, trxlocnz, MPIType<NUV>(), diagneigh, TRX, World, &status);
std::transform(trxinds, trxinds+trxlocnz, trxinds, [offset](IU ind) { return ind + static_cast<IU>(offset); }); // fullydist indexing (p pieces) -> matrix indexing (sqrt(p) pieces)
int colneighs, colrank;
MPI_Comm_size(ColWorld, &colneighs);
MPI_Comm_rank(ColWorld, &colrank);
int * colnz = new int[colneighs];
colnz[colrank] = trxlocnz;
MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, colnz, 1, MPI_INT, ColWorld);
int * dpls = new int[colneighs](); // displacements (zero-initialized)
std::partial_sum(colnz, colnz+colneighs-1, dpls+1);
int accnz = std::accumulate(colnz, colnz+colneighs, 0);
IU * indacc = new IU[accnz];
NUV * numacc = new NUV[accnz];
// ABAB: Future issues here, colnz is of type int (MPI limitation)
// What if the aggregate vector size along the processor row/column is not 32-bit addressable?
MPI_Allgatherv(trxinds, trxlocnz, MPIType<IU>(), indacc, colnz, dpls, MPIType<IU>(), ColWorld);
MPI_Allgatherv(trxnums, trxlocnz, MPIType<NUV>(), numacc, colnz, dpls, MPIType<NUV>(), ColWorld);
DeleteAll(trxinds, trxnums);
// serial SpMV with sparse vector
std::vector< int32_t > indy;
std::vector< T_promote > numy;
int32_t * tmpindacc = new int32_t[accnz];
for(int i=0; i< accnz; ++i) tmpindacc[i] = indacc[i];
delete [] indacc;
dcsc_gespmv<SR>(*(A.spSeq), tmpindacc, numacc, accnz, indy, numy); // actual multiplication
DeleteAll(tmpindacc, numacc);
DeleteAll(colnz, dpls);
FullyDistSpVec<IU, T_promote> y ( x.commGrid, A.getnrow()); // identity doesn't matter for sparse vectors
IU yintlen = y.MyRowLength();
int rowneighs;
MPI_Comm_size(RowWorld,&rowneighs);
std::vector< std::vector<IU> > sendind(rowneighs);
std::vector< std::vector<T_promote> > sendnum(rowneighs);
typename std::vector<int32_t>::size_type outnz = indy.size();
for(typename std::vector<IU>::size_type i=0; i< outnz; ++i)
{
IU locind;
int rown = y.OwnerWithinRow(yintlen, static_cast<IU>(indy[i]), locind);
sendind[rown].push_back(locind);
sendnum[rown].push_back(numy[i]);
}
IU * sendindbuf = new IU[outnz];
T_promote * sendnumbuf = new T_promote[outnz];
int * sendcnt = new int[rowneighs];
int * sdispls = new int[rowneighs];
for(int i=0; i<rowneighs; ++i)
sendcnt[i] = sendind[i].size();
int * rdispls = new int[rowneighs];
int * recvcnt = new int[rowneighs];
MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, RowWorld); // share the request counts
sdispls[0] = 0;
rdispls[0] = 0;
for(int i=0; i<rowneighs-1; ++i)
{
sdispls[i+1] = sdispls[i] + sendcnt[i];
rdispls[i+1] = rdispls[i] + recvcnt[i];
}
int totrecv = std::accumulate(recvcnt,recvcnt+rowneighs,0);
IU * recvindbuf = new IU[totrecv];
T_promote * recvnumbuf = new T_promote[totrecv];
for(int i=0; i<rowneighs; ++i)
{
std::copy(sendind[i].begin(), sendind[i].end(), sendindbuf+sdispls[i]);
std::vector<IU>().swap(sendind[i]);
}
for(int i=0; i<rowneighs; ++i)
{
std::copy(sendnum[i].begin(), sendnum[i].end(), sendnumbuf+sdispls[i]);
std::vector<T_promote>().swap(sendnum[i]);
}
MPI_Alltoallv(sendindbuf, sendcnt, sdispls, MPIType<IU>(), recvindbuf, recvcnt, rdispls, MPIType<IU>(), RowWorld);
MPI_Alltoallv(sendnumbuf, sendcnt, sdispls, MPIType<T_promote>(), recvnumbuf, recvcnt, rdispls, MPIType<T_promote>(), RowWorld);
DeleteAll(sendindbuf, sendnumbuf);
DeleteAll(sendcnt, recvcnt, sdispls, rdispls);
// define a SPA-like data structure
IU ysize = y.MyLocLength();
T_promote * localy = new T_promote[ysize];
bool * isthere = new bool[ysize];
std::vector<IU> nzinds; // nonzero indices
std::fill_n(isthere, ysize, false);
for(int i=0; i< totrecv; ++i)
{
if(!isthere[recvindbuf[i]])
{
localy[recvindbuf[i]] = recvnumbuf[i]; // initial assignment
nzinds.push_back(recvindbuf[i]);
isthere[recvindbuf[i]] = true;
}
else
{
localy[recvindbuf[i]] = SR::add(localy[recvindbuf[i]], recvnumbuf[i]);
}
}
DeleteAll(isthere, recvindbuf, recvnumbuf);
std::sort(nzinds.begin(), nzinds.end());
int nnzy = nzinds.size();
y.ind.resize(nnzy);
y.num.resize(nnzy);
for(int i=0; i< nnzy; ++i)
{
y.ind[i] = nzinds[i];
y.num[i] = localy[nzinds[i]];
}
delete [] localy;
return y;
}
template <typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat<IU,typename promote_trait<NU1,NU2>::T_promote,typename promote_trait<UDERA,UDERB>::T_promote> EWiseMult
(const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B , bool exclude)
{
typedef typename promote_trait<NU1,NU2>::T_promote N_promote;
typedef typename promote_trait<UDERA,UDERB>::T_promote DER_promote;
if(*(A.commGrid) == *(B.commGrid))
{
DER_promote * result = new DER_promote( EWiseMult(*(A.spSeq),*(B.spSeq),exclude) );
return SpParMat<IU, N_promote, DER_promote> (result, A.commGrid);
}
else
{
std::cout << "Grids are not comparable elementwise multiplication" << std::endl;
MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
return SpParMat< IU,N_promote,DER_promote >();
}
}
template <typename RETT, typename RETDER, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB, typename _BinaryOperation>
SpParMat<IU,RETT,RETDER> EWiseApply
(const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B, _BinaryOperation __binary_op, bool notB, const NU2& defaultBVal)
{
if(*(A.commGrid) == *(B.commGrid))
{
RETDER * result = new RETDER( EWiseApply<RETT>(*(A.spSeq),*(B.spSeq), __binary_op, notB, defaultBVal) );
return SpParMat<IU, RETT, RETDER> (result, A.commGrid);
}
else
{
std::cout << "Grids are not comparable elementwise apply" << std::endl;
MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
return SpParMat< IU,RETT,RETDER >();
}
}
template <typename RETT, typename RETDER, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB, typename _BinaryOperation, typename _BinaryPredicate>
SpParMat<IU,RETT,RETDER> EWiseApply
(const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B, _BinaryOperation __binary_op, _BinaryPredicate do_op, bool allowANulls, bool allowBNulls, const NU1& ANullVal, const NU2& BNullVal, const bool allowIntersect, const bool useExtendedBinOp)
{
if(*(A.commGrid) == *(B.commGrid))
{
RETDER * result = new RETDER( EWiseApply<RETT>(*(A.spSeq),*(B.spSeq), __binary_op, do_op, allowANulls, allowBNulls, ANullVal, BNullVal, allowIntersect) );
return SpParMat<IU, RETT, RETDER> (result, A.commGrid);
}
else
{
std::cout << "Grids are not comparable elementwise apply" << std::endl;
MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
return SpParMat< IU,RETT,RETDER >();
}
}
// plain adapter
template <typename RETT, typename RETDER, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB, typename _BinaryOperation, typename _BinaryPredicate>
SpParMat<IU,RETT,RETDER>
EWiseApply (const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B, _BinaryOperation __binary_op, _BinaryPredicate do_op, bool allowANulls, bool allowBNulls, const NU1& ANullVal, const NU2& BNullVal, const bool allowIntersect = true)
{
return EWiseApply<RETT, RETDER>(A, B,
EWiseExtToPlainAdapter<RETT, NU1, NU2, _BinaryOperation>(__binary_op),
EWiseExtToPlainAdapter<bool, NU1, NU2, _BinaryPredicate>(do_op),
allowANulls, allowBNulls, ANullVal, BNullVal, allowIntersect, true);
}
// end adapter
/**
* if exclude is true, then we prune all entries W[i] != zero from V
* if exclude is false, then we perform a proper elementwise multiplication
**/
template <typename IU, typename NU1, typename NU2>
FullyDistSpVec<IU,typename promote_trait<NU1,NU2>::T_promote> EWiseMult
(const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , bool exclude, NU2 zero)
{
typedef typename promote_trait<NU1,NU2>::T_promote T_promote;
if(*(V.commGrid) == *(W.commGrid))
{
FullyDistSpVec< IU, T_promote> Product(V.commGrid);
if(V.glen != W.glen)
{
std::cerr << "Vector dimensions don't match for EWiseMult\n";
MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
}
else
{
Product.glen = V.glen;
IU size= V.getlocnnz();
if(exclude)
{
#if defined(_OPENMP) && defined(CBLAS_EXPERIMENTAL) // not faster than serial
int actual_splits = cblas_splits * 1; // 1 is the parallel slackness
std::vector <IU> tlosizes (actual_splits, 0);
std::vector < std::vector<IU> > tlinds(actual_splits);
std::vector < std::vector<T_promote> > tlnums(actual_splits);
IU tlsize = size / actual_splits;
#pragma omp parallel for //schedule(dynamic, 1)
for(IU t = 0; t < actual_splits; ++t)
{
IU tlbegin = t*tlsize;
IU tlend = (t==actual_splits-1)? size : (t+1)*tlsize;
for(IU i=tlbegin; i<tlend; ++i)
{
if(W.arr[V.ind[i]] == zero) // keep only those
{
tlinds[t].push_back(V.ind[i]);
tlnums[t].push_back(V.num[i]);
tlosizes[t]++;
}
}
}
std::vector<IU> prefix_sum(actual_splits+1,0);
std::partial_sum(tlosizes.begin(), tlosizes.end(), prefix_sum.begin()+1);
Product.ind.resize(prefix_sum[actual_splits]);
Product.num.resize(prefix_sum[actual_splits]);
#pragma omp parallel for //schedule(dynamic, 1)
for(IU t=0; t< actual_splits; ++t)
{
std::copy(tlinds[t].begin(), tlinds[t].end(), Product.ind.begin()+prefix_sum[t]);
std::copy(tlnums[t].begin(), tlnums[t].end(), Product.num.begin()+prefix_sum[t]);
}
#else
for(IU i=0; i<size; ++i)
{
if(W.arr[V.ind[i]] == zero) // keep only those
{
Product.ind.push_back(V.ind[i]);
Product.num.push_back(V.num[i]);
}
}
#endif
}
else
{
for(IU i=0; i<size; ++i)
{
if(W.arr[V.ind[i]] != zero) // keep only those
{
Product.ind.push_back(V.ind[i]);
Product.num.push_back(V.num[i] * W.arr[V.ind[i]]);
}
}
}
}
return Product;
}
else
{
std::cout << "Grids are not comparable elementwise multiplication" << std::endl;
MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
return FullyDistSpVec< IU,T_promote>();
}
}
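/*
 * Usage sketch (illustration only), matching the exclude semantics documented
 * above for a sparse vector V and a dense vector W of equal global length:
 *
 *   // keep V[i] only where W[i] != 0, multiplying the kept values by W[i]:
 *   auto P = EWiseMult(V, W, false, 0.0);
 *   // prune V[i] wherever W[i] != 0 (i.e. keep V only where W equals "zero"):
 *   auto Q = EWiseMult(V, W, true, 0.0);
 */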
/**
Threaded EWiseApply. Only called internally from EWiseApply.
**/
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply_threaded
(const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp)
{
typedef RET T_promote; //typedef typename promote_trait<NU1,NU2>::T_promote T_promote;
if(*(V.commGrid) == *(W.commGrid))
{
FullyDistSpVec< IU, T_promote> Product(V.commGrid);
if(V.TotalLength() != W.TotalLength())
{
std::ostringstream outs;
outs << "Vector dimensions don't match (" << V.TotalLength() << " vs " << W.TotalLength() << ") for EWiseApply (short version)\n";
SpParHelper::Print(outs.str());
MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
}
else
{
int nthreads=1;
#ifdef _OPENMP
#pragma omp parallel
{
nthreads = omp_get_num_threads();
}
#endif
Product.glen = V.glen;
IU size= W.LocArrSize();
IU spsize = V.getlocnnz();
// temporary result vectors per thread
std::vector<std::vector<IU>> tProductInd(nthreads);
std::vector<std::vector<T_promote>> tProductVal(nthreads);
IU perthread; //chunk of tProductInd or tProductVal allocated to each thread
if (allowVNulls)
perthread = size/nthreads;
else
perthread = spsize/nthreads;
#ifdef _OPENMP
#pragma omp parallel
#endif
{
int curthread = 0;
#ifdef _OPENMP
curthread = omp_get_thread_num();
#endif
IU tStartIdx = perthread * curthread;
IU tNextIdx = perthread * (curthread+1);
if (allowVNulls)
{
if(curthread == nthreads-1) tNextIdx = size;
// get sparse part for the current thread
auto it = std::lower_bound (V.ind.begin(), V.ind.end(), tStartIdx);
IU tSpIdx = (IU) std::distance(V.ind.begin(), it);
// iterate over the dense vector
for(IU tIdx=tStartIdx; tIdx < tNextIdx; ++tIdx)
{
if(tSpIdx < spsize && V.ind[tSpIdx] < tNextIdx && V.ind[tSpIdx] == tIdx)
{
if (_doOp(V.num[tSpIdx], W.arr[tIdx], false, false))
{
tProductInd[curthread].push_back(tIdx);
tProductVal[curthread].push_back (_binary_op(V.num[tSpIdx], W.arr[tIdx], false, false));
}
tSpIdx++;
}
else
{
if (_doOp(Vzero, W.arr[tIdx], true, false))
{
tProductInd[curthread].push_back(tIdx);
tProductVal[curthread].push_back (_binary_op(Vzero, W.arr[tIdx], true, false));
}
}
}
}
else // iterate over the sparse vector
{
if(curthread == nthreads-1) tNextIdx = spsize;
for(IU tSpIdx=tStartIdx; tSpIdx < tNextIdx; ++tSpIdx)
{
if (_doOp(V.num[tSpIdx], W.arr[V.ind[tSpIdx]], false, false))
{
tProductInd[curthread].push_back( V.ind[tSpIdx]);
tProductVal[curthread].push_back (_binary_op(V.num[tSpIdx], W.arr[V.ind[tSpIdx]], false, false));
}
}
}
}
std::vector<IU> tdisp(nthreads+1);
tdisp[0] = 0;
for(int i=0; i<nthreads; ++i)
{
tdisp[i+1] = tdisp[i] + tProductInd[i].size();
}
// copy results from temporary vectors
Product.ind.resize(tdisp[nthreads]);
Product.num.resize(tdisp[nthreads]);
#ifdef _OPENMP
#pragma omp parallel
#endif
{
int curthread = 0;
#ifdef _OPENMP
curthread = omp_get_thread_num();
#endif
std::copy(tProductInd[curthread].begin(), tProductInd[curthread].end(), Product.ind.data() + tdisp[curthread]);
std::copy(tProductVal[curthread].begin() , tProductVal[curthread].end(), Product.num.data() + tdisp[curthread]);
}
}
return Product;
}
else
{
std::cout << "Grids are not comparable for EWiseApply" << std::endl;
MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
return FullyDistSpVec< IU,T_promote>();
}
}
/**
* Performs an arbitrary binary operation _binary_op on the corresponding elements of two vectors with the result stored in a return vector ret.
* The binary operation is only performed if the binary predicate _doOp returns true for those elements. Otherwise the binary operation is not
* performed and ret does not contain an element at that position.
* More formally the operation is defined as:
* if (_doOp(V[i], W[i]))
* ret[i] = _binary_op(V[i], W[i])
* else
* // ret[i] is not set
* Hence _doOp can be used to implement a filter on either of the vectors.
*
* The above is only defined if both V[i] and W[i] exist (i.e. an intersection). To allow a union operation (ex. when V[i] doesn't exist but W[i] does)
* the allowVNulls flag is set to true and the Vzero argument is used as the missing V[i] value.
*
* The type of each element of ret need not be related to the types of V or W, so the return type must be explicitly specified as a template parameter:
* FullyDistSpVec<int, double> r = EWiseApply<double>(V, W, plus, retTrue, false, 0)
**/
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply
(const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp)
{
#ifdef _OPENMP
return EWiseApply_threaded<RET>(V, W, _binary_op, _doOp, allowVNulls, Vzero, useExtendedBinOp);
#else
typedef RET T_promote; //typedef typename promote_trait<NU1,NU2>::T_promote T_promote;
if(*(V.commGrid) == *(W.commGrid))
{
FullyDistSpVec< IU, T_promote> Product(V.commGrid);
//FullyDistVec< IU, NU1> DV (V); // Ariful: I am not sure why it was there??
if(V.TotalLength() != W.TotalLength())
{
std::ostringstream outs;
outs << "Vector dimensions don't match (" << V.TotalLength() << " vs " << W.TotalLength() << ") for EWiseApply (short version)\n";
SpParHelper::Print(outs.str());
MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
}
else
{
Product.glen = V.glen;
IU size= W.LocArrSize();
IU spsize = V.getlocnnz();
IU sp_iter = 0;
if (allowVNulls)
{
// iterate over the dense vector
for(IU i=0; i<size; ++i)
{
if(sp_iter < spsize && V.ind[sp_iter] == i)
{
if (_doOp(V.num[sp_iter], W.arr[i], false, false))
{
Product.ind.push_back(i);
Product.num.push_back(_binary_op(V.num[sp_iter], W.arr[i], false, false));
}
sp_iter++;
}
else
{
if (_doOp(Vzero, W.arr[i], true, false))
{
Product.ind.push_back(i);
Product.num.push_back(_binary_op(Vzero, W.arr[i], true, false));
}
}
}
}
else
{
// iterate over the sparse vector
for(sp_iter = 0; sp_iter < spsize; ++sp_iter)
{
if (_doOp(V.num[sp_iter], W.arr[V.ind[sp_iter]], false, false))
{
Product.ind.push_back(V.ind[sp_iter]);
Product.num.push_back(_binary_op(V.num[sp_iter], W.arr[V.ind[sp_iter]], false, false));
}
}
}
}
return Product;
}
else
{
std::cout << "Grids are not comparable for EWiseApply" << std::endl;
MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
return FullyDistSpVec< IU,T_promote>();
}
#endif
}
/**
* Performs an arbitrary binary operation _binary_op on the corresponding elements of two vectors with the result stored in a return vector ret.
* The binary operation is only performed if the binary predicate _doOp returns true for those elements. Otherwise the binary operation is not
* performed and ret does not contain an element at that position.
* More formally the operation is defined as:
* if (_doOp(V[i], W[i]))
* ret[i] = _binary_op(V[i], W[i])
* else
* // ret[i] is not set
* Hence _doOp can be used to implement a filter on either of the vectors.
*
* The above is only defined if both V[i] and W[i] exist (i.e. an intersection). To allow a union operation (ex. when V[i] doesn't exist but W[i] does)
* the allowVNulls flag is set to true and the Vzero argument is used as the missing V[i] value.
* !allowVNulls && !allowWNulls => intersection
* !allowVNulls && allowWNulls => operate on all elements of V
* allowVNulls && !allowWNulls => operate on all elements of W
* allowVNulls && allowWNulls => union
*
* The type of each element of ret need not be related to the types of V or W, so the return type must be explicitly specified as a template parameter:
* FullyDistSpVec<int, double> r = EWiseApply<double>(V, W, plus, ...)
* For intersection, Vzero and Wzero are irrelevant
* ABAB: \todo: Should allowIntersect be "false" for all SetDifference uses?
**/
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply
(const FullyDistSpVec<IU,NU1> & V, const FullyDistSpVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, bool allowWNulls, NU1 Vzero, NU2 Wzero, const bool allowIntersect, const bool useExtendedBinOp)
{
typedef RET T_promote; // typename promote_trait<NU1,NU2>::T_promote T_promote;
if(*(V.commGrid) == *(W.commGrid))
{
FullyDistSpVec< IU, T_promote> Product(V.commGrid);
if(V.glen != W.glen)
{
std::ostringstream outs;
outs << "Vector dimensions don't match (" << V.glen << " vs " << W.glen << ") for EWiseApply (full version)\n";
SpParHelper::Print(outs.str());
MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
}
else
{
Product.glen = V.glen;
typename std::vector< IU >::const_iterator indV = V.ind.begin();
typename std::vector< NU1 >::const_iterator numV = V.num.begin();
typename std::vector< IU >::const_iterator indW = W.ind.begin();
typename std::vector< NU2 >::const_iterator numW = W.num.begin();
while (indV < V.ind.end() && indW < W.ind.end())
{
if (*indV == *indW)
{
// overlap
if (allowIntersect)
{
if (_doOp(*numV, *numW, false, false))
{
Product.ind.push_back(*indV);
Product.num.push_back(_binary_op(*numV, *numW, false, false));
}
}
indV++; numV++;
indW++; numW++;
}
else if (*indV < *indW)
{
// V has value but W does not
if (allowWNulls)
{
if (_doOp(*numV, Wzero, false, true))
{
Product.ind.push_back(*indV);
Product.num.push_back(_binary_op(*numV, Wzero, false, true));
}
}
indV++; numV++;
}
else //(*indV > *indW)
{
// W has value but V does not
if (allowVNulls)
{
if (_doOp(Vzero, *numW, true, false))
{
Product.ind.push_back(*indW);
Product.num.push_back(_binary_op(Vzero, *numW, true, false));
}
}
indW++; numW++;
}
}
// clean up
while (allowWNulls && indV < V.ind.end())
{
if (_doOp(*numV, Wzero, false, true))
{
Product.ind.push_back(*indV);
Product.num.push_back(_binary_op(*numV, Wzero, false, true));
}
indV++; numV++;
}
while (allowVNulls && indW < W.ind.end())
{
if (_doOp(Vzero, *numW, true, false))
{
Product.ind.push_back(*indW);
Product.num.push_back(_binary_op(Vzero, *numW, true, false));
}
indW++; numW++;
}
}
return Product;
}
else
{
std::cout << "Grids are not comparable for EWiseApply" << std::endl;
MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
return FullyDistSpVec< IU,T_promote>();
}
}
// plain callback versions
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply
(const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero)
{
return EWiseApply<RET>(V, W,
EWiseExtToPlainAdapter<RET, NU1, NU2, _BinaryOperation>(_binary_op),
EWiseExtToPlainAdapter<bool, NU1, NU2, _BinaryPredicate>(_doOp),
allowVNulls, Vzero, true);
}
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply
(const FullyDistSpVec<IU,NU1> & V, const FullyDistSpVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, bool allowWNulls, NU1 Vzero, NU2 Wzero, const bool allowIntersect = true)
{
return EWiseApply<RET>(V, W,
EWiseExtToPlainAdapter<RET, NU1, NU2, _BinaryOperation>(_binary_op),
EWiseExtToPlainAdapter<bool, NU1, NU2, _BinaryPredicate>(_doOp),
allowVNulls, allowWNulls, Vzero, Wzero, allowIntersect, true);
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// sampling-based nnz estimation via SpMV
// @OGUZ-NOTE This is not based on SUMMA, do not use. Estimates the number of
// nonzeros in the final output matrix.
#define NROUNDS 5
typedef std::array<float, NROUNDS> samparr_t;
template <typename NZT>
struct promote_trait<NZT, samparr_t>
{
typedef samparr_t T_promote;
};
class SamplesSaveHandler
{
public:
template<typename c, typename t, typename V>
void save(std::basic_ostream<c, t> &os,
std::array<V, NROUNDS> &sample_vec,
int64_t index)
{
for (auto it = sample_vec.begin(); it != sample_vec.end(); ++it)
os << *it << " ";
}
};
template<typename NZT>
struct SelectMinxSR
{
static samparr_t id()
{
samparr_t arr;
for (auto it = arr.begin(); it != arr.end(); ++it)
*it = std::numeric_limits<float>::max();
return arr;
}
static bool returnedSAID()
{
return false;
}
static samparr_t
add (const samparr_t &arg1, const samparr_t &arg2)
{
samparr_t out;
for (int i = 0; i < NROUNDS; ++i)
out[i] = std::min(arg1[i], arg2[i]);
return out;
}
static samparr_t
multiply (const NZT arg1, const samparr_t &arg2)
{
return arg2;
}
static void axpy (const NZT a, const samparr_t &x, samparr_t &y)
{
y = add(y, multiply(a, x));
}
static MPI_Op mpi_op()
{
static MPI_Op mpiop;
static bool exists = false;
if (exists)
return mpiop;
else
{
MPI_Op_create(MPI_func, true, &mpiop);
exists = true;
return mpiop;
}
}
static void
MPI_func(void *invec, void *inoutvec, int *len, MPI_Datatype *datatype)
{
samparr_t *in = static_cast<samparr_t *>(invec);
samparr_t *inout = static_cast<samparr_t *>(inoutvec);
for (int i = 0; i < *len; ++i)
inout[i] = add(inout[i], in[i]);
}
};
template <typename IU, typename NU1, typename NU2,
typename UDERA, typename UDERB>
int64_t
EstPerProcessNnzSpMV(
SpParMat<IU, NU1, UDERA> &A, SpParMat<IU, NU2, UDERB> &B
)
{
int myrank;
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
float lambda = 1.0f;
int nthds = 1;
#ifdef THREADED
#pragma omp parallel
#endif
{
nthds = omp_get_num_threads();
}
if (myrank == 0)
std::cout << "taking transposes." << std::endl;
A.Transpose();
B.Transpose();
if (myrank == 0)
std::cout << "setting initial samples." << std::endl;
samparr_t sa;
FullyDistVec<IU, samparr_t> samples_init(A.getcommgrid(), A.getncol(), sa);
#ifdef THREADED
#pragma omp parallel
#endif
{
std::default_random_engine gen;
std::exponential_distribution<float> exp_dist(lambda);
#ifdef THREADED
#pragma omp for
#endif
for (IU i = 0; i < samples_init.LocArrSize(); ++i)
{
samparr_t tmp;
for (auto it = tmp.begin(); it != tmp.end(); ++it)
*it = exp_dist(gen);
samples_init.SetLocalElement(i, tmp);
}
}
// std::string fname("samples_init");
// samples_init.ParallelWrite(fname, 1, SamplesSaveHandler(), true);
if (myrank == 0)
std::cout << "computing mid samples." << std::endl;
FullyDistVec<IU, samparr_t> samples_mid =
SpMV<SelectMinxSR<NU1> > (A, samples_init);
// fname = "samples_mid";
// samples_mid.ParallelWrite(fname, 1, SamplesSaveHandler(), true);
if (myrank == 0)
std::cout << "computing final samples." << std::endl;
FullyDistVec<IU, samparr_t> samples_final =
SpMV<SelectMinxSR<NU2> > (B, samples_mid);
// fname = "samples_final";
// samples_final.ParallelWrite(fname, 1, SamplesSaveHandler(), true);
if (myrank == 0)
std::cout << "computing nnz estimation." << std::endl;
float nnzest = 0.0f;
std::cout << myrank << ": samples_final loc size: "
<< samples_final.LocArrSize() << std::endl;
const samparr_t *lsamples = samples_final.GetLocArr();
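// Sketch of why (NROUNDS - 1) / sum estimates the row count below (reasoning only,
// not taken from external documentation): each input column starts with NROUNDS
// i.i.d. Exp(lambda) draws, and the min-semiring SpMV keeps, per round, the minimum
// draw over the columns contributing to an output row. The minimum of k i.i.d.
// Exp(lambda) variables is Exp(k*lambda), so summing the NROUNDS per-row minima and
// taking (NROUNDS - 1) / sum gives an unbiased estimate of k*lambda, i.e. of the
// number of contributing nonzeros for that row; summing over rows estimates nnz.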
#ifdef THREADED
#pragma omp parallel for reduction (+:nnzest)
#endif
for (IU i = 0; i < samples_final.LocArrSize(); ++i)
{
float tmp = 0.0f;
for (auto it = lsamples[i].begin(); it != lsamples[i].end(); ++it)
tmp += *it;
nnzest += static_cast<float>(NROUNDS - 1) / tmp;
}
if (myrank == 0)
std::cout << "taking transposes again." << std::endl;
int64_t nnzC_est = nnzest;
int64_t nnzC_tot = 0;
MPI_Allreduce(&nnzC_est, &nnzC_tot, 1, MPIType<int64_t>(), MPI_SUM,
(B.commGrid)->GetWorld());
if (myrank == 0)
std::cout << "sampling-based spmv est tot: " << nnzC_tot << std::endl;
// revert back
A.Transpose();
B.Transpose();
return nnzC_tot;
}
template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDER1, typename UDER2>
SpParMat3D<IU,NUO,UDERO> Mult_AnXBn_SUMMA3D(SpParMat3D<IU,NU1,UDER1> & A, SpParMat3D<IU,NU2,UDER2> & B){
int myrank;
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
typedef typename UDERO::LocalIT LIC;
typedef typename UDER1::LocalIT LIA;
typedef typename UDER2::LocalIT LIB;
#ifdef TIMING
double t0, t1, t2, t3;
#endif
/*
* Check if A and B are multipliable
* */
if(A.getncol() != B.getnrow()){
std::ostringstream outs;
outs << "Cannot multiply, dimensions do not match" << std::endl;
outs << A.getncol() << " != " << B.getnrow() << std::endl;
SpParHelper::Print(outs.str());
MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
}
/*
 * Calculate, across fibers, which process should get how many columns after redistribution
* */
vector<LIB> divisions3d;
// Calculate split boundaries as if all contents of the layer are being redistributed along the fiber
// These boundaries will be used later on
B.CalculateColSplitDistributionOfLayer(divisions3d);
#ifdef TIMING
t0 = MPI_Wtime();
#endif
/*
* SUMMA Starts
* */
int stages, dummy; // last two parameters of ProductGrid are ignored for this multiplication
std::shared_ptr<CommGrid> GridC = ProductGrid((A.GetLayerMat()->getcommgrid()).get(),
(B.GetLayerMat()->getcommgrid()).get(),
stages, dummy, dummy);
IU C_m = A.GetLayerMat()->seqptr()->getnrow();
IU C_n = B.GetLayerMat()->seqptr()->getncol();
IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDER1::esscount, stages);
IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDER2::esscount, stages);
SpParHelper::GetSetSizes( *(A.GetLayerMat()->seqptr()), ARecvSizes, (A.GetLayerMat()->getcommgrid())->GetRowWorld() );
SpParHelper::GetSetSizes( *(B.GetLayerMat()->seqptr()), BRecvSizes, (B.GetLayerMat()->getcommgrid())->GetColWorld() );
// Remotely fetched matrices are stored as pointers
UDER1 * ARecv;
UDER2 * BRecv;
std::vector< SpTuples<IU,NUO> *> tomerge;
int Aself = (A.GetLayerMat()->getcommgrid())->GetRankInProcRow();
int Bself = (B.GetLayerMat()->getcommgrid())->GetRankInProcCol();
double Abcast_time = 0;
double Bbcast_time = 0;
double Local_multiplication_time = 0;
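// Each SUMMA stage below works as follows: the rank that owns the i-th piece of A
// broadcasts it along its process row, the rank that owns the i-th piece of B
// broadcasts it along its process column, and every process multiplies the two
// received pieces locally, collecting the partial results in `tomerge` for the
// multiway merge after the loop.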
for(int i = 0; i < stages; ++i) {
std::vector<IU> ess;
if(i == Aself){
ARecv = A.GetLayerMat()->seqptr(); // shallow-copy
}
else{
ess.resize(UDER1::esscount);
for(int j=0; j<UDER1::esscount; ++j) {
ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row
}
ARecv = new UDER1(); // first, create the object
}
#ifdef TIMING
t2 = MPI_Wtime();
#endif
if (Aself != i) {
ARecv->Create(ess);
}
Arr<IU,NU1> Aarrinfo = ARecv->GetArrays();
for(unsigned int idx = 0; idx < Aarrinfo.indarrs.size(); ++idx) {
MPI_Bcast(Aarrinfo.indarrs[idx].addr, Aarrinfo.indarrs[idx].count, MPIType<IU>(), i, GridC->GetRowWorld());
}
for(unsigned int idx = 0; idx < Aarrinfo.numarrs.size(); ++idx) {
MPI_Bcast(Aarrinfo.numarrs[idx].addr, Aarrinfo.numarrs[idx].count, MPIType<NU1>(), i, GridC->GetRowWorld());
}
#ifdef TIMING
t3 = MPI_Wtime();
mcl3d_Abcasttime += (t3-t2);
Abcast_time += (t3-t2);
#endif
ess.clear();
if(i == Bself){
BRecv = B.GetLayerMat()->seqptr(); // shallow-copy
}
else{
ess.resize(UDER2::esscount);
for(int j=0; j<UDER2::esscount; ++j) {
ess[j] = BRecvSizes[j][i];
}
BRecv = new UDER2();
}
MPI_Barrier(A.GetLayerMat()->getcommgrid()->GetWorld());
#ifdef TIMING
t2 = MPI_Wtime();
#endif
if (Bself != i) {
BRecv->Create(ess);
}
Arr<IU,NU2> Barrinfo = BRecv->GetArrays();
for(unsigned int idx = 0; idx < Barrinfo.indarrs.size(); ++idx) {
MPI_Bcast(Barrinfo.indarrs[idx].addr, Barrinfo.indarrs[idx].count, MPIType<IU>(), i, GridC->GetColWorld());
}
for(unsigned int idx = 0; idx < Barrinfo.numarrs.size(); ++idx) {
MPI_Bcast(Barrinfo.numarrs[idx].addr, Barrinfo.numarrs[idx].count, MPIType<NU2>(), i, GridC->GetColWorld());
}
#ifdef TIMING
t3 = MPI_Wtime();
mcl3d_Bbcasttime += (t3-t2);
Bbcast_time += (t3-t2);
#endif
#ifdef TIMING
t2 = MPI_Wtime();
#endif
SpTuples<IU,NUO> * C_cont = LocalSpGEMMHash<SR, NUO>
(*ARecv, *BRecv, // parameters themselves
i != Aself, // 'delete A' condition
i != Bself, // 'delete B' condition
false); // not to sort each column
#ifdef TIMING
t3 = MPI_Wtime();
mcl3d_localspgemmtime += (t3-t2);
Local_multiplication_time += (t3-t2);
#endif
if(!C_cont->isZero()) tomerge.push_back(C_cont);
}
SpHelper::deallocate2D(ARecvSizes, UDER1::esscount);
SpHelper::deallocate2D(BRecvSizes, UDER2::esscount);
#ifdef TIMING
t2 = MPI_Wtime();
#endif
SpTuples<IU,NUO> * C_tuples = MultiwayMergeHash<SR>(tomerge, C_m, C_n, true, false); // Delete input arrays and do not sort
#ifdef TIMING
t3 = MPI_Wtime();
mcl3d_SUMMAmergetime += (t3-t2);
#endif
#ifdef TIMING
if(myrank == 0){
fprintf(stderr, "[SUMMA3D]\tAbcast_time: %lf\n", Abcast_time);
fprintf(stderr, "[SUMMA3D]\tBbcast_time: %lf\n", Bbcast_time);
fprintf(stderr, "[SUMMA3D]\tLocal_multiplication_time: %lf\n", Local_multiplication_time);
fprintf(stderr, "[SUMMA3D]\tSUMMA Merge time: %lf\n", (t3-t2));
}
#endif
/*
* SUMMA Ends
* */
#ifdef TIMING
t1 = MPI_Wtime();
mcl3d_SUMMAtime += (t1-t0);
if(myrank == 0) fprintf(stderr, "[SUMMA3D]\tSUMMA time: %lf\n", (t1-t0));
#endif
/*
* 3d-reduction starts
* */
#ifdef TIMING
//MPI_Barrier(getcommgrid3D()->GetWorld());
t0 = MPI_Wtime();
#endif
MPI_Datatype MPI_tuple;
MPI_Type_contiguous(sizeof(std::tuple<LIC,LIC,NUO>), MPI_CHAR, &MPI_tuple);
MPI_Type_commit(&MPI_tuple);
/*
* Create a profile with information regarding data to be sent and received between layers
 * These allocations need to be `int` specifically because some of these arrays are used in communication.
 * This requirement comes from MPI, as MPI_Alltoallv takes pointers to int for counts and displacements.
* */
int * sendcnt = new int[A.getcommgrid3D()->GetGridLayers()];
int * sendprfl = new int[A.getcommgrid3D()->GetGridLayers()*3];
int * sdispls = new int[A.getcommgrid3D()->GetGridLayers()]();
int * recvcnt = new int[A.getcommgrid3D()->GetGridLayers()];
int * recvprfl = new int[A.getcommgrid3D()->GetGridLayers()*3];
int * rdispls = new int[A.getcommgrid3D()->GetGridLayers()]();
vector<IU> divisions3dPrefixSum(divisions3d.size());
divisions3dPrefixSum[0] = 0;
std::partial_sum(divisions3d.begin(), divisions3d.end()-1, divisions3dPrefixSum.begin()+1);
ColLexiCompare<IU,NUO> comp;
IU totsend = C_tuples->getnnz();
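// The chunk boundaries computed below rely on C_tuples->tuples being ordered by
// column id (ColLexiCompare), so each destination layer's tuples form a contiguous
// range that lower_bound can locate.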
#pragma omp parallel for
for(int i=0; i < A.getcommgrid3D()->GetGridLayers(); ++i){
IU start_col = divisions3dPrefixSum[i];
IU end_col = divisions3dPrefixSum[i] + divisions3d[i];
std::tuple<IU, IU, NUO> search_tuple_start(0, start_col, NUO());
std::tuple<IU, IU, NUO> search_tuple_end(0, end_col, NUO());
std::tuple<IU, IU, NUO>* start_it = std::lower_bound(C_tuples->tuples, C_tuples->tuples + C_tuples->getnnz(), search_tuple_start, comp);
std::tuple<IU, IU, NUO>* end_it = std::lower_bound(C_tuples->tuples, C_tuples->tuples + C_tuples->getnnz(), search_tuple_end, comp);
// This cast is important from a semantic point of view
sendcnt[i] = (int)(end_it - start_it);
sendprfl[i*3+0] = (int)(sendcnt[i]); // Number of nonzeros in ith chunk
sendprfl[i*3+1] = (int)(A.GetLayerMat()->seqptr()->getnrow()); // Number of rows in ith chunk
sendprfl[i*3+2] = (int)(divisions3d[i]); // Number of columns in ith chunk
}
std::partial_sum(sendcnt, sendcnt+A.getcommgrid3D()->GetGridLayers()-1, sdispls+1);
// The send profile is ready. Now update the tuples to reflect the correct column ids after the column split.
for(int i=0; i < A.getcommgrid3D()->GetGridLayers(); ++i){
#pragma omp parallel for schedule(static)
for(int j = 0; j < sendcnt[i]; j++){
std::get<1>(C_tuples->tuples[sdispls[i]+j]) = std::get<1>(C_tuples->tuples[sdispls[i]+j]) - divisions3dPrefixSum[i];
}
}
MPI_Alltoall(sendprfl, 3, MPI_INT, recvprfl, 3, MPI_INT, A.getcommgrid3D()->fiberWorld);
for(int i = 0; i < A.getcommgrid3D()->GetGridLayers(); i++) recvcnt[i] = recvprfl[i*3];
std::partial_sum(recvcnt, recvcnt+A.getcommgrid3D()->GetGridLayers()-1, rdispls+1);
IU totrecv = std::accumulate(recvcnt,recvcnt+A.getcommgrid3D()->GetGridLayers(), static_cast<IU>(0));
std::tuple<LIC,LIC,NUO>* recvTuples = static_cast<std::tuple<LIC,LIC,NUO>*> (::operator new (sizeof(std::tuple<LIC,LIC,NUO>[totrecv])));
#ifdef TIMING
t2 = MPI_Wtime();
#endif
MPI_Alltoallv(C_tuples->tuples, sendcnt, sdispls, MPI_tuple, recvTuples, recvcnt, rdispls, MPI_tuple, A.getcommgrid3D()->fiberWorld);
delete C_tuples;
#ifdef TIMING
t3 = MPI_Wtime();
if(myrank == 0) fprintf(stderr, "[SUMMA3D]\tAlltoallv: %lf\n", (t3-t2));
#endif
vector<SpTuples<IU, NUO>*> recvChunks(A.getcommgrid3D()->GetGridLayers());
#pragma omp parallel for
for (int i = 0; i < A.getcommgrid3D()->GetGridLayers(); i++){
recvChunks[i] = new SpTuples<LIC, NUO>(recvcnt[i], recvprfl[i*3+1], recvprfl[i*3+2], recvTuples + rdispls[i], true, false);
}
// Free all memory except recvTuples, because that memory holds the data of the newly created local matrices (recvChunks) after receiving.
DeleteAll(sendcnt, sendprfl, sdispls);
DeleteAll(recvcnt, recvprfl, rdispls);
MPI_Type_free(&MPI_tuple);
/*
* 3d-reduction ends
* */
#ifdef TIMING
t1 = MPI_Wtime();
mcl3d_reductiontime += (t1-t0);
if(myrank == 0) fprintf(stderr, "[SUMMA3D]\tReduction time: %lf\n", (t1-t0));
#endif
#ifdef TIMING
t0 = MPI_Wtime();
t2 = MPI_Wtime();
#endif
/*
* 3d-merge starts
* */
SpTuples<IU, NUO> * merged_tuples = MultiwayMergeHash<SR, IU, NUO>(recvChunks, recvChunks[0]->getnrow(), recvChunks[0]->getncol(), false, false); // Do not delete
#ifdef TIMING
t3 = MPI_Wtime();
if(myrank == 0) fprintf(stderr, "[SUMMA3D]\tMultiway Merge: %lf\n", (t3-t2));
mcl3d_layer_nnzc += merged_tuples->getnnz();
#endif
//Create SpDCCol and delete merged_tuples;
UDERO * localResultant = new UDERO(*merged_tuples, false);
// Do not delete the elements of recvChunks, because that would give a segmentation fault due to a double free
//delete [] recvTuples;
::operator delete(recvTuples);
for(int i = 0; i < recvChunks.size(); i++){
recvChunks[i]->tuples_deleted = true; // Temporary patch to avoid memory leak and segfault
delete recvChunks[i];
}
vector<SpTuples<IU,NUO>*>().swap(recvChunks);
/*
* 3d-merge ends
* */
#ifdef TIMING
t1 = MPI_Wtime();
mcl3d_3dmergetime += (t1-t0);
if(myrank == 0) fprintf(stderr, "[SUMMA3D]\t3D Merge time: %lf\n", (t1-t0));
#endif
std::shared_ptr<CommGrid3D> grid3d;
grid3d.reset(new CommGrid3D(A.getcommgrid3D()->GetWorld(), A.getcommgrid3D()->GetGridLayers(), A.getcommgrid3D()->GetGridRows(), A.getcommgrid3D()->GetGridCols(), A.isSpecial()));
SpParMat3D<IU, NUO, UDERO> C(localResultant, grid3d, A.isColSplit(), A.isSpecial());
return C;
}
template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat3D<IU, NUO, UDERO> MemEfficientSpGEMM3D(SpParMat3D<IU, NU1, UDERA> & A, SpParMat3D<IU, NU2, UDERB> & B,
int phases, NUO hardThreshold, IU selectNum, IU recoverNum, NUO recoverPct, int kselectVersion, int64_t perProcessMemory){
int myrank;
MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
typedef typename UDERA::LocalIT LIA;
typedef typename UDERB::LocalIT LIB;
typedef typename UDERO::LocalIT LIC;
/*
* Check if A and B are multipliable
* */
if(A.getncol() != B.getnrow()){
std::ostringstream outs;
outs << "Cannot multiply, dimensions do not match" << std::endl;
outs << A.getncol() << " != " << B.getnrow() << std::endl;
SpParHelper::Print(outs.str());
MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
}
/*
 * If the provided number of phases is too low or too high, reset it to 1
* */
if(phases < 1 || phases >= B.getncol()){
SpParHelper::Print("[MemEfficientSpGEMM3D]\tThe value of phases is too small or too large. Resetting to 1.\n");
phases = 1;
}
double t0, t1, t2, t3, t4, t5, t6, t7, t8, t9; // To time different parts of the function
#ifdef TIMING
MPI_Barrier(B.getcommgrid3D()->GetWorld());
t0 = MPI_Wtime();
#endif
/*
* If per process memory is provided then calculate number of phases
* Otherwise, proceed to multiplication.
* */
if(perProcessMemory > 0) {
int p, calculatedPhases;
MPI_Comm_size(A.getcommgrid3D()->GetLayerWorld(),&p);
int64_t perNNZMem_in = sizeof(IU)*2 + sizeof(NU1);
int64_t perNNZMem_out = sizeof(IU)*2 + sizeof(NUO);
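// Per-nonzero memory accounting here appears to assume tuple (triples) storage:
// two indices of type IU plus one value (NU1 for the inputs, NUO for the output).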
int64_t lannz = A.GetLayerMat()->getlocalnnz();
int64_t gannz = 0;
// Get maximum number of nnz owned by one process
MPI_Allreduce(&lannz, &gannz, 1, MPIType<int64_t>(), MPI_MAX, A.getcommgrid3D()->GetWorld());
//int64_t ginputMem = gannz * perNNZMem_in * 4; // Four pieces per process: one piece of own A and B, one piece of received A and B
int64_t ginputMem = gannz * perNNZMem_in * 5; // One extra copy for safety
// Estimate per-layer nnz after multiplication. After this estimation each process knows an estimate of
// how many nnz the corresponding layer will have after the layerwise operation.
int64_t asquareNNZ = EstPerProcessNnzSUMMA(*(A.GetLayerMat()), *(B.GetLayerMat()), true);
int64_t gasquareNNZ;
MPI_Allreduce(&asquareNNZ, &gasquareNNZ, 1, MPIType<int64_t>(), MPI_MAX, A.getcommgrid3D()->GetFiberWorld());
// At most two copies: one of the process's own, another received from the fiber reduction
int64_t gasquareMem = gasquareNNZ * perNNZMem_out * 2;
// Calculate estimated average degree after multiplication
int64_t d = ceil( ( ( gasquareNNZ / B.getcommgrid3D()->GetGridLayers() ) * sqrt(p) ) / B.GetLayerMat()->getlocalcols() );
// Calculate how many nnz per column are left after k-select: the minimum of the average degree and the k-select parameters.
int64_t k = std::min(int64_t(std::max(selectNum, recoverNum)), d );
//estimate output memory
int64_t postKselectOutputNNZ = ceil(( (B.GetLayerMat()->getlocalcols() / B.getcommgrid3D()->GetGridLayers() ) * k)/sqrt(p)); // If kselect is run
int64_t postKselectOutputMem = postKselectOutputNNZ * perNNZMem_out * 2;
double remainingMem = perProcessMemory*1000000000 - ginputMem - postKselectOutputMem;
int64_t kselectMem = B.GetLayerMat()->getlocalcols() * k * sizeof(NUO) * 3;
//inputMem + outputMem + asquareMem/phases + kselectmem/phases < memory
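// Rearranged, the inequality above gives: phases >= (gasquareMem + kselectMem) / remainingMem,
// with remainingMem = per-process memory in bytes minus ginputMem and postKselectOutputMem;
// the ceil below picks the smallest integer number of phases that satisfies it.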
if(remainingMem > 0){
calculatedPhases = ceil( (gasquareMem + kselectMem) / remainingMem ); // If kselect is run
}
else calculatedPhases = -1;
int gCalculatedPhases;
MPI_Allreduce(&calculatedPhases, &gCalculatedPhases, 1, MPI_INT, MPI_MAX, A.getcommgrid3D()->GetFiberWorld());
if(gCalculatedPhases > phases) phases = gCalculatedPhases;
}
else{
// Do nothing
}
#ifdef TIMING
MPI_Barrier(B.getcommgrid3D()->GetWorld());
t1 = MPI_Wtime();
mcl3d_symbolictime+=(t1-t0);
if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tSymbolic stage time: %lf\n", (t1-t0));
#endif
/*
 * Calculate, across fibers, which process should get how many columns after redistribution
* */
vector<LIB> divisions3d;
// Calculate split boundaries as if all contents of the layer are being redistributed along the fiber
// These boundaries will be used later on
B.CalculateColSplitDistributionOfLayer(divisions3d);
/*
* Split B according to calculated number of phases
* For better load balancing split B into nlayers*phases chunks
* */
vector<UDERB*> PiecesOfB;
vector<UDERB*> tempPiecesOfB;
UDERB CopyB = *(B.GetLayerMat()->seqptr());
CopyB.ColSplit(divisions3d, tempPiecesOfB); // Split B into `nlayers` chunks at first
for(int i = 0; i < tempPiecesOfB.size(); i++){
vector<UDERB*> temp;
tempPiecesOfB[i]->ColSplit(phases, temp); // Split each chunk of B into `phases` chunks
for(int j = 0; j < temp.size(); j++){
PiecesOfB.push_back(temp[j]);
}
}
vector<UDERO> toconcatenate;
if(myrank == 0){
fprintf(stderr, "[MemEfficientSpGEMM3D]\tRunning with phase: %d\n", phases);
}
for(int p = 0; p < phases; p++){
/*
 * At the start of each phase, take the appropriate pieces from the previously created pieces of the local B matrix.
 * Appropriate means the pieces that allow the 3D merge to be properly load balanced.
* */
vector<LIB> lbDivisions3d; // load balance friendly division
LIB totalLocalColumnInvolved = 0;
vector<UDERB*> targetPiecesOfB; // Pieces of B involved in current phase
for(int i = 0; i < PiecesOfB.size(); i++){
if(i % phases == p){
targetPiecesOfB.push_back(new UDERB(*(PiecesOfB[i])));
lbDivisions3d.push_back(PiecesOfB[i]->getncol());
totalLocalColumnInvolved += PiecesOfB[i]->getncol();
}
}
/*
* Create new local matrix by concatenating appropriately picked pieces
* */
UDERB * OnePieceOfB = new UDERB(0, (B.GetLayerMat())->seqptr()->getnrow(), totalLocalColumnInvolved, 0);
OnePieceOfB->ColConcatenate(targetPiecesOfB);
vector<UDERB*>().swap(targetPiecesOfB);
/*
* Create a new layer-wise distributed matrix with the newly created local matrix for this phase
* This matrix is used in SUMMA multiplication of respective layer
* */
SpParMat<IU, NU2, UDERB> OnePieceOfBLayer(OnePieceOfB, A.getcommgrid3D()->layerWorld);
#ifdef TIMING
t0 = MPI_Wtime();
#endif
/*
* SUMMA Starts
* */
int stages, dummy; // last two parameters of ProductGrid are ignored for this multiplication
std::shared_ptr<CommGrid> GridC = ProductGrid((A.GetLayerMat()->getcommgrid()).get(),
(OnePieceOfBLayer.getcommgrid()).get(),
stages, dummy, dummy);
IU C_m = A.GetLayerMat()->seqptr()->getnrow();
IU C_n = OnePieceOfBLayer.seqptr()->getncol();
IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, stages);
IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages);
SpParHelper::GetSetSizes( *(A.GetLayerMat()->seqptr()), ARecvSizes, (A.GetLayerMat()->getcommgrid())->GetRowWorld() );
SpParHelper::GetSetSizes( *(OnePieceOfBLayer.seqptr()), BRecvSizes, (OnePieceOfBLayer.getcommgrid())->GetColWorld() );
// Remotely fetched matrices are stored as pointers
UDERA * ARecv;
UDERB * BRecv;
std::vector< SpTuples<IU,NUO> *> tomerge;
int Aself = (A.GetLayerMat()->getcommgrid())->GetRankInProcRow();
int Bself = (OnePieceOfBLayer.getcommgrid())->GetRankInProcCol();
double Abcast_time = 0;
double Bbcast_time = 0;
double Local_multiplication_time = 0;
for(int i = 0; i < stages; ++i) {
std::vector<IU> ess;
if(i == Aself){
ARecv = A.GetLayerMat()->seqptr(); // shallow-copy
}
else{
ess.resize(UDERA::esscount);
for(int j=0; j<UDERA::esscount; ++j) {
ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row
}
ARecv = new UDERA(); // first, create the object
}
#ifdef TIMING
t2 = MPI_Wtime();
#endif
if (Aself != i) {
ARecv->Create(ess);
}
Arr<IU,NU1> Aarrinfo = ARecv->GetArrays();
for(unsigned int idx = 0; idx < Aarrinfo.indarrs.size(); ++idx) {
MPI_Bcast(Aarrinfo.indarrs[idx].addr, Aarrinfo.indarrs[idx].count, MPIType<IU>(), i, GridC->GetRowWorld());
}
for(unsigned int idx = 0; idx < Aarrinfo.numarrs.size(); ++idx) {
MPI_Bcast(Aarrinfo.numarrs[idx].addr, Aarrinfo.numarrs[idx].count, MPIType<NU1>(), i, GridC->GetRowWorld());
}
#ifdef TIMING
t3 = MPI_Wtime();
mcl3d_Abcasttime += (t3-t2);
Abcast_time += (t3-t2);
#endif
ess.clear();
if(i == Bself){
BRecv = OnePieceOfBLayer.seqptr(); // shallow-copy
}
else{
ess.resize(UDERB::esscount);
for(int j=0; j<UDERB::esscount; ++j) {
ess[j] = BRecvSizes[j][i];
}
BRecv = new UDERB();
}
MPI_Barrier(A.GetLayerMat()->getcommgrid()->GetWorld());
#ifdef TIMING
t2 = MPI_Wtime();
#endif
if (Bself != i) {
BRecv->Create(ess);
}
Arr<IU,NU2> Barrinfo = BRecv->GetArrays();
for(unsigned int idx = 0; idx < Barrinfo.indarrs.size(); ++idx) {
MPI_Bcast(Barrinfo.indarrs[idx].addr, Barrinfo.indarrs[idx].count, MPIType<IU>(), i, GridC->GetColWorld());
}
for(unsigned int idx = 0; idx < Barrinfo.numarrs.size(); ++idx) {
MPI_Bcast(Barrinfo.numarrs[idx].addr, Barrinfo.numarrs[idx].count, MPIType<NU2>(), i, GridC->GetColWorld());
}
#ifdef TIMING
t3 = MPI_Wtime();
mcl3d_Bbcasttime += (t3-t2);
Bbcast_time += (t3-t2);
#endif
#ifdef TIMING
t2 = MPI_Wtime();
#endif
SpTuples<IU,NUO> * C_cont = LocalSpGEMMHash<SR, NUO>
(*ARecv, *BRecv, // parameters themselves
i != Aself, // 'delete A' condition
i != Bself, // 'delete B' condition
false); // not to sort each column
#ifdef TIMING
t3 = MPI_Wtime();
mcl3d_localspgemmtime += (t3-t2);
Local_multiplication_time += (t3-t2);
#endif
if(!C_cont->isZero()) tomerge.push_back(C_cont);
}
SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);
#ifdef TIMING
t2 = MPI_Wtime();
#endif
SpTuples<IU,NUO> * C_tuples = MultiwayMergeHash<SR>(tomerge, C_m, C_n, true, false); // Delete input arrays and do not sort
#ifdef TIMING
t3 = MPI_Wtime();
mcl3d_SUMMAmergetime += (t3-t2);
#endif
#ifdef TIMING
if(myrank == 0){
fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tAbcast_time: %lf\n", p, Abcast_time);
fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tBbcast_time: %lf\n", p, Bbcast_time);
fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tLocal_multiplication_time: %lf\n", p, Local_multiplication_time);
fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tSUMMA Merge time: %lf\n", p, (t3-t2));
}
#endif
/*
* SUMMA Ends
* */
#ifdef TIMING
t1 = MPI_Wtime();
mcl3d_SUMMAtime += (t1-t0);
if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tSUMMA time: %lf\n", p, (t1-t0));
#endif
#ifdef TIMING
mcl3d_proc_nnzc_pre_red += C_tuples->getnnz();
#endif
/*
* 3d-reduction starts
* */
#ifdef TIMING
t0 = MPI_Wtime();
t2 = MPI_Wtime();
#endif
MPI_Datatype MPI_tuple;
MPI_Type_contiguous(sizeof(std::tuple<LIC,LIC,NUO>), MPI_CHAR, &MPI_tuple);
MPI_Type_commit(&MPI_tuple);
/*
* Create a profile with information regarding data to be sent and received between layers
 * These allocations need to be `int` specifically because some of these arrays are used in communication.
 * This requirement comes from MPI, as MPI_Alltoallv takes pointers to int for counts and displacements.
* */
int * sendcnt = new int[A.getcommgrid3D()->GetGridLayers()];
int * sendprfl = new int[A.getcommgrid3D()->GetGridLayers()*3];
int * sdispls = new int[A.getcommgrid3D()->GetGridLayers()]();
int * recvcnt = new int[A.getcommgrid3D()->GetGridLayers()];
int * recvprfl = new int[A.getcommgrid3D()->GetGridLayers()*3];
int * rdispls = new int[A.getcommgrid3D()->GetGridLayers()]();
vector<IU> lbDivisions3dPrefixSum(lbDivisions3d.size());
lbDivisions3dPrefixSum[0] = 0;
std::partial_sum(lbDivisions3d.begin(), lbDivisions3d.end()-1, lbDivisions3dPrefixSum.begin()+1);
ColLexiCompare<IU,NUO> comp;
IU totsend = C_tuples->getnnz();
#ifdef TIMING
t3 = MPI_Wtime();
if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tAllocation of alltoall information: %lf\n", p, (t3-t2));
#endif
#ifdef TIMING
t2 = MPI_Wtime();
#endif
#pragma omp parallel for
for(int i=0; i < A.getcommgrid3D()->GetGridLayers(); ++i){
IU start_col = lbDivisions3dPrefixSum[i];
IU end_col = lbDivisions3dPrefixSum[i] + lbDivisions3d[i];
std::tuple<IU, IU, NUO> search_tuple_start(0, start_col, NUO());
std::tuple<IU, IU, NUO> search_tuple_end(0, end_col, NUO());
std::tuple<IU, IU, NUO>* start_it = std::lower_bound(C_tuples->tuples, C_tuples->tuples + C_tuples->getnnz(), search_tuple_start, comp);
std::tuple<IU, IU, NUO>* end_it = std::lower_bound(C_tuples->tuples, C_tuples->tuples + C_tuples->getnnz(), search_tuple_end, comp);
// This cast is important from a semantic point of view
sendcnt[i] = (int)(end_it - start_it);
sendprfl[i*3+0] = (int)(sendcnt[i]); // Number of nonzeros in ith chunk
sendprfl[i*3+1] = (int)(A.GetLayerMat()->seqptr()->getnrow()); // Number of rows in ith chunk
sendprfl[i*3+2] = (int)(lbDivisions3d[i]); // Number of columns in ith chunk
}
std::partial_sum(sendcnt, sendcnt+A.getcommgrid3D()->GetGridLayers()-1, sdispls+1);
#ifdef TIMING
t3 = MPI_Wtime();
if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tGetting Alltoall data ready: %lf\n", p, (t3-t2));
#endif
// The send profile is ready. Now update the tuples to reflect the correct column ids after the column split.
#ifdef TIMING
t2 = MPI_Wtime();
#endif
for(int i=0; i < A.getcommgrid3D()->GetGridLayers(); ++i){
#pragma omp parallel for schedule(static)
for(int j = 0; j < sendcnt[i]; j++){
std::get<1>(C_tuples->tuples[sdispls[i]+j]) = std::get<1>(C_tuples->tuples[sdispls[i]+j]) - lbDivisions3dPrefixSum[i];
}
}
#ifdef TIMING
t3 = MPI_Wtime();
if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tGetting Alltoallv data ready: %lf\n", p, (t3-t2));
#endif
#ifdef TIMING
t2 = MPI_Wtime();
#endif
MPI_Alltoall(sendprfl, 3, MPI_INT, recvprfl, 3, MPI_INT, A.getcommgrid3D()->fiberWorld);
#ifdef TIMING
t3 = MPI_Wtime();
if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tAlltoall: %lf\n", p, (t3-t2));
#endif
#ifdef TIMING
t2 = MPI_Wtime();
#endif
for(int i = 0; i < A.getcommgrid3D()->GetGridLayers(); i++) recvcnt[i] = recvprfl[i*3];
std::partial_sum(recvcnt, recvcnt+A.getcommgrid3D()->GetGridLayers()-1, rdispls+1);
IU totrecv = std::accumulate(recvcnt,recvcnt+A.getcommgrid3D()->GetGridLayers(), static_cast<IU>(0));
std::tuple<LIC,LIC,NUO>* recvTuples = static_cast<std::tuple<LIC,LIC,NUO>*> (::operator new (sizeof(std::tuple<LIC,LIC,NUO>[totrecv])));
#ifdef TIMING
t3 = MPI_Wtime();
if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tAllocation of receive data: %lf\n", p, (t3-t2));
#endif
#ifdef TIMING
t2 = MPI_Wtime();
#endif
MPI_Alltoallv(C_tuples->tuples, sendcnt, sdispls, MPI_tuple, recvTuples, recvcnt, rdispls, MPI_tuple, A.getcommgrid3D()->fiberWorld);
delete C_tuples;
#ifdef TIMING
t3 = MPI_Wtime();
if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tAlltoallv: %lf\n", p, (t3-t2));
#endif
#ifdef TIMING
t2 = MPI_Wtime();
#endif
vector<SpTuples<IU, NUO>*> recvChunks(A.getcommgrid3D()->GetGridLayers());
#pragma omp parallel for
for (int i = 0; i < A.getcommgrid3D()->GetGridLayers(); i++){
recvChunks[i] = new SpTuples<LIC, NUO>(recvcnt[i], recvprfl[i*3+1], recvprfl[i*3+2], recvTuples + rdispls[i], true, false);
}
#ifdef TIMING
t3 = MPI_Wtime();
if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\trecvChunks creation: %lf\n", p, (t3-t2));
#endif
#ifdef TIMING
t2 = MPI_Wtime();
#endif
// Free all memory except recvTuples, because that memory holds the data of the newly created local matrices (recvChunks) after receiving.
DeleteAll(sendcnt, sendprfl, sdispls);
DeleteAll(recvcnt, recvprfl, rdispls);
MPI_Type_free(&MPI_tuple);
#ifdef TIMING
t3 = MPI_Wtime();
if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tMemory freeing: %lf\n", p, (t3-t2));
#endif
/*
* 3d-reduction ends
* */
#ifdef TIMING
t1 = MPI_Wtime();
mcl3d_reductiontime += (t1-t0);
if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tReduction time: %lf\n", p, (t1-t0));
#endif
#ifdef TIMING
t0 = MPI_Wtime();
#endif
/*
* 3d-merge starts
* */
SpTuples<IU, NUO> * merged_tuples = MultiwayMergeHash<SR, IU, NUO>(recvChunks, recvChunks[0]->getnrow(), recvChunks[0]->getncol(), false, false); // Do not delete
#ifdef TIMING
t1 = MPI_Wtime();
mcl3d_3dmergetime += (t1-t0);
if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\t3D Merge time: %lf\n", p, (t1-t0));
mcl3d_proc_nnzc_post_red += merged_tuples->getnnz();
#endif
/*
* 3d-merge ends
* */
// Discard merged result if not needed anymore
//delete merged_tuples;
#ifdef TIMING
t0 = MPI_Wtime();
#endif
// Do not delete the elements of recvChunks, because that would give a segmentation fault due to a double free
::operator delete(recvTuples);
for(int i = 0; i < recvChunks.size(); i++){
recvChunks[i]->tuples_deleted = true; // Temporary patch to avoid memory leak and segfault
delete recvChunks[i]; // As the patch is used, now delete each element of recvChunks
}
vector<SpTuples<IU,NUO>*>().swap(recvChunks); // As the patch is used, now delete recvChunks
// This operation is not needed if the result can be used and discarded right away
// It is done here because the result is needed by MCLPruneRecoverySelect
UDERO * phaseResultant = new UDERO(*merged_tuples, false);
SpParMat<IU, NUO, UDERO> phaseResultantLayer(phaseResultant, A.getcommgrid3D()->layerWorld);
MCLPruneRecoverySelect(phaseResultantLayer, hardThreshold, selectNum, recoverNum, recoverPct, kselectVersion);
#ifdef TIMING
t1 = MPI_Wtime();
mcl3d_kselecttime += (t1-t0);
if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tMCLPruneRecoverySelect time: %lf\n",p, (t1-t0));
#endif
toconcatenate.push_back(phaseResultantLayer.seq());
#ifdef TIMING
if(myrank == 0) fprintf(stderr, "***\n");
#endif
}
for(int i = 0; i < PiecesOfB.size(); i++) delete PiecesOfB[i];
std::shared_ptr<CommGrid3D> grid3d;
grid3d.reset(new CommGrid3D(A.getcommgrid3D()->GetWorld(), A.getcommgrid3D()->GetGridLayers(), A.getcommgrid3D()->GetGridRows(), A.getcommgrid3D()->GetGridCols(), A.isSpecial()));
UDERO * localResultant = new UDERO(0, A.GetLayerMat()->seqptr()->getnrow(), divisions3d[A.getcommgrid3D()->rankInFiber], 0);
localResultant->ColConcatenate(toconcatenate);
SpParMat3D<IU, NUO, UDERO> C3D(localResultant, grid3d, A.isColSplit(), A.isSpecial());
return C3D;
}
}
#endif
|
atomic-12.c | /* { dg-do run } */
extern void abort (void);
_Bool v, x1, x2, x3, x4, x5, x6;
void
foo (void)
{
#pragma omp atomic capture
v = ++x1;
if (!v)
abort ();
#pragma omp atomic capture
v = x2++;
if (v)
abort ();
#pragma omp atomic capture
v = --x3;
if (v)
abort ();
#pragma omp atomic capture
v = x4--;
if (!v)
abort ();
#pragma omp atomic capture
{ v = x5; x5 |= 1; }
if (v)
abort ();
#pragma omp atomic capture
{ x6 |= 1; v = x6; }
if (!v)
abort ();
}
void
bar (void)
{
#pragma omp atomic write
x1 = 0;
#pragma omp atomic write
x2 = 0;
#pragma omp atomic write
x3 = 1;
#pragma omp atomic write
x4 = 1;
#pragma omp atomic capture
{ ++x1; v = x1; }
if (!v)
abort ();
#pragma omp atomic capture
{ v = x2; x2++; }
if (v)
abort ();
#pragma omp atomic capture
{ --x3; v = x3; }
if (v)
abort ();
#pragma omp atomic capture
{ v = x4; x4--; }
if (!v)
abort ();
#pragma omp atomic write
x1 = 0;
#pragma omp atomic write
x2 = 0;
#pragma omp atomic write
x3 = 1;
#pragma omp atomic write
x4 = 1;
#pragma omp atomic capture
{ x1++; v = x1; }
if (!v)
abort ();
#pragma omp atomic capture
{ v = x2; ++x2; }
if (v)
abort ();
#pragma omp atomic capture
{ x3--; v = x3; }
if (v)
abort ();
#pragma omp atomic capture
{ v = x4; --x4; }
if (!v)
abort ();
}
int
main ()
{
#pragma omp atomic write
x3 = 1;
#pragma omp atomic write
x4 = 1;
foo ();
bar ();
return 0;
}
|
SVRGUpdater.h | /*
* Copyright 2016 [See AUTHORS file for list of authors]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _SVRG_UPDATER_
#define _SVRG_UPDATER_
#include "Updater/Updater.h"
class SVRGUpdater : public Updater {
protected:
double n_updates_so_far;
// Vectors for computing SVRG related data.
std::vector<double> lambda;
std::vector<std::vector<double>> h_x, h_y;
std::vector<double> g; // global
// Vectors for computing the sum of gradients (g).
std::vector<std::vector<double>> g_kappa, g_h_bar;
std::vector<double> g_lambda;
std::vector<double> n_zeroes;
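// Note on the pieces below (describing what this file computes, not the exact update
// rule of the surrounding framework): Mu() returns the Lambda-based scaling of a
// coordinate, Nu() returns the full-gradient correction (the averaged g minus its
// Lambda component), and H() returns the per-datapoint SVRG difference between the
// H_bar terms intended to be evaluated at the current model (h_x) and at the epoch
// snapshot (h_y), all scaled by the learning rate. (The snapshot copy itself is
// still a TODO in EpochBegin.)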
void PrepareMu(const std::vector<int> &coordinates) override {
for (int i = 0; i < coordinates.size(); i++) {
int index = coordinates[i];
model->Lambda(index, lambda[index], *model);
}
}
void PrepareH(const Datapoint &datapoint) override {
model->PrecomputeCoefficients(datapoint, gradient, *model);
int n_coords = model->NumCoordinates();
for (const auto &c : datapoint.GetCoordinates())
model->H_bar(c, h_x[c], gradient, *model);
model->PrecomputeCoefficients(datapoint, gradient, *model);
for (const auto &c : datapoint.GetCoordinates())
model->H_bar(c, h_y[c], gradient, *model);
}
double H(int coordinate, int index_into_coordinate_vector) override {
return -FLAGS_learning_rate *
(h_x[coordinate][index_into_coordinate_vector] - h_y[coordinate][index_into_coordinate_vector]);
}
double Nu(int coordinate, int index_into_coordinate_vector) override {
return FLAGS_learning_rate *
(g[coordinate * model->NumCoordinates() + index_into_coordinate_vector] -
lambda[coordinate] * model->Data(coordinate, index_into_coordinate_vector, false));
}
double Mu(int coordinate) override { return lambda[coordinate] * FLAGS_learning_rate; }
public:
SVRGUpdater(Model *model, std::vector<Datapoint *> &datapoints) : Updater(model, datapoints) {
g.resize(model->NumParameters() * model->NumCoordinates());
lambda.resize(model->NumParameters());
h_x.resize(model->NumParameters());
h_y.resize(model->NumParameters());
g_lambda.resize(model->NumParameters());
g_kappa.resize(model->NumParameters());
g_h_bar.resize(model->NumParameters());
n_zeroes.resize(model->NumParameters());
for (int i = 0; i < model->NumParameters(); i++) {
h_x[i].resize(model->NumCoordinates());
h_y[i].resize(model->NumCoordinates());
g_kappa[i].resize(model->NumCoordinates());
g_h_bar[i].resize(model->NumCoordinates());
n_zeroes[i] = datapoints.size();
}
// Compute number of zeroes for each column (parameters) of the model.
int sum = 0;
for (int dp = 0; dp < datapoints.size(); dp++) {
for (auto &coordinate : datapoints[dp]->GetCoordinates()) {
n_zeroes[coordinate]--;
sum++;
}
}
}
void EpochBegin() override {
Updater::EpochBegin();
// TODO: Make a copy of the model every epoch.
// Clear the sum of gradients.
std::fill(g.begin(), g.end(), 0);
// Compute average sum of gradients on the model copy.
int n_coords = model->NumCoordinates();
// zero gradients.
//#pragma omp parallel for num_threads(FLAGS_n_threads)
for (int coordinate = 0; coordinate < model->NumParameters(); coordinate++) {
model->Kappa(coordinate, g_kappa[coordinate], *model);
model->Lambda(coordinate, g_lambda[coordinate], *model);
for (int j = 0; j < n_coords; j++) {
g[coordinate * n_coords + j] =
(g_lambda[coordinate] * model->Data(coordinate, j, false) - g_kappa[coordinate][j]) *
n_zeroes[coordinate];
}
}
// non zero gradients. Essentially do SGD here, on the same partitioning pattern.
//#pragma omp parallel num_threads(FLAGS_n_threads)
{
for (int batch = 0; batch < datapoint_partitions->NumBatches(); batch++) {
//#pragma omp barrier
for (int index = 0; index < datapoint_partitions->NumDatapointsInBatch(FLAGS_wid, batch); index++) {
const auto &datapoint = datapoint_partitions->GetDatapoint(FLAGS_wid, batch, index);
gradient.datapoint = &datapoint;
model->PrecomputeCoefficients(datapoint, gradient, *model);
for (const auto &coord : datapoint.GetCoordinates()) {
model->H_bar(coord, g_h_bar[coord], gradient, *model);
model->Lambda(coord, g_lambda[coord], *model);
model->Kappa(coord, g_kappa[coord], *model);
}
for (const auto &coord : datapoint.GetCoordinates()) {
for (int j = 0; j < n_coords; j++) {
g[coord * n_coords + j] += g_lambda[coord] * model->Data(coord, j, false) -
g_kappa[coord][j] + g_h_bar[coord][j];
}
}
}
}
}
//#pragma omp parallel for num_threads(FLAGS_n_threads)
for (int i = 0; i < model->NumParameters(); i++) {
for (int j = 0; j < n_coords; j++) {
g[i * n_coords + j] /= datapoints.size();
}
}
}
~SVRGUpdater() {}
};
#endif
|
wdp.c | #include "wdp.h"
#include "dnormb.h"
#include "dnorme.h"
#include "dnormf.h"
#include "dnorms.h"
#include "d8sort.h"
double dnb(const fnat n, const double x[static restrict 1])
{
const fint incx = 1;
return dnormb_((const fint*)&n, x, &incx);
}
#ifndef USE_MKL
extern double BLAS_D(dot)(const fint n[static 1], const double x[static 1], const fint incx[static 1], const double y[static 1], const fint incy[static 1]);
#endif /* !USE_MKL */
double ddp(const fnat n, const double x[static restrict 1])
{
const fint incx = 1;
return sqrt(BLAS_D(dot)((const fint*)&n, x, &incx, x, &incx));
}
#ifndef USE_MKL
extern double BLAS_D(nrm2)(const fint n[static 1], const double x[static 1], const fint incx[static 1]);
#endif /* !USE_MKL */
double dn2(const fnat n, const double x[static restrict 1])
{
const fint incx = 1;
return BLAS_D(nrm2)((const fint*)&n, x, &incx);
}
double dnf(const fnat n, const double x[static restrict 1])
{
double e0, f0, e1, f1;
return dnormf_(&n, x, &e0, &f0, &e1, &f1);
}
double dne(const fnat n, const double x[static restrict VDL])
{
double e0, f0, e1, f1;
return dnorme_(&n, x, &e0, &f0, &e1, &f1);
}
double dns(const fnat n, const double x[static restrict VDL])
{
double e0, f0, e1, f1;
return dnorms_(&n, x, &e0, &f0, &e1, &f1);
}
double dnc(const fnat n, const double x[static restrict VDL])
{
#ifdef DZNRME_SEQRED
alignas(VA) double sq[VDL]
#ifndef NDEBUG
= { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 }
#endif /* !NDEBUG */
;
#endif /* DZNRME_SEQRED */
register __m512d vsq = _mm512_setzero_pd();
for (fnat i = 0u; i < n; i += VDL) {
register __m512d xi = _mm512_load_pd(x + i);
/* VDSORT(xi); */
vsq = _mm512_fmadd_pd(xi, xi, vsq);
}
VDSORT(vsq);
#ifdef DZNRME_SEQRED
_mm512_store_pd(sq, vsq);
*sq += sq[1u];
*sq += sq[2u];
*sq += sq[3u];
*sq += sq[4u];
*sq += sq[5u];
*sq += sq[6u];
*sq += sq[7u];
return sqrt(*sq);
#else /* !DZNRME_SEQRED */
vsq = _mm512_permutexvar_pd(_mm512_set_epi64(7, 6, 3, 2, 5, 4, 1, 0), vsq);
register const __m256d sq4 = _mm256_hadd_pd(_mm512_extractf64x4_pd(vsq, 0), _mm512_extractf64x4_pd(vsq, 1));
register const __m128d sq2 = _mm_hadd_pd(_mm256_extractf128_pd(sq4, 0), _mm256_extractf128_pd(sq4, 1));
register const __m128 sqs = _mm_castpd_ps(sq2);
register const __m128d sqd = _mm_castps_pd(_mm_movehl_ps(sqs, sqs));
return sqrt(_mm_cvtsd_f64(sq2) + _mm_cvtsd_f64(sqd));
#endif /* ?DZNRME_SEQRED */
}
double dnd(const fnat n, const double x[static restrict VDL])
{
alignas(VA) double sq[VDL]
#ifndef NDEBUG
= { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 }
#endif /* !NDEBUG */
;
register __m512d vsq = _mm512_setzero_pd();
for (fnat i = 0u; i < n; i += VDL) {
register __m512d xi = _mm512_load_pd(x + i);
VDSORT(xi);
vsq = _mm512_fmadd_pd(xi, xi, vsq);
}
_mm512_store_pd(sq, vsq);
*sq += sq[1u];
*sq += sq[2u];
*sq += sq[3u];
*sq += sq[4u];
*sq += sq[5u];
*sq += sq[6u];
*sq += sq[7u];
return sqrt(*sq);
}
double wsq(const fnat n, const double x[static restrict 1])
{
wide sq = W_ZERO;
#ifdef WSQ_OMP
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,x) reduction(+:sq)
#endif /* !_OPENMP */
#endif /* WSQ_OMP */
for (fnat i = 0u; i < n; ++i) {
const wide w = x[i];
sq = fmaw(w, w, sq);
}
return (double)sqrtw(sq);
}
double dre(const double c, const double e)
{
const double a = fabs(c - e);
return ((e == 0.0) ? ((a == 0.0) ? 0.0 : (a / e)) : (a / e));
}
|
fixed_version.c | #include<stdio.h>
int main(){
int sum = 1;
int i =1;
// increase sum by one each iteration using OpenMP
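// firstprivate(i) gives every thread its own copy of i for the loop's start value,
// and reduction(+:sum) combines the per-thread partial sums at the end, so the
// final value is 1 (initial) + 99 (iterations j = 1..99) = 100.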
#pragma omp parallel for firstprivate(i) reduction( + : sum )
for (int j = i; j < 100; j++) {
sum +=1;
}
printf("sum is %d\n",sum);
}
|
double_reduction_plus.c |
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
int main()
{
double result = 0;
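/* Each thread adds its own thread id to its private copy of result; the
   reduction(+:result) clause then sums those copies, so with T threads the
   program prints 0 + 1 + ... + (T-1) = T*(T-1)/2. */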
#pragma omp parallel reduction(+:result)
{
int rank = omp_get_thread_num();
result += rank;
}
printf("Result: %f\n", result);
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaConcept.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <deque>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
/// Keeps track of the expected type during expression parsing. The type is tied to
/// a particular token; all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// us to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder() = default;
explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function, as it stores a
/// function_ref; clients should make sure all calls to get() with the same
/// location happen while the function_ref is alive.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
QualType get(SourceLocation Tok) const {
if (Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
return QualType();
}
private:
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// A key method to reduce duplicate debug info from Sema.
virtual void anchor();
///Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See comment in below overload for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
/// The maximum alignment, same as in llvm::Value. We duplicate them here
/// because that allows us not to duplicate the constants in clang code,
/// which we would otherwise have to do since we can't directly use the llvm constants.
/// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp
///
/// This is the greatest alignment value supported by load, store, and alloca
/// instructions, and global values.
static const unsigned MaxAlignmentExponent = 29;
static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent;
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions CurFPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
// #pragma pack and align.
class AlignPackInfo {
public:
// `Native` represents the default align mode, which may vary based on the
// platform.
enum Mode : unsigned char { Native, Natural, Packed, Mac68k };
// #pragma pack info constructor
AlignPackInfo(AlignPackInfo::Mode M, unsigned Num, bool IsXL)
: PackAttr(true), AlignMode(M), PackNumber(Num), XLStack(IsXL) {
assert(Num == PackNumber && "The pack number has been truncated.");
}
// #pragma align info constructor
AlignPackInfo(AlignPackInfo::Mode M, bool IsXL)
: PackAttr(false), AlignMode(M),
PackNumber(M == Packed ? 1 : UninitPackVal), XLStack(IsXL) {}
explicit AlignPackInfo(bool IsXL) : AlignPackInfo(Native, IsXL) {}
AlignPackInfo() : AlignPackInfo(Native, false) {}
// When an AlignPackInfo itself cannot be used, this returns a 32-bit
// integer encoding for it. This should only be passed to
// AlignPackInfo::getFromRawEncoding; it should not be inspected directly.
static uint32_t getRawEncoding(const AlignPackInfo &Info) {
std::uint32_t Encoding{};
if (Info.IsXLStack())
Encoding |= IsXLMask;
Encoding |= static_cast<uint32_t>(Info.getAlignMode()) << 1;
if (Info.IsPackAttr())
Encoding |= PackAttrMask;
Encoding |= static_cast<uint32_t>(Info.getPackNumber()) << 4;
return Encoding;
}
static AlignPackInfo getFromRawEncoding(unsigned Encoding) {
bool IsXL = static_cast<bool>(Encoding & IsXLMask);
AlignPackInfo::Mode M =
static_cast<AlignPackInfo::Mode>((Encoding & AlignModeMask) >> 1);
int PackNumber = (Encoding & PackNumMask) >> 4;
if (Encoding & PackAttrMask)
return AlignPackInfo(M, PackNumber, IsXL);
return AlignPackInfo(M, IsXL);
}
bool IsPackAttr() const { return PackAttr; }
bool IsAlignAttr() const { return !PackAttr; }
Mode getAlignMode() const { return AlignMode; }
unsigned getPackNumber() const { return PackNumber; }
bool IsPackSet() const {
// #pragma align, #pragma pack(), and #pragma pack(0) do not set the pack
// attribute on a decl.
return PackNumber != UninitPackVal && PackNumber != 0;
}
bool IsXLStack() const { return XLStack; }
bool operator==(const AlignPackInfo &Info) const {
return std::tie(AlignMode, PackNumber, PackAttr, XLStack) ==
std::tie(Info.AlignMode, Info.PackNumber, Info.PackAttr,
Info.XLStack);
}
bool operator!=(const AlignPackInfo &Info) const {
return !(*this == Info);
}
private:
/// \brief True if this is a pragma pack attribute,
/// not a pragma align attribute.
bool PackAttr;
/// \brief The alignment mode that is in effect.
Mode AlignMode;
/// \brief The pack number of the stack.
unsigned char PackNumber;
/// \brief True if it is a XL #pragma align/pack stack.
bool XLStack;
/// \brief Uninitialized pack value.
static constexpr unsigned char UninitPackVal = -1;
// Masks to encode and decode an AlignPackInfo.
static constexpr uint32_t IsXLMask{0x0000'0001};
static constexpr uint32_t AlignModeMask{0x0000'0006};
static constexpr uint32_t PackAttrMask{0x0000'0008};
static constexpr uint32_t PackNumMask{0x0000'01F0};
};
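// Illustrative sketch of the raw-encoding round trip described above (the
// concrete mode and pack number are arbitrary): an AlignPackInfo can be
// flattened to a 32-bit integer and reconstructed losslessly.
//
//   AlignPackInfo Info(AlignPackInfo::Packed, /*Num=*/4, /*IsXL=*/false);
//   uint32_t Raw = AlignPackInfo::getRawEncoding(Info);
//   AlignPackInfo Restored = AlignPackInfo::getFromRawEncoding(Raw);
//   assert(Restored == Info && "round trip should be lossless");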
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel, ValueType Value) {
if (Action == PSK_Reset) {
CurrentValue = DefaultValue;
CurrentPragmaLocation = PragmaLocation;
return;
}
if (Action & PSK_Push)
Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation,
PragmaLocation);
else if (Action & PSK_Pop) {
if (!StackSlotLabel.empty()) {
// If we've got a label, try to find it and jump there.
auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) {
return x.StackSlotLabel == StackSlotLabel;
});
// If we found the label, pop from there.
if (I != Stack.rend()) {
CurrentValue = I->Value;
CurrentPragmaLocation = I->PragmaLocation;
Stack.erase(std::prev(I.base()), Stack.end());
}
} else if (!Stack.empty()) {
// We do not have a label, just pop the last entry.
CurrentValue = Stack.back().Value;
CurrentPragmaLocation = Stack.back().PragmaLocation;
Stack.pop_back();
}
}
if (Action & PSK_Set) {
CurrentValue = Value;
CurrentPragmaLocation = PragmaLocation;
}
}
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method() {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
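// Illustrative sketch of Act() semantics (assumes a PragmaStack<StringLiteral *>
// such as DataSegStack below, plus pragma locations and a StringLiteral from
// the parser): a push followed by a labeled pop restores the value that was
// current at the time of the push.
//
//   DataSegStack.Act(PushLoc, PSK_Push_Set, "MyLabel", NewSeg);
//   // DataSegStack.CurrentValue == NewSeg here.
//   DataSegStack.Act(PopLoc, PSK_Pop, "MyLabel", nullptr);
//   // The value that was current before the push is restored.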
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
PragmaStack<AlignPackInfo> AlignPackStack;
// The current #pragma align/pack values and locations at each #include.
struct AlignPackIncludeState {
AlignPackInfo CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<AlignPackIncludeState, 8> AlignPackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// This stack tracks the current state of Sema.CurFPFeatures.
PragmaStack<FPOptionsOverride> FpPragmaStack;
FPOptionsOverride CurFPFeatureOverrides() {
FPOptionsOverride result;
if (!FpPragmaStack.hasValue()) {
result = FPOptionsOverride();
} else {
result = FpPragmaStack.CurrentValue;
}
return result;
}
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This is an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression.
SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>,
llvm::SmallPtrSet<Expr *, 4>>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
/// The index of the first FunctionScope that corresponds to the current
/// context.
unsigned FunctionScopesStart = 0;
ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const {
return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart,
FunctionScopes.end());
}
/// Stack containing information needed when in C++2a an 'auto' is encountered
/// in a function declaration parameter type specifier in order to invent a
/// corresponding template parameter in the enclosing abbreviated function
/// template. This information is also present in LambdaScopeInfo, stored in
/// the FunctionScopes stack.
SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;
/// The index of the first InventedParameterInfo that refers to the current
/// context.
unsigned InventedParameterInfosStart = 0;
ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const {
return llvm::makeArrayRef(InventedParameterInfos.begin() +
InventedParameterInfosStart,
InventedParameterInfos.end());
}
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of the translation unit.
///
/// This list contains class member fields and the locations of
/// delete-expressions for which it could not be determined whether they
/// mismatch the new-expression used in the field's initializer.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
/// All the external declarations encountered and used in the TU.
SmallVector<VarDecl *, 4> ExternalDeclarations;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
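// Illustrative usage sketch (assumes a Sema instance S): diagnostics are not
// delayed while an "undelayed" state is pushed, and the previous pool is
// restored by the matching pop.
//
//   Sema::ProcessingContextState State = S.DelayedDiagnostics.pushUndelayed();
//   // ... work during which access/deprecation diagnostics are not delayed ...
//   S.DelayedDiagnostics.popUndelayed(State);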
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
unsigned SavedFunctionScopesStart;
unsigned SavedInventedParameterInfosStart;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride),
SavedFunctionScopesStart(S.FunctionScopesStart),
SavedInventedParameterInfosStart(S.InventedParameterInfosStart)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
// Any saved FunctionScopes do not refer to this context.
S.FunctionScopesStart = S.FunctionScopes.size();
S.InventedParameterInfosStart = S.InventedParameterInfos.size();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
S.FunctionScopesStart = SavedFunctionScopesStart;
S.InventedParameterInfosStart = SavedInventedParameterInfosStart;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
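// Illustrative usage sketch (assumes a Sema instance S and a DeclContext *DC):
// ContextRAII temporarily makes DC the current declaration context and
// restores the previous one either at pop() or at the end of the scope.
//
//   {
//     Sema::ContextRAII SavedContext(S, DC);
//     // S.CurContext == DC in here.
//   } // The destructor calls pop(); the previous context is restored.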
/// Whether the AST is currently being rebuilt to correct immediate
/// invocations. Immediate invocation candidates and references to consteval
/// functions aren't tracked when this is set.
bool RebuildingImmediateInvocation = false;
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
bool isConstantEvaluated() {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
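// Illustrative usage sketch (assumes a Sema instance S and the FunctionDecl
// *FD being synthesized): the scope pushes FD as the current context plus a
// PotentiallyEvaluated expression context, and tears both down on destruction.
//
//   {
//     Sema::SynthesizedFunctionScope Scope(S, FD);
//     Scope.addContextNote(UseLoc); // optional "while defining ..." note
//     // ... build and attach the synthesized body ...
//   }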
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before being declared. Rare. May alias another
/// identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before being declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library reside.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// Expressions appearing as the LHS of a volatile assignment in this
/// context. We produce a warning for these when popping the context if
/// they are not discarded-value expressions nor unevaluated operands.
SmallVector<Expr*, 2> VolatileAssignmentLHSs;
/// Set of candidates for starting an immediate invocation.
llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates;
/// Set of DeclRefExprs referencing a consteval function when used in a
/// context not already known to be immediately invoked.
llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// arguments.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedButUsed - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
/// This is not a defaultable comparison operator.
None,
/// This is an operator== that should be implemented as a series of
/// subobject comparisons.
Equal,
/// This is an operator<=> that should be implemented as a series of
/// subobject comparisons.
ThreeWay,
/// This is an operator!= that should be implemented as a rewrite in terms
/// of a == comparison.
NotEqual,
/// This is an <, <=, >, or >= that should be implemented as a rewrite in
/// terms of a <=> comparison.
Relational,
};
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the CurFPFeatures state on entry/exit of compound
/// statements.
class FPFeaturesStateRAII {
public:
FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) {
OldOverrides = S.FpPragmaStack.CurrentValue;
}
~FPFeaturesStateRAII() {
S.CurFPFeatures = OldFPFeaturesState;
S.FpPragmaStack.CurrentValue = OldOverrides;
}
FPOptionsOverride getOverrides() { return OldOverrides; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
FPOptionsOverride OldOverrides;
};
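// Illustrative usage sketch (assumes a Sema instance S): instantiate on entry
// to a compound statement; CurFPFeatures and the FP pragma stack value are
// restored when the object goes out of scope.
//
//   {
//     Sema::FPFeaturesStateRAII SavedFPState(S);
//     // ... handle '#pragma float_control' and friends in this scope ...
//   } // Previous FP state restored here.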
void addImplicitTypedef(StringRef Name, QualType T);
bool WarnedStackExhausted = false;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getCurFPFeatures() { return CurFPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
///Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
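// Illustrative usage sketch (Loc, T, and the recursive helper
// instantiateDeeplyNestedType are placeholders): wrap deeply recursive work in
// a lambda so it runs with extra stack allocated when the current stack is
// nearly exhausted.
//
//   runWithSufficientStackSpace(Loc, [&] { instantiateDeeplyNestedType(T); });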
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. ImmediateDiagBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class ImmediateDiagBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
ImmediateDiagBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}
ImmediateDiagBuilder(DiagnosticBuilder &&DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~ImmediateDiagBuilder is a safe no-op
// in that case anyway.
ImmediateDiagBuilder(const ImmediateDiagBuilder &) = default;
~ImmediateDiagBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First clear the diagnostic
// builder itself so it won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template <typename T>
friend const ImmediateDiagBuilder &
operator<<(const ImmediateDiagBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
// It is necessary to limit this to rvalue reference to avoid calling this
// function with a bitfield lvalue argument since non-const reference to
// bitfield is not allowed.
template <typename T, typename = typename std::enable_if<
!std::is_lvalue_reference<T>::value>::type>
const ImmediateDiagBuilder &operator<<(T &&V) const {
const DiagnosticBuilder &BaseDiag = *this;
BaseDiag << std::move(V);
return *this;
}
};
/// A generic diagnostic builder for errors which may or may not be deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class SemaDiagnosticBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
SemaDiagnosticBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D);
SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default;
~SemaDiagnosticBuilder();
bool isImmediate() const { return ImmediateDiag.hasValue(); }
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (SemaDiagnosticBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a SemaDiagnosticBuilder yourself.
operator bool() const { return isImmediate(); }
template <typename T>
friend const SemaDiagnosticBuilder &
operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
// It is necessary to limit this to rvalue reference to avoid calling this
// function with a bitfield lvalue argument since non-const reference to
// bitfield is not allowed.
template <typename T, typename = typename std::enable_if<
!std::is_lvalue_reference<T>::value>::type>
const SemaDiagnosticBuilder &operator<<(T &&V) const {
if (ImmediateDiag.hasValue())
*ImmediateDiag << std::move(V);
else if (PartialDiagId.hasValue())
S.DeviceDeferredDiags[Fn][*PartialDiagId].second << std::move(V);
return *this;
}
friend const SemaDiagnosticBuilder &
operator<<(const SemaDiagnosticBuilder &Diag, const PartialDiagnostic &PD) {
if (Diag.ImmediateDiag.hasValue())
PD.Emit(*Diag.ImmediateDiag);
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second = PD;
return Diag;
}
void AddFixItHint(const FixItHint &Hint) const {
if (ImmediateDiag.hasValue())
ImmediateDiag->AddFixItHint(Hint);
else if (PartialDiagId.hasValue())
S.DeviceDeferredDiags[Fn][*PartialDiagId].second.AddFixItHint(Hint);
}
friend ExprResult ExprError(const SemaDiagnosticBuilder &) {
return ExprError();
}
friend StmtResult StmtError(const SemaDiagnosticBuilder &) {
return StmtError();
}
operator ExprResult() const { return ExprError(); }
operator StmtResult() const { return StmtError(); }
operator TypeResult() const { return TypeError(); }
operator DeclResult() const { return DeclResult(true); }
operator MemInitResult() const { return MemInitResult(true); }
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<ImmediateDiagBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Is the last error level diagnostic immediate. This is used to determine
/// whether the next info diagnostic should be immediate.
bool IsLastErrorImmediate = true;
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID,
bool DeferHint = false);
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD,
bool DeferHint = false);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
/// Whether an uncompilable error has occurred. This includes errors that
/// happen in deferred diagnostics.
bool hasUncompilableErrorOccurred() const;
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
unsigned Index);
void emitAndClearUnusedLocalTypedefWarnings();
private:
/// Function or variable declarations to be checked for whether the deferred
/// diagnostics should be emitted.
SmallVector<Decl *, 4> DeclsToCheckForDeferredDiags;
public:
// Emit all deferred diagnostics.
void emitDeferredDiags();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
/// Determine whether the callee of a particular function call can throw.
/// E, D and Loc are all optional.
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
SourceLocation Loc = SourceLocation());
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
protected:
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
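// Illustrative sketch (not part of the original header): BoundTypeDiagnoser
// binds extra diagnostic arguments up front and streams them (via
// getPrintable) before the type when diagnose() fires. MyDiagID is a
// hypothetical diagnostic whose format takes a name %0 followed by a type %1,
// and VD is an assumed local VarDecl*:
//
//   BoundTypeDiagnoser<DeclarationName> Diagnoser(MyDiagID, VD->getDeclName());
//   if (RequireCompleteType(VD->getLocation(), VD->getType(), Diagnoser))
//     VD->setInvalidDecl();
//
// In practice the variadic RequireCompleteType overloads declared later in
// this class construct the diagnoser from (DiagID, Args...) automatically.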
/// Do a check to make sure \p Name looks like a legal argument for the
/// swift_name attribute applied to decl \p D. Raise a diagnostic if the name
/// is invalid for the given declaration.
///
/// \p AL is used to provide caret diagnostics in case of a malformed name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc,
const ParsedAttr &AL, bool IsAsync);
/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args)
: BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
this->emit(DB, std::index_sequence_for<Ts...>());
DB << T->isSizelessType() << T;
}
};
enum class CompleteTypeKind {
/// Apply the normal rules for complete types. In particular,
/// treat all sizeless types as incomplete.
Normal,
/// Relax the normal rules for complete types so that they include
/// sizeless built-in types.
AcceptSizeless,
// FIXME: Eventually we should flip the default to Normal and opt in
// to AcceptSizeless rather than opt out of it.
Default = AcceptSizeless
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, so when a dereference of a noderef pointer is seen we do
/// not yet know whether it will actually be accessed. For example, in `&*p`
/// where `p` is a noderef pointer, we first parse the `*p` but still need to
/// check whether `address of` is applied to it. This requires keeping a
/// container of all pending dereference expressions and checking whether
/// their addresses are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
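// Illustrative note (not part of the original header): the bottom-up flow
// described above means that for `&*p`, with `p` declared noderef, the `*p`
// subexpression is recorded as a pending dereference when it is parsed, and
// only when the enclosing address-of is built do the checks above run and
// discharge the pending access instead of warning:
//
//   // parse '*p'    -> dereference of a noderef pointer recorded as pending
//   // parse '&(*p)' -> CheckAddressOfNoDeref runs for the address-of; the
//   //                  pending access is dropped, so no noderef diagnostic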
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) {
return Entity->getOwningModule();
}
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
// When loading a non-modular PCH file, this is used to restore module
// visibility.
void makeModuleVisible(Module *Mod, SourceLocation ImportLoc) {
VisibleModules.setVisible(Mod, ImportLoc);
}
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return D->isUnconditionallyVisible() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind = CompleteTypeKind::Default) {
return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, unsigned DiagID);
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
}
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
}
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser);
}
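// Illustrative sketch (not part of the original header): the variadic
// overloads above let a caller bind extra diagnostic arguments inline.
// MyDiagID / MySizedDiagID are hypothetical diagnostic IDs whose formats end
// with a type argument; Loc, T and SomeDecl are assumed locals:
//
//   if (RequireCompleteType(Loc, T, MyDiagID, SomeDecl->getDeclName()))
//     return true; // T was incomplete; the diagnostic has been emitted
//
//   // The "sized" variant uses CompleteTypeKind::Normal, so sizeless
//   // built-in types are also rejected, and SizelessTypeDiagnoser prefixes
//   // the type with the 0/1 incomplete-vs-sizeless %select value.
//   if (RequireCompleteSizedType(Loc, T, MySizedDiagID, SomeDecl->getDeclName()))
//     return true;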
/// Get the type of expression E, triggering instantiation to complete the
/// type if necessary -- that is, if the expression refers to a templated
/// static data member of incomplete array type.
///
/// May still return an incomplete type if instantiation was not possible or
/// if the type is incomplete for a different reason. Use
/// RequireCompleteExprType instead if a diagnostic is expected for an
/// incomplete expression type.
QualType getCompletedType(Expr *E);
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as an overload set, and an expression
/// representing that overload set has been formed.
/// ActOnNameClassifiedAsOverloadSet should be called to form a suitable
/// expression referencing the overload set.
NC_OverloadSet,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
/// The name was classified as a concept name.
NC_Concept,
};
class NameClassification {
NameClassificationKind Kind;
union {
ExprResult Expr;
NamedDecl *NonTypeDecl;
TemplateName Template;
ParsedType Type;
};
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification OverloadSet(ExprResult E) {
NameClassification Result(NC_OverloadSet);
Result.Expr = E;
return Result;
}
static NameClassification NonType(NamedDecl *D) {
NameClassification Result(NC_NonType);
Result.NonTypeDecl = D;
return Result;
}
static NameClassification UndeclaredNonType() {
return NameClassification(NC_UndeclaredNonType);
}
static NameClassification DependentNonType() {
return NameClassification(NC_DependentNonType);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification Concept(TemplateName Name) {
NameClassification Result(NC_Concept);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ExprResult getExpression() const {
assert(Kind == NC_OverloadSet);
return Expr;
}
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
NamedDecl *getNonTypeDecl() const {
assert(Kind == NC_NonType);
return NonTypeDecl;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_Concept ||
Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_Concept:
return TNK_Concept_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Act on the result of classifying a name as an overload set.
ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet);
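// Illustrative sketch (not part of the original header): how a caller such as
// the parser might dispatch on the result of ClassifyName using the
// ActOnNameClassifiedAs* hooks above. SS, Name, NameLoc and NextToken are
// assumed locals; only the control flow is shown:
//
//   Sema::NameClassification C =
//       S.ClassifyName(getCurScope(), SS, Name, NameLoc, NextToken);
//   ExprResult Res;
//   switch (C.getKind()) {
//   case Sema::NC_Type:
//     /* continue parsing a type / declaration */
//     break;
//   case Sema::NC_NonType:
//     Res = S.ActOnNameClassifiedAsNonType(getCurScope(), SS,
//                                          C.getNonTypeDecl(), NameLoc,
//                                          NextToken);
//     break;
//   case Sema::NC_UndeclaredNonType:
//     Res = S.ActOnNameClassifiedAsUndeclaredNonType(Name, NameLoc);
//     break;
//   case Sema::NC_DependentNonType:
//     Res = S.ActOnNameClassifiedAsDependentNonType(
//         SS, Name, NameLoc, /*IsAddressOfOperand=*/false);
//     break;
//   case Sema::NC_OverloadSet:
//     Res = S.ActOnNameClassifiedAsOverloadSet(getCurScope(),
//                                              C.getExpression().get());
//     break;
//   default:
//     /* NC_Error, NC_Keyword, template and concept kinds, ... */
//     break;
//   }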
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
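// Illustrative sketch (not part of the original header): a typical caller, on
// seeing `expr < ...` that cannot be a comparison, asks whether `expr` was
// probably meant to name a template and, if so, points the diagnostic at the
// surrounding angle brackets. LHS, LessLoc and GreaterLoc are assumed locals:
//
//   bool Dependent = false;
//   if (mightBeIntendedToBeTemplateName(LHS, Dependent))
//     diagnoseExprIntendedAsTemplateName(getCurScope(), LHS, LessLoc,
//                                        GreaterLoc);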
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
ExprResult ConvertParamDefaultArgument(const ParmVarDecl *Param,
Expr *DefaultArg,
SourceLocation EqualLoc);
void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D);
ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr);
ExprResult ActOnRequiresClause(ExprResult ConstraintExpr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
class DefaultedFunctionKind {
CXXSpecialMember SpecialMember : 8;
DefaultedComparisonKind Comparison : 8;
public:
DefaultedFunctionKind()
: SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
}
DefaultedFunctionKind(CXXSpecialMember CSM)
: SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
DefaultedFunctionKind(DefaultedComparisonKind Comp)
: SpecialMember(CXXInvalid), Comparison(Comp) {}
bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
bool isComparison() const {
return Comparison != DefaultedComparisonKind::None;
}
explicit operator bool() const {
return isSpecialMember() || isComparison();
}
CXXSpecialMember asSpecialMember() const { return SpecialMember; }
DefaultedComparisonKind asComparison() const { return Comparison; }
/// Get the index of this function kind for use in diagnostics.
unsigned getDiagnosticIndex() const {
static_assert(CXXInvalid > CXXDestructor,
"invalid should have highest index");
static_assert((unsigned)DefaultedComparisonKind::None == 0,
"none should be equal to zero");
return SpecialMember + (unsigned)Comparison;
}
};
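// Illustrative note (not part of the original header): given the two
// static_asserts above, at most one of the two fields carries information at
// a time, so getDiagnosticIndex() yields the special-member value itself for
// a defaulted special member (Comparison is None == 0) and CXXInvalid plus
// the comparison value for a defaulted comparison (SpecialMember is
// CXXInvalid, the highest special-member value); the two index ranges
// therefore never overlap.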
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
return getDefaultedFunctionKind(MD).asSpecialMember();
}
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
return getDefaultedFunctionKind(FD).asComparison();
}
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike C++, the body is actually parsed, and a structural mismatch is
/// rejected with an error.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Enter a template parameter scope, after it's been associated with a particular
/// DeclContext. Causes lookup within the scope to chain through enclosing contexts
/// in the correct order.
void EnterTemplatedContext(Scope *S, DeclContext *DC);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as plain
/// integer values. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
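// Illustrative note (not part of the original header): priorities are summed
// before merging. An attribute written directly on the declaration carries
// AP_Explicit (0); one injected via '#pragma clang attribute' carries
// AP_PragmaClangAttribute (1); and one that was both injected by the pragma
// and inferred from another platform's attribute would carry 1 + 2 = 3, so it
// never overrides a more direct (lower-priority) attribute for that platform.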
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef UuidAsWritten, MSGuidDecl *GuidDecl);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA,
StringRef Name);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
WebAssemblyImportNameAttr *mergeImportNameAttr(
Decl *D, const WebAssemblyImportNameAttr &AL);
WebAssemblyImportModuleAttr *mergeImportModuleAttr(
Decl *D, const WebAssemblyImportModuleAttr &AL);
EnforceTCBAttr *mergeEnforceTCBAttr(Decl *D, const EnforceTCBAttr &AL);
EnforceTCBLeafAttr *mergeEnforceTCBLeafAttr(Decl *D,
const EnforceTCBLeafAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation.
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true,
bool ConsiderRequiresClauses = true);
enum class AllowedExplicit {
/// Allow no explicit functions to be used.
None,
/// Allow explicit conversion functions but not explicit constructors.
Conversions,
/// Allow both explicit conversion functions and explicit constructors.
All
};
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool IsStringInit(Expr *Init, const ArrayType *AT);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_ArrayBound, ///< Array bound in array declarator or new-expression.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE,
NamedDecl *Dest = nullptr,
bool *ValueDependent = nullptr);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
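// Editorial sketch (not part of this interface): a contextual implicit
// conversion of the kind an ICEConvertDiagnoser filters for. The switch
// condition and array bound below go through the class's single viable
// conversion function to an integral type.
//
//   struct Size { constexpr operator int() const { return 3; } };
//   void f(Size s) {
//     switch (s) { case 3: break; default: break; }  // s converted to int
//     int a[Size{}];                                  // bound converted the same way
//   }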
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit as a series of 'note's all template and non-templates identified by
// the expression Expr
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc,
ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
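// Editorial sketch (not part of this interface), assuming Clang's enable_if
// attribute spelling: the condition references the call arguments, and a call
// whose arguments fail it has no enabled candidate.
//
//   void require_positive(int n)
//       __attribute__((enable_if(n > 0, "n must be positive")));
//   void call() {
//     require_positive(1);     // attribute condition holds; candidate enabled
//     // require_positive(-1); // would be rejected: no enabled candidate
//   }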
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
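// Editorial sketch (not part of this interface), assuming Clang's diagnose_if
// attribute spelling: an argument-dependent condition is checked at each call.
//
//   int safe_div(int a, int b)
//       __attribute__((diagnose_if(b == 0, "division by zero", "error")));
//   void g() {
//     safe_div(10, 2);  // fine
//     safe_div(10, 0);  // argument-dependent diagnose_if fires at the call site
//   }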
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
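// Editorial sketch (not part of this interface): taking the address of an
// overloaded function is resolved against the target type.
//
//   void f(int);
//   void f(double);
//   void (*fp)(int) = &f;                        // picks f(int) from the target type
//   auto g = static_cast<void (*)(double)>(&f);  // explicit target type also works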
FunctionDecl *
resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfSingleOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConverion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
void AddOverloadedCallCandidates(
LookupResult &R, TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
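// Editorial sketch (not part of this interface): the begin/end calls built for
// a range-based for loop, roughly the standard desugaring.
//
//   // for (auto &x : range) body;   is built approximately as:
//   auto &&__range = range;
//   auto __begin = __range.begin();  // or ADL begin(__range); arrays use pointers
//   auto __end   = __range.end();    // likewise end(__range)
//   for (; __begin != __end; ++__begin) { auto &x = *__begin; /* body */ }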
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass,
NestedNameSpecifierLoc NNSLoc,
DeclarationNameInfo DNI,
const UnresolvedSetImpl &Fns,
bool PerformADL = true);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
OverloadedOperatorKind Op,
const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true,
FunctionDecl *DefaultedFn = nullptr);
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
FunctionDecl *DefaultedFn);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
bool AllowRecovery = false);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up a name following ~ in a destructor name. This is an ordinary
/// lookup, but prefers tags to typedefs.
LookupDestructorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
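// Editorial sketch (not part of this interface): the same identifier can
// resolve differently under different lookup kinds.
//
//   struct stat {};           // found by LookupTagName
//   int stat(struct stat *);  // found by LookupOrdinaryName; hides the tag
//   void use() {
//     struct stat s;          // elaborated type specifier: tag lookup
//     stat(&s);               // ordinary lookup finds the function
//   }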
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplatePack,
};
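// Editorial sketch (not part of this interface): literal operator forms that
// correspond to the lookup results above.
//
//   constexpr long double operator""_km(long double v) { return v * 1000.0L; } // LOLR_Cooked
//   constexpr int operator""_digits(const char *spelling) {                    // LOLR_Raw
//     int n = 0; while (*spelling++) ++n; return n;
//   }
//   template <char... Cs> constexpr int operator""_count() {                   // LOLR_Template
//     return sizeof...(Cs);
//   }
//   static_assert(1.0_km == 1000.0L, "");
//   static_assert(1234_digits == 4, "");
//   static_assert(42_count == 2, "");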
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, SourceLocation TypoLoc);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
void LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult
LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys,
bool AllowRaw, bool AllowTemplate,
bool AllowStringTemplate, bool DiagnoseMissing,
StringLiteral *StringLit = nullptr);
bool isKnownName(StringRef name);
/// Status of function emission based on the CUDA/HIP/OpenMP host/device attributes.
enum class FunctionEmissionStatus {
Emitted,
CUDADiscarded, // Discarded due to CUDA/HIP hostness
OMPDiscarded, // Discarded due to OpenMP hostness
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
bool Final = false);
// Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
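// Editorial sketch (not part of this interface): argument-dependent lookup
// finds functions in the namespaces associated with the argument types.
//
//   namespace lib {
//     struct Widget {};
//     void frob(const Widget &);
//   }
//   void use() {
//     lib::Widget w;
//     frob(w);  // unqualified call; ADL searches namespace lib
//   }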
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non error recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param RecoverUncorrectedTypos If true, when typo correction fails, it
/// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult CorrectDelayedTyposInExpr(
Expr *E, VarDecl *InitDecl = nullptr,
bool RecoverUncorrectedTypos = false,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult CorrectDelayedTyposInExpr(
ExprResult ER, VarDecl *InitDecl = nullptr,
bool RecoverUncorrectedTypos = false,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid()
? ER
: CorrectDelayedTyposInExpr(ER.get(), InitDecl,
RecoverUncorrectedTypos, Filter);
}
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
/// Attempts to produce a RecoveryExpr after some AST node cannot be created.
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
ArrayRef<Expr *> SubExprs,
QualType T = QualType());
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
FunctionDecl *CreateBuiltin(IdentifierInfo *II, QualType Type, unsigned ID,
SourceLocation Loc);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar which
/// backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter and
/// its property has a backing ivar, returns this ivar; otherwise, returns NULL.
/// It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
/// category match those implemented in its primary class and warns each time
/// an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
/// Returns the default address space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See the description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and the
/// parameter CheckTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, the function returns false; otherwise,
/// it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(
ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnAfterCompoundStatementLeadingPragmas();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond,
SourceLocation RParenLoc);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
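// Editorial sketch (not part of this interface): returns that the copy-elision
// kinds above distinguish.
//
//   struct Widget { /* ... */ };
//   Widget make() {
//     Widget w;
//     return w;      // local 'w' is an NRVO candidate; otherwise treated as if by move
//   }
//   Widget pass(Widget p) {
//     return p;      // parameters qualify only when CES_AllowParameters is set
//   }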
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
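// Editorial sketch (not part of this interface): the pattern DiagnoseSelfMove
// warns about.
//
//   void reset(std::vector<int> &v) {
//     v = std::move(v);  // moving a value to itself; likely a typo
//   }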
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
ParsingClassDepth++;
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
ParsingClassDepth--;
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReciever = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
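// Editorial sketch (not part of this interface): the capture kinds handled by
// tryCaptureVariable, shown as lambda captures.
//
//   void captures() {
//     int n = 0;
//     auto a = [n]  { return n; };  // TryCapture_ExplicitByVal
//     auto b = [&n] { ++n; };       // TryCapture_ExplicitByRef
//     auto c = [=]  { return n; };  // TryCapture_Implicit (by value here)
//   }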
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Try to convert an expression \p E to type \p Ty. Returns the result of the
/// conversion.
ExprResult tryConvertExprToType(Expr *E, QualType Ty);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool DiagnoseDependentMemberLookup(LookupResult &R);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(
const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs, const Scope *S,
UnresolvedLookupExpr *AsULE = nullptr);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
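// For reference, the generic selections handled above look like the
// following C11 sketch (purely illustrative):
//
//   #define type_name(x) _Generic((x), int: "int", float: "float", default: "other")
//   const char *n = type_name(1.0f); // yields "float"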
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx,
Expr *ColumnIdx,
SourceLocation RBLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound,
SourceLocation ColonLocFirst,
SourceLocation ColonLocSecond,
Expr *Length, Expr *Stride,
SourceLocation RBLoc);
ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
SourceLocation RParenLoc,
ArrayRef<Expr *> Dims,
ArrayRef<SourceRange> Brackets);
/// Data structure for an OpenMP iterator expression.
struct OMPIteratorData {
IdentifierInfo *DeclIdent = nullptr;
SourceLocation DeclIdentLoc;
ParsedType Type;
OMPIteratorExpr::IteratorRange Range;
SourceLocation AssignLoc;
SourceLocation ColonLoc;
SourceLocation SecColonLoc;
};
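// Illustrative mapping (an assumption about typical field contents, not
// normative): for an OpenMP iterator modifier such as
//
//   #pragma omp task depend(iterator(i = 0:10:2), in: a[i])
//
// one OMPIteratorData entry carries DeclIdent = "i", Range = 0:10:2,
// AssignLoc at '=', ColonLoc at the first ':' and SecColonLoc at the second.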
ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
SourceLocation LLoc, SourceLocation RLoc,
ArrayRef<OMPIteratorData> Data);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
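// Sketch of the recovery this enables ('Widget'/'Ptr' are hypothetical types):
//
//   struct Widget { int field; };
//   struct Ptr { Widget *operator->(); };
//   Ptr p;
//   int n = p.field; // 'Ptr' has no member 'field'; member access is
//                    // re-invoked with '->' to see whether p->field
//                    // (through operator->) would have succeeded.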
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false,
bool AllowRecovery = false);
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an AltiVec or OpenCL vector literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
UnresolvedSetImpl &Functions);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
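// Illustrative sketch of how the component list is filled: for
// __builtin_offsetof(S, a.b[4]) the parser produces roughly
//
//   { isBrackets = false, U.IdentInfo = "a" }  // .a
//   { isBrackets = false, U.IdentInfo = "b" }  // .b
//   { isBrackets = true,  U.E = <expr '4'> }   // [4]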
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
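// These results back the Microsoft __if_exists / __if_not_exists extension,
// e.g. (a minimal sketch):
//
//   __if_exists(MyClass::Member) {
//     // compiled only when the symbol exists (IER_Exists)
//   }
//   __if_not_exists(MyClass::Member) {
//     // compiled only when it does not exist (IER_DoesNotExist)
//   }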
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing whether we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
enum class ComparisonCategoryUsage {
/// The '<=>' operator was used in an expression and a builtin operator
/// was selected.
OperatorInExpression,
/// A defaulted 'operator<=>' needed the comparison category. This
/// typically only applies to 'std::strong_ordering', due to the implicit
/// fallback return value.
DefaultedOperator,
};
/// Look up the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc,
ComparisonCategoryUsage Usage);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling the destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E) { CalledStmt(E); }
/// Integrate an invoked statement into the collected data.
void CalledStmt(Stmt *S);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
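// Minimal usage sketch (SemaRef, Loc and CalledSpecialMembers are assumed to
// come from the surrounding special-member computation):
//
//   ImplicitExceptionSpecification ExceptSpec(SemaRef);
//   for (const CXXMethodDecl *MD : CalledSpecialMembers)
//     ExceptSpec.CalledDecl(Loc, MD);
//   FunctionProtoType::ExceptionSpecInfo ESI = ExceptSpec.getExceptionSpec();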
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Produce notes explaining why a defaulted function was defined as deleted.
void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr.
/// Otherwise it simply returns the passed-in expression.
ExprResult MaybeBindToTemporary(Expr *E);
/// Wrap the expression in a ConstantExpr if it is a potential immediate
/// invocation.
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse
/// {dynamic,static,reinterpret,const,addrspace}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee,
SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
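// For reference, the C++17 fold-expressions handled above look like this
// purely language-level sketch:
//
//   template <typename... Ts>
//   constexpr auto sum(Ts... ts) { return (ts + ... + 0); } // binary right fold
//   static_assert(sum(1, 2, 3) == 6);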
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class) along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
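// Minimal usage sketch (SemaRef and RD are assumed to be the enclosing Sema
// and the class whose member is being processed):
//
//   {
//     Sema::CXXThisScopeRAII ThisScope(SemaRef, RD,
//                                      /*CXXThisTypeQuals=*/Qualifiers());
//     // 'this' may be used here, e.g. while handling a noexcept-specifier
//     // or a trailing return type outside the member function body.
//   } // previous 'this' override restored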
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true if the capture failed, false if it succeeded.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
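// Sketch of the language-level distinction these scopes model ('W' is a
// hypothetical class declaring its own allocation function):
//
//   struct W { static void *operator new(std::size_t); };
//   W *a = new W;   // class-scope operator new is considered
//   W *b = ::new W; // only the global scope is searched (AFS_Global)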
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
return ActOnFinishFullExpr(
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
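// Illustrative sketch: while parsing "A::B::member", the info object built
// for the token 'B' carries Identifier = "B", IdentifierLoc at 'B', CCLoc at
// the following '::', and a null ObjectType unless the nested-name-specifier
// appears inside a member access such as "obj.A::B::m".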
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case, no error message is emitted.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it were followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Expr *TrailingRequiresClause);
/// Number the lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, EllipsisLoc, None, Id,
InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
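// For reference, the C++14 init-captures these entry points handle look like
// this purely language-level sketch:
//
//   int x = 1;
//   auto f = [n = x + 1, &r = x] { return n + r; }; // copy and by-ref init-capture
//   // f() == 3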
/// Create a dummy variable within the DeclContext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc,
ExprResult RequiresClause);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType,
CallingConv CC);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; the real body of
/// the function pointer conversion is produced by IR generation.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
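// Illustrative sketch (added for exposition, not from the original header),
// assuming standard C++11: the conversion these routines implement is what
// lets a captureless lambda decay to a plain function pointer.
//
//   int (*fp)(int) = [](int x) { return x + 1; };  // uses the conversion
//   int three = fp(2);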
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(),
bool *PossibleNonPrimary = nullptr,
bool IsTrailingRequiresClause = false);
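// Illustrative sketch (added for exposition, not from the original header;
// identifiers are invented for the example), assuming standard C++20: a
// requires-clause whose constraint expression is a conjunction of atomic
// constraints, the form CheckConstraintExpression validates.
//
//   #include <type_traits>
//   template <typename T>
//     requires std::is_integral_v<T> && (sizeof(T) <= 8)
//   T twice(T v) { return v + v; }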
private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *>
NormalizationCache;
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
SatisfactionCache;
public:
const NormalizedConstraint *
getNormalizedAssociatedConstraints(
NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);
/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives the result: true if D1 is
/// at least as constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
NamedDecl *D2, ArrayRef<const Expr *> AC2,
bool &Result);
/// If D1 was not at least as constrained as D2, but would have been had a
/// pair of the atomic constraints involved been declared in a concept rather
/// than repeated in two separate places in code, emit a diagnostic
/// explaining this.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);
/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
bool CheckConstraintSatisfaction(
const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction);
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful, emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction);
/// Check whether the given function decl's trailing requires clause is
/// satisfied, if any. Returns false and updates Satisfaction with the
/// satisfaction verdict if successful, emits a diagnostic and returns true if
/// an error occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckFunctionConstraints(const FunctionDecl *FD,
ConstraintSatisfaction &Satisfaction,
SourceLocation UsageLoc = SourceLocation());
/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
/// \param First whether this is the first time an unsatisfied constraint is
/// diagnosed for this error.
void
DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction,
bool First = true);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. The type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
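// Illustrative sketch (added for exposition, not from the original header;
// identifiers are invented for the example), assuming standard C++11: the
// three kinds of mem-initializers the Build* routines above produce: base,
// member, and delegating.
//
//   struct Base { explicit Base(int) {} };
//   struct Widget : Base {
//     int id;
//     Widget(int n) : Base(n), id(n) {}  // base and member initializers
//     Widget() : Widget(0) {}            // delegating initializer
//   };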
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// Mark destructors of virtual bases of this class referenced. In the Itanium
/// C++ ABI, this is done when emitting a destructor for any non-abstract
/// class. In the Microsoft C++ ABI, this is done any time a class's
/// destructor is referenced.
void MarkVirtualBaseDestructorsReferenced(
SourceLocation Location, CXXRecordDecl *ClassDecl,
llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr);
/// Do semantic checks to allow the complete destructor variant to be emitted
/// when the destructor is defined in another translation unit. In the Itanium
/// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they
/// can be emitted in separate TUs. To emit the complete variant, run a subset
/// of the checks performed when emitting a regular destructor.
void CheckCompleteDestructorVariant(SourceLocation CurrentLocation,
CXXDestructorDecl *Dtor);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
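// Illustrative sketch (added for exposition, not from the original header;
// identifiers are invented for the example), assuming standard C++11:
// constructing or destroying an object of a polymorphic class is a typical
// point at which MarkVTableUsed records a vtable use that DefineUsedVTables
// later has to satisfy.
//
//   struct Shape {
//     virtual ~Shape() = default;
//     virtual double area() const { return 0.0; }
//   };
//   double sample() {
//     Shape s;          // uses Shape's vtable
//     return s.area();
//   }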
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called at some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator.
/// \param ND The declaration that introduces the name std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Decl *Template,
llvm::function_ref<Scope *()> EnterScope);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
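// Illustrative sketch (added for exposition, not from the original header;
// identifiers are invented for the example), assuming standard C++17: the
// form of declaration the two static_assert entry points above act on.
//
//   #include <type_traits>
//   template <typename T>
//   struct Buffer {
//     static_assert(std::is_trivially_copyable_v<T>,
//                   "Buffer<T> requires a trivially copyable T");
//     T data[16];
//   };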
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
DefaultedComparisonKind DCK);
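// Illustrative sketch (added for exposition, not from the original header;
// identifiers are invented for the example), assuming standard C++20: a
// defaulted three-way comparison of the kind CheckExplicitlyDefaultedComparison
// and DefineDefaultedComparison handle; the relational operators are then
// rewritten in terms of it.
//
//   #include <compare>
//   struct Point {
//     int x, y;
//     friend auto operator<=>(const Point &, const Point &) = default;
//   };
//   static_assert(Point{1, 2} < Point{1, 3});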
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbiguousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// specification of the overriding function is a subset of the base
/// function's exception specification.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent);
/// CheckIfOverriddenFunctionIsMarkedFinal - Checks whether a virtual member
/// function overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
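// Illustrative sketch (added for exposition, not from the original header;
// identifiers are invented for the example), assuming standard C++11: the
// override-control semantics checked by CheckOverrideControl and
// CheckIfOverriddenFunctionIsMarkedFinal.
//
//   struct Base {
//     virtual void step() {}
//     virtual void finish() final {}
//   };
//   struct Derived : Base {
//     void step() override {}      // OK: overrides Base::step
//     // void finish() override;   // error: Base::finish is marked 'final'
//   };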
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found, QualType ObjectType,
SourceLocation Loc,
const PartialDiagnostic &Diag);
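/// Convenience overload of the above, passing an invalid source location and
/// a default (empty) diagnostic.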
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found,
QualType ObjectType) {
return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
SourceLocation(), PDiag());
}
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
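// Illustrative sketch (added for exposition, not from the original header;
// identifiers are invented for the example), assuming standard C++11: the
// situations RequireNonAbstractType diagnoses, keyed by AbstractDiagSelID.
//
//   struct AbstractShape {
//     virtual double area() const = 0;  // pure virtual => abstract class
//   };
//   // AbstractShape s;            // error: variable of abstract type
//   // AbstractShape make();       // error: abstract return type
//   AbstractShape *p = nullptr;    // OK: pointers and references are allowed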
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
static NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum TemplateNameIsRequiredTag { TemplateNameIsRequired };
/// Whether and why a template name is required in this lookup.
class RequiredTemplateKind {
public:
/// Template name is required if TemplateKWLoc is valid.
RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation())
: TemplateKW(TemplateKWLoc) {}
/// Template name is unconditionally required.
RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {}
SourceLocation getTemplateKeywordLoc() const {
return TemplateKW.getValueOr(SourceLocation());
}
bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
bool isRequired() const { return TemplateKW != SourceLocation(); }
explicit operator bool() const { return isRequired(); }
private:
llvm::Optional<SourceLocation> TemplateKW;
};
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(
LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType,
bool EnteringContext, bool &MemberOfUnknownSpecialization,
RequiredTemplateKind RequiredTemplate = SourceLocation(),
AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization,
bool Disambiguation = false);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
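// Illustrative sketch (added for exposition, not from the original header;
// identifiers are invented for the example), assuming standard C++17: a
// user-defined deduction guide; its declared name is the template's own name,
// which is what isDeductionGuideName must recognize during parsing.
//
//   #include <string>
//   template <typename T>
//   struct Box {
//     T value;
//     Box(T v) : value(v) {}
//   };
//   Box(const char *) -> Box<std::string>;
//   Box b("hello");  // deduced as Box<std::string> via the guide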
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg, bool HasTypeConstraint);
bool ActOnTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(NestedNameSpecifierLoc NS,
DeclarationNameInfo NameInfo,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(AutoTypeLoc TL,
NonTypeTemplateParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool RequireStructuralType(QualType T, SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid,
bool SuppressDiagnostic = false);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
/// Get a template argument mapping the given template parameter to itself,
/// e.g. for X in \c template<int X>, this would return an expression template
/// argument referencing X.
TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param,
SourceLocation Location);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
/// Get the specialization of the given variable template corresponding to
/// the specified argument list, or a null-but-valid result if the arguments
/// are dependent.
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
/// Form a reference to the specialization of the given variable template
/// corresponding to the specified argument list, or a null-but-valid result
/// if the arguments are dependent.
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &ConceptNameInfo,
NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
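// Illustrative sketch (added for exposition, not from the original header;
// identifiers are invented for the example), assuming standard C++17: a class
// template partial specialization of the kind
// CheckTemplatePartialSpecialization validates against its primary template.
//
//   template <typename T> struct Traits {
//     static constexpr bool is_pointer = false;
//   };
//   template <typename T> struct Traits<T *> {   // partial specialization
//     static constexpr bool is_pointer = true;
//   };
//   static_assert(Traits<int *>::is_pointer && !Traits<int>::is_pointer);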
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
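// Illustrative sketch (added for exposition, not from the original header;
// identifiers are invented for the example), assuming standard C++11: an
// explicit (full) specialization of a function template, which
// CheckFunctionTemplateSpecialization matches against a primary template
// found by lookup.
//
//   #include <cstdio>
//   template <typename T> void describe(T) { std::puts("generic"); }
//   template <> void describe<int>(int) { std::puts("int"); }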
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
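// Illustrative sketch (added for exposition, not from the original header),
// assuming standard C++11: the two forms of explicit instantiation the
// ActOnExplicitInstantiation overloads above handle.
//
//   #include <vector>
//   template class std::vector<int>;            // instantiation definition
//   extern template class std::vector<double>;  // instantiation declaration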
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true,
bool *ConstraintsNotSatisfied = nullptr);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromNonTypeTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
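// Illustrative sketch (added for exposition, not from the original header),
// assuming standard C++11; the members 'value_type' and 'apply' are invented
// for the example. These are the dependent names the ActOnTypenameType
// overloads above turn into types.
//
//   template <typename T>
//   struct Wrapper {
//     typename T::value_type first;                        // typename-specifier
//     typename T::template apply<int, long>::type second;  // with template-id
//   };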
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
TypeSourceInfo **TSI,
bool DeducedTSTContext);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
bool DeducedTSTContext = true);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Concepts
//===--------------------------------------------------------------------===//
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
RequiresExprBodyDecl *
ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
Scope *BodyScope);
void ActOnFinishRequiresExpr();
concepts::Requirement *ActOnSimpleRequirement(Expr *E);
concepts::Requirement *ActOnTypeRequirement(
SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId);
concepts::Requirement *ActOnCompoundRequirement(Expr *E,
SourceLocation NoexceptLoc);
concepts::Requirement *
ActOnCompoundRequirement(
Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint, unsigned Depth);
concepts::Requirement *ActOnNestedRequirement(Expr *Constraint);
concepts::ExprRequirement *
BuildExprRequirement(
Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::ExprRequirement *
BuildExprRequirement(
concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag,
bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type);
concepts::TypeRequirement *
BuildTypeRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
concepts::NestedRequirement *BuildNestedRequirement(Expr *E);
concepts::NestedRequirement *
BuildNestedRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc,
RequiresExprBodyDecl *Body,
ArrayRef<ParmVarDecl *> LocalParameters,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation ClosingBraceLoc);
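// Illustrative sketch (added for exposition, not from the original header;
// identifiers are invented for the example), assuming standard C++20: a
// requires-expression containing each of the requirement kinds built by the
// ActOn*Requirement callbacks above.
//
//   #include <concepts>
//   #include <cstddef>
//   template <typename T>
//   concept Container = requires(T c) {
//     c.begin();                                         // simple requirement
//     typename T::value_type;                            // type requirement
//     { c.size() } -> std::convertible_to<std::size_t>;  // compound requirement
//     requires sizeof(typename T::value_type) >= 1;      // nested requirement
//   };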
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block,
/// A type constraint.
UPPC_TypeConstraint,
// A requirement in a requires-expression.
UPPC_Requirement,
// A requires-clause.
UPPC_RequiresClause,
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
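// Illustrative sketch (added for exposition, not from the original header;
// identifiers are invented for the example), assuming standard C++11: a pack
// that is named but never expanded, the situation
// DiagnoseUnexpandedParameterPacks reports (here in the UPPC_DataMemberType
// context).
//
//   #include <tuple>
//   template <typename... Ts>
//   struct Holder {
//     // Ts member;              // error: unexpanded parameter pack 'Ts'
//     std::tuple<Ts...> member;  // OK: the pack is expanded
//   };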
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given requires-expression contains an unexpanded reference to one
/// of its own parameter packs, diagnose the error.
///
/// \param RE The requires-expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
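// Illustrative sketch (added for exposition, not from the original header;
// identifiers are invented for the example), assuming standard C++11: an
// expression pack expansion of the kind these ActOnPackExpansion and
// CheckPackExpansion overloads construct.
//
//   #include <cstddef>
//   template <typename... Ts> void sink(Ts...) {}
//   template <typename... Ts>
//   void forwardAll(Ts... args) {
//     sink(args...);                            // 'args' is the pattern
//     constexpr std::size_t n = sizeof...(Ts);  // arity, without expanding
//     (void)n;
//   }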
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
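  /// Example usage (an illustrative sketch, not part of this interface;
  /// \c SemaRef, \c Unexpanded, \c TemplateArgs, and the return convention of
  /// the enclosing helper are hypothetical):
  /// \code
  ///   bool ShouldExpand = false;
  ///   bool RetainExpansion = false;
  ///   Optional<unsigned> NumExpansions;
  ///   if (SemaRef.CheckParameterPacksForExpansion(
  ///           EllipsisLoc, PatternRange, Unexpanded, TemplateArgs,
  ///           ShouldExpand, RetainExpansion, NumExpansions))
  ///     return true; // inconsistent pack lengths; already diagnosed
  ///   if (ShouldExpand) {
  ///     // Substitute the pattern *NumExpansions times, once per element.
  ///   }
  /// \endcode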
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
    /// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// The deduced arguments did not satisfy the constraints associated
/// with the template.
TDK_ConstraintsNotSatisfied,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
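  /// Example usage (an illustrative sketch; \c SemaRef, \c Partial,
  /// \c TemplateArgs, and \c Loc are hypothetical caller-side values):
  /// \code
  ///   sema::TemplateDeductionInfo Info(Loc);
  ///   Sema::TemplateDeductionResult Result =
  ///       SemaRef.DeduceTemplateArguments(Partial, TemplateArgs, Info);
  ///   if (Result != Sema::TDK_Success) {
  ///     // Deduction failed; Info describes the parameters and arguments
  ///     // involved in the failure.
  ///   }
  /// \endcode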
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
  /// A function argument from which we performed template argument
  /// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
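  /// Example usage (an illustrative sketch; \c SemaRef, \c TInfo, \c Init, and
  /// \c VD are hypothetical caller-side values):
  /// \code
  ///   QualType DeducedType;
  ///   if (SemaRef.DeduceAutoType(TInfo, Init, DeducedType) !=
  ///       Sema::DAR_Succeeded) {
  ///     SemaRef.DiagnoseAutoDeductionFailure(VD, Init);
  ///     return;
  ///   }
  /// \endcode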
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(
FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
unsigned NumCallArguments2, bool Reversed = false);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
unsigned Depth, llvm::SmallBitVector &Used);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
    /// The kind of template instantiation we are performing.
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
      /// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are instantiating a requirement of a requires expression.
RequirementInstantiation,
/// We are checking the satisfaction of a nested requirement of a requires
/// expression.
NestedRequirementConstraintsCheck,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are declaring an implicit 'operator==' for a defaulted
/// 'operator<=>'.
DeclaringImplicitEqualityComparison,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
      /// We are checking the constraints associated with a constrained entity or
      /// the constraint expression of a concept. This includes the checks that
      /// atomic constraints have the type 'bool' and that they can be constant
      /// evaluated.
      ConstraintsCheck,
      /// We are substituting template arguments into a constraint expression.
      ConstraintSubstitution,
      /// We are normalizing a constraint expression.
      ConstraintNormalization,
      /// We are substituting into the parameter mapping of an atomic constraint
      /// during normalization.
      ParameterMappingSubstitution,
/// We are rewriting a comparison operator in terms of an operator<=>.
RewritingOperatorAsSpaceship,
/// We are initializing a structured binding.
InitializingStructuredBinding,
/// We are marking a class as __dllexport.
MarkingClassDllexported,
/// Added for Template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
    /// The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
  /// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
  /// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
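  /// Example usage (an illustrative sketch; \c SemaRef and \c NumExpansions
  /// are hypothetical):
  /// \code
  ///   for (unsigned I = 0; I != *NumExpansions; ++I) {
  ///     Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
  ///     // Substitutions performed here use the I-th element of each
  ///     // expanded parameter pack; the previous index is restored on exit.
  ///   }
  /// \endcode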
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
  /// produces an error and evaluates to true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintsCheck {};
/// \brief Note that we are checking the constraints associated with some
/// constrained entity (a concept declaration or a template with associated
/// constraints).
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintsCheck, NamedDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintSubstitution {};
/// \brief Note that we are checking a constraint expression associated
/// with a template declaration or as part of the satisfaction check of a
/// concept.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintSubstitution, NamedDecl *Template,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange);
struct ConstraintNormalization {};
/// \brief Note that we are normalizing a constraint expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintNormalization, NamedDecl *Template,
SourceRange InstantiationRange);
struct ParameterMappingSubstitution {};
    /// \brief Note that we are substituting into the parameter mapping of an
/// atomic constraint during constraint normalization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParameterMappingSubstitution, NamedDecl *Template,
SourceRange InstantiationRange);
/// \brief Note that we are substituting template arguments into a part of
/// a requirement of a requires expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::Requirement *Req,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are checking the satisfaction of the constraint
/// expression inside of a nested requirement.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::NestedRequirement *Req, ConstraintsCheck,
SourceRange InstantiationRange = SourceRange());
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
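  /// Example usage (an illustrative sketch; \c SemaRef,
  /// \c PointOfInstantiation, and \c ClassDecl are hypothetical):
  /// \code
  ///   Sema::InstantiatingTemplate Inst(SemaRef, PointOfInstantiation,
  ///                                    ClassDecl);
  ///   if (Inst.isInvalid())
  ///     return true; // instantiation depth exceeded; already diagnosed
  ///   if (Inst.isAlreadyInstantiating())
  ///     return false; // avoid recursing into the same specialization
  ///   // ... perform the instantiation; the context is popped on destruction.
  /// \endcode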
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
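  /// Example usage (an illustrative sketch; \c SemaRef, \c E, and
  /// \c TemplateArgs are hypothetical):
  /// \code
  ///   Sema::SFINAETrap Trap(SemaRef);
  ///   ExprResult Substituted = SemaRef.SubstExpr(E, TemplateArgs);
  ///   if (Trap.hasErrorOccurred())
  ///     return Sema::TDK_SubstitutionFailure; // errors trapped, not emitted
  /// \endcode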
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
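  /// Example usage (an illustrative sketch; \c SemaRef is hypothetical):
  /// \code
  ///   Sema::TentativeAnalysisScope Tentative(SemaRef);
  ///   // Analysis performed here suppresses typo correction and immediate
  ///   // diagnostics; only the validity of the construct is being probed.
  /// \endcode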
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
  /// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) {
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
} else {
// Template instantiations in the PCH may be delayed until the TU.
S.PendingInstantiations.swap(SavedPendingInstantiations);
S.PendingInstantiations.insert(S.PendingInstantiations.end(),
SavedPendingInstantiations.begin(),
SavedPendingInstantiations.end());
}
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
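  /// Example usage (an illustrative sketch; \c SemaRef is hypothetical):
  /// \code
  ///   Sema::GlobalEagerInstantiationScope GlobalInstantiations(
  ///       SemaRef, /*Enabled=*/true);
  ///   // ... analyze a construct that may require instantiations ...
  ///   GlobalInstantiations.perform(); // define vtables, flush pending queue
  /// \endcode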
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
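  /// Example usage (an illustrative sketch; \c SemaRef is hypothetical):
  /// \code
  ///   Sema::LocalEagerInstantiationScope LocalInstantiations(SemaRef);
  ///   // ... instantiate members of a local class within a template ...
  ///   LocalInstantiations.perform(); // run the queued local instantiations
  /// \endcode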
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
    /// Set the ExtParameterInfo for the parameter at the given index.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
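  /// Example usage (an illustrative sketch; \c NumParams is hypothetical):
  /// \code
  ///   Sema::ExtParameterInfoBuilder ExtParamInfos;
  ///   ExtParamInfos.set(0, FunctionProtoType::ExtParameterInfo());
  ///   const FunctionProtoType::ExtParameterInfo *Ptr =
  ///       ExtParamInfos.getPointerOrNull(NumParams);
  ///   // Ptr is null here: only default ("uninteresting") info was recorded.
  /// \endcode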
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
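  /// Example usage (an illustrative sketch; \c SemaRef, \c CallArgs, and
  /// \c TemplateArgs are hypothetical):
  /// \code
  ///   SmallVector<Expr *, 8> SubstitutedArgs;
  ///   if (SemaRef.SubstExprs(CallArgs, /*IsCall=*/true, TemplateArgs,
  ///                          SubstitutedArgs))
  ///     return ExprError();
  /// \endcode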
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateArgumentListInfo &Outputs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
bool CheckInstantiatedFunctionTemplateConstraints(
SourceLocation PointOfInstantiation, FunctionDecl *Decl,
ArrayRef<TemplateArgument> TemplateArgs,
ConstraintSatisfaction &Satisfaction);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
  /// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from the Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
void deduceOpenCLAddressSpace(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
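// Illustrative sketch (assumed source, not from this header): pack pragma
// forms that reach ActOnPragmaPack, using a hypothetical 'Wire' struct:
//
//   #pragma pack(push, 1)                  // push current alignment, pack to 1 byte
//   struct Wire { char tag; int value; };
//   #pragma pack(pop)                      // restore the previous alignment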
enum class PragmaAlignPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaAlignPack(PragmaAlignPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaAlignPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
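// Illustrative sketch (hypothetical arguments): typical '#pragma comment'
// forms handled by ActOnPragmaMSComment:
//
//   #pragma comment(lib, "somelib.lib")               // request linking a library
//   #pragma comment(linker, "/DEFAULTLIB:msvcrt.lib")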
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName, int SectionFlags,
NamedDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// Are precise floating point semantics currently enabled?
bool isPreciseFPEnabled() {
return !CurFPFeatures.getAllowFPReassociate() &&
!CurFPFeatures.getNoSignedZero() &&
!CurFPFeatures.getAllowReciprocal() &&
!CurFPFeatures.getAllowApproxFunc();
}
/// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control
void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action,
PragmaFloatControlKind Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
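// Illustrative sketch (hypothetical symbol names): the weak/redefine_extname
// pragma forms handled above:
//
//   #pragma weak fn_impl                        // ActOnPragmaWeakID
//   #pragma weak fn_alias = fn_impl             // ActOnPragmaWeakAlias
//   #pragma redefine_extname c_name asm_name    // ActOnPragmaRedefineExtname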
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC);
/// Called on well formed
/// \#pragma clang fp reassociate
void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled);
/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled);
/// Called on well formed '\#pragma clang fp' that has option 'exceptions'.
void ActOnPragmaFPExceptions(SourceLocation Loc,
LangOptions::FPExceptionModeKind);
/// Called to set constant rounding mode for floating point operations.
void setRoundingMode(SourceLocation Loc, llvm::RoundingMode);
/// Called to set exception behavior for floating point operations.
void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind);
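// Illustrative sketch (not from this header): pragmas that drive the
// floating-point state handled above; with 'precise' in effect,
// isPreciseFPEnabled() is expected to return true:
//
//   #pragma float_control(precise, on)   // ActOnPragmaFloatControl
//   #pragma clang fp reassociate(off)    // ActOnPragmaFPReassociate
//   #pragma STDC FP_CONTRACT OFF         // ActOnPragmaFPContract
//   #pragma STDC FENV_ACCESS ON          // ActOnPragmaFEnvAccess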
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddAnnotationAttr - Adds an annotation Annot with Args arguments to D.
void AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Annot, MutableArrayRef<Expr *> Args);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
/// Check that the expression co_await promise.final_suspend() is not
/// potentially-throwing.
bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend);
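// Illustrative sketch (hypothetical coroutine/awaitable types): expressions
// and statements routed through the coroutine entry points above:
//
//   task<int> compute() {              // 'task' is an assumed coroutine type
//     int v = co_await read_async();   // ActOnCoawaitExpr
//     co_return v + 1;                 // ActOnCoreturnStmt
//   }
//   generator<int> iota(int n) {       // 'generator' is assumed as well
//     for (int i = 0; i < n; ++i)
//       co_yield i;                    // ActOnCoyieldExpr
//   }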
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD associates with any
/// extensions present in OpenCLDeclExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT associates with any
/// extensions present in OpenCLTypeExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = std::string(Ext);
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
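// Illustrative sketch (OpenCL C source, not from this header): the extension
// pragma that gates the types/declarations tracked in the maps above:
//
//   #pragma OPENCL EXTENSION cl_khr_fp64 : enable
//   double d = 1.0;   // 'double' may require cl_khr_fp64 on some targets
//   #pragma OPENCL EXTENSION cl_khr_fp64 : disable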
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
SmallVector<SourceLocation, 4> DeclareTargetNesting;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
/// Helper to keep information about the current `omp begin/end declare
/// variant` nesting.
struct OMPDeclareVariantScope {
/// The associated OpenMP context selector.
OMPTraitInfo *TI;
/// The associated OpenMP context selector mangling.
std::string NameSuffix;
OMPDeclareVariantScope(OMPTraitInfo &TI);
};
/// Return the OMPTraitInfo for the surrounding scope, if any.
OMPTraitInfo *getOMPTraitInfoForSurroundingScope() {
return OMPDeclareVariantScopes.empty() ? nullptr
: OMPDeclareVariantScopes.back().TI;
}
/// The current `omp begin/end declare variant` scopes.
SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes;
/// The current `omp begin/end assumes` scopes.
SmallVector<AssumptionAttr *, 4> OMPAssumeScoped;
/// All `omp assumes` we encountered so far.
SmallVector<AssumptionAttr *, 4> OMPAssumeGlobal;
public:
/// The declarator \p D defines a function in the scope \p S which is nested
/// in an `omp begin/end declare variant` scope. In this method we create a
/// declaration for \p D and rename \p D according to the OpenMP context
/// selector of the surrounding scope. Return all base functions in \p Bases.
void ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists,
SmallVectorImpl<FunctionDecl *> &Bases);
/// Register \p D as specialization of all base functions in \p Bases in the
/// current `omp begin/end declare variant` scope.
void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
Decl *D, SmallVectorImpl<FunctionDecl *> &Bases);
/// Act on \p D, a function definition inside of an `omp [begin/end] assumes`.
void ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D);
/// Can we exit an OpenMP declare variant scope at the moment?
bool isInOpenMPDeclareVariantScope() const {
return !OMPDeclareVariantScopes.empty();
}
/// Given the potential call expression \p Call, determine if there is a
/// specialization via the OpenMP declare variant mechanism available. If
/// there is, return the specialized call expression, otherwise return the
/// original \p Call.
ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope,
SourceLocation LParenLoc, MultiExprArg ArgExprs,
SourceLocation RParenLoc, Expr *ExecConfig);
/// Handle a `omp begin declare variant`.
void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI);
/// Handle a `omp end declare variant`.
void ActOnOpenMPEndDeclareVariant();
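// Illustrative sketch (hypothetical function 'compute'): the begin/end declare
// variant form handled by the two entry points above:
//
//   #pragma omp begin declare variant match(device = {kind(gpu)})
//   void compute() { /* GPU-specialized definition */ }
//   #pragma omp end declare variant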
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Tries to capture a lambda's captured variables in the OpenMP region before
/// the original lambda itself is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a D should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
unsigned CapLevel) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
/// Check if the specified global variable must be captured by outer capture
/// regions.
/// \param Level Relative level of nested OpenMP construct for which the
/// check is performed.
bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp [begin] assume[s]'.
void ActOnOpenMPAssumesDirective(SourceLocation Loc,
OpenMPDirectiveKind DKind,
ArrayRef<StringRef> Assumptions,
bool SkippedClauses);
/// Check if there is an active global `omp begin assumes` directive.
bool isInOpenMPAssumeScope() const { return !OMPAssumeScoped.empty(); }
/// Check if there is an active global `omp assumes` directive.
bool hasGlobalOpenMPAssumes() const { return !OMPAssumeGlobal.empty(); }
/// Called on well-formed '#pragma omp end assumes'.
void ActOnOpenMPEndAssumesDirective();
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on the Requires directive.
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
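// Illustrative sketch (hypothetical type 'MyVec'): a declare reduction
// directive that exercises the combiner/initializer callbacks above:
//
//   #pragma omp declare reduction(merge : MyVec : omp_out.append(omp_in)) initializer(omp_priv = MyVec())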
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S,
QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const;
const ValueDecl *getOpenMPDeclareMapperVarName() const;
/// Called on the start of target region i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of target region i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
NamedDeclSetType &SameDirectiveDecls);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Finishes analysis of the deferred functions calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc);
/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return !DeclareTargetNesting.empty();
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
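// Illustrative sketch (hypothetical 'process'/'data'): a parallel region whose
// associated statement is passed to ActOnOpenMPParallelDirective:
//
//   #pragma omp parallel num_threads(4) default(none) shared(data)
//   {
//     process(data);
//   }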
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp depobj'.
StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp scan'.
StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type,
bool IsDeclareSimd = false);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
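// Illustrative sketch (hypothetical function 'axpy'): a declare simd directive
// whose clauses map onto the Simdlen/Uniforms/Linears arguments above:
//
//   #pragma omp declare simd simdlen(8) uniform(scale) linear(i : 1) notinbranch
//   float axpy(float scale, int i);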
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
/// \returns None if the function and the variant function are not compatible
/// with the pragma; otherwise the pair of the original function and the
/// variant ref expression.
Optional<std::pair<FunctionDecl *, Expr *>>
checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p FD.
/// \param TI The context traits associated with the function variant.
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'detach' clause.
OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
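// Illustrative sketch (not from this header): schedule clauses showing the
// kind, chunk size, and the optional M1/M2 modifiers handled above:
//
//   #pragma omp for schedule(dynamic, 4)
//   #pragma omp for schedule(monotonic : static, 16)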
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acq_rel' clause.
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'destroy' clause.
OMPClause *ActOnOpenMPDestroyClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
SourceLocation ExtraModifierLoc,
ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc);
/// Called on well-formed 'inclusive' clause.
OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'exclusive' clause.
OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(
ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ModifierLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depobj' pseudo clause.
OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
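// Illustrative sketch (hypothetical variables 'x' and 'y'): depend clauses of
// the form handled by ActOnOpenMPDependClause:
//
//   #pragma omp task depend(in : x) depend(out : y)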
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ModifierLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
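// Illustrative sketch (hypothetical array 'a' and scalar 'result'): map
// clauses with a map type and array-section list items, as handled above:
//
//   #pragma omp target map(to : a[0:n]) map(tofrom : result)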
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *
ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'use_device_addr' clause.
OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Data for list of allocators.
struct UsesAllocatorsData {
/// Allocator.
Expr *Allocator = nullptr;
/// Allocator traits.
Expr *AllocatorTraits = nullptr;
/// Locations of '(' and ')' symbols.
SourceLocation LParenLoc, RParenLoc;
};
/// Called on well-formed 'uses_allocators' clause.
OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc,
ArrayRef<UsesAllocatorsData> Data);
/// Called on well-formed 'affinity' clause.
OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc, Expr *Modifier,
ArrayRef<Expr *> Locators);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
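/// Example usage (an illustrative sketch, not part of the original header;
/// Context refers to Sema's ASTContext member and E is a hypothetical
/// scalar-typed expression):
///
///   CastKind CK = ScalarTypeToBooleanCastKind(E->getType());
///   ExprResult Converted = ImpCastExprToType(E, Context.BoolTy, CK);
///   if (Converted.isInvalid())
///     return ExprError();
///   E = Converted.get();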
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This function is a no-op if the operand has a function type
// or an array type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collect argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
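/// Example usage (an illustrative sketch; Fn, Proto and TrailingArgs are
/// hypothetical names for the callee expression, its prototype, and the
/// arguments past the last declared parameter):
///
///   VariadicCallType CallType = getVariadicCallType(FDecl, Proto, Fn);
///   for (Expr *Arg : TrailingArgs) {
///     ExprResult Promoted =
///         DefaultVariadicArgumentPromotion(Arg, CallType, FDecl);
///     if (Promoted.isInvalid())
///       continue; // a diagnostic has already been produced for this argument
///     // use Promoted.get() as the argument actually passed
///   }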
/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
/// An arithmetic operation.
ACK_Arithmetic,
/// A bitwise operation.
ACK_BitwiseOp,
/// A comparison.
ACK_Comparison,
/// A conditional (?:) operator.
ACK_Conditional,
/// A compound assignment expression.
ACK_CompAssign,
};
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, ArithConvKind ACK);
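/// Example usage (an illustrative sketch of a binary-operator checker; LHS,
/// RHS and Loc are hypothetical):
///
///   QualType CompTy =
///       UsualArithmeticConversions(LHS, RHS, Loc, ACK_Arithmetic);
///   if (LHS.isInvalid() || RHS.isInvalid())
///     return QualType();
///   if (!CompTy.isNull() && CompTy->isArithmeticType())
///     return CompTy;
///   return InvalidOperands(Loc, LHS, RHS);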
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatibleFunctionPointer - The assignment is between two function
/// pointer types that are not compatible, but we accept them as an
/// extension.
IncompatibleFunctionPointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
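/// Example usage (an illustrative sketch; AssignmentAction and AA_Assigning
/// are declared elsewhere in Sema and are assumed here):
///
///   QualType SrcTy = RHS.get()->getType();
///   AssignConvertType ConvTy = CheckSingleAssignmentConstraints(LHSType, RHS);
///   if (RHS.isInvalid())
///     return QualType();
///   if (DiagnoseAssignmentResult(ConvTy, Loc, LHSType, SrcTy, RHS.get(),
///                                AA_Assigning))
///     return QualType();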
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS,
SourceLocation QuestionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
/// Type checking for matrix binary operators.
QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
bool IsCompAssign);
QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign);
bool isValidSveBitcast(QualType srcType, QualType destType);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
// Fake up a scoped enumeration that still contextually converts to bool.
struct ReferenceConversionsScope {
/// The conversions that would be performed on an lvalue of type T2 when
/// binding a reference of type T1 to it, as determined when evaluating
/// whether T1 is reference-compatible with T2.
enum ReferenceConversions {
Qualification = 0x1,
NestedQualification = 0x2,
Function = 0x4,
DerivedToBase = 0x8,
ObjC = 0x10,
ObjCLifetime = 0x20,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
};
};
using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
ReferenceConversions *Conv = nullptr);
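/// Example usage (an illustrative sketch; T1, T2 and Loc are hypothetical):
///
///   ReferenceConversions Conv;
///   ReferenceCompareResult Res =
///       CompareReferenceRelationship(Loc, T1, T2, &Conv);
///   if (Res == Ref_Compatible &&
///       (Conv & ReferenceConversions::DerivedToBase)) {
///     // binding is possible but requires a derived-to-base adjustment
///   }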
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType ¶mType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// Returns true if the cast is invalid.
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// Returns the cast expression.
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
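/// Example usage (an illustrative sketch for an 'if' statement; S is the
/// current Scope and IfLoc/CondExpr are hypothetical):
///
///   ConditionResult Cond =
///       ActOnCondition(S, IfLoc, CondExpr, ConditionKind::Boolean);
///   if (Cond.isInvalid())
///     return StmtError();
///   if (llvm::Optional<bool> Known = Cond.getKnownValue()) {
///     // the condition folds to a constant; one branch is statically dead
///   }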
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return the converted condition expression, or an invalid result if there
/// were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns an invalid result if the conversion to
/// bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual SemaDiagnosticBuilder
diagnoseNotICEType(Sema &S, SourceLocation Loc, QualType T);
virtual SemaDiagnosticBuilder diagnoseNotICE(Sema &S,
SourceLocation Loc) = 0;
virtual SemaDiagnosticBuilder diagnoseFold(Sema &S, SourceLocation Loc);
virtual ~VerifyICEDiagnoser() {}
};
enum AllowFoldKind {
NoFold,
AllowFold,
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns an invalid result on
/// failure. Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr,
AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E,
AllowFoldKind CanFold = NoFold) {
return VerifyIntegerConstantExpression(E, nullptr, CanFold);
}
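/// Example usage (an illustrative sketch; CaseExpr is hypothetical):
///
///   llvm::APSInt Value;
///   ExprResult ICE =
///       VerifyIntegerConstantExpression(CaseExpr, &Value, AllowFold);
///   if (ICE.isInvalid())
///     return ExprError(); // appropriate diagnostics were already emitted
///   CaseExpr = ICE.get();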
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns an invalid result on failure.
/// Can optionally return whether the bit-field is of width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics
/// unless \p EmitOnBothSides is true.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder CUDADiagIfDeviceCode(SourceLocation Loc,
unsigned DiagID);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
SemaDiagnosticBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder diagIfOpenMPDeviceCode(SourceLocation Loc,
unsigned DiagID);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc,
unsigned DiagID);
SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
SemaDiagnosticBuilder targetDiag(SourceLocation Loc,
const PartialDiagnostic &PD) {
return targetDiag(Loc, PD.getDiagID()) << PD;
}
/// Check if the declaration \p D is allowed to be used in expressions for
/// the offloading devices.
void checkDeviceDecl(const ValueDecl *D, SourceLocation Loc);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D);
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
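/// Because CUDAFunctionPreference is ordered from worst to best, two
/// candidates can be compared directly (an illustrative sketch; F1 and F2
/// are hypothetical):
///
///   if (IdentifyCUDAPreference(Caller, F1) >
///       IdentifyCUDAPreference(Caller, F2)) {
///     // F1 is the better CUDA candidate for this caller
///   }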
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
/// May add implicit CUDAConstantAttr attribute to VD, depending on VD
/// and current compilation settings.
void MaybeAddCUDAConstantAttr(VarDecl *VD);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// A CUDA lambda is by default a host device function unless it has an
/// explicit host or device attribute.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure a kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
/// Trigger code completion for a record of \p BaseType. \p InitExprs are
/// expressions in the initializer list seen so far and \p D is the current
/// Designation being parsed.
void CodeCompleteDesignator(const QualType BaseType,
llvm::ArrayRef<Expr *> InitExprs,
const Designation &D);
void CodeCompleteAfterIf(Scope *S, bool IsBracedThen);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteAfterFunctionEquals(Declarator &D);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
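/// Example usage (an illustrative sketch; Format and FDecl are hypothetical):
///
///   FormatStringInfo FSI;
///   if (getFormatStringInfo(Format, isa<CXXMethodDecl>(FDecl), &FSI)) {
///     // FSI.FormatIdx and FSI.FirstDataArg locate the format string and
///     // the first data argument of the call
///   }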
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg,
bool WantCDE);
bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinComplex(CallExpr *TheCall);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinPPCMMACall(CallExpr *TheCall, const char *TypeDesc);
bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc);
// Matrix builtin handling.
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
ExprResult CallResult);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckFreeArguments(const CallExpr *E);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(const Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void CheckTCBEnforcement(const CallExpr *TheCall, const FunctionDecl *Callee);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
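/// Example usage (an illustrative sketch; the "mpi" kind and the magic value
/// 42 are hypothetical):
///
///   RegisterTypeTagForDatatype(&Context.Idents.get("mpi"),
///                              /*MagicValue=*/42, Context.DoubleTy,
///                              /*LayoutCompatible=*/false,
///                              /*MustBeNull=*/false);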
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Nullable_result = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// Determine the number of levels of enclosing template parameters. This is
/// only usable while parsing. Note that this does not include dependent
/// contexts in which no template parameters have yet been declared, such as
/// in a terse function template or generic lambda before the first 'auto' is
/// encountered.
unsigned getTemplateDepth(Scope *S) const;
/// To be used for checking whether the arguments being passed to
/// function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
int ParsingClassDepth = 0;
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the set of potentially
/// misaligned members and it is converted to some pointer type T with lower
/// or equal alignment requirements. If so it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurLexicalContext is a kernel function or it is known that the
/// function will be emitted for the device, emits the diagnostics
/// immediately.
/// - If CurLexicalContext is a function and we are compiling
/// for the device, but we don't know that this function will be codegen'ed
/// for the device yet, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// Diagnose __float128 type usage only from SYCL device code if the current
/// target doesn't support it
/// if (!S.Context.getTargetInfo().hasFloat128Type() &&
/// S.getLangOpts().SYCLIsDevice)
/// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128";
SemaDiagnosticBuilder SYCLDiagIfDeviceCode(SourceLocation Loc,
unsigned DiagID);
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed, creates a deferred diagnostic to be emitted if
/// and when the caller is codegen'ed, and returns true.
///
/// - Otherwise, returns true without emitting any diagnostics.
///
/// Adds Callee to DeviceCallGraph if we don't know if its caller will be
/// codegen'ed yet.
bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
template <>
void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
AlignPackInfo Value);
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getHashValue());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
DRB040-truedepsingleelement-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Data race pair: a[i]@63:5 vs. a[0]@63:15
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int main(int argc,char *argv[])
{
int len = 1000;
int i;
if (argc > 1)
len = atoi(argv[1]);
int a[len];
#pragma omp parallel for private (i)
for (i = 0; i <= len - 1; i += 1) {
a[i] = i;
}
a[0] = 2;
#pragma omp parallel for private (i)
for (i = 0; i <= len - 1; i += 1) {
a[i] = a[i] + a[0];
}
for (i = 0; i <= len - 1; i += 1) {
printf("%d\n",a[i]);
}
return 0;
}
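/* Usage note (illustrative, not part of the original benchmark): the race
 documented above only manifests while the loop computing a[i] = a[i] + a[0]
 executes in parallel. Assuming a compiler with OpenMP and ThreadSanitizer
 support, one plausible way to observe it is:

 gcc -fopenmp -fsanitize=thread DRB040-truedepsingleelement-var-yes.c
 ./a.out 1000

 A race detector such as ThreadSanitizer or Archer may then report the write
 to a[0] (iteration i == 0) conflicting with reads of a[0] in other
 iterations. */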
|
OMPIRBuilder.h | //===- IR/OpenMPIRBuilder.h - OpenMP encoding builder for LLVM IR - C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the OpenMPIRBuilder class and helpers used as a convenient
// way to create LLVM instructions for OpenMP directives.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
#define LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Allocator.h"
#include <forward_list>
namespace llvm {
class CanonicalLoopInfo;
/// An interface to create LLVM-IR for OpenMP directives.
///
/// Each OpenMP directive has a corresponding public generator method.
class OpenMPIRBuilder {
public:
/// Create a new OpenMPIRBuilder operating on the given module \p M. This will
/// not have an effect on \p M (see initialize).
OpenMPIRBuilder(Module &M) : M(M), Builder(M.getContext()) {}
~OpenMPIRBuilder();
/// Initialize the internal state; this will put structure types and
/// potentially other helpers into the underlying module. Must be called
/// before any other method and only once!
void initialize();
/// Finalize the underlying module, e.g., by outlining regions.
/// \param Fn The function to be finalized. If not used,
/// all functions are finalized.
/// \param AllowExtractorSinking Flag to include sinking instructions,
/// emitted by CodeExtractor, in the
/// outlined region. Default is false.
void finalize(Function *Fn = nullptr, bool AllowExtractorSinking = false);
/// Add attributes known for \p FnID to \p Fn.
void addAttributes(omp::RuntimeFunction FnID, Function &Fn);
/// Type used throughout for insertion points.
using InsertPointTy = IRBuilder<>::InsertPoint;
/// Callback type for variable finalization (think destructors).
///
/// \param CodeGenIP is the insertion point at which the finalization code
/// should be placed.
///
/// A finalize callback knows about all objects that need finalization, e.g.
/// destruction, when the scope of the currently generated construct is left
/// at the time, and location, the callback is invoked.
using FinalizeCallbackTy = std::function<void(InsertPointTy CodeGenIP)>;
struct FinalizationInfo {
/// The finalization callback provided by the last in-flight invocation of
/// createXXXX for the directive of kind DK.
FinalizeCallbackTy FiniCB;
/// The directive kind of the innermost directive that has an associated
/// region which might require finalization when it is left.
omp::Directive DK;
/// Flag to indicate if the directive is cancellable.
bool IsCancellable;
};
/// Push a finalization callback on the finalization stack.
///
/// NOTE: Temporary solution until Clang CG is gone.
void pushFinalizationCB(const FinalizationInfo &FI) {
FinalizationStack.push_back(FI);
}
/// Pop the last finalization callback from the finalization stack.
///
/// NOTE: Temporary solution until Clang CG is gone.
void popFinalizationCB() { FinalizationStack.pop_back(); }
/// Callback type for body (=inner region) code generation
///
/// The callback takes code locations as arguments, each describing a
/// location at which code might need to be generated or a location that is
/// the target of control transfer.
///
/// \param AllocaIP is the insertion point at which new alloca instructions
/// should be placed.
/// \param CodeGenIP is the insertion point at which the body code should be
/// placed.
/// \param ContinuationBB is the basic block target to leave the body.
///
/// Note that all blocks pointed to by the arguments have terminators.
using BodyGenCallbackTy =
function_ref<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
BasicBlock &ContinuationBB)>;
// This is created primarily for the sections construct, as llvm::function_ref
// (BodyGenCallbackTy) is not storable (as described in the comments of the
// function_ref class: function_ref contains a non-owning reference to the
// callable).
using StorableBodyGenCallbackTy =
std::function<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
BasicBlock &ContinuationBB)>;
/// Callback type for loop body code generation.
///
/// \param CodeGenIP is the insertion point where the loop's body code must be
/// placed. This will be a dedicated BasicBlock with a
/// conditional branch from the loop condition check and
/// terminated with an unconditional branch to the loop
/// latch.
/// \param IndVar is the induction variable usable at the insertion point.
using LoopBodyGenCallbackTy =
function_ref<void(InsertPointTy CodeGenIP, Value *IndVar)>;
/// Callback type for variable privatization (think copy & default
/// constructor).
///
/// \param AllocaIP is the insertion point at which new alloca instructions
/// should be placed.
/// \param CodeGenIP is the insertion point at which the privatization code
/// should be placed.
/// \param Original The value being copied/created, should not be used in the
/// generated IR.
/// \param Inner The equivalent of \p Original that should be used in the
/// generated IR; this is equal to \p Original if the value is
/// a pointer and can thus be passed directly, otherwise it is
/// an equivalent but different value.
/// \param ReplVal The replacement value, thus a copy or new created version
/// of \p Inner.
///
/// \returns The new insertion point where code generation continues and
/// \p ReplVal the replacement value.
using PrivatizeCallbackTy = function_ref<InsertPointTy(
InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &Original,
Value &Inner, Value *&ReplVal)>;
/// Description of a LLVM-IR insertion point (IP) and a debug/source location
/// (filename, line, column, ...).
struct LocationDescription {
template <typename T, typename U>
LocationDescription(const IRBuilder<T, U> &IRB)
: IP(IRB.saveIP()), DL(IRB.getCurrentDebugLocation()) {}
LocationDescription(const InsertPointTy &IP) : IP(IP) {}
LocationDescription(const InsertPointTy &IP, const DebugLoc &DL)
: IP(IP), DL(DL) {}
InsertPointTy IP;
DebugLoc DL;
};
/// Emitter methods for OpenMP directives.
///
///{
/// Generator for '#omp barrier'
///
/// \param Loc The location where the barrier directive was encountered.
/// \param DK The kind of directive that caused the barrier.
/// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
/// \param CheckCancelFlag Flag to indicate a cancel barrier return value
/// should be checked and acted upon.
///
/// \returns The insertion point after the barrier.
InsertPointTy createBarrier(const LocationDescription &Loc, omp::Directive DK,
bool ForceSimpleCall = false,
bool CheckCancelFlag = true);
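// A minimal usage sketch (not part of the upstream header; assumes an
// OpenMPIRBuilder `OMPBuilder` on which initialize() has been called and an
// IRBuilder<> `Builder` positioned where the barrier should be emitted; the
// directive enumerator spelling follows the generated omp::Directive enum):
//
//   OpenMPIRBuilder::LocationDescription Loc(Builder);
//   Builder.restoreIP(
//       OMPBuilder.createBarrier(Loc, omp::Directive::OMPD_barrier));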
/// Generator for '#omp cancel'
///
/// \param Loc The location where the directive was encountered.
/// \param IfCondition The evaluated 'if' clause expression, if any.
/// \param CanceledDirective The kind of directive that is canceled.
///
/// \returns The insertion point after the cancel.
InsertPointTy createCancel(const LocationDescription &Loc, Value *IfCondition,
omp::Directive CanceledDirective);
/// Generator for '#omp parallel'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion points to be used for alloca instructions.
/// \param BodyGenCB Callback that will generate the region code.
/// \param PrivCB Callback to copy a given variable (think copy constructor).
/// \param FiniCB Callback to finalize variable copies.
/// \param IfCondition The evaluated 'if' clause expression, if any.
/// \param NumThreads The evaluated 'num_threads' clause expression, if any.
/// \param ProcBind The value of the 'proc_bind' clause (see ProcBindKind).
/// \param IsCancellable Flag to indicate a cancellable parallel region.
///
/// \returns The insertion position *after* the parallel.
IRBuilder<>::InsertPoint
createParallel(const LocationDescription &Loc, InsertPointTy AllocaIP,
BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB,
FinalizeCallbackTy FiniCB, Value *IfCondition,
Value *NumThreads, omp::ProcBindKind ProcBind,
bool IsCancellable);
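// A minimal usage sketch for a clause-less '#pragma omp parallel' (not part of
// the upstream header; `OMPBuilder`, `Builder`, and the alloca insertion point
// `AllocaIP` are assumed to exist as in the barrier sketch above):
//
//   auto BodyGenCB = [&](OpenMPIRBuilder::InsertPointTy AllocaIP,
//                        OpenMPIRBuilder::InsertPointTy CodeGenIP,
//                        BasicBlock &ContinuationBB) {
//     Builder.restoreIP(CodeGenIP);
//     // ... emit the parallel region body here ...
//   };
//   auto PrivCB = [&](OpenMPIRBuilder::InsertPointTy AllocaIP,
//                     OpenMPIRBuilder::InsertPointTy CodeGenIP, Value &Orig,
//                     Value &Inner, Value *&ReplVal) {
//     ReplVal = &Inner; // no privatization in this sketch
//     return CodeGenIP;
//   };
//   auto FiniCB = [&](OpenMPIRBuilder::InsertPointTy CodeGenIP) {};
//   OpenMPIRBuilder::LocationDescription Loc(Builder);
//   Builder.restoreIP(OMPBuilder.createParallel(
//       Loc, AllocaIP, BodyGenCB, PrivCB, FiniCB, /*IfCondition=*/nullptr,
//       /*NumThreads=*/nullptr, omp::ProcBindKind::OMP_PROC_BIND_default,
//       /*IsCancellable=*/false));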
/// Generator for the control flow structure of an OpenMP canonical loop.
///
/// This generator operates on the logical iteration space of the loop, i.e.
/// the caller only has to provide a loop trip count of the loop as defined by
/// base language semantics. The trip count is interpreted as an unsigned
/// integer. The induction variable passed to \p BodyGenCB will be of the same
/// type and run from 0 to \p TripCount - 1. It is up to the callback to
/// convert the logical iteration variable to the loop counter variable in the
/// loop body.
///
/// \param Loc The insert and source location description. The insert
/// location can be between two instructions or the end of a
/// degenerate block (e.g. a BB under construction).
/// \param BodyGenCB Callback that will generate the loop body code.
/// \param TripCount Number of iterations the loop body is executed.
/// \param Name Base name used to derive BB and instruction names.
///
/// \returns An object representing the created control flow structure which
/// can be used for loop-associated directives.
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
LoopBodyGenCallbackTy BodyGenCB,
Value *TripCount,
const Twine &Name = "loop");
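// A minimal usage sketch (not part of the upstream header; `OMPBuilder` and
// `Builder` as above, with a hypothetical trip count of 128):
//
//   Value *TripCount = Builder.getInt32(128);
//   auto LoopBodyCB = [&](OpenMPIRBuilder::InsertPointTy CodeGenIP, Value *IV) {
//     Builder.restoreIP(CodeGenIP);
//     // ... emit the body; IV runs from 0 to TripCount - 1 ...
//   };
//   CanonicalLoopInfo *CLI = OMPBuilder.createCanonicalLoop(
//       OpenMPIRBuilder::LocationDescription(Builder), LoopBodyCB, TripCount);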
/// Generator for the control flow structure of an OpenMP canonical loop.
///
/// Instead of a logical iteration space, this allows specifying user-defined
/// loop counter values using increment, upper- and lower bounds. To
/// disambiguate the terminology when counting downwards, instead of lower
/// bounds we use \p Start for the loop counter value in the first body
/// iteration.
///
/// Consider the following limitations:
///
/// * A loop counter space over all integer values of its bit-width cannot be
/// represented (e.g. using uint8_t, a loop trip count of 256 cannot be
/// stored in an 8-bit integer):
///
/// DO I = 0, 255, 1
///
/// * Unsigned wrapping is only supported when wrapping only "once"; E.g.
/// effectively counting downwards:
///
/// for (uint8_t i = 100u; i > 0; i += 127u)
///
///
/// TODO: May need to add additional parameters to represent:
///
/// * Allow representing downcounting with unsigned integers.
///
/// * Sign of the step and the comparison operator might disagree:
///
/// for (int i = 0; i < 42; --i)
///
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the loop body code.
/// \param Start Value of the loop counter for the first iteration.
/// \param Stop Loop counter values past this will stop the
/// iterations.
/// \param Step Loop counter increment after each iteration; negative
/// means counting down.
/// \param IsSigned Whether Start, Stop and Step are signed integers.
/// \param InclusiveStop Whether \p Stop itself is a valid value for the loop
/// counter.
/// \param ComputeIP Insertion point for instructions computing the trip
/// count. Can be used to ensure the trip count is available
/// at the outermost loop of a loop nest. If not set,
/// defaults to the preheader of the generated loop.
/// \param Name Base name used to derive BB and instruction names.
///
/// \returns An object representing the created control flow structure which
/// can be used for loop-associated directives.
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
LoopBodyGenCallbackTy BodyGenCB,
Value *Start, Value *Stop, Value *Step,
bool IsSigned, bool InclusiveStop,
InsertPointTy ComputeIP = {},
const Twine &Name = "loop");
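// A minimal usage sketch of the bounds-based overload for a loop equivalent to
// 'for (int i = 0; i < N; i += 4)' (not part of the upstream header; reuses
// `OMPBuilder`, `Builder`, and `LoopBodyCB` from the sketch above, and assumes
// a Value* `N` holding the upper bound):
//
//   CanonicalLoopInfo *CLI = OMPBuilder.createCanonicalLoop(
//       OpenMPIRBuilder::LocationDescription(Builder), LoopBodyCB,
//       /*Start=*/Builder.getInt32(0), /*Stop=*/N, /*Step=*/Builder.getInt32(4),
//       /*IsSigned=*/true, /*InclusiveStop=*/false);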
/// Collapse a loop nest into a single loop.
///
/// Merges loops of a loop nest into a single CanonicalLoopInfo representation
/// that has the same number of innermost loop iterations as the origin loop
/// nest. The induction variables of the input loops are derived from the
/// collapsed loop's induction variable. This is intended to be used to
/// implement OpenMP's collapse clause. Before applying a directive,
/// collapseLoops normalizes a loop nest to contain only a single loop and the
/// directive's implementation does not need to handle multiple loops itself.
/// This does not remove the need for directives to handle loop nests
/// themselves in some cases, such as the ordered(<n>) clause or the simd
/// schedule-clause modifier of the worksharing-loop directive.
///
/// Example:
/// \code
/// for (int i = 0; i < 7; ++i) // Canonical loop "i"
/// for (int j = 0; j < 9; ++j) // Canonical loop "j"
/// body(i, j);
/// \endcode
///
/// After collapsing with Loops={i,j}, the loop is changed to
/// \code
/// for (int ij = 0; ij < 63; ++ij) {
/// int i = ij / 9;
/// int j = ij % 9;
/// body(i, j);
/// }
/// \endcode
///
/// In the current implementation, the following limitations apply:
///
/// * All input loops have an induction variable of the same type.
///
/// * The collapsed loop will have the same trip count integer type as the
/// input loops. Therefore it is possible that the collapsed loop cannot
/// represent all iterations of the input loops. For instance, assuming a
/// 32 bit integer type, and two input loops both iterating 2^16 times, the
/// theoretical trip count of the collapsed loop would be 2^32 iterations,
/// which cannot be represented in a 32-bit integer. Behavior is undefined
/// in this case.
///
/// * The trip counts of every input loop must be available at \p ComputeIP.
/// Non-rectangular loops are not yet supported.
///
/// * At each nest level, code between a surrounding loop and its nested loop
/// is hoisted into the loop body, and such code will be executed more
/// often than before collapsing (or not at all if any inner loop iteration
/// has a trip count of 0). This is permitted by the OpenMP specification.
///
/// \param DL Debug location for instructions added for collapsing,
/// such as the instructions that derive the input loops'
/// induction variables.
/// \param Loops Loops in the loop nest to collapse. Loops are specified
/// from outermost-to-innermost and every control flow of a
/// loop's body must pass through its directly nested loop.
/// \param ComputeIP Where the additional instructions that compute the
/// collapsed trip count are inserted. If not set, defaults to
/// before the generated loop.
///
/// \returns The CanonicalLoopInfo object representing the collapsed loop.
CanonicalLoopInfo *collapseLoops(DebugLoc DL,
ArrayRef<CanonicalLoopInfo *> Loops,
InsertPointTy ComputeIP);
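// A minimal usage sketch (not part of the upstream header; assumes a DebugLoc
// `DL` and two CanonicalLoopInfo* `Outer`/`Inner` created by
// createCanonicalLoop, with `Inner` perfectly nested inside `Outer`):
//
//   CanonicalLoopInfo *Collapsed =
//       OMPBuilder.collapseLoops(DL, {Outer, Inner}, /*ComputeIP=*/{});
//   // `Collapsed` can now be handed to a loop-associated directive generator.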
/// Modifies the canonical loop to be a statically-scheduled workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain the loop bounds to be used in
/// the current thread, updates the relevant instructions in the canonical
/// loop and calls to an OpenMP runtime finalization function after the loop.
///
/// \param Loc The source location description, the insertion location
/// is not used.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
/// \param Chunk The size of loop chunk considered as a unit when
/// scheduling. If \p nullptr, defaults to 1.
///
/// \returns Updated CanonicalLoopInfo.
CanonicalLoopInfo *createStaticWorkshareLoop(const LocationDescription &Loc,
CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP,
bool NeedsBarrier,
Value *Chunk = nullptr);
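// A minimal usage sketch (not part of the upstream header): turning a canonical
// loop `CLI`, e.g. from the createCanonicalLoop sketch above, into a statically
// scheduled worksharing loop; `Loc` and `AllocaIP` as in the earlier sketches:
//
//   CLI = OMPBuilder.createStaticWorkshareLoop(Loc, CLI, AllocaIP,
//                                              /*NeedsBarrier=*/true);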
/// Modifies the canonical loop to be a dynamically-scheduled workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain, and then in each iteration
/// to update, the loop counter.
/// \param Loc The source location description, the insertion location
/// is not used.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param SchedType Type of scheduling to be passed to the init function.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
/// \param Chunk The size of loop chunk considered as a unit when
/// scheduling. If \p nullptr, defaults to 1.
///
/// \returns Point where to insert code after the loop.
InsertPointTy createDynamicWorkshareLoop(const LocationDescription &Loc,
CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP,
omp::OMPScheduleType SchedType,
bool NeedsBarrier,
Value *Chunk = nullptr);
/// Modifies the canonical loop to be a workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain the loop bounds to be used in
/// the current thread, updates the relevant instructions in the canonical
/// loop and calls to an OpenMP runtime finalization function after the loop.
///
/// \param Loc The source location description, the insertion location
/// is not used.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
///
/// \returns Updated CanonicalLoopInfo.
CanonicalLoopInfo *createWorkshareLoop(const LocationDescription &Loc,
CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP,
bool NeedsBarrier);
/// Tile a loop nest.
///
/// Tiles the loops of \p Loops by the tile sizes in \p TileSizes. Loops in
/// \p Loops must be perfectly nested, from outermost to innermost loop
/// (i.e. Loops.front() is the outermost loop). The trip count llvm::Value
/// of every loop and every tile size must be usable in the outermost
/// loop's preheader. This implies that the loop nest is rectangular.
///
/// Example:
/// \code
/// for (int i = 0; i < 15; ++i) // Canonical loop "i"
/// for (int j = 0; j < 14; ++j) // Canonical loop "j"
/// body(i, j);
/// \endcode
///
/// After tiling with Loops={i,j} and TileSizes={5,7}, the loop is changed to
/// \code
/// for (int i1 = 0; i1 < 3; ++i1)
/// for (int j1 = 0; j1 < 2; ++j1)
/// for (int i2 = 0; i2 < 5; ++i2)
/// for (int j2 = 0; j2 < 7; ++j2)
/// body(i1*5+i2, j1*7+j2);
/// \endcode
///
/// The returned vector contains the loops {i1,j1,i2,j2}. The loops i1 and j1
/// are the floor loops, and the loops i2 and j2 are the tile loops. Tiling also
/// handles non-constant trip counts, non-constant tile sizes and trip counts
/// that are not multiples of the tile size. In the latter case the tile loop
/// of the last floor-loop iteration will have fewer iterations than specified
/// as its tile size.
///
///
/// @param DL Debug location for instructions added by tiling, for
/// instance the floor- and tile trip count computation.
/// @param Loops Loops to tile. The CanonicalLoopInfo objects are
/// invalidated by this method, i.e. should not be used after
/// tiling.
/// @param TileSizes For each loop in \p Loops, the tile size for that
/// dimension.
///
/// \returns A list of generated loops. Contains twice as many loops as the
/// input loop nest; the first half are the floor loops and the
/// second half are the tile loops.
std::vector<CanonicalLoopInfo *>
tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
ArrayRef<Value *> TileSizes);
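// A minimal usage sketch (not part of the upstream header): tiling a single
// canonical loop `CLI` by a hypothetical tile size of 32, given a DebugLoc `DL`:
//
//   Value *TileSize = Builder.getInt32(32);
//   std::vector<CanonicalLoopInfo *> Tiled =
//       OMPBuilder.tileLoops(DL, {CLI}, {TileSize});
//   CanonicalLoopInfo *Floor = Tiled[0]; // iterates over the tiles
//   CanonicalLoopInfo *Tile = Tiled[1];  // iterates within one tile
//   // `CLI` is invalidated by tileLoops and must not be used afterwards.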
/// Generator for '#omp flush'
///
/// \param Loc The location where the flush directive was encountered
void createFlush(const LocationDescription &Loc);
/// Generator for '#omp taskwait'
///
/// \param Loc The location where the taskwait directive was encountered.
void createTaskwait(const LocationDescription &Loc);
/// Generator for '#omp taskyield'
///
/// \param Loc The location where the taskyield directive was encountered.
void createTaskyield(const LocationDescription &Loc);
///}
/// Return the insertion point used by the underlying IRBuilder.
InsertPointTy getInsertionPoint() { return Builder.saveIP(); }
/// Update the internal location to \p Loc.
bool updateToLocation(const LocationDescription &Loc) {
Builder.restoreIP(Loc.IP);
Builder.SetCurrentDebugLocation(Loc.DL);
return Loc.IP.getBlock() != nullptr;
}
/// Return the function declaration for the runtime function with \p FnID.
FunctionCallee getOrCreateRuntimeFunction(Module &M,
omp::RuntimeFunction FnID);
Function *getOrCreateRuntimeFunctionPtr(omp::RuntimeFunction FnID);
/// Return the (LLVM-IR) string describing the source location \p LocStr.
Constant *getOrCreateSrcLocStr(StringRef LocStr);
/// Return the (LLVM-IR) string describing the default source location.
Constant *getOrCreateDefaultSrcLocStr();
/// Return the (LLVM-IR) string describing the source location identified by
/// the arguments.
Constant *getOrCreateSrcLocStr(StringRef FunctionName, StringRef FileName,
unsigned Line, unsigned Column);
/// Return the (LLVM-IR) string describing the source location \p Loc.
Constant *getOrCreateSrcLocStr(const LocationDescription &Loc);
/// Return an ident_t* encoding the source location \p SrcLocStr and \p Flags.
/// TODO: Create an enum class for the Reserve2Flags
Value *getOrCreateIdent(Constant *SrcLocStr,
omp::IdentFlag Flags = omp::IdentFlag(0),
unsigned Reserve2Flags = 0);
// Get the type corresponding to __kmpc_impl_lanemask_t from the deviceRTL
Type *getLanemaskType();
/// Generate control flow and cleanup for cancellation.
///
/// \param CancelFlag Flag indicating if the cancellation is performed.
/// \param CanceledDirective The kind of directive that is canceled.
/// \param ExitCB Extra code to be generated in the exit block.
void emitCancelationCheckImpl(Value *CancelFlag,
omp::Directive CanceledDirective,
FinalizeCallbackTy ExitCB = {});
/// Generate a barrier runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
/// \param DK The directive which caused the barrier
/// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
/// \param CheckCancelFlag Flag to indicate a cancel barrier return value
/// should be checked and acted upon.
///
/// \returns The insertion point after the barrier.
InsertPointTy emitBarrierImpl(const LocationDescription &Loc,
omp::Directive DK, bool ForceSimpleCall,
bool CheckCancelFlag);
/// Generate a flush runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitFlush(const LocationDescription &Loc);
/// The finalization stack made up of finalize callbacks currently in-flight,
/// wrapped into FinalizationInfo objects that reference also the finalization
/// target block and the kind of cancellable directive.
SmallVector<FinalizationInfo, 8> FinalizationStack;
/// Return true if the last entry in the finalization stack is of kind \p DK
/// and cancellable.
bool isLastFinalizationInfoCancellable(omp::Directive DK) {
return !FinalizationStack.empty() &&
FinalizationStack.back().IsCancellable &&
FinalizationStack.back().DK == DK;
}
/// Generate a taskwait runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitTaskwaitImpl(const LocationDescription &Loc);
/// Generate a taskyield runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitTaskyieldImpl(const LocationDescription &Loc);
/// Return the current thread ID.
///
/// \param Ident The ident (ident_t*) describing the query origin.
Value *getOrCreateThreadID(Value *Ident);
/// The underlying LLVM-IR module
Module &M;
/// The LLVM-IR Builder used to create IR.
IRBuilder<> Builder;
/// Map to remember source location strings
StringMap<Constant *> SrcLocStrMap;
/// Map to remember existing ident_t*.
DenseMap<std::pair<Constant *, uint64_t>, Value *> IdentMap;
/// Helper that contains information about regions we need to outline
/// during finalization.
struct OutlineInfo {
using PostOutlineCBTy = std::function<void(Function &)>;
PostOutlineCBTy PostOutlineCB;
BasicBlock *EntryBB, *ExitBB;
/// Collect all blocks in between EntryBB and ExitBB in both the given
/// vector and set.
void collectBlocks(SmallPtrSetImpl<BasicBlock *> &BlockSet,
SmallVectorImpl<BasicBlock *> &BlockVector);
/// Return the function that contains the region to be outlined.
Function *getFunction() const { return EntryBB->getParent(); }
};
/// Collection of regions that need to be outlined during finalization.
SmallVector<OutlineInfo, 16> OutlineInfos;
/// Collection of owned canonical loop objects that eventually need to be
/// free'd.
std::forward_list<CanonicalLoopInfo> LoopInfos;
/// Add a new region that will be outlined later.
void addOutlineInfo(OutlineInfo &&OI) { OutlineInfos.emplace_back(OI); }
/// An ordered map of auto-generated variables to their unique names.
/// It stores variables with the following names: 1) ".gomp_critical_user_" +
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
/// variables.
StringMap<AssertingVH<Constant>, BumpPtrAllocator> InternalVars;
/// Create the global variable holding the offload mappings information.
GlobalVariable *createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings,
std::string VarName);
/// Create the global variable holding the offload names information.
GlobalVariable *
createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names,
std::string VarName);
struct MapperAllocas {
AllocaInst *ArgsBase = nullptr;
AllocaInst *Args = nullptr;
AllocaInst *ArgSizes = nullptr;
};
/// Create the alloca instructions used in calls to mapper functions.
void createMapperAllocas(const LocationDescription &Loc,
InsertPointTy AllocaIP, unsigned NumOperands,
struct MapperAllocas &MapperAllocas);
/// Create the call for the target mapper function.
/// \param Loc The source location description.
/// \param MapperFunc Function to be called.
/// \param SrcLocInfo Source location information global.
/// \param MaptypesArg The argument holding the map types.
/// \param MapnamesArg The argument holding the map names.
/// \param MapperAllocas The AllocaInsts used for the call.
/// \param DeviceID Device ID for the call.
/// \param NumOperands Number of operands in the call.
void emitMapperCall(const LocationDescription &Loc, Function *MapperFunc,
Value *SrcLocInfo, Value *MaptypesArg, Value *MapnamesArg,
struct MapperAllocas &MapperAllocas, int64_t DeviceID,
unsigned NumOperands);
public:
/// Generator for __kmpc_copyprivate
///
/// \param Loc The source location description.
/// \param BufSize Number of elements in the buffer.
/// \param CpyBuf List of pointers to data to be copied.
/// \param CpyFn Function to call for copying data.
/// \param DidIt Flag variable; 1 for the 'single' thread, 0 otherwise.
///
/// \return The insertion position *after* the CopyPrivate call.
InsertPointTy createCopyPrivate(const LocationDescription &Loc,
llvm::Value *BufSize, llvm::Value *CpyBuf,
llvm::Value *CpyFn, llvm::Value *DidIt);
/// Generator for '#omp single'
///
/// \param Loc The source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
/// \param DidIt Local variable used as a flag to indicate 'single' thread
///
/// \returns The insertion position *after* the single call.
InsertPointTy createSingle(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, llvm::Value *DidIt);
/// Generator for '#omp master'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
///
/// \returns The insertion position *after* the master.
InsertPointTy createMaster(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB);
/// Generator for '#omp masked'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
///
/// \returns The insertion position *after* the masked region.
InsertPointTy createMasked(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, Value *Filter);
/// Generator for '#omp critical'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region body code.
/// \param FiniCB Callback to finalize variable copies.
/// \param CriticalName name of the lock used by the critical directive
/// \param HintInst Hint Instruction for hint clause associated with critical
///
/// \returns The insertion position *after* the critical region.
InsertPointTy createCritical(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB,
StringRef CriticalName, Value *HintInst);
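// A minimal usage sketch for '#pragma omp critical (lck)' (not part of the
// upstream header; `OMPBuilder`, `Builder`, and a finalization callback
// `FiniCB` as in the createParallel sketch; the lock name "lck" is
// hypothetical):
//
//   auto CriticalBodyCB = [&](OpenMPIRBuilder::InsertPointTy AllocaIP,
//                             OpenMPIRBuilder::InsertPointTy CodeGenIP,
//                             BasicBlock &ContinuationBB) {
//     Builder.restoreIP(CodeGenIP);
//     // ... emit the protected region here ...
//   };
//   Builder.restoreIP(OMPBuilder.createCritical(
//       OpenMPIRBuilder::LocationDescription(Builder), CriticalBodyCB, FiniCB,
//       "lck", /*HintInst=*/nullptr));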
/// Generator for '#omp sections'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion points to be used for alloca instructions.
/// \param SectionCBs Callbacks that will generate body of each section.
/// \param PrivCB Callback to copy a given variable (think copy constructor).
/// \param FiniCB Callback to finalize variable copies.
/// \param IsCancellable Flag to indicate a cancellable sections region.
/// \param IsNowait If true, the barrier that normally ensures all sections
/// have completed before execution continues is not generated.
/// \returns The insertion position *after* the sections.
InsertPointTy createSections(const LocationDescription &Loc,
InsertPointTy AllocaIP,
ArrayRef<StorableBodyGenCallbackTy> SectionCBs,
PrivatizeCallbackTy PrivCB,
FinalizeCallbackTy FiniCB, bool IsCancellable,
bool IsNowait);
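// A minimal usage sketch for a two-section '#pragma omp sections' (not part of
// the upstream header; `OMPBuilder`, `Builder`, `AllocaIP`, `PrivCB`, and
// `FiniCB` as in the createParallel sketch above):
//
//   SmallVector<OpenMPIRBuilder::StorableBodyGenCallbackTy, 2> SectionCBs;
//   for (int S = 0; S < 2; ++S)
//     SectionCBs.push_back([&, S](OpenMPIRBuilder::InsertPointTy AllocaIP,
//                                 OpenMPIRBuilder::InsertPointTy CodeGenIP,
//                                 BasicBlock &ContinuationBB) {
//       Builder.restoreIP(CodeGenIP);
//       // ... emit the body of section S ...
//     });
//   Builder.restoreIP(OMPBuilder.createSections(
//       OpenMPIRBuilder::LocationDescription(Builder), AllocaIP, SectionCBs,
//       PrivCB, FiniCB, /*IsCancellable=*/false, /*IsNowait=*/false));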
/// Generator for '#omp section'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region body code.
/// \param FiniCB Callback to finalize variable copies.
/// \returns The insertion position *after* the section.
InsertPointTy createSection(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB);
/// Generate conditional branch and relevant BasicBlocks through which private
/// threads copy the 'copyin' variables from Master copy to threadprivate
/// copies.
///
/// \param IP Insertion block for the copyin conditional.
/// \param MasterAddr A pointer to the master variable.
/// \param PrivateAddr A pointer to the threadprivate variable.
/// \param IntPtrTy Pointer size type.
/// \param BranchtoEnd Create a branch between the copyin.not.master blocks
/// and the copy.in.end block.
///
/// \returns The insertion point where the copying operation is to be emitted.
InsertPointTy createCopyinClauseBlocks(InsertPointTy IP, Value *MasterAddr,
Value *PrivateAddr,
llvm::IntegerType *IntPtrTy,
bool BranchtoEnd = true);
/// Create a runtime call for kmpc_Alloc
///
/// \param Loc The insert and source location description.
/// \param Size Size of allocated memory space
/// \param Allocator Allocator information instruction
/// \param Name Name of call Instruction for OMP_alloc
///
/// \returns CallInst to the OMP_Alloc call
CallInst *createOMPAlloc(const LocationDescription &Loc, Value *Size,
Value *Allocator, std::string Name = "");
/// Create a runtime call for kmpc_free
///
/// \param Loc The insert and source location description.
/// \param Addr Address of memory space to be freed
/// \param Allocator Allocator information instruction
/// \param Name Name of call Instruction for OMP_Free
///
/// \returns CallInst to the OMP_Free call
CallInst *createOMPFree(const LocationDescription &Loc, Value *Addr,
Value *Allocator, std::string Name = "");
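// A minimal usage sketch pairing createOMPAlloc with createOMPFree (not part of
// the upstream header; assumes `OMPBuilder`, `Builder`, a Value* `Allocator`
// holding the allocator handle, and a hypothetical allocation size of 1 KiB):
//
//   OpenMPIRBuilder::LocationDescription Loc(Builder);
//   CallInst *Buf = OMPBuilder.createOMPAlloc(Loc, Builder.getInt64(1024),
//                                             Allocator, "omp_buf");
//   // ... use Buf ...
//   OMPBuilder.createOMPFree(Loc, Buf, Allocator);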
/// Create a runtime call for kmpc_threadprivate_cached
///
/// \param Loc The insert and source location description.
/// \param Pointer pointer to data to be cached
/// \param Size size of data to be cached
/// \param Name Name of call Instruction for callinst
///
/// \returns CallInst to the thread private cache call.
CallInst *createCachedThreadPrivate(const LocationDescription &Loc,
llvm::Value *Pointer,
llvm::ConstantInt *Size,
const llvm::Twine &Name = Twine(""));
/// The `omp target` interface
///
/// For more information about the usage of this interface,
/// \see openmp/libomptarget/deviceRTLs/common/include/target.h
///
///{
/// Create a runtime call for kmpc_target_init
///
/// \param Loc The insert and source location description.
/// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
/// \param RequiresFullRuntime Indicate if a full device runtime is necessary.
InsertPointTy createTargetInit(const LocationDescription &Loc, bool IsSPMD, bool RequiresFullRuntime);
/// Create a runtime call for kmpc_target_deinit
///
/// \param Loc The insert and source location description.
/// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
/// \param RequiresFullRuntime Indicate if a full device runtime is necessary.
void createTargetDeinit(const LocationDescription &Loc, bool IsSPMD, bool RequiresFullRuntime);
///}
/// Declarations for LLVM-IR types (simple, array, function and structure) are
/// generated below. Their names are defined and used in OpenMPKinds.def. Here
/// we provide the declarations, the initializeTypes function will provide the
/// values.
///
///{
#define OMP_TYPE(VarName, InitValue) Type *VarName = nullptr;
#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \
ArrayType *VarName##Ty = nullptr; \
PointerType *VarName##PtrTy = nullptr;
#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \
FunctionType *VarName = nullptr; \
PointerType *VarName##Ptr = nullptr;
#define OMP_STRUCT_TYPE(VarName, StrName, ...) \
StructType *VarName = nullptr; \
PointerType *VarName##Ptr = nullptr;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
///}
private:
/// Create all simple and struct types exposed by the runtime and remember
/// the llvm::PointerTypes of them for easy access later.
void initializeTypes(Module &M);
/// Common interface for generating entry calls for OMP Directives.
/// If the directive has a region/body, it will set the insertion
/// point to the body.
///
/// \param OMPD Directive to generate entry blocks for
/// \param EntryCall Call to the entry OMP Runtime Function
/// \param ExitBB block where the region ends.
/// \param Conditional indicate if the entry call result will be used
/// to evaluate a conditional of whether a thread will execute
/// body code or not.
///
/// \return The insertion position in exit block
InsertPointTy emitCommonDirectiveEntry(omp::Directive OMPD, Value *EntryCall,
BasicBlock *ExitBB,
bool Conditional = false);
/// Common interface to finalize the region
///
/// \param OMPD Directive to generate exiting code for
/// \param FinIP Insertion point for emitting Finalization code and exit call
/// \param ExitCall Call to the ending OMP Runtime Function
/// \param HasFinalize indicate if the directive will require finalization
/// and has a finalization callback in the stack that
/// should be called.
///
/// \return The insertion position in exit block
InsertPointTy emitCommonDirectiveExit(omp::Directive OMPD,
InsertPointTy FinIP,
Instruction *ExitCall,
bool HasFinalize = true);
/// Common Interface to generate OMP inlined regions
///
/// \param OMPD Directive to generate inlined region for
/// \param EntryCall Call to the entry OMP Runtime Function
/// \param ExitCall Call to the ending OMP Runtime Function
/// \param BodyGenCB Body code generation callback.
/// \param FiniCB Finalization Callback. Will be called when finalizing region
/// \param Conditional indicate if the entry call result will be used
/// to evaluate a conditional of whether a thread will execute
/// body code or not.
/// \param HasFinalize indicate if the directive will require finalization
/// and has a finalization callback in the stack that
/// should be called.
/// \param IsCancellable If HasFinalize is set to true, indicate if the
/// directive should be cancellable.
/// \return The insertion point after the region
InsertPointTy
EmitOMPInlinedRegion(omp::Directive OMPD, Instruction *EntryCall,
Instruction *ExitCall, BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, bool Conditional = false,
bool HasFinalize = true, bool IsCancellable = false);
/// Get the platform-specific name separator.
/// \param Parts different parts of the final name that needs separation
/// \param FirstSeparator First separator used between the initial two
/// parts of the name.
/// \param Separator separator used between all of the rest consecutive
/// parts of the name
static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
StringRef FirstSeparator,
StringRef Separator);
/// Gets (if a variable with the given name already exists) or creates an
/// internal global variable with the specified Name. The created variable has
/// CommonLinkage by default and is initialized to a null value.
/// \param Ty Type of the global variable. If it already exists, the type
/// must be the same.
/// \param Name Name of the variable.
Constant *getOrCreateOMPInternalVariable(Type *Ty, const Twine &Name,
unsigned AddressSpace = 0);
/// Returns corresponding lock object for the specified critical region
/// name. If the lock object does not exist it is created, otherwise the
/// reference to the existing copy is returned.
/// \param CriticalName Name of the critical region.
///
Value *getOMPCriticalRegionLock(StringRef CriticalName);
/// Callback type for Atomic Expression update
/// ex:
/// \code{.cpp}
/// unsigned x = 0;
/// #pragma omp atomic update
/// x = Expr(x_old); //Expr() is any legal operation
/// \endcode
///
/// \param XOld the value of the atomic memory address to use for update
/// \param IRB reference to the IRBuilder to use
///
/// \returns Value to update X to.
using AtomicUpdateCallbackTy =
const function_ref<Value *(Value *XOld, IRBuilder<> &IRB)>;
private:
enum AtomicKind { Read, Write, Update, Capture };
/// Determine whether to emit flush or not
///
/// \param Loc The insert and source location description.
/// \param AO The required atomic ordering
/// \param AK The OpenMP atomic operation kind used.
///
/// \returns Whether a flush was emitted or not.
bool checkAndEmitFlushAfterAtomic(const LocationDescription &Loc,
AtomicOrdering AO, AtomicKind AK);
/// Emit atomic update for constructs: X = X BinOp Expr, or X = Expr BinOp X.
/// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
/// Only Scalar data types.
///
/// \param AllocIP Instruction to create AllocaInst before.
/// \param X The target atomic pointer to be updated
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic
/// instructions.
/// \param RMWOp The binary operation used for the update. If the
/// operation is not supported by atomicrmw,
/// or belongs to {FADD, FSUB, BAD_BINOP},
/// then a `cmpxchg`-based atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param VolatileX True if \a X is volatile.
/// \param IsXLHSInRHSPart true if \a X is Left H.S. in Right H.S. part of
/// the update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
///
/// \returns A pair of the old value of X before the update, and the value
/// used for the update.
std::pair<Value *, Value *> emitAtomicUpdate(Instruction *AllocIP, Value *X,
Value *Expr, AtomicOrdering AO,
AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp,
bool VolatileX,
bool IsXLHSInRHSPart);
/// Emit the binary op. described by \p RMWOp, using \p Src1 and \p Src2 .
///
/// \Return The instruction
Value *emitRMWOpAsInstruction(Value *Src1, Value *Src2,
AtomicRMWInst::BinOp RMWOp);
public:
/// A struct to pack relevant information while generating atomic Ops.
struct AtomicOpValue {
Value *Var = nullptr;
bool IsSigned = false;
bool IsVolatile = false;
};
/// Emit atomic Read for : V = X --- Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param X The target pointer to be atomically read
/// \param V Memory address where to store atomically read
/// value
/// \param AO Atomic ordering of the generated atomic
/// instructions.
///
/// \return Insertion point after generated atomic read IR.
InsertPointTy createAtomicRead(const LocationDescription &Loc,
AtomicOpValue &X, AtomicOpValue &V,
AtomicOrdering AO);
/// Emit atomic write for : X = Expr --- Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param X The target pointer to be atomically written to
/// \param Expr The value to store.
/// \param AO Atomic ordering of the generated atomic
/// instructions.
///
/// \return Insertion point after generated atomic Write IR.
InsertPointTy createAtomicWrite(const LocationDescription &Loc,
AtomicOpValue &X, Value *Expr,
AtomicOrdering AO);
/// Emit atomic update for constructs: X = X BinOp Expr, or X = Expr BinOp X.
/// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
/// Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param AllocIP Instruction to create AllocaInst before.
/// \param X The target atomic pointer to be updated
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic instructions.
/// \param RMWOp The binary operation used for the update. If the operation
/// is not supported by atomicrmw, or belongs to
/// {FADD, FSUB, BAD_BINOP}, then a `cmpxchg`-based
/// atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param IsXLHSInRHSPart true if \a X is Left H.S. in Right H.S. part of
/// the update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
///
/// \return Insertion point after generated atomic update IR.
InsertPointTy createAtomicUpdate(const LocationDescription &Loc,
Instruction *AllocIP, AtomicOpValue &X,
Value *Expr, AtomicOrdering AO,
AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp,
bool IsXLHSInRHSPart);
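// A minimal usage sketch for '#pragma omp atomic update' on 'x += 1' (not part
// of the upstream header; assumes `OMPBuilder`, `Builder`, an Instruction*
// `AllocIP` for allocas, and a Value* `XAddr` pointing at a signed 32-bit int):
//
//   OpenMPIRBuilder::AtomicOpValue X;
//   X.Var = XAddr;
//   X.IsSigned = true;
//   auto UpdateOp = [](Value *XOld, IRBuilder<> &IRB) {
//     return IRB.CreateAdd(XOld, IRB.getInt32(1));
//   };
//   Builder.restoreIP(OMPBuilder.createAtomicUpdate(
//       OpenMPIRBuilder::LocationDescription(Builder), AllocIP, X,
//       Builder.getInt32(1), AtomicOrdering::Monotonic, AtomicRMWInst::Add,
//       UpdateOp, /*IsXLHSInRHSPart=*/true));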
/// Emit atomic update for constructs: --- Only Scalar data types
/// V = X; X = X BinOp Expr ,
/// X = X BinOp Expr; V = X,
/// V = X; X = Expr BinOp X,
/// X = Expr BinOp X; V = X,
/// V = X; X = UpdateOp(X),
/// X = UpdateOp(X); V = X,
///
/// \param Loc The insert and source location description.
/// \param AllocIP Instruction to create AllocaInst before.
/// \param X The target atomic pointer to be updated
/// \param V Memory address where to store captured value
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic instructions
/// \param RMWOp The binary operation used for the update. If the
/// operation is not supported by atomicrmw, or belongs to
/// {FADD, FSUB, BAD_BINOP}, then a `cmpxchg`-based
/// atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param UpdateExpr true if X is an in place update of the form
/// X = X BinOp Expr or X = Expr BinOp X
/// \param IsXLHSInRHSPart true if X is Left H.S. in Right H.S. part of the
/// update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
/// \param IsPostfixUpdate true if original value of 'x' must be stored in
/// 'v', not an updated one.
///
/// \return Insertion point after generated atomic capture IR.
InsertPointTy
createAtomicCapture(const LocationDescription &Loc, Instruction *AllocIP,
AtomicOpValue &X, AtomicOpValue &V, Value *Expr,
AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp, bool UpdateExpr,
bool IsPostfixUpdate, bool IsXLHSInRHSPart);
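// Illustrative sketch (not part of the original interface): the update and
// capture semantics documented above, expressed with plain C++ std::atomic
// rather than the IR these helpers emit. The cmpExch fallback mentioned for
// RMWOp corresponds to the usual compare-and-swap retry loop shown here.
//
//   #include <atomic>
//
//   // X = UpdateOp(X), returning the captured value (the old value for a
//   // postfix capture, the new value for a prefix capture).
//   template <typename T, typename UpdateFn>
//   T atomicUpdateCapture(std::atomic<T> &X, UpdateFn UpdateOp,
//                         bool IsPostfixUpdate) {
//     T Old = X.load(std::memory_order_relaxed);
//     T New;
//     do {
//       New = UpdateOp(Old);              // e.g. Old + Expr
//     } while (!X.compare_exchange_weak(Old, New));
//     return IsPostfixUpdate ? Old : New; // value stored into V
//   }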
/// Create the control flow structure of a canonical OpenMP loop.
///
/// The emitted loop will be disconnected, i.e. no edge to the loop's
/// preheader and no terminator in the AfterBB. The OpenMPIRBuilder's
/// IRBuilder location is not preserved.
///
/// \param DL DebugLoc used for the instructions in the skeleton.
/// \param TripCount Value to be used for the trip count.
/// \param F Function in which to insert the BasicBlocks.
/// \param PreInsertBefore Where to insert BBs that execute before the body,
/// typically the body itself.
/// \param PostInsertBefore Where to insert BBs that execute after the body.
/// \param Name Base name used to derive BB
/// and instruction names.
///
/// \returns The CanonicalLoopInfo that represents the emitted loop.
CanonicalLoopInfo *createLoopSkeleton(DebugLoc DL, Value *TripCount,
Function *F,
BasicBlock *PreInsertBefore,
BasicBlock *PostInsertBefore,
const Twine &Name = {});
};
/// Class to represent the control flow structure of an OpenMP canonical loop.
///
/// The control-flow structure is standardized for easy consumption by
/// directives associated with loops. For instance, the worksharing-loop
/// construct may change this control flow such that each loop iteration is
/// executed on only one thread.
///
/// The control flow can be described as follows:
///
///     Preheader
///          |
///    /-> Header
///    |      |
///    |     Cond---\
///    |      |     |
///    |     Body   |
///    |     | |    |
///    |    <...>   |
///    |     | |    |
///     \--Latch    |
///                 |
///                Exit
///                 |
///               After
///
/// Code in the header, condition block, latch and exit block must not have any
/// side-effect. The body block is the single entry point into the loop body,
/// which may contain arbitrary control flow as long as all control paths
/// eventually branch to the latch block.
///
/// Defined outside OpenMPIRBuilder because one cannot forward-declare nested
/// classes.
class CanonicalLoopInfo {
friend class OpenMPIRBuilder;
private:
/// Whether this object currently represents a loop.
bool IsValid = false;
BasicBlock *Preheader;
BasicBlock *Header;
BasicBlock *Cond;
BasicBlock *Body;
BasicBlock *Latch;
BasicBlock *Exit;
BasicBlock *After;
/// Add the control blocks of this loop to \p BBs.
///
/// This does not include any block from the body, including the one returned
/// by getBody().
void collectControlBlocks(SmallVectorImpl<BasicBlock *> &BBs);
public:
/// The preheader ensures that there is only a single edge entering the loop.
/// Code that must be executed before any loop iteration can be emitted here,
/// such as computing the loop trip count and begin lifetime markers. Code in
/// the preheader is not considered part of the canonical loop.
BasicBlock *getPreheader() const { return Preheader; }
/// The header is the entry for each iteration. In the canonical control flow,
/// it only contains the PHINode for the induction variable.
BasicBlock *getHeader() const { return Header; }
/// The condition block computes whether there is another loop iteration. If
/// yes, branches to the body; otherwise to the exit block.
BasicBlock *getCond() const { return Cond; }
/// The body block is the single entry for a loop iteration and not controlled
/// by CanonicalLoopInfo. It can contain arbitrary control flow but must
/// eventually branch to the \p Latch block.
BasicBlock *getBody() const { return Body; }
/// Reaching the latch indicates the end of the loop body code. In the
/// canonical control flow, it only contains the increment of the induction
/// variable.
BasicBlock *getLatch() const { return Latch; }
/// Reaching the exit indicates no more iterations are being executed.
BasicBlock *getExit() const { return Exit; }
/// The after block is intended for clean-up code such as lifetime end
/// markers. It is separate from the exit block to ensure that, analogous to
/// the preheader, it has just a single entry edge and is free of PHI nodes
/// should there be multiple loop exits (such as from break
/// statements/cancellations).
BasicBlock *getAfter() const { return After; }
/// Returns the llvm::Value containing the number of loop iterations. It must
/// be valid in the preheader and always interpreted as an unsigned integer of
/// any bit-width.
Value *getTripCount() const {
Instruction *CmpI = &Cond->front();
assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount");
return CmpI->getOperand(1);
}
/// Returns the instruction representing the current logical induction
/// variable. Always unsigned, always starting at 0 with an increment of one.
Instruction *getIndVar() const {
Instruction *IndVarPHI = &Header->front();
assert(isa<PHINode>(IndVarPHI) && "First inst must be the IV PHI");
return IndVarPHI;
}
/// Return the type of the induction variable (and the trip count).
Type *getIndVarType() const { return getIndVar()->getType(); }
/// Return the insertion point for user code before the loop.
OpenMPIRBuilder::InsertPointTy getPreheaderIP() const {
return {Preheader, std::prev(Preheader->end())};
};
/// Return the insertion point for user code in the body.
OpenMPIRBuilder::InsertPointTy getBodyIP() const {
return {Body, Body->begin()};
};
/// Return the insertion point for user code after the loop.
OpenMPIRBuilder::InsertPointTy getAfterIP() const {
return {After, After->begin()};
};
Function *getFunction() const { return Header->getParent(); }
/// Consistency self-check.
void assertOK() const;
};
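// Illustrative sketch (not part of the original header): one plausible way a
// consumer could emit user code into the body of a canonical loop, assuming
// the usual llvm::IRBuilder<> API. `Dst` is a hypothetical pointer to an array
// of at least getTripCount() elements of the induction variable's type; both
// the helper name and that precondition are assumptions for this example.
//
//   void emitBody(CanonicalLoopInfo *CLI, Value *Dst) {
//     IRBuilder<> Builder(CLI->getFunction()->getContext());
//     Builder.restoreIP(CLI->getBodyIP());   // user code goes into the body
//     Value *IV = CLI->getIndVar();          // unsigned, starts at 0, step 1
//     Value *Slot = Builder.CreateGEP(CLI->getIndVarType(), Dst, IV);
//     Builder.CreateStore(IV, Slot);         // Dst[i] = i
//     // Control flow inside the body may be arbitrary as long as it
//     // eventually reaches the latch.
//   }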
} // end namespace llvm
#endif // LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
|
GB_unop__minv_uint8_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__minv_uint8_uint8)
// op(A') function: GB (_unop_tran__minv_uint8_uint8)
// C type: uint8_t
// A type: uint8_t
// cast: uint8_t cij = aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 8)
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 8) ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = aij ; \
Cx [pC] = GB_IMINV_UNSIGNED (z, 8) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__minv_uint8_uint8)
(
uint8_t *Cx, // Cx and Ax may be aliased
const uint8_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t aij = Ax [p] ;
uint8_t z = aij ;
Cx [p] = GB_IMINV_UNSIGNED (z, 8) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint8_t aij = Ax [p] ;
uint8_t z = aij ;
Cx [p] = GB_IMINV_UNSIGNED (z, 8) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__minv_uint8_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
fieldize.h | #include <Python.h>
#include "numpy/arrayobject.h"
#include <new>
#include <map>
class Summer
{
public:
Summer(double * field_i, int nx_i):
field(field_i), nx(nx_i)
{};
/* virtual void doSum(const double input, const int xoff, const int yoff); */
protected:
double * const field;
const int nx;
};
//A class which interpolates data onto a regular grid, projecting
//in the x direction and using SPH interpolation.
template <class T> class SphInterp
{
public:
//sum: Summer-like object (e.g. SimpleSummer, or KahanSummer for compensated
//summation) that owns the output array and accumulates interpolated data onto it
//nx: size of the output grid is nx*nx
//periodic: should we assume the input array is periodic?
SphInterp(T& sum_i, int nx_i, bool periodic_i):
sum(sum_i), nx(nx_i), periodic(periodic_i)
{};
//pos: array of particle positions
//radii: particle smoothing lengths
//value: amount to interpolate to grid
//weights: weights with which to interpolate
//nval: size of the above arrays
int do_work(PyArrayObject *pos, PyArrayObject *radii, PyArrayObject *value, PyArrayObject *weights, const npy_int nval);
/* { */
/* */
/* return 0; */
/* }; */
private:
T& sum;
const int nx;
const bool periodic;
};
class SimpleSummer: public Summer
{
public:
SimpleSummer(double * field_i, int nx_i):
Summer(field_i, nx_i)
{};
inline void doSum(const double input, const int xoff, const int yoff)
{
field[nx*xoff+yoff]+=input;
}
};
//As above, but interpolation uses Kahan Summation
class KahanSummer: public Summer
{
public:
KahanSummer(double * field_i, int nx_i):
Summer(field_i, nx_i)
{
//Allocate Kahan compensation array, and throw if we can't.
comp = (double *) calloc(nx*nx,sizeof(double));
if( !comp )
throw std::bad_alloc();
}
~KahanSummer()
{
free(comp);
};
/*Evaluate one iteration of Kahan Summation: sum is the current value of the field,
*comp the compensation array, input the value to add this time.*/
inline void doSum(const double input, const int xoff, const int yoff)
{
const int off = nx*xoff+yoff;
const double yy = input - *(comp+off);
const double temp = *(field+off)+yy; //Alas, field is big, y small, so low-order digits of y are lost.
*(comp+off) = (temp - *(field+off)) -yy; //(t - field) recovers the high-order part of y; subtracting y recovers -(low part of y)
*(field+off) = temp; //Algebraically, c should always be zero. Beware eagerly optimising compilers!
}
private:
double * comp;
};
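//Standalone illustration (not used by the classes here) of the compensated
//summation that KahanSummer::doSum performs element-wise: the compensation
//term tracks the low-order bits lost when adding a small value to a large
//running total.
inline double kahan_sum(const double *values, const int n)
{
    double total = 0;
    double comp = 0;                     //running compensation
    for (int i = 0; i < n; i++) {
        const double yy = values[i] - comp;
        const double temp = total + yy;  //low-order digits of yy are lost here
        comp = (temp - total) - yy;      //recover -(lost part of yy)
        total = temp;
    }
    return total;
}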
//As above, but discard all interpolation except
//onto a predefined list of array elements
class DiscardingSummer: public Summer
{
public:
DiscardingSummer(double * field_i, PyArrayObject * positions, int nx_i):
Summer(field_i, nx_i)
{
npy_intp nlist = PyArray_SIZE(positions);
//Build an index of the actual positions of each item we want in the output array.
for(int i=0; i<nlist;i++)
index.insert(std::pair<int,int>(*(int64_t *)PyArray_GETPTR1(positions,i),i));
//Allocate Kahan compensation array, and throw if we can't.
comp = (double *) calloc(nlist,sizeof(double));
if( !comp )
throw std::bad_alloc();
}
/*Evaluate one iteration of Kahan Summation: sum is the current value of the field,
*comp the compensation array, input the value to add this time.*/
inline void doSum(const double input, const int xoff, const int yoff)
{
const int off = nx*xoff+yoff;
std::map<const int, const int>::const_iterator it = index.find(off);
if(it != index.end())
{
const double yy = input - comp[it->second];
const double temp = field[it->second]+yy; //Alas, field is big, y small, so low-order digits of y are lost.
comp[it->second] = temp - field[it->second] -yy; //(t - field) recovers the high-order part of y; subtracting y recovers -(low part of y)
field[it->second] = temp; //Algebraically, c should always be zero. Beware eagerly optimising compilers!
}
}
private:
std::map<const int, const int> index;
double * comp;
};
/*Compute the SPH weighting for this cell, using the trapezium rule.
* rr is the smoothing length, r0 is the distance of the cell from the center*/
double compute_sph_cell_weight(double rr, double r0);
/**
Do the hard work of interpolating the particles handed to us from Python onto the grid with an SPH kernel.
*/
template <class T> int SphInterp<T>::do_work(PyArrayObject *pos, PyArrayObject *radii, PyArrayObject *value, PyArrayObject *weights, const npy_int nval)
{
for(int p=0;p<nval;p++){
//Temp variables
float pp[2];
pp[0]= *(float *)PyArray_GETPTR2(pos,p,1);
pp[1]= *(float *)PyArray_GETPTR2(pos,p,2);
const float rr= *((float *)PyArray_GETPTR1(radii,p));
const float val= *((float *)PyArray_GETPTR1(value,p));
double weight = 1;
if (PyArray_DIM(weights,0) == nval){
weight= *((double *)PyArray_GETPTR1(weights,p));
//Why do we do this? Because PyArray_DIM(None) == 1.
//Thus, if we have been passed a single particle with weights=None,
//this branch is still taken and could read a zero weight, causing infinities.
if (weight == 0)
weight = 1;
}
//Max size of kernel
const int upgx = floor(pp[0]+rr);
const int upgy = floor(pp[1]+rr);
const int lowgx = floor(pp[0]-rr);
const int lowgy = floor(pp[1]-rr);
//Try to save some integration work if this particle lies entirely within this cell
if (lowgx==upgx && lowgy==upgy && lowgx >= 0 && lowgy >= 0){
sum.doSum(val/weight, lowgx,lowgy);
continue;
}
/*Array for storing cell weights*/
double sph_w[upgy-lowgy+1][upgx-lowgx+1];
/*Total of cell weights*/
double total=0;
/* First compute the cell weights.
* Subsample the cells if the smoothing length is O(1 cell).
* This is more accurate, and also avoids edge cases where the particle rests exactly on a cell boundary.*/
int nsub=2*((int)(2./rr))+1;
double subs[nsub];
/*Spread subsamples evenly across cell*/
for(int i=0; i < nsub; i++)
subs[i] = (i+1.)/(1.*nsub+1);
#pragma omp parallel for reduction(+:total)
for(int gy=lowgy;gy<=upgy;gy++)
for(int gx=lowgx;gx<=upgx;gx++){
sph_w[gy-lowgy][gx-lowgx]=0;
for(int iy=0; iy< nsub; iy++)
for(int ix=0; ix< nsub; ix++){
double xx = gx-pp[0]+subs[ix];
double yy = gy-pp[1]+subs[iy];
double r0 = sqrt(xx*xx+yy*yy);
sph_w[gy-lowgy][gx-lowgx]+=compute_sph_cell_weight(rr,r0)/nsub/nsub;
}
total+=sph_w[gy-lowgy][gx-lowgx];
}
// if(total > 1.05)
// tothigh++;
// if(total< 0.5)
// totlow++;
if(total == 0){
// fprintf(stderr,"Massless particle detected! rr=%g gy=%d gx=%d nsub = %d pp= %g %g \n",rr,upgy-lowgy,upgx-lowgx, nsub,-pp[0]+lowgx,-pp[1]+lowgy);
return 1;
}
/* Some cells will be only partially in the array: only partially add them.
* Then add the right fraction to the total array*/
#pragma omp parallel for
for(int gy=std::max(lowgy,0);gy<=std::min(upgy,nx-1);gy++)
for(int gx=std::max(lowgx,0);gx<=std::min(upgx,nx-1);gx++){
sum.doSum(val*sph_w[gy-lowgy][gx-lowgx]/total/weight,gx,gy);
}
//Deal with cells that have wrapped around the edges of the grid
if (periodic){
//Wrapping y over
#pragma omp parallel for
for(int gy=nx-1;gy<=upgy;gy++){
//Wrapping only y over
for(int gx=std::max(lowgx,0);gx<=std::min(upgx,nx-1);gx++){
sum.doSum(val*sph_w[gy-lowgy][gx-lowgx]/total/weight,gx,gy-(nx-1));
}
//y over, x over
for(int gx=nx-1;gx<=upgx;gx++){
sum.doSum(val*sph_w[gy-lowgy][gx-lowgx]/total/weight,gx-(nx-1),gy-(nx-1));
}
//y over, x under
for(int gx=lowgx;gx<=0;gx++){
sum.doSum(val*sph_w[gy-lowgy][gx-lowgx]/total/weight,gx+(nx-1),gy-(nx-1));
}
}
//Wrapping y under
#pragma omp parallel for
for(int gy=lowgy;gy<=0;gy++){
//Only y under
for(int gx=std::max(lowgx,0);gx<=std::min(upgx,nx-1);gx++){
sum.doSum(val*sph_w[gy-lowgy][gx-lowgx]/total/weight,gx,gy+(nx-1));
}
//y under, x over
for(int gx=nx-1;gx<=upgx;gx++){
sum.doSum(val*sph_w[gy-lowgy][gx-lowgx]/total/weight,gx-(nx-1),gy+(nx-1));
}
//y under, x under
for(int gx=lowgx;gx<=0;gx++){
sum.doSum(val*sph_w[gy-lowgy][gx-lowgx]/total/weight,gx+(nx-1),gy+(nx-1));
}
}
//Finally wrap only x
#pragma omp parallel for
for(int gy=std::max(lowgy,0);gy<=std::min(upgy,nx-1);gy++){
//x over
for(int gx=nx-1;gx<=upgx;gx++){
sum.doSum(val*sph_w[gy-lowgy][gx-lowgx]/total/weight,gx-(nx-1),gy);
}
//x under
for(int gx=lowgx;gx<=0;gx++){
sum.doSum(val*sph_w[gy-lowgy][gx-lowgx]/total/weight,gx+(nx-1),gy);
}
}
}
}
return 0;
}
|
tinyexr.h | #ifndef TINYEXR_H_
#define TINYEXR_H_
/*
Copyright (c) 2014 - 2020, Syoyo Fujita and many contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Syoyo Fujita nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// TinyEXR contains some OpenEXR code, which is licensed under ------------
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2002, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////
// End of OpenEXR license -------------------------------------------------
//
//
// Do this:
// #define TINYEXR_IMPLEMENTATION
// before you include this file in *one* C or C++ file to create the
// implementation.
//
// // i.e. it should look like this:
// #include ...
// #include ...
// #include ...
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//
#include <stddef.h> // for size_t
#include <stdint.h> // guess stdint.h is available(C99)
#ifdef __cplusplus
extern "C" {
#endif
// Use embedded miniz or not to decode ZIP format pixel data. Linking with zlib
// is required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif
// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif
#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0) // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#ifndef TINYEXR_USE_THREAD
#define TINYEXR_USE_THREAD (0) // No threaded loading.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#ifndef TINYEXR_USE_OPENMP
#ifdef _OPENMP
#define TINYEXR_USE_OPENMP (1)
#else
#define TINYEXR_USE_OPENMP (0)
#endif
#endif
#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-6)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-7)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-8)
#define TINYEXR_ERROR_INVALID_HEADER (-9)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-10)
#define TINYEXR_ERROR_CANT_WRITE_FILE (-11)
#define TINYEXR_ERROR_SERIALZATION_FAILED (-12)
#define TINYEXR_ERROR_LAYER_NOT_FOUND (-13)
// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }
// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)
#define TINYEXR_MAX_HEADER_ATTRIBUTES (1024)
#define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128)
#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension
#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)
#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)
#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)
typedef struct _EXRVersion {
int version; // this must be 2
int tiled; // tile format image
int long_name; // long name attribute
int non_image; // deep image(EXR 2.0)
int multipart; // multi-part(EXR 2.0)
} EXRVersion;
typedef struct _EXRAttribute {
char name[256]; // name and type are up to 255 chars long.
char type[256];
unsigned char *value; // uint8_t*
int size;
int pad0;
} EXRAttribute;
typedef struct _EXRChannelInfo {
char name[256]; // less than 255 bytes long
int pixel_type;
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3];
} EXRChannelInfo;
typedef struct _EXRTile {
int offset_x;
int offset_y;
int level_x;
int level_y;
int width; // actual width in a tile.
int height; // actual height in a tile.
unsigned char **images; // image[channels][pixels]
} EXRTile;
typedef struct _EXRBox2i {
int min_x;
int min_y;
int max_x;
int max_y;
} EXRBox2i;
typedef struct _EXRHeader {
float pixel_aspect_ratio;
int line_order;
EXRBox2i data_window;
EXRBox2i display_window;
float screen_window_center[2];
float screen_window_width;
int chunk_count;
// Properties for tiled format(`tiledesc`).
int tiled;
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
int long_name;
int non_image;
int multipart;
unsigned int header_len;
// Custom attributes (excludes required attributes, e.g. `channels`,
// `compression`, etc.)
int num_custom_attributes;
EXRAttribute *custom_attributes; // array of EXRAttribute. size =
// `num_custom_attributes`.
EXRChannelInfo *channels; // [num_channels]
int *pixel_types; // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
// each channel. This is overwritten with `requested_pixel_types` when
// loading.
int num_channels;
int compression_type; // compression type(TINYEXR_COMPRESSIONTYPE_*)
int *requested_pixel_types; // Filled initially by
// ParseEXRHeaderFrom(Memory|File), then users
// can edit it (only valid for HALF pixel type
// channels)
} EXRHeader;
typedef struct _EXRMultiPartHeader {
int num_headers;
EXRHeader *headers;
} EXRMultiPartHeader;
typedef struct _EXRImage {
EXRTile *tiles; // Tiled pixel data. The application must reconstruct image
// from tiles manually. NULL if scanline format.
unsigned char **images; // image[channels][pixels]. NULL if tiled format.
int width;
int height;
int num_channels;
// Properties for tile format.
int num_tiles;
} EXRImage;
typedef struct _EXRMultiPartImage {
int num_images;
EXRImage *images;
} EXRMultiPartImage;
typedef struct _DeepImage {
const char **channel_names;
float ***image; // image[channels][scanlines][samples]
int **offset_table; // offset_table[scanline][offsets]
int num_channels;
int width;
int height;
int pad0;
} DeepImage;
// @deprecated { For backward compatibility. Not recommended to use. }
// Loads single-frame OpenEXR image. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
const char *filename, const char **err);
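// A minimal usage sketch (illustrative; assumes <stdio.h>/<stdlib.h> are
// available, error handling shortened):
//
//   float *rgba = NULL;
//   int width, height;
//   const char *err = NULL;
//   int ret = LoadEXR(&rgba, &width, &height, "input.exr", &err);
//   if (ret != TINYEXR_SUCCESS) {
//     if (err) {
//       fprintf(stderr, "EXR load error: %s\n", err);
//       FreeEXRErrorMessage(err);
//     }
//   } else {
//     // ... use rgba[4 * (y * width + x) + c] ...
//     free(rgba);
//   }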
// Loads single-frame OpenEXR image by specifying the layer name. Assume the EXR
// image contains A(single channel alpha) or RGB(A) channels. Application must
// free image data as returned by `out_rgba`.
// Result image format is: float x RGBA x width x height.
// Returns negative value and may set error string in `err` when there's an
// error. When the specified layer name is not found in the EXR file,
// the function will return `TINYEXR_ERROR_LAYER_NOT_FOUND`.
extern int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
const char *filename, const char *layer_name,
const char **err);
//
// Get layer infos from EXR file.
//
// @param[out] layer_names List of layer names. Application must free memory
// after using this.
// @param[out] num_layers The number of layers
// @param[out] err Error string(will be filled when the function returns error
// code). Free it using FreeEXRErrorMessage after using this value.
//
// @return TINYEXR_SUCCESS upon success.
//
extern int EXRLayers(const char *filename, const char **layer_names[],
int *num_layers, const char **err);
// @deprecated { to be removed. }
// Simple wrapper API for ParseEXRHeaderFromFile.
// Checks whether the given file is an EXR file (by just looking up the header).
// @return TINYEXR_SUCCESS for an EXR image, TINYEXR_ERROR_INVALID_HEADER for
// others
extern int IsEXR(const char *filename);
// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// components must be 1(Grayscale), 3(RGB) or 4(RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x width x
// height`
// Save image as fp16(HALF) format when `save_as_fp16` is a positive non-zero
// value.
// Save image as fp32(FLOAT) format when `save_as_fp16` is 0.
// Use ZIP compression by default.
// Returns negative value and may set error string in `err` when there's an
// error
extern int SaveEXR(const float *data, const int width, const int height,
const int components, const int save_as_fp16,
const char *filename, const char **err);
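// A minimal usage sketch (illustrative): save an RGB float buffer as fp16 EXR.
//
//   const char *err = NULL;
//   int ret = SaveEXR(rgb /* float*, 3 * width * height values */, width,
//                     height, 3 /* components */, 1 /* save_as_fp16 */,
//                     "output.exr", &err);
//   if (ret != TINYEXR_SUCCESS && err) {
//     fprintf(stderr, "EXR save error: %s\n", err);
//     FreeEXRErrorMessage(err);
//   }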
// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);
// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);
// Frees internal data of EXRHeader struct
extern int FreeEXRHeader(EXRHeader *exr_header);
// Frees internal data of EXRImage struct
extern int FreeEXRImage(EXRImage *exr_image);
// Frees error message
extern void FreeEXRErrorMessage(const char *msg);
// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);
// Parse EXR version header from memory-mapped EXR data.
extern int ParseEXRVersionFromMemory(EXRVersion *version,
const unsigned char *memory, size_t size);
// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
const char *filename, const char **err);
// Parse single-part OpenEXR header from a memory and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromMemory(EXRHeader *header,
const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err);
// Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
// array.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const char *filename,
const char **err);
// Parse multi-part OpenEXR headers from a memory and initialize `EXRHeader*`
// array
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const unsigned char *memory,
size_t size, const char **err);
// Loads single-part OpenEXR image from a file.
// Application must setup `ParseEXRHeaderFromFile` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header,
const char *filename, const char **err);
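// A minimal usage sketch of the low-level single-part API (illustrative;
// requesting FLOAT pixels for HALF channels is optional, error handling
// shortened):
//
//   EXRVersion version;
//   if (ParseEXRVersionFromFile(&version, "input.exr") != TINYEXR_SUCCESS ||
//       version.multipart) { /* handle unsupported file */ }
//
//   EXRHeader header;
//   InitEXRHeader(&header);
//   const char *err = NULL;
//   if (ParseEXRHeaderFromFile(&header, &version, "input.exr", &err) !=
//       TINYEXR_SUCCESS) { /* report err, then FreeEXRErrorMessage(err) */ }
//
//   for (int c = 0; c < header.num_channels; c++) {
//     if (header.pixel_types[c] == TINYEXR_PIXELTYPE_HALF)
//       header.requested_pixel_types[c] = TINYEXR_PIXELTYPE_FLOAT;
//   }
//
//   EXRImage image;
//   InitEXRImage(&image);
//   if (LoadEXRImageFromFile(&image, &header, "input.exr", &err) !=
//       TINYEXR_SUCCESS) { /* report err, then FreeEXRErrorMessage(err) */ }
//
//   // ... image.images[c] holds one plane of width*height pixels per channel
//   //     (scanline format; tiled files populate image.tiles instead) ...
//   FreeEXRImage(&image);
//   FreeEXRHeader(&header);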
// Loads single-part OpenEXR image from a memory.
// Application must setup `EXRHeader` with
// `ParseEXRHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header,
const unsigned char *memory,
const size_t size, const char **err);
// Loads multi-part OpenEXR image from a file.
// Application must setup `ParseEXRMultipartHeaderFromFile` before calling this
// function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromFile(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const char *filename,
const char **err);
// Loads multi-part OpenEXR image from a memory.
// Application must setup `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromMemory(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err);
// Saves multi-channel, single-frame OpenEXR image to a file.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int SaveEXRImageToFile(const EXRImage *image,
const EXRHeader *exr_header, const char *filename,
const char **err);
// Saves multi-channel, single-frame OpenEXR image to a memory.
// Image is compressed using EXRImage.compression value.
// Return the number of bytes if success.
// Return zero and will set error string in `err` when there's an
// error.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern size_t SaveEXRImageToMemory(const EXRImage *image,
const EXRHeader *exr_header,
unsigned char **memory, const char **err);
// Loads single-frame OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
const char **err);
// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns negative value and may set error string in `err` when there's an
// error
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
// const char **err);
// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
// char *filename,
// const char **err);
// For emscripten.
// Loads single-frame OpenEXR image from memory. Assume EXR image contains
// RGB(A) channels.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err);
#ifdef __cplusplus
}
#endif
#endif // TINYEXR_H_
#ifdef TINYEXR_IMPLEMENTATION
#ifndef TINYEXR_IMPLEMENTATION_DEFINED
#define TINYEXR_IMPLEMENTATION_DEFINED
#ifdef _WIN32
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h> // for UTF-8
#endif
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sstream>
// #include <iostream> // debug
#include <limits>
#include <string>
#include <vector>
#if __cplusplus > 199711L
// C++11
#include <cstdint>
#if TINYEXR_USE_THREAD
#include <atomic>
#include <thread>
#endif
#endif // __cplusplus > 199711L
#if TINYEXR_USE_OPENMP
#include <omp.h>
#endif
#if TINYEXR_USE_MINIZ
#else
// Issue #46. Please include your own zlib-compatible API header before
// including `tinyexr.h`
//#include "zlib.h"
#endif
#if TINYEXR_USE_ZFP
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Weverything"
#endif
#include "zfp.h"
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
namespace tinyexr {
#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type pre C++11, assume it is defined
// as a compiler's extension.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#endif
typedef unsigned long long tinyexr_uint64;
typedef long long tinyexr_int64;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
#if TINYEXR_USE_MINIZ
namespace miniz {
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wunused-function"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#pragma clang diagnostic ignored "-Wundef"
#if __has_warning("-Wcomma")
#pragma clang diagnostic ignored "-Wcomma"
#endif
#if __has_warning("-Wmacro-redefined")
#pragma clang diagnostic ignored "-Wmacro-redefined"
#endif
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#if __has_warning("-Wtautological-constant-compare")
#pragma clang diagnostic ignored "-Wtautological-constant-compare"
#endif
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
/* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP
reading/writing/appending, PNG writing
See "unlicense" statement at the end of this file.
Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013
Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951:
http://www.ietf.org/rfc/rfc1951.txt
Most API's defined in miniz.c are optional. For example, to disable the
archive related functions just define
MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO
(see the list below for more macros).
* Change History
10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major
release with Zip64 support (almost there!):
- Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug
(thanks kahmyong.moon@hp.com) which could cause locate files to not find
files. This bug
would only have occurred in earlier versions if you explicitly used this
flag, OR if you used mz_zip_extract_archive_file_to_heap() or
mz_zip_add_mem_to_archive_file_in_place()
(which used this flag). If you can't switch to v1.15 but want to fix
this bug, just remove the uses of this flag from both helper funcs (and of
course don't use the flag).
- Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when
pUser_read_buf is not NULL and compressed size is > uncompressed size
- Fixing mz_zip_reader_extract_*() funcs so they don't try to extract
compressed data from directory entries, to account for weird zipfiles which
contain zero-size compressed data on dir entries.
Hopefully this fix won't cause any issues on weird zip archives,
because it assumes the low 16-bits of zip external attributes are DOS
attributes (which I believe they always are in practice).
- Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the
internal attributes, just the filename and external attributes
- mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed
- Added cmake support for Linux builds which builds all the examples,
tested with clang v3.3 and gcc v4.6.
- Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti
- Merged MZ_FORCEINLINE fix from hdeanclark
- Fix <time.h> include before config #ifdef, thanks emil.brink
- Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping
(super useful for OpenGL apps), and explicit control over the compression
level (so you can
set it to 1 for real-time compression).
- Merged in some compiler fixes from paulharris's github repro.
- Retested this build under Windows (VS 2010, including static analysis),
tcc 0.9.26, gcc v4.6 and clang v3.3.
- Added example6.c, which dumps an image of the mandelbrot set to a PNG
file.
- Modified example2 to help test the
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more.
- In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix
possible src file fclose() leak if alignment bytes+local header file write
failed
- In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader():
Was pushing the wrong central dir header offset, appears harmless in this
release, but it became a problem in the zip64 branch
5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE,
#include <time.h> (thanks fermtect).
5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix
mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit.
- Temporarily/locally slammed in "typedef unsigned long mz_ulong" and
re-ran a randomized regression test on ~500k files.
- Eliminated a bunch of warnings when compiling with GCC 32-bit/64.
- Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze
(static analysis) option and fixed all warnings (except for the silly
"Use of the comma-operator in a tested expression.." analysis warning,
which I purposely use to work around a MSVC compiler warning).
- Created 32-bit and 64-bit Codeblocks projects/workspace. Built and
tested Linux executables. The codeblocks workspace is compatible with
Linux+Win32/x64.
- Added miniz_tester solution/project, which is a useful little app
derived from LZHAM's tester app that I use as part of the regression test.
- Ran miniz.c and tinfl.c through another series of regression testing on
~500,000 files and archives.
- Modified example5.c so it purposely disables a bunch of high-level
functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the
MINIZ_NO_STDIO bug report.)
- Fix ftell() usage in examples so they exit with an error on files which
are too large (a limitation of the examples, not miniz itself).
4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple
minor level_and_flags issues in the archive API's.
level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce
Dawson <bruced@valvesoftware.com> for the feedback/bug report.
5/28/11 v1.11 - Added statement from unlicense.org
5/27/11 v1.10 - Substantial compressor optimizations:
- Level 1 is now ~4x faster than before. The L1 compressor's throughput
now varies between 70-110MB/sec. on a
- Core i7 (actual throughput varies depending on the type of data, and x64
vs. x86).
- Improved baseline L2-L9 compression perf. Also, greatly improved
compression perf. issues on some file types.
- Refactored the compression code for better readability and
maintainability.
- Added level 10 compression level (L10 has slightly better ratio than
level 9, but could have a potentially large
drop in throughput on some files).
5/15/11 v1.09 - Initial stable release.
* Low-level Deflate/Inflate implementation notes:
Compression: Use the "tdefl" API's. The compressor supports raw, static,
and dynamic blocks, lazy or
greedy parsing, match length filtering, RLE-only, and Huffman-only streams.
It performs and compresses
approximately as well as zlib.
Decompression: Use the "tinfl" API's. The entire decompressor is
implemented as a single function
coroutine: see tinfl_decompress(). It supports decompression into a 32KB
(or larger power of 2) wrapping buffer, or into a memory
block large enough to hold the entire file.
The low-level tdefl/tinfl API's do not make any use of dynamic memory
allocation.
* zlib-style API notes:
miniz.c implements a fairly large subset of zlib. There's enough
functionality present for it to be a drop-in
zlib replacement in many apps:
The z_stream struct, optional memory allocation callbacks
deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound
inflateInit/inflateInit2/inflate/inflateEnd
compress, compress2, compressBound, uncompress
CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly
routines.
Supports raw deflate streams or standard zlib streams with adler-32
checking.
Limitations:
The callback API's are not implemented yet. No support for gzip headers or
zlib static dictionaries.
I've tried to closely emulate zlib's various flavors of stream flushing
and return status codes, but
there are no guarantees that miniz.c pulls this off perfectly.
* PNG writing: See the tdefl_write_image_to_png_file_in_memory() function,
originally written by
Alex Evans. Supports 1-4 bytes/pixel images.
* ZIP archive API notes:
The ZIP archive API's were designed with simplicity and efficiency in
mind, with just enough abstraction to
get the job done with minimal fuss. There are simple API's to retrieve file
information, read files from
existing archives, create new archives, append new files to existing
archives, or clone archive data from
one archive to another. It supports archives located in memory or the heap,
on disk (using stdio.h),
or you can specify custom file read/write callbacks.
- Archive reading: Just call this function to read a single file from a
disk archive:
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const
char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
For more complex cases, use the "mz_zip_reader" functions. Upon opening an
archive, the entire central
directory is located and read as-is into memory, and subsequent file access
only occurs when reading individual files.
- Archives file scanning: The simple way is to use this function to scan a
loaded archive for a specific file:
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
The locate operation can optionally check file comments too, which (as one
example) can be used to identify
multiple versions of the same file in an archive. This function uses a
simple linear search through the central
directory, so it's not very fast.
Alternately, you can iterate through all the files in an archive (using
mz_zip_reader_get_num_files()) and
retrieve detailed info on each file by calling mz_zip_reader_file_stat().
- Archive creation: Use the "mz_zip_writer" functions. The ZIP writer
immediately writes compressed file data
to disk and builds an exact image of the central directory in memory. The
central directory image is written
all at once at the end of the archive file when the archive is finalized.
The archive writer can optionally align each file's local header and file
data to any power of 2 alignment,
which can be useful when the archive will be read from optical media. Also,
the writer supports placing
arbitrary data blobs at the very beginning of ZIP archives. Archives
written using either feature are still
readable by any ZIP tool.
- Archive appending: The simple way to add a single file to an archive is
to call this function:
mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename,
const char *pArchive_name,
const void *pBuf, size_t buf_size, const void *pComment, mz_uint16
comment_size, mz_uint level_and_flags);
The archive will be created if it doesn't already exist, otherwise it'll be
appended to.
Note the appending is done in-place and is not an atomic operation, so if
something goes wrong
during the operation it's possible the archive could be left without a
central directory (although the local
file headers and file data will be fine, so the archive will be
recoverable).
For more complex archive modification scenarios:
1. The safest way is to use a mz_zip_reader to read the existing archive,
cloning only those bits you want to
preserve into a new archive using the
mz_zip_writer_add_from_zip_reader() function (which compiles the
compressed file data as-is). When you're done, delete the old archive and
rename the newly written archive, and
you're done. This is safe but requires a bunch of temporary disk space or
heap memory.
2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using
mz_zip_writer_init_from_reader(),
append new files as needed, then finalize the archive which will write an
updated central directory to the
original archive. (This is basically what
mz_zip_add_mem_to_archive_file_in_place() does.) There's a
possibility that the archive's central directory could be lost with this
method if anything goes wrong, though.
- ZIP archive support limitations:
No zip64 or spanning support. Extraction functions can only handle
unencrypted, stored or deflated files.
Requires streams capable of seeking.
* This is a header file library, like stb_image.c. To get only a header file,
either cut and paste the
below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then
include miniz.c from it.
* Important: For best perf. be sure to customize the below macros for your
target platform:
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_LITTLE_ENDIAN 1
#define MINIZ_HAS_64BIT_REGISTERS 1
* On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before
including miniz.c to ensure miniz
uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be
able to process large files
(i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes).
*/
#ifndef MINIZ_HEADER_INCLUDED
#define MINIZ_HEADER_INCLUDED
//#include <stdlib.h>
// Defines to completely disable specific portions of miniz.c:
// If all macros here are defined the only functionality remaining will be
// CRC-32, adler-32, tinfl, and tdefl.
// Define MINIZ_NO_STDIO to disable all usage and any functions which rely on
// stdio for file I/O.
//#define MINIZ_NO_STDIO
// If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able
// to get the current time, or
// get/set file times, and the C run-time funcs that get/set times won't be
// called.
// The current downside is the times written to your archives will be from 1979.
#define MINIZ_NO_TIME
// Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's.
#define MINIZ_NO_ARCHIVE_APIS
// Define MINIZ_NO_ARCHIVE_APIS to disable all writing related ZIP archive
// API's.
//#define MINIZ_NO_ARCHIVE_WRITING_APIS
// Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression
// API's.
//#define MINIZ_NO_ZLIB_APIS
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAME to disable zlib names, to prevent
// conflicts against stock zlib.
//#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES
// Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
// Note if MINIZ_NO_MALLOC is defined then the user must always provide custom
// user alloc/free/realloc
// callbacks to the zlib and archive API's, and a few stand-alone helper API's
// which don't provide custom user
// functions (such as tdefl_compress_mem_to_heap() and
// tinfl_decompress_mem_to_heap()) won't work.
//#define MINIZ_NO_MALLOC
#if defined(__TINYC__) && (defined(__linux) || defined(__linux__))
// TODO: Work around "error: include file 'sys\utime.h' when compiling with tcc
// on Linux
#define MINIZ_NO_TIME
#endif
#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS)
//#include <time.h>
#endif
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__i386) || defined(__i486__) || defined(__i486) || \
defined(i386) || defined(__ia64__) || defined(__x86_64__)
// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
#define MINIZ_X86_OR_X64_CPU 1
#endif
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#if MINIZ_X86_OR_X64_CPU
// Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient
// integer loads and stores from unaligned addresses.
//#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \
0 // disable to suppress compiler warnings
#endif
#if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \
defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \
defined(__x86_64__)
// Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are
// reasonably fast (and don't involve compiler generated calls to helper
// functions).
#define MINIZ_HAS_64BIT_REGISTERS 1
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API Definitions.
// For more compatibility with zlib, miniz.c uses unsigned long for some
// parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits!
typedef unsigned long mz_ulong;
// mz_free() internally uses the MZ_FREE() macro (which by default calls free()
// unless you've modified the MZ_MALLOC macro) to release a block allocated from
// the heap.
void mz_free(void *p);
#define MZ_ADLER32_INIT (1)
// mz_adler32() returns the initial adler-32 value to use when called with
// ptr==NULL.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);
#define MZ_CRC32_INIT (0)
// mz_crc32() returns the initial CRC-32 value to use when called with
// ptr==NULL.
mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);
// Compression strategies.
enum {
MZ_DEFAULT_STRATEGY = 0,
MZ_FILTERED = 1,
MZ_HUFFMAN_ONLY = 2,
MZ_RLE = 3,
MZ_FIXED = 4
};
// Method
#define MZ_DEFLATED 8
#ifndef MINIZ_NO_ZLIB_APIS
// Heap allocation callbacks.
// Note that mz_alloc_func parameter types purposely differ from zlib's:
// items/size is size_t, not unsigned long.
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
typedef void (*mz_free_func)(void *opaque, void *address);
typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items,
size_t size);
#define MZ_VERSION "9.1.15"
#define MZ_VERNUM 0x91F0
#define MZ_VER_MAJOR 9
#define MZ_VER_MINOR 1
#define MZ_VER_REVISION 15
#define MZ_VER_SUBREVISION 0
// Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The
// other values are for advanced use (refer to the zlib docs).
enum {
MZ_NO_FLUSH = 0,
MZ_PARTIAL_FLUSH = 1,
MZ_SYNC_FLUSH = 2,
MZ_FULL_FLUSH = 3,
MZ_FINISH = 4,
MZ_BLOCK = 5
};
// Return status codes. MZ_PARAM_ERROR is non-standard.
enum {
MZ_OK = 0,
MZ_STREAM_END = 1,
MZ_NEED_DICT = 2,
MZ_ERRNO = -1,
MZ_STREAM_ERROR = -2,
MZ_DATA_ERROR = -3,
MZ_MEM_ERROR = -4,
MZ_BUF_ERROR = -5,
MZ_VERSION_ERROR = -6,
MZ_PARAM_ERROR = -10000
};
// Compression levels: 0-9 are the standard zlib-style levels, 10 is best
// possible compression (not zlib compatible, and may be very slow),
// MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL.
enum {
MZ_NO_COMPRESSION = 0,
MZ_BEST_SPEED = 1,
MZ_BEST_COMPRESSION = 9,
MZ_UBER_COMPRESSION = 10,
MZ_DEFAULT_LEVEL = 6,
MZ_DEFAULT_COMPRESSION = -1
};
// Window bits
#define MZ_DEFAULT_WINDOW_BITS 15
struct mz_internal_state;
// Compression/decompression stream struct.
typedef struct mz_stream_s {
const unsigned char *next_in; // pointer to next byte to read
unsigned int avail_in; // number of bytes available at next_in
mz_ulong total_in; // total number of bytes consumed so far
unsigned char *next_out; // pointer to next byte to write
unsigned int avail_out; // number of bytes that can be written to next_out
mz_ulong total_out; // total number of bytes produced so far
char *msg; // error msg (unused)
struct mz_internal_state *state; // internal state, allocated by zalloc/zfree
mz_alloc_func
zalloc; // optional heap allocation function (defaults to malloc)
mz_free_func zfree; // optional heap free function (defaults to free)
void *opaque; // heap alloc function user pointer
int data_type; // data_type (unused)
mz_ulong adler; // adler32 of the source or uncompressed data
mz_ulong reserved; // not used
} mz_stream;
typedef mz_stream *mz_streamp;
// Returns the version string of miniz.c.
const char *mz_version(void);
// mz_deflateInit() initializes a compressor with default options:
// Parameters:
// pStream must point to an initialized mz_stream struct.
// level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION].
// level 1 enables a specially optimized compression function that's been
// optimized purely for performance, not ratio.
// (This special func. is currently only enabled when
// MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.)
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if the input parameters are bogus.
// MZ_MEM_ERROR on out of memory.
int mz_deflateInit(mz_streamp pStream, int level);
// mz_deflateInit2() is like mz_deflate(), except with more control:
// Additional parameters:
// method must be MZ_DEFLATED
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with
// zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no
// header or footer)
// mem_level must be between [1, 9] (it's checked but ignored by miniz.c)
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
int mem_level, int strategy);
// Quickly resets a compressor without having to reallocate anything. Same as
// calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2().
int mz_deflateReset(mz_streamp pStream);
// mz_deflate() compresses the input to output, consuming as much of the input
// and producing as much output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or
// MZ_FINISH.
// Return values:
// MZ_OK on success (when flushing, or if more input is needed but not
// available, and/or there's more output to be written but the output buffer
// is full).
// MZ_STREAM_END if all input has been consumed and all output bytes have been
// written. Don't call mz_deflate() on the stream anymore.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input and/or
// output buffers are empty. (Fill up the input buffer or free up some output
// space and try again.)
int mz_deflate(mz_streamp pStream, int flush);
// mz_deflateEnd() deinitializes a compressor:
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
int mz_deflateEnd(mz_streamp pStream);
// mz_deflateBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by deflate(), assuming flush is set to only
// MZ_NO_FLUSH or MZ_FINISH.
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len);
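// A minimal streaming-compression sketch of the mz_deflateInit()/mz_deflate()/
// mz_deflateEnd() sequence described above. The helper name example_deflate_buf
// is an assumption (not part of miniz) and <string.h> is assumed to be
// available; the block is compiled out with #if 0 so it serves only as
// documentation.
#if 0
static int example_deflate_buf(const unsigned char *src, mz_ulong src_len,
                               unsigned char *dst, mz_ulong dst_cap,
                               mz_ulong *dst_len) {
  mz_stream strm;
  int status;
  memset(&strm, 0, sizeof(strm));  // zalloc/zfree/opaque default to malloc/free
  status = mz_deflateInit(&strm, MZ_DEFAULT_LEVEL);
  if (status != MZ_OK) return status;
  strm.next_in = src;
  strm.avail_in = (unsigned int)src_len;
  strm.next_out = dst;
  strm.avail_out = (unsigned int)dst_cap;
  // Single MZ_FINISH call: dst_cap should be >= mz_deflateBound(&strm, src_len).
  status = mz_deflate(&strm, MZ_FINISH);
  if (status != MZ_STREAM_END) {
    mz_deflateEnd(&strm);
    return (status == MZ_OK) ? MZ_BUF_ERROR : status;
  }
  *dst_len = strm.total_out;
  return mz_deflateEnd(&strm);
}
#endif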
// Single-call compression functions mz_compress() and mz_compress2():
// Returns MZ_OK on success, or one of the error codes from mz_deflate() on
// failure.
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len, int level);
// mz_compressBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by calling mz_compress().
mz_ulong mz_compressBound(mz_ulong source_len);
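// A minimal one-shot sketch of mz_compressBound()/mz_compress2(). The helper
// name example_compress_once is an assumption; compiled out with #if 0.
#if 0
static int example_compress_once(const unsigned char *src, mz_ulong src_len,
                                 unsigned char *dst, mz_ulong dst_cap,
                                 mz_ulong *dst_len) {
  if (dst_cap < mz_compressBound(src_len)) return MZ_BUF_ERROR;  // conservative bound
  *dst_len = dst_cap;  // on entry: capacity; on return: bytes written
  return mz_compress2(dst, dst_len, src, src_len, MZ_BEST_COMPRESSION);
}
#endif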
// Initializes a decompressor.
int mz_inflateInit(mz_streamp pStream);
// mz_inflateInit2() is like mz_inflateInit() with an additional option that
// controls the window size and whether or not the stream has been wrapped with
// a zlib header/footer:
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or
// -MZ_DEFAULT_WINDOW_BITS (raw deflate).
int mz_inflateInit2(mz_streamp pStream, int window_bits);
// Decompresses the input stream to the output, consuming only as much of the
// input as needed, and writing as much to the output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH.
// On the first call, if flush is MZ_FINISH it's assumed the input and output
// buffers are both sized large enough to decompress the entire stream in a
// single call (this is slightly faster).
// MZ_FINISH implies that there are no more source bytes available beside
// what's already in the input buffer, and that the output buffer is large
// enough to hold the rest of the decompressed data.
// Return values:
// MZ_OK on success. Either more input is needed but not available, and/or
// there's more output to be written but the output buffer is full.
// MZ_STREAM_END if all needed input has been consumed and all output bytes
// have been written. For zlib streams, the adler-32 of the decompressed data
// has also been verified.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_DATA_ERROR if the deflate stream is invalid.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input buffer is
// empty but the inflater needs more input to continue, or if the output
// buffer is not large enough. Call mz_inflate() again
// with more input data, or with more room in the output buffer (except when
// using single call decompression, described above).
int mz_inflate(mz_streamp pStream, int flush);
// Deinitializes a decompressor.
int mz_inflateEnd(mz_streamp pStream);
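// A minimal decompression sketch using mz_inflateInit()/mz_inflate()/
// mz_inflateEnd() in single-call mode (first call with MZ_FINISH, buffers sized
// up front, zlib-wrapped input). The helper name example_inflate_buf is an
// assumption and <string.h> is assumed to be available; compiled out with #if 0.
#if 0
static int example_inflate_buf(const unsigned char *comp, mz_ulong comp_len,
                               unsigned char *dst, mz_ulong dst_cap,
                               mz_ulong *dst_len) {
  mz_stream strm;
  int status;
  memset(&strm, 0, sizeof(strm));
  status = mz_inflateInit(&strm);  // expects a zlib header/adler-32 footer
  if (status != MZ_OK) return status;
  strm.next_in = comp;
  strm.avail_in = (unsigned int)comp_len;
  strm.next_out = dst;
  strm.avail_out = (unsigned int)dst_cap;  // must hold the entire decompressed stream
  status = mz_inflate(&strm, MZ_FINISH);
  if (status != MZ_STREAM_END) {
    mz_inflateEnd(&strm);
    return status;
  }
  *dst_len = strm.total_out;
  return mz_inflateEnd(&strm);
}
#endif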
// Single-call decompression.
// Returns MZ_OK on success, or one of the error codes from mz_inflate() on
// failure.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
// Returns a string description of the specified error code, or NULL if the
// error code is invalid.
const char *mz_error(int err);
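// A minimal one-shot sketch of mz_uncompress() with mz_error() for reporting.
// The helper name example_uncompress_once is an assumption; it presumes the
// caller already knows the uncompressed size. Compiled out with #if 0.
#if 0
static const char *example_uncompress_once(const unsigned char *comp,
                                           mz_ulong comp_len,
                                           unsigned char *dst,
                                           mz_ulong uncomp_len) {
  mz_ulong dst_len = uncomp_len;  // on entry: capacity; on return: bytes written
  int status = mz_uncompress(dst, &dst_len, comp, comp_len);
  return (status == MZ_OK) ? NULL : mz_error(status);  // NULL means success
}
#endif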
// Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used
// as a drop-in replacement for the subset of zlib that miniz.c supports.
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you
// use zlib in the same project.
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
typedef unsigned char Byte;
typedef unsigned int uInt;
typedef mz_ulong uLong;
typedef Byte Bytef;
typedef uInt uIntf;
typedef char charf;
typedef int intf;
typedef void *voidpf;
typedef uLong uLongf;
typedef void *voidp;
typedef void *const voidpc;
#define Z_NULL 0
#define Z_NO_FLUSH MZ_NO_FLUSH
#define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH
#define Z_SYNC_FLUSH MZ_SYNC_FLUSH
#define Z_FULL_FLUSH MZ_FULL_FLUSH
#define Z_FINISH MZ_FINISH
#define Z_BLOCK MZ_BLOCK
#define Z_OK MZ_OK
#define Z_STREAM_END MZ_STREAM_END
#define Z_NEED_DICT MZ_NEED_DICT
#define Z_ERRNO MZ_ERRNO
#define Z_STREAM_ERROR MZ_STREAM_ERROR
#define Z_DATA_ERROR MZ_DATA_ERROR
#define Z_MEM_ERROR MZ_MEM_ERROR
#define Z_BUF_ERROR MZ_BUF_ERROR
#define Z_VERSION_ERROR MZ_VERSION_ERROR
#define Z_PARAM_ERROR MZ_PARAM_ERROR
#define Z_NO_COMPRESSION MZ_NO_COMPRESSION
#define Z_BEST_SPEED MZ_BEST_SPEED
#define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION
#define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION
#define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY
#define Z_FILTERED MZ_FILTERED
#define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY
#define Z_RLE MZ_RLE
#define Z_FIXED MZ_FIXED
#define Z_DEFLATED MZ_DEFLATED
#define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS
#define alloc_func mz_alloc_func
#define free_func mz_free_func
#define internal_state mz_internal_state
#define z_stream mz_stream
#define deflateInit mz_deflateInit
#define deflateInit2 mz_deflateInit2
#define deflateReset mz_deflateReset
#define deflate mz_deflate
#define deflateEnd mz_deflateEnd
#define deflateBound mz_deflateBound
#define compress mz_compress
#define compress2 mz_compress2
#define compressBound mz_compressBound
#define inflateInit mz_inflateInit
#define inflateInit2 mz_inflateInit2
#define inflate mz_inflate
#define inflateEnd mz_inflateEnd
#define uncompress mz_uncompress
#define crc32 mz_crc32
#define adler32 mz_adler32
#define MAX_WBITS 15
#define MAX_MEM_LEVEL 9
#define zError mz_error
#define ZLIB_VERSION MZ_VERSION
#define ZLIB_VERNUM MZ_VERNUM
#define ZLIB_VER_MAJOR MZ_VER_MAJOR
#define ZLIB_VER_MINOR MZ_VER_MINOR
#define ZLIB_VER_REVISION MZ_VER_REVISION
#define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION
#define zlibVersion mz_version
#define zlib_version mz_version()
#endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Types and macros
typedef unsigned char mz_uint8;
typedef signed short mz_int16;
typedef unsigned short mz_uint16;
typedef unsigned int mz_uint32;
typedef unsigned int mz_uint;
typedef long long mz_int64;
typedef unsigned long long mz_uint64;
typedef int mz_bool;
#define MZ_FALSE (0)
#define MZ_TRUE (1)
// An attempt to work around MSVC's spammy "warning C4127: conditional
// expression is constant" message.
#ifdef _MSC_VER
#define MZ_MACRO_END while (0, 0)
#else
#define MZ_MACRO_END while (0)
#endif
// ------------------- ZIP archive reading/writing
#ifndef MINIZ_NO_ARCHIVE_APIS
enum {
MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024,
MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260,
MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256
};
typedef struct {
mz_uint32 m_file_index;
mz_uint32 m_central_dir_ofs;
mz_uint16 m_version_made_by;
mz_uint16 m_version_needed;
mz_uint16 m_bit_flag;
mz_uint16 m_method;
#ifndef MINIZ_NO_TIME
time_t m_time;
#endif
mz_uint32 m_crc32;
mz_uint64 m_comp_size;
mz_uint64 m_uncomp_size;
mz_uint16 m_internal_attr;
mz_uint32 m_external_attr;
mz_uint64 m_local_header_ofs;
mz_uint32 m_comment_size;
char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE];
char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE];
} mz_zip_archive_file_stat;
typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n);
typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n);
struct mz_zip_internal_state_tag;
typedef struct mz_zip_internal_state_tag mz_zip_internal_state;
typedef enum {
MZ_ZIP_MODE_INVALID = 0,
MZ_ZIP_MODE_READING = 1,
MZ_ZIP_MODE_WRITING = 2,
MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3
} mz_zip_mode;
typedef struct mz_zip_archive_tag {
mz_uint64 m_archive_size;
mz_uint64 m_central_directory_file_ofs;
mz_uint m_total_files;
mz_zip_mode m_zip_mode;
mz_uint m_file_offset_alignment;
mz_alloc_func m_pAlloc;
mz_free_func m_pFree;
mz_realloc_func m_pRealloc;
void *m_pAlloc_opaque;
mz_file_read_func m_pRead;
mz_file_write_func m_pWrite;
void *m_pIO_opaque;
mz_zip_internal_state *m_pState;
} mz_zip_archive;
typedef enum {
MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100,
MZ_ZIP_FLAG_IGNORE_PATH = 0x0200,
MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400,
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800
} mz_zip_flags;
// ZIP archive reading
// Inits a ZIP archive reader.
// These functions read and validate the archive's central directory.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
mz_uint32 flags);
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
size_t size, mz_uint32 flags);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint32 flags);
#endif
// Returns the total number of files in the archive.
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip);
// Returns detailed information about an archive file entry.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
mz_zip_archive_file_stat *pStat);
// Determines if an archive file entry is a directory entry.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
mz_uint file_index);
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
mz_uint file_index);
// Retrieves the filename of an archive file entry.
// Returns the number of bytes written to pFilename, or if filename_buf_size is
// 0 this function returns the number of bytes needed to fully store the
// filename.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
char *pFilename, mz_uint filename_buf_size);
// Attempts to locate a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
// Extracts an archive file to a memory buffer using no memory allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
mz_uint file_index, void *pBuf,
size_t buf_size, mz_uint flags,
void *pUser_read_buf,
size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);
// Extracts an archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
void *pBuf, size_t buf_size,
mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
const char *pFilename, void *pBuf,
size_t buf_size, mz_uint flags);
// Extracts an archive file to a dynamically allocated heap buffer.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
const char *pFilename, size_t *pSize,
mz_uint flags);
// Extracts an archive file using a callback function to output the file's data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
const char *pFilename,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
#ifndef MINIZ_NO_STDIO
// Extracts an archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
const char *pDst_filename, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
const char *pArchive_filename,
const char *pDst_filename,
mz_uint flags);
#endif
// Ends archive reading, freeing all allocations, and closing the input archive
// file if mz_zip_reader_init_file() was used.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip);
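// A minimal reading sketch tying the above together: open an archive, walk the
// central directory, and extract each regular file to a heap block. The helper
// name example_zip_list_extract and the file name "archive.zip" are
// assumptions, and <string.h> plus stdio support (MINIZ_NO_STDIO not defined)
// are assumed; compiled out with #if 0.
#if 0
static mz_bool example_zip_list_extract(void) {
  mz_zip_archive zip;
  mz_uint i, n;
  memset(&zip, 0, sizeof(zip));  // the struct must be zeroed before init
  if (!mz_zip_reader_init_file(&zip, "archive.zip", 0)) return MZ_FALSE;
  n = mz_zip_reader_get_num_files(&zip);
  for (i = 0; i < n; i++) {
    mz_zip_archive_file_stat st;
    size_t size = 0;
    void *p;
    if (!mz_zip_reader_file_stat(&zip, i, &st)) break;
    if (mz_zip_reader_is_file_a_directory(&zip, i)) continue;
    p = mz_zip_reader_extract_to_heap(&zip, i, &size, 0);
    if (!p) break;
    // ... use st.m_filename, p, and size here ...
    mz_free(p);  // heap blocks must be freed by the caller
  }
  mz_zip_reader_end(&zip);  // frees internal state / closes the file
  return (i == n) ? MZ_TRUE : MZ_FALSE;
}
#endif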
// ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
// Inits a ZIP archive writer.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
size_t size_to_reserve_at_beginning,
size_t initial_allocation_size);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint64 size_to_reserve_at_beginning);
#endif
// Converts a ZIP archive reader object into a writer object, to allow efficient
// in-place file appends to occur on an existing archive.
// For archives opened using mz_zip_reader_init_file, pFilename must be the
// archive's filename so it can be reopened for writing. If the file can't be
// reopened, mz_zip_reader_end() will be called.
// For archives opened using mz_zip_reader_init_mem, the memory block must be
// growable using the realloc callback (which defaults to realloc unless you've
// overridden it).
// Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's
// user provided m_pWrite function cannot be NULL.
// Note: In-place archive modification is not recommended unless you know what
// you're doing, because if execution stops or something goes wrong before
// the archive is finalized the file's central directory will be hosed.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
const char *pFilename);
// Adds the contents of a memory buffer to an archive. These functions record
// the current local time into the archive.
// To add a directory entry, call this method with an archive name ending in a
// forward slash and an empty buffer.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
const void *pBuf, size_t buf_size,
mz_uint level_and_flags);
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags, mz_uint64 uncomp_size,
mz_uint32 uncomp_crc32);
#ifndef MINIZ_NO_STDIO
// Adds the contents of a disk file to an archive. This function also records
// the disk file's modified time into the archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
const char *pSrc_filename, const void *pComment,
mz_uint16 comment_size, mz_uint level_and_flags);
#endif
// Adds a file to an archive by fully cloning the data from another archive.
// This function fully clones the source file's compressed data (no
// recompression), along with its full filename, extra data, and comment fields.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
mz_zip_archive *pSource_zip,
mz_uint file_index);
// Finalizes the archive by writing the central directory records followed by
// the end of central directory record.
// After an archive is finalized, the only valid call on the mz_zip_archive
// struct is mz_zip_writer_end().
// An archive must be manually finalized by calling this function for it to be
// valid.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip);
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
size_t *pSize);
// Ends archive writing, freeing all allocations, and closing the output file if
// mz_zip_writer_init_file() was used.
// Note for the archive to be valid, it must have been finalized before ending.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip);
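// A minimal writing sketch: create an archive, add one in-memory file, finalize,
// and end. The helper name example_zip_write and the file names used are
// assumptions, and <string.h> plus stdio support (MINIZ_NO_STDIO not defined)
// are assumed; compiled out with #if 0.
#if 0
static mz_bool example_zip_write(void) {
  static const char text[] = "hello, zip";
  mz_zip_archive zip;
  memset(&zip, 0, sizeof(zip));
  if (!mz_zip_writer_init_file(&zip, "out.zip", 0)) return MZ_FALSE;
  // level_and_flags: a compression level optionally OR'd with mz_zip_flags.
  if (!mz_zip_writer_add_mem(&zip, "greeting.txt", text, sizeof(text) - 1,
                             MZ_BEST_COMPRESSION)) {
    mz_zip_writer_end(&zip);
    return MZ_FALSE;
  }
  if (!mz_zip_writer_finalize_archive(&zip)) {  // archive is invalid until finalized
    mz_zip_writer_end(&zip);
    return MZ_FALSE;
  }
  return mz_zip_writer_end(&zip);
}
#endif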
// Misc. high-level helper functions:
// mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically)
// appends a memory blob to a ZIP archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
const char *pZip_filename, const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment, mz_uint16 comment_size,
mz_uint level_and_flags);
// Reads a single file from an archive into a heap block.
// Returns NULL on failure.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
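// A minimal helper sketch combining the two calls above: append a blob to a
// ZIP file on disk, then read it back into a heap block. The helper name
// example_zip_append_read and the file names used are assumptions; compiled
// out with #if 0.
#if 0
static mz_bool example_zip_append_read(void) {
  static const char data[] = "log line\n";
  size_t size = 0;
  void *p;
  if (!mz_zip_add_mem_to_archive_file_in_place("logs.zip", "latest.txt", data,
                                               sizeof(data) - 1, NULL, 0,
                                               MZ_DEFAULT_COMPRESSION))
    return MZ_FALSE;
  p = mz_zip_extract_archive_file_to_heap("logs.zip", "latest.txt", &size, 0);
  if (!p) return MZ_FALSE;
  // ... use p and size here ...
  mz_free(p);
  return MZ_TRUE;
}
#endif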
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
// ------------------- Low-level Decompression API Definitions
// Decompression flags used by tinfl_decompress().
// TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and
// ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the
// input is a raw deflate stream.
// TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available
// beyond the end of the supplied input buffer. If clear, the input buffer
// contains all remaining input.
// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large
// enough to hold the entire decompressed stream. If clear, the output buffer is
// at least the size of the dictionary (typically 32KB).
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the
// decompressed bytes.
enum {
TINFL_FLAG_PARSE_ZLIB_HEADER = 1,
TINFL_FLAG_HAS_MORE_INPUT = 2,
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4,
TINFL_FLAG_COMPUTE_ADLER32 = 8
};
// High level decompression functions:
// tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data
// to decompress.
// On return:
// Function returns a pointer to the decompressed data, or NULL on failure.
// *pOut_len will be set to the decompressed data's size (typically larger
// than src_buf_len for compressible input).
// The caller must call mz_free() on the returned block when it's no longer
// needed.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
// tinfl_decompress_mem_to_mem() decompresses a block in memory to another block
// in memory.
// Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes
// written on success.
#define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1))
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// tinfl_decompress_mem_to_callback() decompresses a block in memory to an
// internal 32KB buffer, and a user provided callback function will be called to
// flush the buffer.
// Returns 1 on success or 0 on failure.
typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
tinfl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
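// A minimal sketch of the heap helper above: inflate a raw deflate block that
// is fully resident in memory. The helper name example_tinfl_heap is an
// assumption; add TINFL_FLAG_PARSE_ZLIB_HEADER to the flags if the input is a
// zlib stream. Compiled out with #if 0.
#if 0
static void *example_tinfl_heap(const void *comp, size_t comp_len,
                                size_t *out_len) {
  void *p = tinfl_decompress_mem_to_heap(comp, comp_len, out_len, 0);
  // The returned block (if non-NULL) must be released with mz_free().
  return p;
}
#endif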
struct tinfl_decompressor_tag;
typedef struct tinfl_decompressor_tag tinfl_decompressor;
// Max size of LZ dictionary.
#define TINFL_LZ_DICT_SIZE 32768
// Return status.
typedef enum {
TINFL_STATUS_BAD_PARAM = -3,
TINFL_STATUS_ADLER32_MISMATCH = -2,
TINFL_STATUS_FAILED = -1,
TINFL_STATUS_DONE = 0,
TINFL_STATUS_NEEDS_MORE_INPUT = 1,
TINFL_STATUS_HAS_MORE_OUTPUT = 2
} tinfl_status;
// Initializes the decompressor to its initial state.
#define tinfl_init(r) \
do { \
(r)->m_state = 0; \
} \
MZ_MACRO_END
#define tinfl_get_adler32(r) (r)->m_check_adler32
// Main low-level decompressor coroutine function. This is the only function
// actually needed for decompression. All the other functions are just
// high-level helpers for improved usability.
// This is a universal API, i.e. it can be used as a building block to build any
// desired higher level decompression API. In the limit case, it can be called
// once per every byte input or output.
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags);
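// A minimal driver sketch for the coroutine above, assuming all input is
// already in memory (so TINFL_FLAG_HAS_MORE_INPUT is not set): decompress a raw
// deflate stream through a TINFL_LZ_DICT_SIZE ring buffer and hand each chunk
// to a caller-supplied sink. The helper name example_tinfl_drive and the sink
// callback are assumptions, and the sketch presumes the full header (including
// the tinfl_decompressor definition below) is visible; compiled out with #if 0.
#if 0
static int example_tinfl_drive(const mz_uint8 *in, size_t in_len,
                               int (*sink)(const void *buf, size_t len)) {
  tinfl_decompressor decomp;
  mz_uint8 dict[TINFL_LZ_DICT_SIZE];  // output ring; must be dictionary sized
  size_t dict_ofs = 0, in_ofs = 0;
  tinfl_status status;
  tinfl_init(&decomp);
  do {
    size_t in_bytes = in_len - in_ofs;
    size_t out_bytes = TINFL_LZ_DICT_SIZE - dict_ofs;  // room left before wrap
    status = tinfl_decompress(&decomp, in + in_ofs, &in_bytes, dict,
                              dict + dict_ofs, &out_bytes, 0);
    in_ofs += in_bytes;
    if (out_bytes && !sink(dict + dict_ofs, out_bytes)) return 0;
    dict_ofs = (dict_ofs + out_bytes) & (TINFL_LZ_DICT_SIZE - 1);
  } while (status == TINFL_STATUS_HAS_MORE_OUTPUT);
  return status == TINFL_STATUS_DONE;
}
#endif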
// Internal/private bits follow.
enum {
TINFL_MAX_HUFF_TABLES = 3,
TINFL_MAX_HUFF_SYMBOLS_0 = 288,
TINFL_MAX_HUFF_SYMBOLS_1 = 32,
TINFL_MAX_HUFF_SYMBOLS_2 = 19,
TINFL_FAST_LOOKUP_BITS = 10,
TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
};
typedef struct {
mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE],
m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
} tinfl_huff_table;
#if MINIZ_HAS_64BIT_REGISTERS
#define TINFL_USE_64BIT_BITBUF 1
#endif
#if TINFL_USE_64BIT_BITBUF
typedef mz_uint64 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (64)
#else
typedef mz_uint32 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (32)
#endif
struct tinfl_decompressor_tag {
mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type,
m_check_adler32, m_dist, m_counter, m_num_extra,
m_table_sizes[TINFL_MAX_HUFF_TABLES];
tinfl_bit_buf_t m_bit_buf;
size_t m_dist_from_out_buf_start;
tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
mz_uint8 m_raw_header[4],
m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
};
// ------------------- Low-level Compression API Definitions
// Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly
// slower, and raw/dynamic blocks will be output more frequently).
#define TDEFL_LESS_MEMORY 0
// tdefl_init() compression flags logically OR'd together (low 12 bits contain
// the max. number of probes per dictionary search):
// TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes
// per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap
// compression), 4095=Huffman+LZ (slowest/best compression).
enum {
TDEFL_HUFFMAN_ONLY = 0,
TDEFL_DEFAULT_MAX_PROBES = 128,
TDEFL_MAX_PROBES_MASK = 0xFFF
};
// TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before
// the deflate data, and the Adler-32 of the source data at the end. Otherwise,
// you'll get raw deflate data.
// TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even
// when not writing zlib headers).
// TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more
// efficient lazy parsing.
// TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's
// initialization time to the minimum, but the output may vary from run to run
// given the same input (depending on the contents of memory).
// TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1)
// TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled.
// TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables.
// TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks.
// The low 12 bits are reserved to control the max # of hash probes per
// dictionary lookup (see TDEFL_MAX_PROBES_MASK).
enum {
TDEFL_WRITE_ZLIB_HEADER = 0x01000,
TDEFL_COMPUTE_ADLER32 = 0x02000,
TDEFL_GREEDY_PARSING_FLAG = 0x04000,
TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000,
TDEFL_RLE_MATCHES = 0x10000,
TDEFL_FILTER_MATCHES = 0x20000,
TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000,
TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000
};
// High level compression functions:
// tdefl_compress_mem_to_heap() compresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of source block to compress.
// flags: The max match finder probes (default is 128) logically OR'd against
// the above flags. Higher probes are slower but improve compression.
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pOut_len will be set to the compressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must free() the returned block when it's no longer needed.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
// tdefl_compress_mem_to_mem() compresses a block in memory to another block in
// memory.
// Returns 0 on failure.
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
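// A minimal one-shot sketch of tdefl_compress_mem_to_heap(): zlib-wrapped
// output with the default number of probes. The helper name example_tdefl_heap
// is an assumption; compiled out with #if 0.
#if 0
static void *example_tdefl_heap(const void *src, size_t src_len,
                                size_t *out_len) {
  int flags = TDEFL_DEFAULT_MAX_PROBES | TDEFL_WRITE_ZLIB_HEADER;
  void *p = tdefl_compress_mem_to_heap(src, src_len, out_len, flags);
  // The returned block (if non-NULL) must be released with free().
  return p;
}
#endif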
// Compresses an image to a compressed PNG file in memory.
// On entry:
// pImage, w, h, and num_chans describe the image to compress. num_chans may be
// 1, 2, 3, or 4.
// The image pitch in bytes per scanline will be w*num_chans. The leftmost
// pixel on the top scanline is stored first in memory.
// level may range from [0,10]; use MZ_NO_COMPRESSION, MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc., or MZ_DEFAULT_LEVEL as a reasonable default.
// If flip is true, the image will be flipped on the Y axis (useful for OpenGL
// apps).
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pLen_out will be set to the size of the PNG image file.
// The caller must mz_free() the returned heap block (which will typically be
// larger than *pLen_out) when it's no longer needed.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
int h, int num_chans,
size_t *pLen_out,
mz_uint level, mz_bool flip);
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out);
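// A minimal sketch of the PNG helper above for a tightly packed 8-bit RGB
// image (pitch = w * 3). The helper name example_rgb_to_png is an assumption;
// compiled out with #if 0.
#if 0
static void *example_rgb_to_png(const void *rgb, int w, int h,
                                size_t *png_len) {
  void *p = tdefl_write_image_to_png_file_in_memory_ex(
      rgb, w, h, 3 /* num_chans */, png_len, MZ_DEFAULT_LEVEL,
      MZ_FALSE /* no Y flip */);
  // The returned block (if non-NULL) must be released with mz_free().
  return p;
}
#endif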
// Output stream interface. The compressor uses this interface to write
// compressed data. It'll typically be called with TDEFL_OUT_BUF_SIZE bytes at a
// time.
typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len,
void *pUser);
// tdefl_compress_mem_to_output() compresses a block to an output stream. The
// above helpers use this function internally.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
enum {
TDEFL_MAX_HUFF_TABLES = 3,
TDEFL_MAX_HUFF_SYMBOLS_0 = 288,
TDEFL_MAX_HUFF_SYMBOLS_1 = 32,
TDEFL_MAX_HUFF_SYMBOLS_2 = 19,
TDEFL_LZ_DICT_SIZE = 32768,
TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
TDEFL_MIN_MATCH_LEN = 3,
TDEFL_MAX_MATCH_LEN = 258
};
// TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed
// output block (using static/fixed Huffman codes).
#if TDEFL_LESS_MEMORY
enum {
TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 12,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#else
enum {
TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 15,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#endif
// The low-level tdefl functions below may be used directly if the above helper
// functions aren't flexible enough. The low-level functions don't make any heap
// allocations, unlike the above helper functions.
typedef enum {
TDEFL_STATUS_BAD_PARAM = -2,
TDEFL_STATUS_PUT_BUF_FAILED = -1,
TDEFL_STATUS_OKAY = 0,
TDEFL_STATUS_DONE = 1
} tdefl_status;
// Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
typedef enum {
TDEFL_NO_FLUSH = 0,
TDEFL_SYNC_FLUSH = 2,
TDEFL_FULL_FLUSH = 3,
TDEFL_FINISH = 4
} tdefl_flush;
// tdefl's compression state structure.
typedef struct {
tdefl_put_buf_func_ptr m_pPut_buf_func;
void *m_pPut_buf_user;
mz_uint m_flags, m_max_probes[2];
int m_greedy_parsing;
mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in,
m_bit_buffer;
mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
m_wants_to_finish;
tdefl_status m_prev_return_status;
const void *m_pIn_buf;
void *m_pOut_buf;
size_t *m_pIn_buf_size, *m_pOut_buf_size;
tdefl_flush m_flush;
const mz_uint8 *m_pSrc;
size_t m_src_buf_left, m_out_buf_ofs;
mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;
// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl APIs do not
// dynamically allocate memory.
// pPut_buf_func: If non-NULL, output data will be supplied to the specified
// callback. In this case, the user should call the tdefl_compress_buffer() API
// for compression.
// If pPut_buf_func is NULL the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
size_t *pIn_buf_size, void *pOut_buf,
size_t *pOut_buf_size, tdefl_flush flush);
// tdefl_compress_buffer() is only usable when tdefl_init() is called with a
// non-NULL tdefl_put_buf_func_ptr.
// tdefl_compress_buffer() always consumes the entire input buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
size_t in_buf_size, tdefl_flush flush);
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);
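// A minimal low-level driver sketch for tdefl_init()/tdefl_compress() with a
// NULL pPut_buf_func (so tdefl_compress() is driven directly). The helper name
// example_tdefl_once is an assumption; note that tdefl_compressor is large
// (hundreds of KB), so it is kept off the stack here. Compiled out with #if 0.
#if 0
static mz_bool example_tdefl_once(const void *src, size_t src_len,
                                  void *dst, size_t dst_cap, size_t *dst_len) {
  static tdefl_compressor comp;  // not reentrant; heap-allocate if needed
  size_t in_sz = src_len, out_sz = dst_cap;
  if (tdefl_init(&comp, NULL, NULL,
                 TDEFL_DEFAULT_MAX_PROBES | TDEFL_WRITE_ZLIB_HEADER) !=
      TDEFL_STATUS_OKAY)
    return MZ_FALSE;
  if (tdefl_compress(&comp, src, &in_sz, dst, &out_sz, TDEFL_FINISH) !=
      TDEFL_STATUS_DONE)
    return MZ_FALSE;
  *dst_len = out_sz;
  return MZ_TRUE;
}
#endif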
// tdefl_create_comp_flags_from_zip_params() can't be used if MINIZ_NO_ZLIB_APIS
// is defined, because it uses some of the zlib-style macros.
#ifndef MINIZ_NO_ZLIB_APIS
// Create tdefl_compress() flags given zlib-style compression parameters.
// level may range from [0,10] (where 10 is absolute max compression, but may be
// much slower on some files)
// window_bits may be -15 (raw deflate) or 15 (zlib)
// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY,
// MZ_RLE, or MZ_FIXED
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
int strategy);
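// A minimal translation sketch: zlib-style parameters (level 6, zlib wrapper,
// default strategy) to tdefl flags. The helper name example_zlib_flags is an
// assumption; compiled out with #if 0.
#if 0
static mz_uint example_zlib_flags(void) {
  return TDEFL_COMPUTE_ADLER32 |
         tdefl_create_comp_flags_from_zip_params(6, MZ_DEFAULT_WINDOW_BITS,
                                                 MZ_DEFAULT_STRATEGY);
}
#endif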
#endif // #ifndef MINIZ_NO_ZLIB_APIS
#ifdef __cplusplus
}
#endif
#endif // MINIZ_HEADER_INCLUDED
// ------------------- End of Header: Implementation follows. (If you only want
// the header, define MINIZ_HEADER_FILE_ONLY.)
#ifndef MINIZ_HEADER_FILE_ONLY
typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];
//#include <assert.h>
//#include <string.h>
#define MZ_ASSERT(x) assert(x)
#ifdef MINIZ_NO_MALLOC
#define MZ_MALLOC(x) NULL
#define MZ_FREE(x) (void)x, ((void)0)
#define MZ_REALLOC(p, x) NULL
#else
#define MZ_MALLOC(x) malloc(x)
#define MZ_FREE(x) free(x)
#define MZ_REALLOC(p, x) realloc(p, x)
#endif
#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
#define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
#else
#define MZ_READ_LE16(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
#define MZ_READ_LE32(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \
((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \
((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
#endif
#ifdef _MSC_VER
#define MZ_FORCEINLINE __forceinline
#elif defined(__GNUC__)
#define MZ_FORCEINLINE inline __attribute__((__always_inline__))
#else
#define MZ_FORCEINLINE inline
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API's
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) {
mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16);
size_t block_len = buf_len % 5552;
if (!ptr) return MZ_ADLER32_INIT;
while (buf_len) {
for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
s1 += ptr[0], s2 += s1;
s1 += ptr[1], s2 += s1;
s1 += ptr[2], s2 += s1;
s1 += ptr[3], s2 += s1;
s1 += ptr[4], s2 += s1;
s1 += ptr[5], s2 += s1;
s1 += ptr[6], s2 += s1;
s1 += ptr[7], s2 += s1;
}
for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
s1 %= 65521U, s2 %= 65521U;
buf_len -= block_len;
block_len = 5552;
}
return (s2 << 16) + s1;
}
// Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C
// implementation that balances processor cache usage against speed":
// http://www.geocities.com/malbrain/
mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) {
static const mz_uint32 s_crc32[16] = {
0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4,
0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c};
mz_uint32 crcu32 = (mz_uint32)crc;
if (!ptr) return MZ_CRC32_INIT;
crcu32 = ~crcu32;
while (buf_len--) {
mz_uint8 b = *ptr++;
crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)];
crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)];
}
return ~crcu32;
}
void mz_free(void *p) { MZ_FREE(p); }
#ifndef MINIZ_NO_ZLIB_APIS
static void *def_alloc_func(void *opaque, size_t items, size_t size) {
(void)opaque, (void)items, (void)size;
return MZ_MALLOC(items * size);
}
static void def_free_func(void *opaque, void *address) {
(void)opaque, (void)address;
MZ_FREE(address);
}
// static void *def_realloc_func(void *opaque, void *address, size_t items,
// size_t size) {
// (void)opaque, (void)address, (void)items, (void)size;
// return MZ_REALLOC(address, items * size);
//}
const char *mz_version(void) { return MZ_VERSION; }
int mz_deflateInit(mz_streamp pStream, int level) {
return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9,
MZ_DEFAULT_STRATEGY);
}
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
int mem_level, int strategy) {
tdefl_compressor *pComp;
mz_uint comp_flags =
TDEFL_COMPUTE_ADLER32 |
tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);
if (!pStream) return MZ_STREAM_ERROR;
if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) ||
((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
(-window_bits != MZ_DEFAULT_WINDOW_BITS)))
return MZ_PARAM_ERROR;
pStream->data_type = 0;
pStream->adler = MZ_ADLER32_INIT;
pStream->msg = NULL;
pStream->reserved = 0;
pStream->total_in = 0;
pStream->total_out = 0;
if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
if (!pStream->zfree) pStream->zfree = def_free_func;
pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1,
sizeof(tdefl_compressor));
if (!pComp) return MZ_MEM_ERROR;
pStream->state = (struct mz_internal_state *)pComp;
if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) {
mz_deflateEnd(pStream);
return MZ_PARAM_ERROR;
}
return MZ_OK;
}
int mz_deflateReset(mz_streamp pStream) {
if ((!pStream) || (!pStream->state) || (!pStream->zalloc) ||
(!pStream->zfree))
return MZ_STREAM_ERROR;
pStream->total_in = pStream->total_out = 0;
tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL,
((tdefl_compressor *)pStream->state)->m_flags);
return MZ_OK;
}
int mz_deflate(mz_streamp pStream, int flush) {
size_t in_bytes, out_bytes;
mz_ulong orig_total_in, orig_total_out;
int mz_status = MZ_OK;
if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) ||
(!pStream->next_out))
return MZ_STREAM_ERROR;
if (!pStream->avail_out) return MZ_BUF_ERROR;
if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
if (((tdefl_compressor *)pStream->state)->m_prev_return_status ==
TDEFL_STATUS_DONE)
return (flush == MZ_FINISH) ? MZ_STREAM_END : MZ_BUF_ERROR;
orig_total_in = pStream->total_in;
orig_total_out = pStream->total_out;
for (;;) {
tdefl_status defl_status;
in_bytes = pStream->avail_in;
out_bytes = pStream->avail_out;
defl_status = tdefl_compress((tdefl_compressor *)pStream->state,
pStream->next_in, &in_bytes, pStream->next_out,
&out_bytes, (tdefl_flush)flush);
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);
pStream->next_out += (mz_uint)out_bytes;
pStream->avail_out -= (mz_uint)out_bytes;
pStream->total_out += (mz_uint)out_bytes;
if (defl_status < 0) {
mz_status = MZ_STREAM_ERROR;
break;
} else if (defl_status == TDEFL_STATUS_DONE) {
mz_status = MZ_STREAM_END;
break;
} else if (!pStream->avail_out)
break;
else if ((!pStream->avail_in) && (flush != MZ_FINISH)) {
if ((flush) || (pStream->total_in != orig_total_in) ||
(pStream->total_out != orig_total_out))
break;
return MZ_BUF_ERROR; // Can't make forward progress without some input.
}
}
return mz_status;
}
int mz_deflateEnd(mz_streamp pStream) {
if (!pStream) return MZ_STREAM_ERROR;
if (pStream->state) {
pStream->zfree(pStream->opaque, pStream->state);
pStream->state = NULL;
}
return MZ_OK;
}
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) {
(void)pStream;
  // This is really overly conservative. (And lame, but it's actually pretty
  // tricky to compute a true upper bound given the way tdefl's blocking works.)
return MZ_MAX(128 + (source_len * 110) / 100,
128 + source_len + ((source_len / (31 * 1024)) + 1) * 5);
}
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len, int level) {
int status;
mz_stream stream;
memset(&stream, 0, sizeof(stream));
// In case mz_ulong is 64-bits (argh I hate longs).
if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
stream.next_in = pSource;
stream.avail_in = (mz_uint32)source_len;
stream.next_out = pDest;
stream.avail_out = (mz_uint32)*pDest_len;
status = mz_deflateInit(&stream, level);
if (status != MZ_OK) return status;
status = mz_deflate(&stream, MZ_FINISH);
if (status != MZ_STREAM_END) {
mz_deflateEnd(&stream);
return (status == MZ_OK) ? MZ_BUF_ERROR : status;
}
*pDest_len = stream.total_out;
return mz_deflateEnd(&stream);
}
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len) {
return mz_compress2(pDest, pDest_len, pSource, source_len,
MZ_DEFAULT_COMPRESSION);
}
mz_ulong mz_compressBound(mz_ulong source_len) {
return mz_deflateBound(NULL, source_len);
}
typedef struct {
tinfl_decompressor m_decomp;
mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
int m_window_bits;
mz_uint8 m_dict[TINFL_LZ_DICT_SIZE];
tinfl_status m_last_status;
} inflate_state;
int mz_inflateInit2(mz_streamp pStream, int window_bits) {
inflate_state *pDecomp;
if (!pStream) return MZ_STREAM_ERROR;
if ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
(-window_bits != MZ_DEFAULT_WINDOW_BITS))
return MZ_PARAM_ERROR;
pStream->data_type = 0;
pStream->adler = 0;
pStream->msg = NULL;
pStream->total_in = 0;
pStream->total_out = 0;
pStream->reserved = 0;
if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
if (!pStream->zfree) pStream->zfree = def_free_func;
pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1,
sizeof(inflate_state));
if (!pDecomp) return MZ_MEM_ERROR;
pStream->state = (struct mz_internal_state *)pDecomp;
tinfl_init(&pDecomp->m_decomp);
pDecomp->m_dict_ofs = 0;
pDecomp->m_dict_avail = 0;
pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
pDecomp->m_first_call = 1;
pDecomp->m_has_flushed = 0;
pDecomp->m_window_bits = window_bits;
return MZ_OK;
}
int mz_inflateInit(mz_streamp pStream) {
return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
}
int mz_inflate(mz_streamp pStream, int flush) {
inflate_state *pState;
mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32;
size_t in_bytes, out_bytes, orig_avail_in;
tinfl_status status;
if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR;
if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH))
return MZ_STREAM_ERROR;
pState = (inflate_state *)pStream->state;
if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
orig_avail_in = pStream->avail_in;
first_call = pState->m_first_call;
pState->m_first_call = 0;
if (pState->m_last_status < 0) return MZ_DATA_ERROR;
if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR;
pState->m_has_flushed |= (flush == MZ_FINISH);
if ((flush == MZ_FINISH) && (first_call)) {
// MZ_FINISH on the first call implies that the input and output buffers are
// large enough to hold the entire compressed/decompressed file.
decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
in_bytes = pStream->avail_in;
out_bytes = pStream->avail_out;
status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes,
pStream->next_out, pStream->next_out, &out_bytes,
decomp_flags);
pState->m_last_status = status;
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tinfl_get_adler32(&pState->m_decomp);
pStream->next_out += (mz_uint)out_bytes;
pStream->avail_out -= (mz_uint)out_bytes;
pStream->total_out += (mz_uint)out_bytes;
if (status < 0)
return MZ_DATA_ERROR;
else if (status != TINFL_STATUS_DONE) {
pState->m_last_status = TINFL_STATUS_FAILED;
return MZ_BUF_ERROR;
}
return MZ_STREAM_END;
}
  // If flush != MZ_FINISH then we must assume there's more input.
if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;
if (pState->m_dict_avail) {
n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
pStream->next_out += n;
pStream->avail_out -= n;
pStream->total_out += n;
pState->m_dict_avail -= n;
pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
return ((pState->m_last_status == TINFL_STATUS_DONE) &&
(!pState->m_dict_avail))
? MZ_STREAM_END
: MZ_OK;
}
for (;;) {
in_bytes = pStream->avail_in;
out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;
status = tinfl_decompress(
&pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict,
pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
pState->m_last_status = status;
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tinfl_get_adler32(&pState->m_decomp);
pState->m_dict_avail = (mz_uint)out_bytes;
n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
pStream->next_out += n;
pStream->avail_out -= n;
pStream->total_out += n;
pState->m_dict_avail -= n;
pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
if (status < 0)
return MZ_DATA_ERROR; // Stream is corrupted (there could be some
// uncompressed data left in the output dictionary -
// oh well).
else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
return MZ_BUF_ERROR; // Signal caller that we can't make forward progress
// without supplying more input or by setting flush
// to MZ_FINISH.
else if (flush == MZ_FINISH) {
      // The output buffer MUST be large enough to hold the remaining
      // uncompressed data when flush == MZ_FINISH.
if (status == TINFL_STATUS_DONE)
return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
// status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's
// at least 1 more byte on the way. If there's no more room left in the
// output buffer then something is wrong.
else if (!pStream->avail_out)
return MZ_BUF_ERROR;
} else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) ||
(!pStream->avail_out) || (pState->m_dict_avail))
break;
}
return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail))
? MZ_STREAM_END
: MZ_OK;
}
int mz_inflateEnd(mz_streamp pStream) {
if (!pStream) return MZ_STREAM_ERROR;
if (pStream->state) {
pStream->zfree(pStream->opaque, pStream->state);
pStream->state = NULL;
}
return MZ_OK;
}
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len) {
mz_stream stream;
int status;
memset(&stream, 0, sizeof(stream));
// In case mz_ulong is 64-bits (argh I hate longs).
if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
stream.next_in = pSource;
stream.avail_in = (mz_uint32)source_len;
stream.next_out = pDest;
stream.avail_out = (mz_uint32)*pDest_len;
status = mz_inflateInit(&stream);
if (status != MZ_OK) return status;
status = mz_inflate(&stream, MZ_FINISH);
if (status != MZ_STREAM_END) {
mz_inflateEnd(&stream);
return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? MZ_DATA_ERROR
: status;
}
*pDest_len = stream.total_out;
return mz_inflateEnd(&stream);
}
const char *mz_error(int err) {
static struct {
int m_err;
const char *m_pDesc;
} s_error_descs[] = {{MZ_OK, ""},
{MZ_STREAM_END, "stream end"},
{MZ_NEED_DICT, "need dictionary"},
{MZ_ERRNO, "file error"},
{MZ_STREAM_ERROR, "stream error"},
{MZ_DATA_ERROR, "data error"},
{MZ_MEM_ERROR, "out of memory"},
{MZ_BUF_ERROR, "buf error"},
{MZ_VERSION_ERROR, "version error"},
{MZ_PARAM_ERROR, "parameter error"}};
mz_uint i;
for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i)
if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc;
return NULL;
}
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Low-level Decompression (completely independent from all
// compression API's)
#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
#define TINFL_MEMSET(p, c, l) memset(p, c, l)
#define TINFL_CR_BEGIN \
switch (r->m_state) { \
case 0:
#define TINFL_CR_RETURN(state_index, result) \
do { \
status = result; \
r->m_state = state_index; \
goto common_exit; \
case state_index:; \
} \
MZ_MACRO_END
#define TINFL_CR_RETURN_FOREVER(state_index, result) \
do { \
for (;;) { \
TINFL_CR_RETURN(state_index, result); \
} \
} \
MZ_MACRO_END
#define TINFL_CR_FINISH }
// TODO: If the caller has indicated that there's no more input, and we attempt
// to read beyond the input buf, then something is wrong with the input because
// the inflator never reads ahead more than it needs to. Currently
// TINFL_GET_BYTE() pads the end of the stream with 0's in this scenario.
#define TINFL_GET_BYTE(state_index, c) \
do { \
if (pIn_buf_cur >= pIn_buf_end) { \
for (;;) { \
if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \
TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \
if (pIn_buf_cur < pIn_buf_end) { \
c = *pIn_buf_cur++; \
break; \
} \
} else { \
c = 0; \
break; \
} \
} \
} else \
c = *pIn_buf_cur++; \
} \
MZ_MACRO_END
#define TINFL_NEED_BITS(state_index, n) \
do { \
mz_uint c; \
TINFL_GET_BYTE(state_index, c); \
bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
num_bits += 8; \
} while (num_bits < (mz_uint)(n))
#define TINFL_SKIP_BITS(state_index, n) \
do { \
if (num_bits < (mz_uint)(n)) { \
TINFL_NEED_BITS(state_index, n); \
} \
bit_buf >>= (n); \
num_bits -= (n); \
} \
MZ_MACRO_END
#define TINFL_GET_BITS(state_index, b, n) \
do { \
if (num_bits < (mz_uint)(n)) { \
TINFL_NEED_BITS(state_index, n); \
} \
b = bit_buf & ((1 << (n)) - 1); \
bit_buf >>= (n); \
num_bits -= (n); \
} \
MZ_MACRO_END
// TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes
// remaining in the input buffer falls below 2.
// It reads just enough bytes from the input stream that are needed to decode
// the next Huffman code (and absolutely no more). It works by trying to fully
// decode a Huffman code by using whatever bits are currently present in the bit
// buffer. If this fails, it reads another byte, and tries again until it
// succeeds or until the bit buffer contains >=15 bits (deflate's max. Huffman
// code size).
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \
do { \
temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \
if (temp >= 0) { \
code_len = temp >> 9; \
if ((code_len) && (num_bits >= code_len)) break; \
} else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \
code_len = TINFL_FAST_LOOKUP_BITS; \
do { \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while ((temp < 0) && (num_bits >= (code_len + 1))); \
if (temp >= 0) break; \
} \
TINFL_GET_BYTE(state_index, c); \
bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
num_bits += 8; \
} while (num_bits < 15);
// TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex
// than you would initially expect because the zlib API expects the decompressor
// to never read beyond the final byte of the deflate stream. (In other words,
// when this macro wants to read another byte from the input, it REALLY needs
// another byte in order to fully decode the next Huffman code.) Handling this
// properly is particularly important on raw deflate (non-zlib) streams, which
// aren't followed by a byte aligned adler-32.
// The slow path is only executed at the very end of the input buffer.
#define TINFL_HUFF_DECODE(state_index, sym, pHuff) \
do { \
int temp; \
mz_uint code_len, c; \
if (num_bits < 15) { \
if ((pIn_buf_end - pIn_buf_cur) < 2) { \
TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \
} else { \
bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \
(((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \
pIn_buf_cur += 2; \
num_bits += 16; \
} \
} \
if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \
0) \
code_len = temp >> 9, temp &= 511; \
else { \
code_len = TINFL_FAST_LOOKUP_BITS; \
do { \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while (temp < 0); \
} \
sym = temp; \
bit_buf >>= code_len; \
num_bits -= code_len; \
} \
MZ_MACRO_END
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags) {
static const int s_length_base[31] = {
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4,
4, 4, 5, 5, 5, 5, 0, 0, 0};
static const int s_dist_base[32] = {
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33,
49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537,
2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0};
static const int s_dist_extra[32] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
static const mz_uint8 s_length_dezigzag[19] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
static const int s_min_table_sizes[3] = {257, 1, 4};
tinfl_status status = TINFL_STATUS_FAILED;
mz_uint32 num_bits, dist, counter, num_extra;
tinfl_bit_buf_t bit_buf;
const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end =
pIn_buf_next + *pIn_buf_size;
mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end =
pOut_buf_next + *pOut_buf_size;
size_t out_buf_size_mask =
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)
? (size_t)-1
: ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1,
dist_from_out_buf_start;
// Ensure the output buffer's size is a power of 2, unless the output buffer
// is large enough to hold the entire output file (in which case it doesn't
// matter).
if (((out_buf_size_mask + 1) & out_buf_size_mask) ||
(pOut_buf_next < pOut_buf_start)) {
*pIn_buf_size = *pOut_buf_size = 0;
return TINFL_STATUS_BAD_PARAM;
}
num_bits = r->m_num_bits;
bit_buf = r->m_bit_buf;
dist = r->m_dist;
counter = r->m_counter;
num_extra = r->m_num_extra;
dist_from_out_buf_start = r->m_dist_from_out_buf_start;
TINFL_CR_BEGIN
bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0;
r->m_z_adler32 = r->m_check_adler32 = 1;
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_GET_BYTE(1, r->m_zhdr0);
TINFL_GET_BYTE(2, r->m_zhdr1);
counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) ||
(r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) ||
((out_buf_size_mask + 1) <
(size_t)(1ULL << (8U + (r->m_zhdr0 >> 4)))));
if (counter) {
TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED);
}
}
do {
TINFL_GET_BITS(3, r->m_final, 3);
r->m_type = r->m_final >> 1;
if (r->m_type == 0) {
TINFL_SKIP_BITS(5, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
if (num_bits)
TINFL_GET_BITS(6, r->m_raw_header[counter], 8);
else
TINFL_GET_BYTE(7, r->m_raw_header[counter]);
}
if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) !=
(mz_uint)(0xFFFF ^
(r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) {
TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED);
}
while ((counter) && (num_bits)) {
TINFL_GET_BITS(51, dist, 8);
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)dist;
counter--;
}
while (counter) {
size_t n;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT);
}
while (pIn_buf_cur >= pIn_buf_end) {
if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {
TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT);
} else {
TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED);
}
}
n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur),
(size_t)(pIn_buf_end - pIn_buf_cur)),
counter);
TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n);
pIn_buf_cur += n;
pOut_buf_cur += n;
counter -= (mz_uint)n;
}
} else if (r->m_type == 3) {
TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
} else {
if (r->m_type == 1) {
mz_uint8 *p = r->m_tables[0].m_code_size;
mz_uint i;
r->m_table_sizes[0] = 288;
r->m_table_sizes[1] = 32;
TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
for (i = 0; i <= 143; ++i) *p++ = 8;
for (; i <= 255; ++i) *p++ = 9;
for (; i <= 279; ++i) *p++ = 7;
for (; i <= 287; ++i) *p++ = 8;
} else {
for (counter = 0; counter < 3; counter++) {
TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]);
r->m_table_sizes[counter] += s_min_table_sizes[counter];
}
MZ_CLEAR_OBJ(r->m_tables[2].m_code_size);
for (counter = 0; counter < r->m_table_sizes[2]; counter++) {
mz_uint s;
TINFL_GET_BITS(14, s, 3);
r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s;
}
r->m_table_sizes[2] = 19;
}
for (; (int)r->m_type >= 0; r->m_type--) {
int tree_next, tree_cur;
tinfl_huff_table *pTable;
mz_uint i, j, used_syms, total, sym_index, next_code[17],
total_syms[16];
pTable = &r->m_tables[r->m_type];
MZ_CLEAR_OBJ(total_syms);
MZ_CLEAR_OBJ(pTable->m_look_up);
MZ_CLEAR_OBJ(pTable->m_tree);
for (i = 0; i < r->m_table_sizes[r->m_type]; ++i)
total_syms[pTable->m_code_size[i]]++;
used_syms = 0, total = 0;
next_code[0] = next_code[1] = 0;
for (i = 1; i <= 15; ++i) {
used_syms += total_syms[i];
next_code[i + 1] = (total = ((total + total_syms[i]) << 1));
}
if ((65536 != total) && (used_syms > 1)) {
TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
}
for (tree_next = -1, sym_index = 0;
sym_index < r->m_table_sizes[r->m_type]; ++sym_index) {
mz_uint rev_code = 0, l, cur_code,
code_size = pTable->m_code_size[sym_index];
if (!code_size) continue;
cur_code = next_code[code_size]++;
for (l = code_size; l > 0; l--, cur_code >>= 1)
rev_code = (rev_code << 1) | (cur_code & 1);
if (code_size <= TINFL_FAST_LOOKUP_BITS) {
mz_int16 k = (mz_int16)((code_size << 9) | sym_index);
while (rev_code < TINFL_FAST_LOOKUP_SIZE) {
pTable->m_look_up[rev_code] = k;
rev_code += (1 << code_size);
}
continue;
}
if (0 ==
(tree_cur = pTable->m_look_up[rev_code &
(TINFL_FAST_LOOKUP_SIZE - 1)])) {
pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] =
(mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
}
rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) {
tree_cur -= ((rev_code >>= 1) & 1);
if (!pTable->m_tree[-tree_cur - 1]) {
pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
} else
tree_cur = pTable->m_tree[-tree_cur - 1];
}
tree_cur -= ((rev_code >>= 1) & 1);
pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
}
if (r->m_type == 2) {
for (counter = 0;
counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) {
mz_uint s;
TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]);
if (dist < 16) {
r->m_len_codes[counter++] = (mz_uint8)dist;
continue;
}
if ((dist == 16) && (!counter)) {
TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
}
num_extra = "\02\03\07"[dist - 16];
TINFL_GET_BITS(18, s, num_extra);
s += "\03\03\013"[dist - 16];
TINFL_MEMSET(r->m_len_codes + counter,
(dist == 16) ? r->m_len_codes[counter - 1] : 0, s);
counter += s;
}
if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) {
TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
}
TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes,
r->m_table_sizes[0]);
TINFL_MEMCPY(r->m_tables[1].m_code_size,
r->m_len_codes + r->m_table_sizes[0],
r->m_table_sizes[1]);
}
}
for (;;) {
mz_uint8 *pSrc;
for (;;) {
if (((pIn_buf_end - pIn_buf_cur) < 4) ||
((pOut_buf_end - pOut_buf_cur) < 2)) {
TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
if (counter >= 256) break;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)counter;
} else {
int sym2;
mz_uint code_len;
#if TINFL_USE_64BIT_BITBUF
if (num_bits < 30) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 4;
num_bits += 32;
}
#else
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
counter = sym2;
bit_buf >>= code_len;
num_bits -= code_len;
if (counter & 256) break;
#if !TINFL_USE_64BIT_BITBUF
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
bit_buf >>= code_len;
num_bits -= code_len;
pOut_buf_cur[0] = (mz_uint8)counter;
if (sym2 & 256) {
pOut_buf_cur++;
counter = sym2;
break;
}
pOut_buf_cur[1] = (mz_uint8)sym2;
pOut_buf_cur += 2;
}
}
if ((counter &= 511) == 256) break;
num_extra = s_length_extra[counter - 257];
counter = s_length_base[counter - 257];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(25, extra_bits, num_extra);
counter += extra_bits;
}
TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
num_extra = s_dist_extra[dist];
dist = s_dist_base[dist];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(27, extra_bits, num_extra);
dist += extra_bits;
}
dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
if ((dist > dist_from_out_buf_start) &&
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) {
TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
}
pSrc = pOut_buf_start +
((dist_from_out_buf_start - dist) & out_buf_size_mask);
if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) {
while (counter--) {
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ =
pOut_buf_start[(dist_from_out_buf_start++ - dist) &
out_buf_size_mask];
}
continue;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
else if ((counter >= 9) && (counter <= dist)) {
const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
do {
((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
pOut_buf_cur += 8;
} while ((pSrc += 8) < pSrc_end);
if ((counter &= 7) < 3) {
if (counter) {
pOut_buf_cur[0] = pSrc[0];
if (counter > 1) pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
continue;
}
}
#endif
do {
pOut_buf_cur[0] = pSrc[0];
pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur[2] = pSrc[2];
pOut_buf_cur += 3;
pSrc += 3;
} while ((int)(counter -= 3) > 2);
if ((int)counter > 0) {
pOut_buf_cur[0] = pSrc[0];
if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
}
}
} while (!(r->m_final & 1));
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_SKIP_BITS(32, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
mz_uint s;
if (num_bits)
TINFL_GET_BITS(41, s, 8);
else
TINFL_GET_BYTE(42, s);
r->m_z_adler32 = (r->m_z_adler32 << 8) | s;
}
}
TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
TINFL_CR_FINISH
common_exit:
r->m_num_bits = num_bits;
r->m_bit_buf = bit_buf;
r->m_dist = dist;
r->m_counter = counter;
r->m_num_extra = num_extra;
r->m_dist_from_out_buf_start = dist_from_out_buf_start;
*pIn_buf_size = pIn_buf_cur - pIn_buf_next;
*pOut_buf_size = pOut_buf_cur - pOut_buf_next;
if ((decomp_flags &
(TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) &&
(status >= 0)) {
const mz_uint8 *ptr = pOut_buf_next;
size_t buf_len = *pOut_buf_size;
mz_uint32 i, s1 = r->m_check_adler32 & 0xffff,
s2 = r->m_check_adler32 >> 16;
size_t block_len = buf_len % 5552;
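    // 5552 is the largest number of bytes that can be accumulated before s2
    // could overflow 32 bits, so the modulo 65521 reduction only needs to run
    // once per block.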
while (buf_len) {
for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
s1 += ptr[0], s2 += s1;
s1 += ptr[1], s2 += s1;
s1 += ptr[2], s2 += s1;
s1 += ptr[3], s2 += s1;
s1 += ptr[4], s2 += s1;
s1 += ptr[5], s2 += s1;
s1 += ptr[6], s2 += s1;
s1 += ptr[7], s2 += s1;
}
for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
s1 %= 65521U, s2 %= 65521U;
buf_len -= block_len;
block_len = 5552;
}
r->m_check_adler32 = (s2 << 16) + s1;
if ((status == TINFL_STATUS_DONE) &&
(decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) &&
(r->m_check_adler32 != r->m_z_adler32))
status = TINFL_STATUS_ADLER32_MISMATCH;
}
return status;
}
// Higher level helper functions.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags) {
tinfl_decompressor decomp;
void *pBuf = NULL, *pNew_buf;
size_t src_buf_ofs = 0, out_buf_capacity = 0;
*pOut_len = 0;
tinfl_init(&decomp);
for (;;) {
size_t src_buf_size = src_buf_len - src_buf_ofs,
dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
tinfl_status status = tinfl_decompress(
&decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size,
(mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL,
&dst_buf_size,
(flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) {
MZ_FREE(pBuf);
*pOut_len = 0;
return NULL;
}
src_buf_ofs += src_buf_size;
*pOut_len += dst_buf_size;
if (status == TINFL_STATUS_DONE) break;
new_out_buf_capacity = out_buf_capacity * 2;
if (new_out_buf_capacity < 128) new_out_buf_capacity = 128;
pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
if (!pNew_buf) {
MZ_FREE(pBuf);
*pOut_len = 0;
return NULL;
}
pBuf = pNew_buf;
out_buf_capacity = new_out_buf_capacity;
}
return pBuf;
}
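// Usage sketch (illustrative, not part of the original API): inflate a
// zlib-wrapped blob held entirely in memory with the helper above. The caller
// frees the returned buffer with MZ_FREE(); the wrapper name is hypothetical.
static void *example_inflate_zlib_blob(const void *pZlib_data, size_t zlib_len,
                                        size_t *pOut_len) {
  // TINFL_FLAG_PARSE_ZLIB_HEADER validates the 2-byte zlib header and the
  // trailing adler-32; omit it when the input is a raw deflate stream.
  return tinfl_decompress_mem_to_heap(pZlib_data, zlib_len, pOut_len,
                                      TINFL_FLAG_PARSE_ZLIB_HEADER);
}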
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags) {
tinfl_decompressor decomp;
tinfl_status status;
tinfl_init(&decomp);
status =
tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len,
(mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len,
(flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED
: out_buf_len;
}
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
tinfl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
int result = 0;
tinfl_decompressor decomp;
mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
size_t in_buf_ofs = 0, dict_ofs = 0;
if (!pDict) return TINFL_STATUS_FAILED;
tinfl_init(&decomp);
for (;;) {
size_t in_buf_size = *pIn_buf_size - in_buf_ofs,
dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
tinfl_status status =
tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs,
&in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size,
(flags & ~(TINFL_FLAG_HAS_MORE_INPUT |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
in_buf_ofs += in_buf_size;
if ((dst_buf_size) &&
(!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user)))
break;
if (status != TINFL_STATUS_HAS_MORE_OUTPUT) {
result = (status == TINFL_STATUS_DONE);
break;
}
dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
}
MZ_FREE(pDict);
*pIn_buf_size = in_buf_ofs;
return result;
}
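// Usage sketch (illustrative): the callback form above needs only
// TINFL_LZ_DICT_SIZE bytes of output state, so it suits size probing or
// streaming to a sink. The byte-counting callback below is hypothetical, not
// part of miniz.
static int example_count_output(const void *pBuf, int len, void *pUser) {
  (void)pBuf;
  *(size_t *)pUser += (size_t)len;
  return 1;  // non-zero keeps decompression going
}
static size_t example_inflated_size(const void *pDeflate_data, size_t len) {
  size_t total = 0;
  // flags == 0: raw deflate stream, no zlib header or adler-32 check.
  if (!tinfl_decompress_mem_to_callback(pDeflate_data, &len,
                                        example_count_output, &total, 0))
    return 0;
  return total;
}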
// ------------------- Low-level Compression (independent from all
// decompression APIs)
// Purposely making these tables static for faster init and thread safety.
static const mz_uint16 s_tdefl_len_sym[256] = {
257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268,
268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272,
272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274,
274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276,
276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279,
279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280,
280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281,
281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282,
282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283,
283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284,
284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
285};
static const mz_uint8 s_tdefl_len_extra[256] = {
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0};
static const mz_uint8 s_tdefl_small_dist_sym[512] = {
0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8,
8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11,
11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17};
static const mz_uint8 s_tdefl_small_dist_extra[512] = {
0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7};
static const mz_uint8 s_tdefl_large_dist_sym[128] = {
0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24,
24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29};
static const mz_uint8 s_tdefl_large_dist_extra[128] = {
0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11,
11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13};
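// Indexing convention for the tables above (used by tdefl_record_match() and
// tdefl_compress_lz_codes() below): the length tables are indexed by
// match_len - TDEFL_MIN_MATCH_LEN (0..255) and yield the DEFLATE length
// symbol (257..285) plus its extra-bit count; the distance tables take the
// match distance minus one, via the "small" tables indexed by (dist & 511)
// when dist < 512 and the "large" tables indexed by (dist >> 8) otherwise.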
// Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted
// values.
typedef struct {
mz_uint16 m_key, m_sym_index;
} tdefl_sym_freq;
static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms,
tdefl_sym_freq *pSyms0,
tdefl_sym_freq *pSyms1) {
mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2];
tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1;
MZ_CLEAR_OBJ(hist);
for (i = 0; i < num_syms; i++) {
mz_uint freq = pSyms0[i].m_key;
hist[freq & 0xFF]++;
hist[256 + ((freq >> 8) & 0xFF)]++;
}
while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256]))
total_passes--;
for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) {
const mz_uint32 *pHist = &hist[pass << 8];
mz_uint offsets[256], cur_ofs = 0;
for (i = 0; i < 256; i++) {
offsets[i] = cur_ofs;
cur_ofs += pHist[i];
}
for (i = 0; i < num_syms; i++)
pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] =
pCur_syms[i];
{
tdefl_sym_freq *t = pCur_syms;
pCur_syms = pNew_syms;
pNew_syms = t;
}
}
return pCur_syms;
}
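// The radix sort above is a two-pass (low byte, then high byte) counting sort
// over the 16-bit frequency keys; the high-byte pass is skipped when every
// key fits in one byte, which is the common case for short blocks.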
// tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat,
// alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996.
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) {
int root, leaf, next, avbl, used, dpth;
if (n == 0)
return;
else if (n == 1) {
A[0].m_key = 1;
return;
}
A[0].m_key += A[1].m_key;
root = 0;
leaf = 2;
for (next = 1; next < n - 1; next++) {
if (leaf >= n || A[root].m_key < A[leaf].m_key) {
A[next].m_key = A[root].m_key;
A[root++].m_key = (mz_uint16)next;
} else
A[next].m_key = A[leaf++].m_key;
if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) {
A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key);
A[root++].m_key = (mz_uint16)next;
} else
A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
}
A[n - 2].m_key = 0;
for (next = n - 3; next >= 0; next--)
A[next].m_key = A[A[next].m_key].m_key + 1;
avbl = 1;
used = dpth = 0;
root = n - 2;
next = n - 1;
while (avbl > 0) {
while (root >= 0 && (int)A[root].m_key == dpth) {
used++;
root--;
}
while (avbl > used) {
A[next--].m_key = (mz_uint16)(dpth);
avbl--;
}
avbl = 2 * used;
dpth++;
used = 0;
}
}
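// Contract: A[] must already be sorted by m_key (symbol frequency) in
// ascending order; on return A[i].m_key holds the code length assigned to
// that symbol. For example, frequencies 1,1,2,4 come back as code lengths
// 3,3,2,1, whose Kraft sum 2^-3 + 2^-3 + 2^-2 + 2^-1 is exactly 1.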
// Limits canonical Huffman code table's max code size.
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 };
static void tdefl_huffman_enforce_max_code_size(int *pNum_codes,
int code_list_len,
int max_code_size) {
int i;
mz_uint32 total = 0;
if (code_list_len <= 1) return;
for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++)
pNum_codes[max_code_size] += pNum_codes[i];
for (i = max_code_size; i > 0; i--)
total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i));
while (total != (1UL << max_code_size)) {
pNum_codes[max_code_size]--;
for (i = max_code_size - 1; i > 0; i--)
if (pNum_codes[i]) {
pNum_codes[i]--;
pNum_codes[i + 1] += 2;
break;
}
total--;
}
}
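// After clamping, the loop above restores the Kraft sum: each iteration drops
// one code at the maximum length and splits one shorter code into two codes
// that are one bit longer, until the length counts describe a complete prefix
// code again.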
static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num,
int table_len, int code_size_limit,
int static_table) {
int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
MZ_CLEAR_OBJ(num_codes);
if (static_table) {
for (i = 0; i < table_len; i++)
num_codes[d->m_huff_code_sizes[table_num][i]]++;
} else {
tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS],
*pSyms;
int num_used_syms = 0;
const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
for (i = 0; i < table_len; i++)
if (pSym_count[i]) {
syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
}
pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++;
tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms,
code_size_limit);
MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
for (l = num_codes[i]; l > 0; l--)
d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
}
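  // Assign canonical codes from the code lengths (RFC 1951, section 3.2.2),
  // then bit-reverse each code: DEFLATE packs Huffman codes most-significant
  // bit first, while TDEFL_PUT_BITS emits bits LSB-first.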
next_code[1] = 0;
for (j = 0, i = 2; i <= code_size_limit; i++)
next_code[i] = j = ((j + num_codes[i - 1]) << 1);
for (i = 0; i < table_len; i++) {
mz_uint rev_code = 0, code, code_size;
if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue;
code = next_code[code_size]++;
for (l = code_size; l > 0; l--, code >>= 1)
rev_code = (rev_code << 1) | (code & 1);
d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
}
}
#define TDEFL_PUT_BITS(b, l) \
do { \
mz_uint bits = b; \
mz_uint len = l; \
MZ_ASSERT(bits <= ((1U << len) - 1U)); \
d->m_bit_buffer |= (bits << d->m_bits_in); \
d->m_bits_in += len; \
while (d->m_bits_in >= 8) { \
if (d->m_pOutput_buf < d->m_pOutput_buf_end) \
*d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \
d->m_bit_buffer >>= 8; \
d->m_bits_in -= 8; \
} \
} \
MZ_MACRO_END
#define TDEFL_RLE_PREV_CODE_SIZE() \
{ \
if (rle_repeat_count) { \
if (rle_repeat_count < 3) { \
d->m_huff_count[2][prev_code_size] = (mz_uint16)( \
d->m_huff_count[2][prev_code_size] + rle_repeat_count); \
while (rle_repeat_count--) \
packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \
} else { \
d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 16; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_repeat_count - 3); \
} \
rle_repeat_count = 0; \
} \
}
#define TDEFL_RLE_ZERO_CODE_SIZE() \
{ \
if (rle_z_count) { \
if (rle_z_count < 3) { \
d->m_huff_count[2][0] = \
(mz_uint16)(d->m_huff_count[2][0] + rle_z_count); \
while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \
} else if (rle_z_count <= 10) { \
d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 17; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_z_count - 3); \
} else { \
d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 18; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_z_count - 11); \
} \
rle_z_count = 0; \
} \
}
static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
static void tdefl_start_dynamic_block(tdefl_compressor *d) {
int num_lit_codes, num_dist_codes, num_bit_lengths;
mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count,
rle_repeat_count, packed_code_sizes_index;
mz_uint8
code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
prev_code_size = 0xFF;
d->m_huff_count[0][256] = 1;
tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);
for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break;
for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break;
memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0],
num_dist_codes);
total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
num_packed_code_sizes = 0;
rle_z_count = 0;
rle_repeat_count = 0;
memset(&d->m_huff_count[2][0], 0,
sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
for (i = 0; i < total_code_sizes_to_pack; i++) {
mz_uint8 code_size = code_sizes_to_pack[i];
if (!code_size) {
TDEFL_RLE_PREV_CODE_SIZE();
if (++rle_z_count == 138) {
TDEFL_RLE_ZERO_CODE_SIZE();
}
} else {
TDEFL_RLE_ZERO_CODE_SIZE();
if (code_size != prev_code_size) {
TDEFL_RLE_PREV_CODE_SIZE();
d->m_huff_count[2][code_size] =
(mz_uint16)(d->m_huff_count[2][code_size] + 1);
packed_code_sizes[num_packed_code_sizes++] = code_size;
} else if (++rle_repeat_count == 6) {
TDEFL_RLE_PREV_CODE_SIZE();
}
}
prev_code_size = code_size;
}
if (rle_repeat_count) {
TDEFL_RLE_PREV_CODE_SIZE();
} else {
TDEFL_RLE_ZERO_CODE_SIZE();
}
tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);
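  // Emit the dynamic block header: two block-type bits (10), then HLIT-257,
  // HDIST-1, HCLEN-4 and the code-length code sizes, 3 bits each, in the
  // swizzled order required by RFC 1951.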
TDEFL_PUT_BITS(2, 2);
TDEFL_PUT_BITS(num_lit_codes - 257, 5);
TDEFL_PUT_BITS(num_dist_codes - 1, 5);
for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
if (d->m_huff_code_sizes
[2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]])
break;
num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
for (i = 0; (int)i < num_bit_lengths; i++)
TDEFL_PUT_BITS(
d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);
for (packed_code_sizes_index = 0;
packed_code_sizes_index < num_packed_code_sizes;) {
mz_uint code = packed_code_sizes[packed_code_sizes_index++];
MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
if (code >= 16)
TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++],
"\02\03\07"[code - 16]);
}
}
static void tdefl_start_static_block(tdefl_compressor *d) {
mz_uint i;
mz_uint8 *p = &d->m_huff_code_sizes[0][0];
for (i = 0; i <= 143; ++i) *p++ = 8;
for (; i <= 255; ++i) *p++ = 9;
for (; i <= 279; ++i) *p++ = 7;
for (; i <= 287; ++i) *p++ = 8;
memset(d->m_huff_code_sizes[1], 5, 32);
tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);
TDEFL_PUT_BITS(1, 2);
}
static const mz_uint mz_bitmasks[17] = {
0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF,
0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF};
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \
MINIZ_HAS_64BIT_REGISTERS
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
mz_uint flags;
mz_uint8 *pLZ_codes;
mz_uint8 *pOutput_buf = d->m_pOutput_buf;
mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
mz_uint64 bit_buffer = d->m_bit_buffer;
mz_uint bits_in = d->m_bits_in;
#define TDEFL_PUT_BITS_FAST(b, l) \
{ \
bit_buffer |= (((mz_uint64)(b)) << bits_in); \
bits_in += (l); \
}
flags = 1;
for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end;
flags >>= 1) {
if (flags == 1) flags = *pLZ_codes++ | 0x100;
if (flags & 1) {
mz_uint s0, s1, n0, n1, sym, num_extra_bits;
mz_uint match_len = pLZ_codes[0],
match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
pLZ_codes += 3;
MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
s_tdefl_len_extra[match_len]);
// This sequence coaxes MSVC into using cmov's vs. jmp's.
s0 = s_tdefl_small_dist_sym[match_dist & 511];
n0 = s_tdefl_small_dist_extra[match_dist & 511];
s1 = s_tdefl_large_dist_sym[match_dist >> 8];
n1 = s_tdefl_large_dist_extra[match_dist >> 8];
sym = (match_dist < 512) ? s0 : s1;
num_extra_bits = (match_dist < 512) ? n0 : n1;
MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym],
d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits],
num_extra_bits);
} else {
mz_uint lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
flags >>= 1;
lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
flags >>= 1;
lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
}
}
}
if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE;
*(mz_uint64 *)pOutput_buf = bit_buffer;
pOutput_buf += (bits_in >> 3);
bit_buffer >>= (bits_in & ~7);
bits_in &= 7;
}
#undef TDEFL_PUT_BITS_FAST
d->m_pOutput_buf = pOutput_buf;
d->m_bits_in = 0;
d->m_bit_buffer = 0;
while (bits_in) {
mz_uint32 n = MZ_MIN(bits_in, 16);
TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
bit_buffer >>= n;
bits_in -= n;
}
TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#else
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
mz_uint flags;
mz_uint8 *pLZ_codes;
flags = 1;
for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf;
flags >>= 1) {
if (flags == 1) flags = *pLZ_codes++ | 0x100;
if (flags & 1) {
mz_uint sym, num_extra_bits;
mz_uint match_len = pLZ_codes[0],
match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
pLZ_codes += 3;
MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
s_tdefl_len_extra[match_len]);
if (match_dist < 512) {
sym = s_tdefl_small_dist_sym[match_dist];
num_extra_bits = s_tdefl_small_dist_extra[match_dist];
} else {
sym = s_tdefl_large_dist_sym[match_dist >> 8];
num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
}
MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
} else {
mz_uint lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
}
}
TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN &&
// MINIZ_HAS_64BIT_REGISTERS
static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) {
if (static_block)
tdefl_start_static_block(d);
else
tdefl_start_dynamic_block(d);
return tdefl_compress_lz_codes(d);
}
static int tdefl_flush_block(tdefl_compressor *d, int flush) {
mz_uint saved_bit_buf, saved_bits_in;
mz_uint8 *pSaved_output_buf;
mz_bool comp_block_succeeded = MZ_FALSE;
int n, use_raw_block =
((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) &&
(d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
mz_uint8 *pOutput_buf_start =
((d->m_pPut_buf_func == NULL) &&
((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE))
? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs)
: d->m_output_buf;
d->m_pOutput_buf = pOutput_buf_start;
d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;
MZ_ASSERT(!d->m_output_flush_remaining);
d->m_output_flush_ofs = 0;
d->m_output_flush_remaining = 0;
*d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);
if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) {
TDEFL_PUT_BITS(0x78, 8);
TDEFL_PUT_BITS(0x01, 8);
}
TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);
pSaved_output_buf = d->m_pOutput_buf;
saved_bit_buf = d->m_bit_buffer;
saved_bits_in = d->m_bits_in;
if (!use_raw_block)
comp_block_succeeded =
tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) ||
(d->m_total_lz_bytes < 48));
// If the block gets expanded, forget the current contents of the output
// buffer and send a raw block instead.
if (((use_raw_block) ||
((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >=
d->m_total_lz_bytes))) &&
((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) {
mz_uint i;
d->m_pOutput_buf = pSaved_output_buf;
d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
TDEFL_PUT_BITS(0, 2);
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) {
TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
}
for (i = 0; i < d->m_total_lz_bytes; ++i) {
TDEFL_PUT_BITS(
d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK],
8);
}
}
// Check for the extremely unlikely (if not impossible) case of the compressed
// block not fitting into the output buffer when using dynamic codes.
else if (!comp_block_succeeded) {
d->m_pOutput_buf = pSaved_output_buf;
d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
tdefl_compress_block(d, MZ_TRUE);
}
if (flush) {
if (flush == TDEFL_FINISH) {
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) {
mz_uint i, a = d->m_adler32;
for (i = 0; i < 4; i++) {
TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
a <<= 8;
}
}
} else {
mz_uint i, z = 0;
TDEFL_PUT_BITS(0, 3);
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
for (i = 2; i; --i, z ^= 0xFFFF) {
TDEFL_PUT_BITS(z & 0xFFFF, 16);
}
}
}
MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);
memset(&d->m_huff_count[0][0], 0,
sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
memset(&d->m_huff_count[1][0], 0,
sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
d->m_pLZ_flags = d->m_lz_code_buf;
d->m_num_flags_left = 8;
d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
d->m_total_lz_bytes = 0;
d->m_block_index++;
if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) {
if (d->m_pPut_buf_func) {
*d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
} else if (pOutput_buf_start == d->m_output_buf) {
int bytes_to_copy = (int)MZ_MIN(
(size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf,
bytes_to_copy);
d->m_out_buf_ofs += bytes_to_copy;
if ((n -= bytes_to_copy) != 0) {
d->m_output_flush_ofs = bytes_to_copy;
d->m_output_flush_remaining = n;
}
} else {
d->m_out_buf_ofs += n;
}
}
return d->m_output_flush_remaining;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
static MZ_FORCEINLINE void tdefl_find_match(
tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
probe_len;
mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]),
s01 = TDEFL_READ_UNALIGNED_WORD(s);
MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
if (max_match_len <= match_len) return;
for (;;) {
for (;;) {
if (--num_probes_left == 0) return;
#define TDEFL_PROBE \
next_probe_pos = d->m_next[probe_pos]; \
if ((!next_probe_pos) || \
((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
return; \
probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \
break;
TDEFL_PROBE;
TDEFL_PROBE;
TDEFL_PROBE;
}
if (!dist) break;
q = (const mz_uint16 *)(d->m_dict + probe_pos);
if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue;
p = s;
probe_len = 32;
do {
} while (
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(--probe_len > 0));
if (!probe_len) {
*pMatch_dist = dist;
*pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN);
break;
} else if ((probe_len = ((mz_uint)(p - s) * 2) +
(mz_uint)(*(const mz_uint8 *)p ==
*(const mz_uint8 *)q)) > match_len) {
*pMatch_dist = dist;
if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) ==
max_match_len)
break;
c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
}
}
}
#else
static MZ_FORCEINLINE void tdefl_find_match(
tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
probe_len;
mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
const mz_uint8 *s = d->m_dict + pos, *p, *q;
mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1];
MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
if (max_match_len <= match_len) return;
for (;;) {
for (;;) {
if (--num_probes_left == 0) return;
#define TDEFL_PROBE \
next_probe_pos = d->m_next[probe_pos]; \
if ((!next_probe_pos) || \
((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
return; \
probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
if ((d->m_dict[probe_pos + match_len] == c0) && \
(d->m_dict[probe_pos + match_len - 1] == c1)) \
break;
TDEFL_PROBE;
TDEFL_PROBE;
TDEFL_PROBE;
}
if (!dist) break;
p = s;
q = d->m_dict + probe_pos;
for (probe_len = 0; probe_len < max_match_len; probe_len++)
if (*p++ != *q++) break;
if (probe_len > match_len) {
*pMatch_dist = dist;
if ((*pMatch_len = match_len = probe_len) == max_match_len) return;
c0 = d->m_dict[pos + match_len];
c1 = d->m_dict[pos + match_len - 1];
}
}
}
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static mz_bool tdefl_compress_fast(tdefl_compressor *d) {
// Faster, minimally featured LZRW1-style match+parse loop with better
// register utilization. Intended for applications where raw throughput is
// valued more highly than ratio.
mz_uint lookahead_pos = d->m_lookahead_pos,
lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size,
total_lz_bytes = d->m_total_lz_bytes,
num_flags_left = d->m_num_flags_left;
mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) {
const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
mz_uint dst_pos =
(lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
d->m_src_buf_left -= num_bytes_to_process;
lookahead_size += num_bytes_to_process;
while (num_bytes_to_process) {
mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc,
MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
d->m_pSrc += n;
dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
num_bytes_to_process -= n;
}
dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE))
break;
while (lookahead_size >= 4) {
mz_uint cur_match_dist, cur_match_len = 1;
mz_uint8 *pCur_dict = d->m_dict + cur_pos;
mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
mz_uint hash =
(first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) &
TDEFL_LEVEL1_HASH_SIZE_MASK;
mz_uint probe_pos = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)lookahead_pos;
if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <=
dict_size) &&
((*(const mz_uint32 *)(d->m_dict +
(probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) &
0xFFFFFF) == first_trigram)) {
const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
mz_uint32 probe_len = 32;
do {
} while ((TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(--probe_len > 0));
cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) +
(mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
if (!probe_len)
cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;
if ((cur_match_len < TDEFL_MIN_MATCH_LEN) ||
((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 8U * 1024U))) {
cur_match_len = 1;
*pLZ_code_buf++ = (mz_uint8)first_trigram;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
d->m_huff_count[0][(mz_uint8)first_trigram]++;
} else {
mz_uint32 s0, s1;
cur_match_len = MZ_MIN(cur_match_len, lookahead_size);
MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 1) &&
(cur_match_dist <= TDEFL_LZ_DICT_SIZE));
cur_match_dist--;
pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
*(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
pLZ_code_buf += 3;
*pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);
s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;
d->m_huff_count[0][s_tdefl_len_sym[cur_match_len -
TDEFL_MIN_MATCH_LEN]]++;
}
} else {
*pLZ_code_buf++ = (mz_uint8)first_trigram;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
d->m_huff_count[0][(mz_uint8)first_trigram]++;
}
if (--num_flags_left == 0) {
num_flags_left = 8;
pLZ_flags = pLZ_code_buf++;
}
total_lz_bytes += cur_match_len;
lookahead_pos += cur_match_len;
dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE);
cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
MZ_ASSERT(lookahead_size >= cur_match_len);
lookahead_size -= cur_match_len;
if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
int n;
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
total_lz_bytes = d->m_total_lz_bytes;
pLZ_code_buf = d->m_pLZ_code_buf;
pLZ_flags = d->m_pLZ_flags;
num_flags_left = d->m_num_flags_left;
}
}
while (lookahead_size) {
mz_uint8 lit = d->m_dict[cur_pos];
total_lz_bytes++;
*pLZ_code_buf++ = lit;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
if (--num_flags_left == 0) {
num_flags_left = 8;
pLZ_flags = pLZ_code_buf++;
}
d->m_huff_count[0][lit]++;
lookahead_pos++;
dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE);
cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
lookahead_size--;
if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
int n;
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
total_lz_bytes = d->m_total_lz_bytes;
pLZ_code_buf = d->m_pLZ_code_buf;
pLZ_flags = d->m_pLZ_flags;
num_flags_left = d->m_num_flags_left;
}
}
}
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
return MZ_TRUE;
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d,
mz_uint8 lit) {
d->m_total_lz_bytes++;
*d->m_pLZ_code_buf++ = lit;
*d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
if (--d->m_num_flags_left == 0) {
d->m_num_flags_left = 8;
d->m_pLZ_flags = d->m_pLZ_code_buf++;
}
d->m_huff_count[0][lit]++;
}
static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d,
mz_uint match_len,
mz_uint match_dist) {
mz_uint32 s0, s1;
MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) &&
(match_dist <= TDEFL_LZ_DICT_SIZE));
d->m_total_lz_bytes += match_len;
d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
match_dist -= 1;
d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
d->m_pLZ_code_buf += 3;
*d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
if (--d->m_num_flags_left == 0) {
d->m_num_flags_left = 8;
d->m_pLZ_flags = d->m_pLZ_code_buf++;
}
s0 = s_tdefl_small_dist_sym[match_dist & 511];
s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
d->m_huff_count[1][(match_dist < 512) ? s0 : s1]++;
if (match_len >= TDEFL_MIN_MATCH_LEN)
d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
}
static mz_bool tdefl_compress_normal(tdefl_compressor *d) {
const mz_uint8 *pSrc = d->m_pSrc;
size_t src_buf_left = d->m_src_buf_left;
tdefl_flush flush = d->m_flush;
while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) {
mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
// Update dictionary and hash chains. Keeps the lookahead size equal to
// TDEFL_MAX_MATCH_LEN.
if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) {
mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
TDEFL_LZ_DICT_SIZE_MASK,
ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
<< TDEFL_LZ_HASH_SHIFT) ^
d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
src_buf_left -= num_bytes_to_process;
d->m_lookahead_size += num_bytes_to_process;
while (pSrc != pSrc_end) {
mz_uint8 c = *pSrc++;
d->m_dict[dst_pos] = c;
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)(ins_pos);
dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
ins_pos++;
}
} else {
while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) {
mz_uint8 c = *pSrc++;
mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
TDEFL_LZ_DICT_SIZE_MASK;
src_buf_left--;
d->m_dict[dst_pos] = c;
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) {
mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
<< (TDEFL_LZ_HASH_SHIFT * 2)) ^
(d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]
<< TDEFL_LZ_HASH_SHIFT) ^
c) &
(TDEFL_LZ_HASH_SIZE - 1);
d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)(ins_pos);
}
}
}
d->m_dict_size =
MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break;
// Simple lazy/greedy parsing state machine.
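    // Matches are taken lazily: unless greedy parsing, RLE-only matching, or a
    // long match (>= 128 bytes) forces an immediate emit, a match found here
    // is saved together with the current byte; if the next position does not
    // yield a longer match the saved match is emitted, otherwise the saved
    // byte goes out as a literal and the longer match takes its place.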
len_to_move = 1;
cur_match_dist = 0;
cur_match_len =
d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1);
cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) {
if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) {
mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
cur_match_len = 0;
while (cur_match_len < d->m_lookahead_size) {
if (d->m_dict[cur_pos + cur_match_len] != c) break;
cur_match_len++;
}
if (cur_match_len < TDEFL_MIN_MATCH_LEN)
cur_match_len = 0;
else
cur_match_dist = 1;
}
} else {
tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size,
d->m_lookahead_size, &cur_match_dist, &cur_match_len);
}
if (((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 8U * 1024U)) ||
(cur_pos == cur_match_dist) ||
((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) {
cur_match_dist = cur_match_len = 0;
}
if (d->m_saved_match_len) {
if (cur_match_len > d->m_saved_match_len) {
tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
if (cur_match_len >= 128) {
tdefl_record_match(d, cur_match_len, cur_match_dist);
d->m_saved_match_len = 0;
len_to_move = cur_match_len;
} else {
d->m_saved_lit = d->m_dict[cur_pos];
d->m_saved_match_dist = cur_match_dist;
d->m_saved_match_len = cur_match_len;
}
} else {
tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
len_to_move = d->m_saved_match_len - 1;
d->m_saved_match_len = 0;
}
} else if (!cur_match_dist)
tdefl_record_literal(d,
d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) ||
(cur_match_len >= 128)) {
tdefl_record_match(d, cur_match_len, cur_match_dist);
len_to_move = cur_match_len;
} else {
d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
d->m_saved_match_dist = cur_match_dist;
d->m_saved_match_len = cur_match_len;
}
// Move the lookahead forward by len_to_move bytes.
d->m_lookahead_pos += len_to_move;
MZ_ASSERT(d->m_lookahead_size >= len_to_move);
d->m_lookahead_size -= len_to_move;
d->m_dict_size =
MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE);
// Check if it's time to flush the current LZ codes to the internal output
// buffer.
if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
((d->m_total_lz_bytes > 31 * 1024) &&
(((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >=
d->m_total_lz_bytes) ||
(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) {
int n;
d->m_pSrc = pSrc;
d->m_src_buf_left = src_buf_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
}
}
d->m_pSrc = pSrc;
d->m_src_buf_left = src_buf_left;
return MZ_TRUE;
}
static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) {
if (d->m_pIn_buf_size) {
*d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
}
if (d->m_pOut_buf_size) {
size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs,
d->m_output_flush_remaining);
memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs,
d->m_output_buf + d->m_output_flush_ofs, n);
d->m_output_flush_ofs += (mz_uint)n;
d->m_output_flush_remaining -= (mz_uint)n;
d->m_out_buf_ofs += n;
*d->m_pOut_buf_size = d->m_out_buf_ofs;
}
return (d->m_finished && !d->m_output_flush_remaining) ? TDEFL_STATUS_DONE
: TDEFL_STATUS_OKAY;
}
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
size_t *pIn_buf_size, void *pOut_buf,
size_t *pOut_buf_size, tdefl_flush flush) {
if (!d) {
if (pIn_buf_size) *pIn_buf_size = 0;
if (pOut_buf_size) *pOut_buf_size = 0;
return TDEFL_STATUS_BAD_PARAM;
}
d->m_pIn_buf = pIn_buf;
d->m_pIn_buf_size = pIn_buf_size;
d->m_pOut_buf = pOut_buf;
d->m_pOut_buf_size = pOut_buf_size;
d->m_pSrc = (const mz_uint8 *)(pIn_buf);
d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
d->m_out_buf_ofs = 0;
d->m_flush = flush;
if (((d->m_pPut_buf_func != NULL) ==
((pOut_buf != NULL) || (pOut_buf_size != NULL))) ||
(d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
(d->m_wants_to_finish && (flush != TDEFL_FINISH)) ||
(pIn_buf_size && *pIn_buf_size && !pIn_buf) ||
(pOut_buf_size && *pOut_buf_size && !pOut_buf)) {
if (pIn_buf_size) *pIn_buf_size = 0;
if (pOut_buf_size) *pOut_buf_size = 0;
return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
}
d->m_wants_to_finish |= (flush == TDEFL_FINISH);
if ((d->m_output_flush_remaining) || (d->m_finished))
return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS |
TDEFL_RLE_MATCHES)) == 0)) {
if (!tdefl_compress_fast(d)) return d->m_prev_return_status;
} else
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
{
if (!tdefl_compress_normal(d)) return d->m_prev_return_status;
}
if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) &&
(pIn_buf))
d->m_adler32 =
(mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf,
d->m_pSrc - (const mz_uint8 *)pIn_buf);
if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) &&
(!d->m_output_flush_remaining)) {
if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status;
d->m_finished = (flush == TDEFL_FINISH);
if (flush == TDEFL_FULL_FLUSH) {
MZ_CLEAR_OBJ(d->m_hash);
MZ_CLEAR_OBJ(d->m_next);
d->m_dict_size = 0;
}
}
return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
}
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
size_t in_buf_size, tdefl_flush flush) {
MZ_ASSERT(d->m_pPut_buf_func);
return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush);
}
tdefl_status tdefl_init(tdefl_compressor *d,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
d->m_pPut_buf_func = pPut_buf_func;
d->m_pPut_buf_user = pPut_buf_user;
d->m_flags = (mz_uint)(flags);
d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash);
d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size =
d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished =
d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
d->m_pLZ_flags = d->m_lz_code_buf;
d->m_num_flags_left = 8;
d->m_pOutput_buf = d->m_output_buf;
d->m_pOutput_buf_end = d->m_output_buf;
d->m_prev_return_status = TDEFL_STATUS_OKAY;
d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
d->m_adler32 = 1;
d->m_pIn_buf = NULL;
d->m_pOut_buf = NULL;
d->m_pIn_buf_size = NULL;
d->m_pOut_buf_size = NULL;
d->m_flush = TDEFL_NO_FLUSH;
d->m_pSrc = NULL;
d->m_src_buf_left = 0;
d->m_out_buf_ofs = 0;
memset(&d->m_huff_count[0][0], 0,
sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
memset(&d->m_huff_count[1][0], 0,
sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
return TDEFL_STATUS_OKAY;
}
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
return d->m_prev_return_status;
}
mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
tdefl_compressor *pComp;
mz_bool succeeded;
if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE;
pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
if (!pComp) return MZ_FALSE;
succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
TDEFL_STATUS_OKAY);
succeeded =
succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) ==
TDEFL_STATUS_DONE);
MZ_FREE(pComp);
return succeeded;
}
typedef struct {
size_t m_size, m_capacity;
mz_uint8 *m_pBuf;
mz_bool m_expandable;
} tdefl_output_buffer;
static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
void *pUser) {
tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
size_t new_size = p->m_size + len;
if (new_size > p->m_capacity) {
size_t new_capacity = p->m_capacity;
mz_uint8 *pNew_buf;
if (!p->m_expandable) return MZ_FALSE;
do {
new_capacity = MZ_MAX(128U, new_capacity << 1U);
} while (new_size > new_capacity);
pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity);
if (!pNew_buf) return MZ_FALSE;
p->m_pBuf = pNew_buf;
p->m_capacity = new_capacity;
}
memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
p->m_size = new_size;
return MZ_TRUE;
}
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags) {
tdefl_output_buffer out_buf;
MZ_CLEAR_OBJ(out_buf);
if (!pOut_len)
return MZ_FALSE;
else
*pOut_len = 0;
out_buf.m_expandable = MZ_TRUE;
if (!tdefl_compress_mem_to_output(
pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
return NULL;
*pOut_len = out_buf.m_size;
return out_buf.m_pBuf;
}
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags) {
tdefl_output_buffer out_buf;
MZ_CLEAR_OBJ(out_buf);
if (!pOut_buf) return 0;
out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
out_buf.m_capacity = out_buf_len;
if (!tdefl_compress_mem_to_output(
pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
return 0;
return out_buf.m_size;
}
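// Round-trip sketch (illustrative): deflate a buffer to the heap with the
// one-shot helper above, then inflate it back with the matching tinfl helper
// and compare. Both results are released with MZ_FREE(); the function name is
// hypothetical.
static mz_bool example_roundtrip(const void *pData, size_t data_len) {
  size_t comp_len = 0, decomp_len = 0;
  mz_bool ok = MZ_FALSE;
  void *pComp = tdefl_compress_mem_to_heap(
      pData, data_len, &comp_len,
      TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES);
  if (pComp) {
    void *pDecomp = tinfl_decompress_mem_to_heap(
        pComp, comp_len, &decomp_len, TINFL_FLAG_PARSE_ZLIB_HEADER);
    ok = (pDecomp != NULL) && (decomp_len == data_len) &&
         (memcmp(pDecomp, pData, data_len) == 0);
    MZ_FREE(pDecomp);
    MZ_FREE(pComp);
  }
  return ok;
}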
#ifndef MINIZ_NO_ZLIB_APIS
static const mz_uint s_tdefl_num_probes[11] = {0, 1, 6, 32, 16, 32,
128, 256, 512, 768, 1500};
// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput to fall off a cliff
// on some files).
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
int strategy) {
mz_uint comp_flags =
s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] |
((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0);
if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
if (!level)
comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
else if (strategy == MZ_FILTERED)
comp_flags |= TDEFL_FILTER_MATCHES;
else if (strategy == MZ_HUFFMAN_ONLY)
comp_flags &= ~TDEFL_MAX_PROBES_MASK;
else if (strategy == MZ_FIXED)
comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
else if (strategy == MZ_RLE)
comp_flags |= TDEFL_RLE_MATCHES;
return comp_flags;
}
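// Sketch of how the zlib-style knobs above are typically combined (level 6,
// zlib header requested via positive window_bits, default strategy).
// MZ_DEFAULT_WINDOW_BITS and MZ_DEFAULT_STRATEGY are the miniz zlib-compat
// constants; treat the specific level value here as a placeholder.
#if 0
static mz_uint example_default_comp_flags(void) {
  return tdefl_create_comp_flags_from_zip_params(6, MZ_DEFAULT_WINDOW_BITS,
                                                 MZ_DEFAULT_STRATEGY);
}
#endif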
#endif // MINIZ_NO_ZLIB_APIS
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
// Simple PNG writer function by Alex Evans, 2011. Released into the public
// domain: https://gist.github.com/908299, more context at
// http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/.
// This is actually a modification of Alex's original code so PNG files
// generated by this function pass pngcheck.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
int h, int num_chans,
size_t *pLen_out,
mz_uint level, mz_bool flip) {
// Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was
// defined.
static const mz_uint s_tdefl_png_num_probes[11] = {
0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500};
tdefl_compressor *pComp =
(tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
tdefl_output_buffer out_buf;
int i, bpl = w * num_chans, y, z;
mz_uint32 c;
*pLen_out = 0;
if (!pComp) return NULL;
MZ_CLEAR_OBJ(out_buf);
out_buf.m_expandable = MZ_TRUE;
out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h);
if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) {
MZ_FREE(pComp);
return NULL;
}
// write dummy header
for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf);
// compress image data
tdefl_init(
pComp, tdefl_output_buffer_putter, &out_buf,
s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER);
for (y = 0; y < h; ++y) {
tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH);
tdefl_compress_buffer(pComp,
(mz_uint8 *)pImage + (flip ? (h - 1 - y) : y) * bpl,
bpl, TDEFL_NO_FLUSH);
}
if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) !=
TDEFL_STATUS_DONE) {
MZ_FREE(pComp);
MZ_FREE(out_buf.m_pBuf);
return NULL;
}
// write real header
*pLen_out = out_buf.m_size - 41;
{
static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06};
mz_uint8 pnghdr[41] = {0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a,
                       0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52,
                       0, 0, (mz_uint8)(w >> 8), (mz_uint8)w,
                       0, 0, (mz_uint8)(h >> 8), (mz_uint8)h,
                       8, chans[num_chans], 0, 0, 0, 0, 0, 0, 0,
                       (mz_uint8)(*pLen_out >> 24), (mz_uint8)(*pLen_out >> 16),
                       (mz_uint8)(*pLen_out >> 8), (mz_uint8)*pLen_out,
                       0x49, 0x44, 0x41, 0x54};
c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17);
for (i = 0; i < 4; ++i, c <<= 8)
((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24);
memcpy(out_buf.m_pBuf, pnghdr, 41);
}
// write footer (IDAT CRC-32, followed by IEND chunk)
if (!tdefl_output_buffer_putter(
"\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) {
*pLen_out = 0;
MZ_FREE(pComp);
MZ_FREE(out_buf.m_pBuf);
return NULL;
}
c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4,
*pLen_out + 4);
for (i = 0; i < 4; ++i, c <<= 8)
(out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24);
// compute final size of file, grab compressed data buffer and return
*pLen_out += 57;
MZ_FREE(pComp);
return out_buf.m_pBuf;
}
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out) {
// Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we
// can't depend on MZ_DEFAULT_LEVEL being available in case the zlib APIs were
// #defined out)
return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans,
pLen_out, 6, MZ_FALSE);
}
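// Usage sketch (illustrative): encode a caller-supplied RGBA raster to an
// in-memory PNG with the wrapper above. The 64x64 size is a placeholder.
#if 0
static void png_writer_example(void) {
  enum { W = 64, H = 64, CHANS = 4 };
  static mz_uint8 pixels[W * H * CHANS];  // top-down raster filled elsewhere
  size_t png_len = 0;
  void *pPng =
      tdefl_write_image_to_png_file_in_memory(pixels, W, H, CHANS, &png_len);
  if (pPng) {
    // ... write the png_len bytes at pPng to disk, a socket, etc. ...
    MZ_FREE(pPng);  // buffer was allocated with MZ_MALLOC/MZ_REALLOC
  }
}
#endif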
// ------------------- .ZIP archive reading
#ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef MINIZ_NO_STDIO
#define MZ_FILE void *
#else
#include <stdio.h>
#include <sys/stat.h>
#if defined(_MSC_VER) || defined(__MINGW64__)
static FILE *mz_fopen(const char *pFilename, const char *pMode) {
FILE *pFile = NULL;
fopen_s(&pFile, pFilename, pMode);
return pFile;
}
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) {
FILE *pFile = NULL;
if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL;
return pFile;
}
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN mz_fopen
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 _ftelli64
#define MZ_FSEEK64 _fseeki64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN mz_freopen
#define MZ_DELETE_FILE remove
#elif defined(__MINGW32__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__TINYC__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen64(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT stat64
#define MZ_FILE_STAT stat64
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(p, m, s) freopen64(p, m, s)
#define MZ_DELETE_FILE remove
#else
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello
#define MZ_FSEEK64 fseeko
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#endif // #ifdef _MSC_VER
#endif // #ifdef MINIZ_NO_STDIO
#define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c))
// Various ZIP archive enums. To completely avoid cross platform compiler
// alignment and platform endian issues, miniz.c doesn't use structs for any of
// this stuff.
enum {
// ZIP archive identifiers and record sizes
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50,
MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50,
MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22,
// Central directory header record offsets
MZ_ZIP_CDH_SIG_OFS = 0,
MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4,
MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6,
MZ_ZIP_CDH_BIT_FLAG_OFS = 8,
MZ_ZIP_CDH_METHOD_OFS = 10,
MZ_ZIP_CDH_FILE_TIME_OFS = 12,
MZ_ZIP_CDH_FILE_DATE_OFS = 14,
MZ_ZIP_CDH_CRC32_OFS = 16,
MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20,
MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24,
MZ_ZIP_CDH_FILENAME_LEN_OFS = 28,
MZ_ZIP_CDH_EXTRA_LEN_OFS = 30,
MZ_ZIP_CDH_COMMENT_LEN_OFS = 32,
MZ_ZIP_CDH_DISK_START_OFS = 34,
MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36,
MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38,
MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42,
// Local directory header offsets
MZ_ZIP_LDH_SIG_OFS = 0,
MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4,
MZ_ZIP_LDH_BIT_FLAG_OFS = 6,
MZ_ZIP_LDH_METHOD_OFS = 8,
MZ_ZIP_LDH_FILE_TIME_OFS = 10,
MZ_ZIP_LDH_FILE_DATE_OFS = 12,
MZ_ZIP_LDH_CRC32_OFS = 14,
MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18,
MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22,
MZ_ZIP_LDH_FILENAME_LEN_OFS = 26,
MZ_ZIP_LDH_EXTRA_LEN_OFS = 28,
// End of central directory offsets
MZ_ZIP_ECDH_SIG_OFS = 0,
MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4,
MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6,
MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8,
MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10,
MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12,
MZ_ZIP_ECDH_CDIR_OFS_OFS = 16,
MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20,
};
typedef struct {
void *m_p;
size_t m_size, m_capacity;
mz_uint m_element_size;
} mz_zip_array;
struct mz_zip_internal_state_tag {
mz_zip_array m_central_dir;
mz_zip_array m_central_dir_offsets;
mz_zip_array m_sorted_central_dir_offsets;
MZ_FILE *m_pFile;
void *m_pMem;
size_t m_mem_size;
size_t m_mem_capacity;
};
#define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \
(array_ptr)->m_element_size = element_size
#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \
((element_type *)((array_ptr)->m_p))[index]
static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip,
mz_zip_array *pArray) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p);
memset(pArray, 0, sizeof(mz_zip_array));
}
static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t min_new_capacity,
mz_uint growing) {
void *pNew_p;
size_t new_capacity = min_new_capacity;
MZ_ASSERT(pArray->m_element_size);
if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE;
if (growing) {
new_capacity = MZ_MAX(1, pArray->m_capacity);
while (new_capacity < min_new_capacity) new_capacity *= 2;
}
if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p,
pArray->m_element_size, new_capacity)))
return MZ_FALSE;
pArray->m_p = pNew_p;
pArray->m_capacity = new_capacity;
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t new_capacity,
mz_uint growing) {
if (new_capacity > pArray->m_capacity) {
if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing))
return MZ_FALSE;
}
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t new_size,
mz_uint growing) {
if (new_size > pArray->m_capacity) {
if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing))
return MZ_FALSE;
}
pArray->m_size = new_size;
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t n) {
return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE);
}
static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip,
mz_zip_array *pArray,
const void *pElements,
size_t n) {
size_t orig_size = pArray->m_size;
if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE))
return MZ_FALSE;
memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size,
pElements, n * pArray->m_element_size);
return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) {
struct tm tm;
memset(&tm, 0, sizeof(tm));
tm.tm_isdst = -1;
tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900;
tm.tm_mon = ((dos_date >> 5) & 15) - 1;
tm.tm_mday = dos_date & 31;
tm.tm_hour = (dos_time >> 11) & 31;
tm.tm_min = (dos_time >> 5) & 63;
tm.tm_sec = (dos_time << 1) & 62;
return mktime(&tm);
}
static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time,
mz_uint16 *pDOS_date) {
#ifdef _MSC_VER
struct tm tm_struct;
struct tm *tm = &tm_struct;
errno_t err = localtime_s(tm, &time);
if (err) {
*pDOS_date = 0;
*pDOS_time = 0;
return;
}
#else
struct tm *tm = localtime(&time);
#endif
*pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) +
((tm->tm_sec) >> 1));
*pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) +
((tm->tm_mon + 1) << 5) + tm->tm_mday);
}
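// Worked example (a sketch, not part of miniz): the DOS date packs
// year-1980/month/day into bits 15-9/8-5/4-0, and the DOS time packs
// hour/minute/(seconds/2) into bits 15-11/10-5/4-0, matching the shifts used
// in both conversion helpers above.
#if 0
static void dos_datetime_packing_example(void) {
  // 2017-06-15 12:30:40 (placeholder date) packs as follows:
  mz_uint16 dos_date = (mz_uint16)(((2017 - 1980) << 9) | (6 << 5) | 15);
  mz_uint16 dos_time = (mz_uint16)((12 << 11) | (30 << 5) | (40 >> 1));
  // dos_date == 0x4ACF, dos_time == 0x63D4
  (void)dos_date;
  (void)dos_time;
}
#endif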
#endif
#ifndef MINIZ_NO_STDIO
static mz_bool mz_zip_get_file_modified_time(const char *pFilename,
mz_uint16 *pDOS_time,
mz_uint16 *pDOS_date) {
#ifdef MINIZ_NO_TIME
(void)pFilename;
*pDOS_date = *pDOS_time = 0;
#else
struct MZ_FILE_STAT_STRUCT file_stat;
// On Linux with x86 glibc, this call will fail on large files (>= 0x80000000
// bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh.
if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE;
mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date);
#endif // #ifdef MINIZ_NO_TIME
return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time,
time_t modified_time) {
struct utimbuf t;
t.actime = access_time;
t.modtime = modified_time;
return !utime(pFilename, &t);
}
#endif // #ifndef MINIZ_NO_TIME
#endif // #ifndef MINIZ_NO_STDIO
static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip,
mz_uint32 flags) {
(void)flags;
if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
return MZ_FALSE;
if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
pZip->m_zip_mode = MZ_ZIP_MODE_READING;
pZip->m_archive_size = 0;
pZip->m_central_directory_file_ofs = 0;
pZip->m_total_files = 0;
if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
return MZ_FALSE;
memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
sizeof(mz_uint8));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
sizeof(mz_uint32));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
sizeof(mz_uint32));
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool
mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array,
const mz_zip_array *pCentral_dir_offsets,
mz_uint l_index, mz_uint r_index) {
const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
l_index)),
*pE;
const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index));
mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS),
r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS);
mz_uint8 l = 0, r = 0;
pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pE = pL + MZ_MIN(l_len, r_len);
while (pL < pE) {
if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
pL++;
pR++;
}
return (pL == pE) ? (l_len < r_len) : (l < r);
}
#define MZ_SWAP_UINT32(a, b) \
do { \
mz_uint32 t = a; \
a = b; \
b = t; \
} \
MZ_MACRO_END
// Heap sort of lowercased filenames, used to help accelerate plain central
// directory searches by mz_zip_reader_locate_file(). (Could also use qsort(),
// but it could allocate memory.)
static void mz_zip_reader_sort_central_dir_offsets_by_filename(
mz_zip_archive *pZip) {
mz_zip_internal_state *pState = pZip->m_pState;
const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
const mz_zip_array *pCentral_dir = &pState->m_central_dir;
mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
&pState->m_sorted_central_dir_offsets, mz_uint32, 0);
const int size = pZip->m_total_files;
int start = (size - 2) >> 1, end;
while (start >= 0) {
int child, root = start;
for (;;) {
if ((child = (root << 1) + 1) >= size) break;
child +=
(((child + 1) < size) &&
(mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[child], pIndices[child + 1])));
if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[root], pIndices[child]))
break;
MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
root = child;
}
start--;
}
end = size - 1;
while (end > 0) {
int child, root = 0;
MZ_SWAP_UINT32(pIndices[end], pIndices[0]);
for (;;) {
if ((child = (root << 1) + 1) >= end) break;
child +=
(((child + 1) < end) &&
mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[child], pIndices[child + 1]));
if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[root], pIndices[child]))
break;
MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
root = child;
}
end--;
}
}
static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip,
mz_uint32 flags) {
mz_uint cdir_size, num_this_disk, cdir_disk_index;
mz_uint64 cdir_ofs;
mz_int64 cur_file_ofs;
const mz_uint8 *p;
mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];
mz_uint8 *pBuf = (mz_uint8 *)buf_u32;
mz_bool sort_central_dir =
((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0);
// Basic sanity checks - reject files which are too small, and check the first
// 4 bytes of the file to make sure a local header is there.
if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
// Find the end of central directory record by scanning the file from the end
// towards the beginning.
cur_file_ofs =
MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0);
for (;;) {
int i,
n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n)
return MZ_FALSE;
for (i = n - 4; i >= 0; --i)
if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break;
if (i >= 0) {
cur_file_ofs += i;
break;
}
if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >=
(0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)))
return MZ_FALSE;
cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0);
}
// Read and verify the end of central directory record.
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) !=
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) !=
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) ||
((pZip->m_total_files =
MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) !=
MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS)))
return MZ_FALSE;
num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS);
cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS);
if (((num_this_disk | cdir_disk_index) != 0) &&
((num_this_disk != 1) || (cdir_disk_index != 1)))
return MZ_FALSE;
if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) <
pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS);
if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE;
pZip->m_central_directory_file_ofs = cdir_ofs;
if (pZip->m_total_files) {
mz_uint i, n;
// Read the entire central directory into a heap block, and allocate another
// heap block to hold the unsorted central dir file record offsets, and
// another to hold the sorted indices.
if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size,
MZ_FALSE)) ||
(!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets,
pZip->m_total_files, MZ_FALSE)))
return MZ_FALSE;
if (sort_central_dir) {
if (!mz_zip_array_resize(pZip,
&pZip->m_pState->m_sorted_central_dir_offsets,
pZip->m_total_files, MZ_FALSE))
return MZ_FALSE;
}
if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs,
pZip->m_pState->m_central_dir.m_p,
cdir_size) != cdir_size)
return MZ_FALSE;
// Now create an index into the central directory file records, do some
// basic sanity checking on each record, and check for zip64 entries (which
// are not yet supported).
p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p;
for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) {
mz_uint total_header_size, comp_size, decomp_size, disk_index;
if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) ||
(MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG))
return MZ_FALSE;
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
i) =
(mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p);
if (sort_central_dir)
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets,
mz_uint32, i) = i;
comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) &&
(decomp_size != comp_size)) ||
(decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) ||
(comp_size == 0xFFFFFFFF))
return MZ_FALSE;
disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS);
if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE;
if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size)
return MZ_FALSE;
if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) >
n)
return MZ_FALSE;
n -= total_header_size;
p += total_header_size;
}
}
if (sort_central_dir)
mz_zip_reader_sort_central_dir_offsets_by_filename(pZip);
return MZ_TRUE;
}
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
mz_uint32 flags) {
if ((!pZip) || (!pZip->m_pRead)) return MZ_FALSE;
if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
pZip->m_archive_size = size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
size_t s = (file_ofs >= pZip->m_archive_size)
? 0
: (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n);
memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s);
return s;
}
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
size_t size, mz_uint32 flags) {
if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
pZip->m_archive_size = size;
pZip->m_pRead = mz_zip_mem_read_func;
pZip->m_pIO_opaque = pZip;
#ifdef __cplusplus
pZip->m_pState->m_pMem = const_cast<void *>(pMem);
#else
pZip->m_pState->m_pMem = (void *)pMem;
#endif
pZip->m_pState->m_mem_size = size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
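// Usage sketch (illustrative): open an archive held entirely in memory and
// walk its central directory. pZipData/zip_size are caller-supplied.
#if 0
static void zip_reader_mem_example(const void *pZipData, size_t zip_size) {
  mz_zip_archive zip;
  mz_uint i, n;
  memset(&zip, 0, sizeof(zip));
  if (!mz_zip_reader_init_mem(&zip, pZipData, zip_size, 0)) return;
  n = mz_zip_reader_get_num_files(&zip);
  for (i = 0; i < n; i++) {
    mz_zip_archive_file_stat st;
    if (mz_zip_reader_file_stat(&zip, i, &st)) {
      // st.m_filename, st.m_uncomp_size, st.m_crc32, ... are now populated.
    }
  }
  mz_zip_reader_end(&zip);
}
#endif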
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
if (((mz_int64)file_ofs < 0) ||
(((cur_ofs != (mz_int64)file_ofs)) &&
(MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
return 0;
return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile);
}
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint32 flags) {
mz_uint64 file_size;
MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb");
if (!pFile) return MZ_FALSE;
if (MZ_FSEEK64(pFile, 0, SEEK_END)) {
MZ_FCLOSE(pFile);
return MZ_FALSE;
}
file_size = MZ_FTELL64(pFile);
if (!mz_zip_reader_init_internal(pZip, flags)) {
MZ_FCLOSE(pFile);
return MZ_FALSE;
}
pZip->m_pRead = mz_zip_file_read_func;
pZip->m_pIO_opaque = pZip;
pZip->m_pState->m_pFile = pFile;
pZip->m_archive_size = file_size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) {
return pZip ? pZip->m_total_files : 0;
}
static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh(
mz_zip_archive *pZip, mz_uint file_index) {
if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return NULL;
return &MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
file_index));
}
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
mz_uint file_index) {
mz_uint m_bit_flag;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) return MZ_FALSE;
m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
return (m_bit_flag & 1);
}
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
mz_uint file_index) {
mz_uint filename_len, external_attr;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) return MZ_FALSE;
// First see if the filename ends with a '/' character.
filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
if (filename_len) {
if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/')
return MZ_TRUE;
}
// Bugfix: This code was also checking if the internal attribute was non-zero,
// which wasn't correct.
// Most/all zip writers (hopefully) set DOS file/directory attributes in the
// low 16-bits, so check for the DOS directory flag and ignore the source OS
// ID in the created by field.
// FIXME: Remove this check? Is it necessary - we already check the filename.
external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
if ((external_attr & 0x10) != 0) return MZ_TRUE;
return MZ_FALSE;
}
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
mz_zip_archive_file_stat *pStat) {
mz_uint n;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if ((!p) || (!pStat)) return MZ_FALSE;
// Unpack the central directory record.
pStat->m_file_index = file_index;
pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index);
pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS);
pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS);
pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS);
#ifndef MINIZ_NO_TIME
pStat->m_time =
mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS),
MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS));
#endif
pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS);
pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS);
pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
// Copy as much of the filename and comment as possible.
n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1);
memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
pStat->m_filename[n] = '\0';
n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS);
n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1);
pStat->m_comment_size = n;
memcpy(pStat->m_comment,
p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS),
n);
pStat->m_comment[n] = '\0';
return MZ_TRUE;
}
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
char *pFilename, mz_uint filename_buf_size) {
mz_uint n;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) {
if (filename_buf_size) pFilename[0] = '\0';
return 0;
}
n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
if (filename_buf_size) {
n = MZ_MIN(n, filename_buf_size - 1);
memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
pFilename[n] = '\0';
}
return n + 1;
}
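// Usage sketch: the function above returns the number of bytes needed
// (including the terminating '\0'), so it can be called once with a zero-sized
// buffer to size an allocation and a second time to fill it.
#if 0
static char *zip_dup_filename(mz_zip_archive *pZip, mz_uint file_index) {
  mz_uint needed = mz_zip_reader_get_filename(pZip, file_index, NULL, 0);
  char *pName = needed ? (char *)MZ_MALLOC(needed) : NULL;
  if (pName) mz_zip_reader_get_filename(pZip, file_index, pName, needed);
  return pName;  // caller releases with MZ_FREE()
}
#endif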
static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA,
const char *pB,
mz_uint len,
mz_uint flags) {
mz_uint i;
if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return 0 == memcmp(pA, pB, len);
for (i = 0; i < len; ++i)
if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i])) return MZ_FALSE;
return MZ_TRUE;
}
static MZ_FORCEINLINE int mz_zip_reader_filename_compare(
const mz_zip_array *pCentral_dir_array,
const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR,
mz_uint r_len) {
const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
l_index)),
*pE;
mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
mz_uint8 l = 0, r = 0;
pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pE = pL + MZ_MIN(l_len, r_len);
while (pL < pE) {
if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
pL++;
pR++;
}
return (pL == pE) ? (int)(l_len - r_len) : (l - r);
}
static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip,
const char *pFilename) {
mz_zip_internal_state *pState = pZip->m_pState;
const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
const mz_zip_array *pCentral_dir = &pState->m_central_dir;
mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
&pState->m_sorted_central_dir_offsets, mz_uint32, 0);
const int size = pZip->m_total_files;
const mz_uint filename_len = (mz_uint)strlen(pFilename);
int l = 0, h = size - 1;
while (l <= h) {
int m = (l + h) >> 1, file_index = pIndices[m],
comp =
mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets,
file_index, pFilename, filename_len);
if (!comp)
return file_index;
else if (comp < 0)
l = m + 1;
else
h = m - 1;
}
return -1;
}
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags) {
mz_uint file_index;
size_t name_len, comment_len;
if ((!pZip) || (!pZip->m_pState) || (!pName) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return -1;
if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) &&
(!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
return mz_zip_reader_locate_file_binary_search(pZip, pName);
name_len = strlen(pName);
if (name_len > 0xFFFF) return -1;
comment_len = pComment ? strlen(pComment) : 0;
if (comment_len > 0xFFFF) return -1;
for (file_index = 0; file_index < pZip->m_total_files; file_index++) {
const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
file_index));
mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
const char *pFilename =
(const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
if (filename_len < name_len) continue;
if (comment_len) {
mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS),
file_comment_len =
MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
const char *pFile_comment = pFilename + filename_len + file_extra_len;
if ((file_comment_len != comment_len) ||
(!mz_zip_reader_string_equal(pComment, pFile_comment,
file_comment_len, flags)))
continue;
}
if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) {
int ofs = filename_len - 1;
do {
if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') ||
(pFilename[ofs] == ':'))
break;
} while (--ofs >= 0);
ofs++;
pFilename += ofs;
filename_len -= ofs;
}
if ((filename_len == name_len) &&
(mz_zip_reader_string_equal(pName, pFilename, filename_len, flags)))
return file_index;
}
return -1;
}
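// Usage sketch: case-insensitive lookup by name. With flags == 0 and no
// comment filter, the sorted binary-search fast path above is taken; -1 means
// the entry is not present.
#if 0
static mz_bool zip_has_file(mz_zip_archive *pZip, const char *pName) {
  return mz_zip_reader_locate_file(pZip, pName, NULL, 0) >= 0;
}
#endif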
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
mz_uint file_index, void *pBuf,
size_t buf_size, mz_uint flags,
void *pUser_read_buf,
size_t user_read_buf_size) {
int status = TINFL_STATUS_DONE;
mz_uint64 needed_size, cur_file_ofs, comp_remaining,
out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail;
mz_zip_archive_file_stat file_stat;
void *pRead_buf;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
tinfl_decompressor inflator;
if ((buf_size) && (!pBuf)) return MZ_FALSE;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
// Empty file, or a directory (but not always a directory - I've seen odd zips
// with directories that have compressed data which inflates to 0 bytes)
if (!file_stat.m_comp_size) return MZ_TRUE;
// Entry is a subdirectory (I've seen old zips with dir entries which have
// compressed deflate data which inflates to 0 bytes, but these entries claim
// to uncompress to 512 bytes in the headers).
// I'm torn how to handle this case - should it fail instead?
if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
// Encryption and patch files are not supported.
if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
// This function only supports stored and deflate.
if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
(file_stat.m_method != MZ_DEFLATED))
return MZ_FALSE;
// Ensure supplied output buffer is large enough.
needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size
: file_stat.m_uncomp_size;
if (buf_size < needed_size) return MZ_FALSE;
// Read and parse the local directory entry.
cur_file_ofs = file_stat.m_local_header_ofs;
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
return MZ_FALSE;
if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
// The file is stored or the caller has requested the compressed data.
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
(size_t)needed_size) != needed_size)
return MZ_FALSE;
return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) ||
(mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
(size_t)file_stat.m_uncomp_size) == file_stat.m_crc32);
}
// Decompress the file either directly from memory or from a file input
// buffer.
tinfl_init(&inflator);
if (pZip->m_pState->m_pMem) {
// Read directly from the archive in memory.
pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
read_buf_size = read_buf_avail = file_stat.m_comp_size;
comp_remaining = 0;
} else if (pUser_read_buf) {
// Use a user provided read buffer.
if (!user_read_buf_size) return MZ_FALSE;
pRead_buf = (mz_uint8 *)pUser_read_buf;
read_buf_size = user_read_buf_size;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
} else {
// Temporarily allocate a read buffer.
read_buf_size =
MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
(read_buf_size > 0x7FFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF))
#endif
return MZ_FALSE;
if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
(size_t)read_buf_size)))
return MZ_FALSE;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
}
do {
size_t in_buf_size,
out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs);
if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
read_buf_ofs = 0;
}
in_buf_size = (size_t)read_buf_avail;
status = tinfl_decompress(
&inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
(mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size,
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF |
(comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0));
read_buf_avail -= in_buf_size;
read_buf_ofs += in_buf_size;
out_buf_ofs += out_buf_size;
} while (status == TINFL_STATUS_NEEDS_MORE_INPUT);
if (status == TINFL_STATUS_DONE) {
// Make sure the entire file was decompressed, and check its CRC.
if ((out_buf_ofs != file_stat.m_uncomp_size) ||
(mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
(size_t)file_stat.m_uncomp_size) != file_stat.m_crc32))
status = TINFL_STATUS_FAILED;
}
if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf))
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
return status == TINFL_STATUS_DONE;
}
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
flags, pUser_read_buf,
user_read_buf_size);
}
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
void *pBuf, size_t buf_size,
mz_uint flags) {
return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
flags, NULL, 0);
}
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
const char *pFilename, void *pBuf,
size_t buf_size, mz_uint flags) {
return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf,
buf_size, flags, NULL, 0);
}
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
size_t *pSize, mz_uint flags) {
mz_uint64 comp_size, uncomp_size, alloc_size;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
void *pBuf;
if (pSize) *pSize = 0;
if (!p) return NULL;
comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? comp_size : uncomp_size;
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#endif
return NULL;
if (NULL ==
(pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size)))
return NULL;
if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size,
flags)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return NULL;
}
if (pSize) *pSize = (size_t)alloc_size;
return pBuf;
}
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
const char *pFilename, size_t *pSize,
mz_uint flags) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) {
if (pSize) *pSize = 0;
return MZ_FALSE;
}
return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags);
}
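// Usage sketch (illustrative): extract one entry by name into a heap block.
// Assumes pZip was opened with one of the mz_zip_reader_init_*() calls; the
// entry name is a placeholder.
#if 0
static void zip_extract_example(mz_zip_archive *pZip) {
  size_t size = 0;
  void *pData =
      mz_zip_reader_extract_file_to_heap(pZip, "dir/file.txt", &size, 0);
  if (pData) {
    // ... use the 'size' decompressed bytes at pData ...
    pZip->m_pFree(pZip->m_pAlloc_opaque, pData);  // allocated via pZip->m_pAlloc
  }
}
#endif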
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags) {
int status = TINFL_STATUS_DONE;
mz_uint file_crc32 = MZ_CRC32_INIT;
mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining,
out_buf_ofs = 0, cur_file_ofs;
mz_zip_archive_file_stat file_stat;
void *pRead_buf = NULL;
void *pWrite_buf = NULL;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
// Empty file, or a directory (but not always a directory - I've seen odd zips
// with directories that have compressed data which inflates to 0 bytes)
if (!file_stat.m_comp_size) return MZ_TRUE;
// Entry is a subdirectory (I've seen old zips with dir entries which have
// compressed deflate data which inflates to 0 bytes, but these entries claim
// to uncompress to 512 bytes in the headers).
// I'm torn how to handle this case - should it fail instead?
if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
// Encryption and patch files are not supported.
if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
// This function only supports stored and deflate.
if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
(file_stat.m_method != MZ_DEFLATED))
return MZ_FALSE;
// Read and parse the local directory entry.
cur_file_ofs = file_stat.m_local_header_ofs;
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
return MZ_FALSE;
// Decompress the file either directly from memory or from a file input
// buffer.
if (pZip->m_pState->m_pMem) {
pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
read_buf_size = read_buf_avail = file_stat.m_comp_size;
comp_remaining = 0;
} else {
read_buf_size =
MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
(size_t)read_buf_size)))
return MZ_FALSE;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
}
if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
// The file is stored or the caller has requested the compressed data.
if (pZip->m_pState->m_pMem) {
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
(file_stat.m_comp_size > 0xFFFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) &&
(file_stat.m_comp_size > 0xFFFFFFFF))
#endif
return MZ_FALSE;
if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
(size_t)file_stat.m_comp_size) != file_stat.m_comp_size)
status = TINFL_STATUS_FAILED;
else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
file_crc32 =
(mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf,
(size_t)file_stat.m_comp_size);
cur_file_ofs += file_stat.m_comp_size;
out_buf_ofs += file_stat.m_comp_size;
comp_remaining = 0;
} else {
while (comp_remaining) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
file_crc32 = (mz_uint32)mz_crc32(
file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail);
if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
out_buf_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
}
}
} else {
tinfl_decompressor inflator;
tinfl_init(&inflator);
if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
TINFL_LZ_DICT_SIZE)))
status = TINFL_STATUS_FAILED;
else {
do {
mz_uint8 *pWrite_buf_cur =
(mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
size_t in_buf_size,
out_buf_size =
TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
read_buf_ofs = 0;
}
in_buf_size = (size_t)read_buf_avail;
status = tinfl_decompress(
&inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
(mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size,
comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
read_buf_avail -= in_buf_size;
read_buf_ofs += in_buf_size;
if (out_buf_size) {
if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) !=
out_buf_size) {
status = TINFL_STATUS_FAILED;
break;
}
file_crc32 =
(mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size);
if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) {
status = TINFL_STATUS_FAILED;
break;
}
}
} while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) ||
(status == TINFL_STATUS_HAS_MORE_OUTPUT));
}
}
if ((status == TINFL_STATUS_DONE) &&
(!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) {
// Make sure the entire file was decompressed, and check its CRC.
if ((out_buf_ofs != file_stat.m_uncomp_size) ||
(file_crc32 != file_stat.m_crc32))
status = TINFL_STATUS_FAILED;
}
if (!pZip->m_pState->m_pMem) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
if (pWrite_buf) pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf);
return status == TINFL_STATUS_DONE;
}
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
const char *pFilename,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback, pOpaque,
flags);
}
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs,
const void *pBuf, size_t n) {
(void)ofs;
return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque);
}
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
const char *pDst_filename,
mz_uint flags) {
mz_bool status;
mz_zip_archive_file_stat file_stat;
MZ_FILE *pFile;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
pFile = MZ_FOPEN(pDst_filename, "wb");
if (!pFile) return MZ_FALSE;
status = mz_zip_reader_extract_to_callback(
pZip, file_index, mz_zip_file_write_callback, pFile, flags);
if (MZ_FCLOSE(pFile) == EOF) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
if (status)
mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time);
#endif
return status;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_end(mz_zip_archive *pZip) {
if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return MZ_FALSE;
if (pZip->m_pState) {
mz_zip_internal_state *pState = pZip->m_pState;
pZip->m_pState = NULL;
mz_zip_array_clear(pZip, &pState->m_central_dir);
mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
if (pState->m_pFile) {
MZ_FCLOSE(pState->m_pFile);
pState->m_pFile = NULL;
}
#endif // #ifndef MINIZ_NO_STDIO
pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
}
pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
const char *pArchive_filename,
const char *pDst_filename,
mz_uint flags) {
int file_index =
mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags);
}
#endif
// ------------------- .ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
static void mz_write_le16(mz_uint8 *p, mz_uint16 v) {
p[0] = (mz_uint8)v;
p[1] = (mz_uint8)(v >> 8);
}
static void mz_write_le32(mz_uint8 *p, mz_uint32 v) {
p[0] = (mz_uint8)v;
p[1] = (mz_uint8)(v >> 8);
p[2] = (mz_uint8)(v >> 16);
p[3] = (mz_uint8)(v >> 24);
}
#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v))
#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v))
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) {
if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
return MZ_FALSE;
if (pZip->m_file_offset_alignment) {
// Ensure user specified file offset alignment is a power of 2.
if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1))
return MZ_FALSE;
}
if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
pZip->m_archive_size = existing_size;
pZip->m_central_directory_file_ofs = 0;
pZip->m_total_files = 0;
if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
return MZ_FALSE;
memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
sizeof(mz_uint8));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
sizeof(mz_uint32));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
sizeof(mz_uint32));
return MZ_TRUE;
}
static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_zip_internal_state *pState = pZip->m_pState;
mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);
#ifdef _MSC_VER
if ((!n) ||
((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#else
if ((!n) ||
((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#endif
return 0;
if (new_size > pState->m_mem_capacity) {
void *pNew_block;
size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);
while (new_capacity < new_size) new_capacity *= 2;
if (NULL == (pNew_block = pZip->m_pRealloc(
pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity)))
return 0;
pState->m_pMem = pNew_block;
pState->m_mem_capacity = new_capacity;
}
memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
pState->m_mem_size = (size_t)new_size;
return n;
}
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
size_t size_to_reserve_at_beginning,
size_t initial_allocation_size) {
pZip->m_pWrite = mz_zip_heap_write_func;
pZip->m_pIO_opaque = pZip;
if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size,
size_to_reserve_at_beginning))) {
if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, initial_allocation_size))) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
pZip->m_pState->m_mem_capacity = initial_allocation_size;
}
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
if (((mz_int64)file_ofs < 0) ||
(((cur_ofs != (mz_int64)file_ofs)) &&
(MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
return 0;
return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile);
}
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint64 size_to_reserve_at_beginning) {
MZ_FILE *pFile;
pZip->m_pWrite = mz_zip_file_write_func;
pZip->m_pIO_opaque = pZip;
if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
pZip->m_pState->m_pFile = pFile;
if (size_to_reserve_at_beginning) {
mz_uint64 cur_ofs = 0;
char buf[4096];
MZ_CLEAR_OBJ(buf);
do {
size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
cur_ofs += n;
size_to_reserve_at_beginning -= n;
} while (size_to_reserve_at_beginning);
}
return MZ_TRUE;
}
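// Usage sketch (illustrative): create a .zip on disk with a single entry.
// Assumes the mz_zip_writer_finalize_archive()/mz_zip_writer_end() helpers
// defined later in this file; the archive name and payload are placeholders.
#if 0
static mz_bool zip_write_file_example(void) {
  mz_zip_archive zip;
  mz_bool ok;
  const char *pPayload = "hello from miniz";
  memset(&zip, 0, sizeof(zip));
  if (!mz_zip_writer_init_file(&zip, "example.zip", 0)) return MZ_FALSE;
  if (!mz_zip_writer_add_mem(&zip, "hello.txt", pPayload, strlen(pPayload),
                             MZ_BEST_COMPRESSION)) {
    mz_zip_writer_end(&zip);
    return MZ_FALSE;
  }
  ok = mz_zip_writer_finalize_archive(&zip);
  mz_zip_writer_end(&zip);
  return ok;
}
#endif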
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
const char *pFilename) {
mz_zip_internal_state *pState;
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return MZ_FALSE;
// No sense in trying to write to an archive that's already at the support max
// size
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
return MZ_FALSE;
pState = pZip->m_pState;
if (pState->m_pFile) {
#ifdef MINIZ_NO_STDIO
(void)pFilename;
return MZ_FALSE;
#else
// Archive is being read from stdio - try to reopen as writable.
if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
if (!pFilename) return MZ_FALSE;
pZip->m_pWrite = mz_zip_file_write_func;
if (NULL ==
(pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) {
// The mz_zip_archive is now in a bogus state because pState->m_pFile is
// NULL, so just close it.
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
#endif // #ifdef MINIZ_NO_STDIO
} else if (pState->m_pMem) {
// Archive lives in a memory block. Assume it's a heap block that we can
// resize using the realloc callback.
if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
pState->m_mem_capacity = pState->m_mem_size;
pZip->m_pWrite = mz_zip_heap_write_func;
}
// Archive is being read via a user provided read function - make sure the
// user has specified a write function too.
else if (!pZip->m_pWrite)
return MZ_FALSE;
// Start writing new files at the archive's current central directory
// location.
pZip->m_archive_size = pZip->m_central_directory_file_ofs;
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
pZip->m_central_directory_file_ofs = 0;
return MZ_TRUE;
}
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
const void *pBuf, size_t buf_size,
mz_uint level_and_flags) {
return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0,
level_and_flags, 0, 0);
}
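// Usage sketch (illustrative): build an archive entirely in memory. Assumes
// mz_zip_writer_finalize_heap_archive() (defined later in this file), which
// hands ownership of the finished archive block to the caller.
#if 0
static void zip_write_heap_example(void) {
  mz_zip_archive zip;
  void *pZipData = NULL;
  size_t zip_size = 0;
  memset(&zip, 0, sizeof(zip));
  if (!mz_zip_writer_init_heap(&zip, 0, 64 * 1024)) return;
  mz_zip_writer_add_mem(&zip, "a.txt", "AAA", 3, (mz_uint)MZ_DEFAULT_LEVEL);
  mz_zip_writer_add_mem(&zip, "b.txt", "BBB", 3, (mz_uint)MZ_DEFAULT_LEVEL);
  if (mz_zip_writer_finalize_heap_archive(&zip, &pZipData, &zip_size)) {
    // ... pZipData/zip_size now hold a complete .zip image ...
    MZ_FREE(pZipData);  // with the default allocators
  }
  mz_zip_writer_end(&zip);
}
#endif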
typedef struct {
mz_zip_archive *m_pZip;
mz_uint64 m_cur_archive_file_ofs;
mz_uint64 m_comp_size;
} mz_zip_writer_add_state;
static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len,
void *pUser) {
mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque,
pState->m_cur_archive_file_ofs, pBuf,
len) != len)
return MZ_FALSE;
pState->m_cur_archive_file_ofs += len;
pState->m_comp_size += len;
return MZ_TRUE;
}
static mz_bool mz_zip_writer_create_local_dir_header(
mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
mz_uint16 dos_time, mz_uint16 dos_date) {
(void)pZip;
memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 20 : 0);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size);
return MZ_TRUE;
}
static mz_bool mz_zip_writer_create_central_dir_header(
mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size,
mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method,
mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date,
mz_uint64 local_header_ofs, mz_uint32 ext_attributes) {
(void)pZip;
memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs);
return MZ_TRUE;
}
static mz_bool mz_zip_writer_add_to_central_dir(
mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size,
const void *pExtra, mz_uint16 extra_size, const void *pComment,
mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs,
mz_uint32 ext_attributes) {
mz_zip_internal_state *pState = pZip->m_pState;
mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size;
size_t orig_central_dir_size = pState->m_central_dir.m_size;
mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
// No zip64 support yet
if ((local_header_ofs > 0xFFFFFFFF) ||
(((mz_uint64)pState->m_central_dir.m_size +
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size +
comment_size) > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_central_dir_header(
pZip, central_dir_header, filename_size, extra_size, comment_size,
uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time,
dos_date, local_header_ofs, ext_attributes))
return MZ_FALSE;
if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename,
filename_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra,
extra_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment,
comment_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets,
&central_dir_ofs, 1))) {
// Try to push the central directory array back into its original state.
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
return MZ_TRUE;
}
static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) {
// Basic ZIP archive filename validity checks: Valid filenames cannot start
// with a forward slash, cannot contain a drive letter, and cannot use
// DOS-style backward slashes.
if (*pArchive_name == '/') return MZ_FALSE;
while (*pArchive_name) {
if ((*pArchive_name == '\\') || (*pArchive_name == ':')) return MZ_FALSE;
pArchive_name++;
}
return MZ_TRUE;
}
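// Illustrative examples of the rules above (not part of the library):
//   "textures/diffuse.png"  -> accepted (relative path, forward slashes only)
//   "/textures/diffuse.png" -> rejected (leading forward slash)
//   "C:\\textures\\a.png"   -> rejected (drive letter / backslashes)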
static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment(
mz_zip_archive *pZip) {
mz_uint32 n;
if (!pZip->m_file_offset_alignment) return 0;
n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1));
return (pZip->m_file_offset_alignment - n) &
(pZip->m_file_offset_alignment - 1);
}
static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip,
mz_uint64 cur_file_ofs, mz_uint32 n) {
char buf[4096];
memset(buf, 0, MZ_MIN(sizeof(buf), n));
while (n) {
mz_uint32 s = MZ_MIN(sizeof(buf), n);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s)
return MZ_FALSE;
cur_file_ofs += s;
n -= s;
}
return MZ_TRUE;
}
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags, mz_uint64 uncomp_size,
mz_uint32 uncomp_crc32) {
mz_uint16 method = 0, dos_time = 0, dos_date = 0;
mz_uint level, ext_attributes = 0, num_alignment_padding_bytes;
mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0;
size_t archive_name_size;
mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
tdefl_compressor *pComp = NULL;
mz_bool store_data_uncompressed;
mz_zip_internal_state *pState;
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
level = level_and_flags & 0xF;
store_data_uncompressed =
((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA));
if ((!pZip) || (!pZip->m_pState) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) ||
(!pArchive_name) || ((comment_size) && (!pComment)) ||
(pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION))
return MZ_FALSE;
pState = pZip->m_pState;
if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size))
return MZ_FALSE;
// No zip64 support yet
if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
{
time_t cur_time;
time(&cur_time);
mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date);
}
#endif // #ifndef MINIZ_NO_TIME
archive_name_size = strlen(pArchive_name);
if (archive_name_size > 0xFFFF) return MZ_FALSE;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
comment_size + archive_name_size) > 0xFFFFFFFF))
return MZ_FALSE;
if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) {
// Set DOS Subdirectory attribute bit.
ext_attributes |= 0x10;
// Subdirectories cannot contain data.
if ((buf_size) || (uncomp_size)) return MZ_FALSE;
}
// Try to do any allocations before writing to the archive, so if an
// allocation fails the file remains unmodified. (A good idea if we're doing
// an in-place modification.)
if ((!mz_zip_array_ensure_room(
pZip, &pState->m_central_dir,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) ||
(!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1)))
return MZ_FALSE;
if ((!store_data_uncompressed) && (buf_size)) {
if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor))))
return MZ_FALSE;
}
if (!mz_zip_writer_write_zeros(
pZip, cur_archive_file_ofs,
num_alignment_padding_bytes + sizeof(local_dir_header))) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
local_dir_header_ofs += num_alignment_padding_bytes;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
cur_archive_file_ofs +=
num_alignment_padding_bytes + sizeof(local_dir_header);
MZ_CLEAR_OBJ(local_dir_header);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
archive_name_size) != archive_name_size) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
cur_archive_file_ofs += archive_name_size;
if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) {
uncomp_crc32 =
(mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size);
uncomp_size = buf_size;
if (uncomp_size <= 3) {
level = 0;
store_data_uncompressed = MZ_TRUE;
}
}
if (store_data_uncompressed) {
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf,
buf_size) != buf_size) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
cur_archive_file_ofs += buf_size;
comp_size = buf_size;
if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED;
} else if (buf_size) {
mz_zip_writer_add_state state;
state.m_pZip = pZip;
state.m_cur_archive_file_ofs = cur_archive_file_ofs;
state.m_comp_size = 0;
if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
tdefl_create_comp_flags_from_zip_params(
level, -15, MZ_DEFAULT_STRATEGY)) !=
TDEFL_STATUS_OKAY) ||
(tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) !=
TDEFL_STATUS_DONE)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
comp_size = state.m_comp_size;
cur_archive_file_ofs = state.m_cur_archive_file_ofs;
method = MZ_DEFLATED;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
pComp = NULL;
// no zip64 support yet
if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_local_dir_header(
pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
return MZ_FALSE;
if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
sizeof(local_dir_header)) != sizeof(local_dir_header))
return MZ_FALSE;
if (!mz_zip_writer_add_to_central_dir(
pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
dos_time, dos_date, local_dir_header_ofs, ext_attributes))
return MZ_FALSE;
pZip->m_total_files++;
pZip->m_archive_size = cur_archive_file_ofs;
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
const char *pSrc_filename, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags) {
mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes;
mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0;
mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = 0,
comp_size = 0;
size_t archive_name_size;
mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
MZ_FILE *pSrc_file = NULL;
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
level = level_and_flags & 0xF;
if ((!pZip) || (!pZip->m_pState) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) ||
((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
return MZ_FALSE;
if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
archive_name_size = strlen(pArchive_name);
if (archive_name_size > 0xFFFF) return MZ_FALSE;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
comment_size + archive_name_size) > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date))
return MZ_FALSE;
pSrc_file = MZ_FOPEN(pSrc_filename, "rb");
if (!pSrc_file) return MZ_FALSE;
MZ_FSEEK64(pSrc_file, 0, SEEK_END);
uncomp_size = MZ_FTELL64(pSrc_file);
MZ_FSEEK64(pSrc_file, 0, SEEK_SET);
if (uncomp_size > 0xFFFFFFFF) {
// No zip64 support yet
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
if (uncomp_size <= 3) level = 0;
if (!mz_zip_writer_write_zeros(
pZip, cur_archive_file_ofs,
num_alignment_padding_bytes + sizeof(local_dir_header))) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
local_dir_header_ofs += num_alignment_padding_bytes;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
cur_archive_file_ofs +=
num_alignment_padding_bytes + sizeof(local_dir_header);
MZ_CLEAR_OBJ(local_dir_header);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
archive_name_size) != archive_name_size) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
cur_archive_file_ofs += archive_name_size;
if (uncomp_size) {
mz_uint64 uncomp_remaining = uncomp_size;
void *pRead_buf =
pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE);
if (!pRead_buf) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
if (!level) {
while (uncomp_remaining) {
mz_uint n =
(mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining);
if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) ||
(pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf,
n) != n)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
uncomp_crc32 =
(mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n);
uncomp_remaining -= n;
cur_archive_file_ofs += n;
}
comp_size = uncomp_size;
} else {
mz_bool result = MZ_FALSE;
mz_zip_writer_add_state state;
tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor));
if (!pComp) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
state.m_pZip = pZip;
state.m_cur_archive_file_ofs = cur_archive_file_ofs;
state.m_comp_size = 0;
if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
tdefl_create_comp_flags_from_zip_params(
level, -15, MZ_DEFAULT_STRATEGY)) !=
TDEFL_STATUS_OKAY) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
for (;;) {
size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining,
(mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
tdefl_status status;
if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size)
break;
uncomp_crc32 = (mz_uint32)mz_crc32(
uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size);
uncomp_remaining -= in_buf_size;
status = tdefl_compress_buffer(
pComp, pRead_buf, in_buf_size,
uncomp_remaining ? TDEFL_NO_FLUSH : TDEFL_FINISH);
if (status == TDEFL_STATUS_DONE) {
result = MZ_TRUE;
break;
} else if (status != TDEFL_STATUS_OKAY)
break;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
if (!result) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
comp_size = state.m_comp_size;
cur_archive_file_ofs = state.m_cur_archive_file_ofs;
method = MZ_DEFLATED;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
}
MZ_FCLOSE(pSrc_file);
pSrc_file = NULL;
// no zip64 support yet
if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_local_dir_header(
pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
return MZ_FALSE;
if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
sizeof(local_dir_header)) != sizeof(local_dir_header))
return MZ_FALSE;
if (!mz_zip_writer_add_to_central_dir(
pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
dos_time, dos_date, local_dir_header_ofs, ext_attributes))
return MZ_FALSE;
pZip->m_total_files++;
pZip->m_archive_size = cur_archive_file_ofs;
return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
mz_zip_archive *pSource_zip,
mz_uint file_index) {
mz_uint n, bit_flags, num_alignment_padding_bytes;
mz_uint64 comp_bytes_remaining, local_dir_header_ofs;
mz_uint64 cur_src_file_ofs, cur_dst_file_ofs;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
size_t orig_central_dir_size;
mz_zip_internal_state *pState;
void *pBuf;
const mz_uint8 *pSrc_central_header;
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
return MZ_FALSE;
if (NULL ==
(pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index)))
return MZ_FALSE;
pState = pZip->m_pState;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) >
0xFFFFFFFF))
return MZ_FALSE;
cur_src_file_ofs =
MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
cur_dst_file_ofs = pZip->m_archive_size;
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs,
pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs,
num_alignment_padding_bytes))
return MZ_FALSE;
cur_dst_file_ofs += num_alignment_padding_bytes;
local_dir_header_ofs = cur_dst_file_ofs;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
comp_bytes_remaining =
n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
if (NULL == (pBuf = pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1,
(size_t)MZ_MAX(sizeof(mz_uint32) * 4,
MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE,
comp_bytes_remaining)))))
return MZ_FALSE;
while (comp_bytes_remaining) {
n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining);
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_src_file_ofs += n;
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_dst_file_ofs += n;
comp_bytes_remaining -= n;
}
bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS);
if (bit_flags & 8) {
// Copy data descriptor
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_src_file_ofs += n;
cur_dst_file_ofs += n;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
// no zip64 support yet
if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE;
orig_central_dir_size = pState->m_central_dir.m_size;
memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS,
local_dir_header_ofs);
if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE))
return MZ_FALSE;
n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS);
if (!mz_zip_array_push_back(
pZip, &pState->m_central_dir,
pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) {
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE;
n = (mz_uint32)orig_central_dir_size;
if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) {
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
pZip->m_total_files++;
pZip->m_archive_size = cur_dst_file_ofs;
return MZ_TRUE;
}
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) {
mz_zip_internal_state *pState;
mz_uint64 central_dir_ofs, central_dir_size;
mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE];
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
return MZ_FALSE;
pState = pZip->m_pState;
// no zip64 support yet
if ((pZip->m_total_files > 0xFFFF) ||
((pZip->m_archive_size + pState->m_central_dir.m_size +
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
return MZ_FALSE;
central_dir_ofs = 0;
central_dir_size = 0;
if (pZip->m_total_files) {
// Write central directory
central_dir_ofs = pZip->m_archive_size;
central_dir_size = pState->m_central_dir.m_size;
pZip->m_central_directory_file_ofs = central_dir_ofs;
if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs,
pState->m_central_dir.m_p,
(size_t)central_dir_size) != central_dir_size)
return MZ_FALSE;
pZip->m_archive_size += central_dir_size;
}
// Write end of central directory record
MZ_CLEAR_OBJ(hdr);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS,
pZip->m_total_files);
MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs);
if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr,
sizeof(hdr)) != sizeof(hdr))
return MZ_FALSE;
#ifndef MINIZ_NO_STDIO
if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE;
#endif // #ifndef MINIZ_NO_STDIO
pZip->m_archive_size += sizeof(hdr);
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED;
return MZ_TRUE;
}
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
size_t *pSize) {
if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize)) return MZ_FALSE;
if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE;
if (!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE;
*pBuf = pZip->m_pState->m_pMem;
*pSize = pZip->m_pState->m_mem_size;
pZip->m_pState->m_pMem = NULL;
pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0;
return MZ_TRUE;
}
mz_bool mz_zip_writer_end(mz_zip_archive *pZip) {
mz_zip_internal_state *pState;
mz_bool status = MZ_TRUE;
if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) &&
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED)))
return MZ_FALSE;
pState = pZip->m_pState;
pZip->m_pState = NULL;
mz_zip_array_clear(pZip, &pState->m_central_dir);
mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
if (pState->m_pFile) {
MZ_FCLOSE(pState->m_pFile);
pState->m_pFile = NULL;
}
#endif // #ifndef MINIZ_NO_STDIO
if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem);
pState->m_pMem = NULL;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
return status;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_add_mem_to_archive_file_in_place(
const char *pZip_filename, const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment, mz_uint16 comment_size,
mz_uint level_and_flags) {
mz_bool status, created_new_archive = MZ_FALSE;
mz_zip_archive zip_archive;
struct MZ_FILE_STAT_STRUCT file_stat;
MZ_CLEAR_OBJ(zip_archive);
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) ||
((comment_size) && (!pComment)) ||
((level_and_flags & 0xF) > MZ_UBER_COMPRESSION))
return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) {
// Create a new archive.
if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0))
return MZ_FALSE;
created_new_archive = MZ_TRUE;
} else {
// Append to an existing archive.
if (!mz_zip_reader_init_file(
&zip_archive, pZip_filename,
level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
return MZ_FALSE;
if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) {
mz_zip_reader_end(&zip_archive);
return MZ_FALSE;
}
}
status =
mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size,
pComment, comment_size, level_and_flags, 0, 0);
// Always finalize, even if adding failed for some reason, so we have a valid
// central directory. (This may not always succeed, but we can try.)
if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE;
if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE;
if ((!status) && (created_new_archive)) {
// It's a new archive and something went wrong, so just delete it.
int ignoredStatus = MZ_DELETE_FILE(pZip_filename);
(void)ignoredStatus;
}
return status;
}
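// A minimal usage sketch for the in-place helper above (illustrative only and
// kept out of the build; the file name, archive name, and payload are made up):
#if 0
static void add_mem_in_place_example() {
  const char *payload = "hello, zip";
  // Creates "example.zip" if it does not exist, otherwise appends to it.
  mz_bool ok = mz_zip_add_mem_to_archive_file_in_place(
      "example.zip", "greeting.txt", payload, strlen(payload),
      /*pComment=*/NULL, /*comment_size=*/0, MZ_BEST_COMPRESSION);
  (void)ok;
}
#endif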
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint flags) {
int file_index;
mz_zip_archive zip_archive;
void *p = NULL;
if (pSize) *pSize = 0;
if ((!pZip_filename) || (!pArchive_name)) return NULL;
MZ_CLEAR_OBJ(zip_archive);
if (!mz_zip_reader_init_file(
&zip_archive, pZip_filename,
flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
return NULL;
if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL,
flags)) >= 0)
p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags);
mz_zip_reader_end(&zip_archive);
return p;
}
#endif // #ifndef MINIZ_NO_STDIO
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef __cplusplus
}
#endif
#ifdef _MSC_VER
#pragma warning(pop)
#endif
#endif // MINIZ_HEADER_FILE_ONLY
/*
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>
*/
// ---------------------- end of miniz ----------------------------------------
#ifdef __clang__
#pragma clang diagnostic pop
#endif
} // namespace miniz
#else
// Reuse MINIZ_LITTLE_ENDIAN macro
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__i386) || defined(__i486__) || defined(__i486) || \
defined(i386) || defined(__ia64__) || defined(__x86_64__)
// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
#define MINIZ_X86_OR_X64_CPU 1
#endif
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#endif // TINYEXR_USE_MINIZ
// static bool IsBigEndian(void) {
// union {
// unsigned int i;
// char c[4];
// } bint = {0x01020304};
//
// return bint.c[0] == 1;
//}
static void SetErrorMessage(const std::string &msg, const char **err) {
if (err) {
#ifdef _WIN32
(*err) = _strdup(msg.c_str());
#else
(*err) = strdup(msg.c_str());
#endif
}
}
static const int kEXRVersionSize = 8;
static void cpy2(unsigned short *dst_val, const unsigned short *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
}
static void swap2(unsigned short *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
unsigned short tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[1];
dst[1] = src[0];
#endif
}
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-function"
#endif
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-function"
#endif
static void cpy4(int *dst_val, const int *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
static void cpy4(unsigned int *dst_val, const unsigned int *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
static void cpy4(float *dst_val, const float *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
static void swap4(unsigned int *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
unsigned int tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[3];
dst[1] = src[2];
dst[2] = src[1];
dst[3] = src[0];
#endif
}
static void swap4(int *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
int tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[3];
dst[1] = src[2];
dst[2] = src[1];
dst[3] = src[0];
#endif
}
static void swap4(float *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
float tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[3];
dst[1] = src[2];
dst[2] = src[1];
dst[3] = src[0];
#endif
}
#if 0
static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
dst[4] = src[4];
dst[5] = src[5];
dst[6] = src[6];
dst[7] = src[7];
}
#endif
static void swap8(tinyexr::tinyexr_uint64 *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
tinyexr::tinyexr_uint64 tmp = (*val);
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[7];
dst[1] = src[6];
dst[2] = src[5];
dst[3] = src[4];
dst[4] = src[3];
dst[5] = src[2];
dst[6] = src[1];
dst[7] = src[0];
#endif
}
// https://gist.github.com/rygorous/2156668
// Reuse MINIZ_LITTLE_ENDIAN flag from miniz.
union FP32 {
unsigned int u;
float f;
struct {
#if MINIZ_LITTLE_ENDIAN
unsigned int Mantissa : 23;
unsigned int Exponent : 8;
unsigned int Sign : 1;
#else
unsigned int Sign : 1;
unsigned int Exponent : 8;
unsigned int Mantissa : 23;
#endif
} s;
};
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif
union FP16 {
unsigned short u;
struct {
#if MINIZ_LITTLE_ENDIAN
unsigned int Mantissa : 10;
unsigned int Exponent : 5;
unsigned int Sign : 1;
#else
unsigned int Sign : 1;
unsigned int Exponent : 5;
unsigned int Mantissa : 10;
#endif
} s;
};
#ifdef __clang__
#pragma clang diagnostic pop
#endif
static FP32 half_to_float(FP16 h) {
static const FP32 magic = {113 << 23};
static const unsigned int shifted_exp = 0x7c00
<< 13; // exponent mask after shift
FP32 o;
o.u = (h.u & 0x7fffU) << 13U; // exponent/mantissa bits
unsigned int exp_ = shifted_exp & o.u; // just the exponent
o.u += (127 - 15) << 23; // exponent adjust
// handle exponent special cases
if (exp_ == shifted_exp) // Inf/NaN?
o.u += (128 - 16) << 23; // extra exp adjust
else if (exp_ == 0) // Zero/Denormal?
{
o.u += 1 << 23; // extra exp adjust
o.f -= magic.f; // renormalize
}
o.u |= (h.u & 0x8000U) << 16U; // sign bit
return o;
}
static FP16 float_to_half_full(FP32 f) {
FP16 o = {0};
// Based on ISPC reference code (with minor modifications)
if (f.s.Exponent == 0) // Signed zero/denormal (which will underflow)
o.s.Exponent = 0;
else if (f.s.Exponent == 255) // Inf or NaN (all exponent bits set)
{
o.s.Exponent = 31;
o.s.Mantissa = f.s.Mantissa ? 0x200 : 0; // NaN->qNaN and Inf->Inf
} else // Normalized number
{
// Un-bias the single-precision exponent, then re-bias it for half precision
int newexp = f.s.Exponent - 127 + 15;
if (newexp >= 31) // Overflow, return signed infinity
o.s.Exponent = 31;
else if (newexp <= 0) // Underflow
{
if ((14 - newexp) <= 24) // Mantissa might be non-zero
{
unsigned int mant = f.s.Mantissa | 0x800000; // Hidden 1 bit
o.s.Mantissa = mant >> (14 - newexp);
if ((mant >> (13 - newexp)) & 1) // Check for rounding
o.u++; // Round, might overflow into exp bit, but this is OK
}
} else {
o.s.Exponent = static_cast<unsigned int>(newexp);
o.s.Mantissa = f.s.Mantissa >> 13;
if (f.s.Mantissa & 0x1000) // Check for rounding
o.u++; // Round, might overflow to inf, this is OK
}
}
o.s.Sign = f.s.Sign;
return o;
}
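// A tiny round-trip sketch for the two helpers above (illustrative only, kept
// out of the build). 0.5f is exactly representable in half precision, so the
// conversion should be lossless; 0x3800 is the expected half bit pattern.
#if 0
static void half_round_trip_example() {
  FP32 f;
  f.f = 0.5f;
  FP16 h = float_to_half_full(f);  // expected: h.u == 0x3800
  FP32 back = half_to_float(h);
  assert(back.f == 0.5f);
}
#endif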
// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RANDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7
#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif
static const char *ReadString(std::string *s, const char *ptr, size_t len) {
// Read until NULL ('\0').
const char *p = ptr;
const char *q = ptr;
while ((size_t(q - ptr) < len) && (*q) != 0) {
q++;
}
if (size_t(q - ptr) >= len) {
(*s) = std::string();
return NULL;
}
(*s) = std::string(p, q);
return q + 1; // skip '\0'
}
static bool ReadAttribute(std::string *name, std::string *type,
std::vector<unsigned char> *data, size_t *marker_size,
const char *marker, size_t size) {
size_t name_len = strnlen(marker, size);
if (name_len == size) {
// String does not have a terminating character.
return false;
}
*name = std::string(marker, name_len);
marker += name_len + 1;
size -= name_len + 1;
size_t type_len = strnlen(marker, size);
if (type_len == size) {
return false;
}
*type = std::string(marker, type_len);
marker += type_len + 1;
size -= type_len + 1;
if (size < sizeof(uint32_t)) {
return false;
}
uint32_t data_len;
memcpy(&data_len, marker, sizeof(uint32_t));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (data_len == 0) {
if ((*type).compare("string") == 0) {
// Accept empty string attribute.
marker += sizeof(uint32_t);
size -= sizeof(uint32_t);
*marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t);
data->resize(1);
(*data)[0] = '\0';
return true;
} else {
return false;
}
}
marker += sizeof(uint32_t);
size -= sizeof(uint32_t);
if (size < data_len) {
return false;
}
data->resize(static_cast<size_t>(data_len));
memcpy(&data->at(0), marker, static_cast<size_t>(data_len));
*marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
return true;
}
static void WriteAttributeToMemory(std::vector<unsigned char> *out,
const char *name, const char *type,
const unsigned char *data, int len) {
out->insert(out->end(), name, name + strlen(name) + 1);
out->insert(out->end(), type, type + strlen(type) + 1);
int outLen = len;
tinyexr::swap4(&outLen);
out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen),
reinterpret_cast<unsigned char *>(&outLen) + sizeof(int));
out->insert(out->end(), data, data + len);
}
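// For reference, the serialized attribute layout produced/consumed above is:
//   <name bytes> '\0' <type bytes> '\0' <4-byte little-endian length> <data>
// e.g. (illustrative) a one-byte "compression"-typed attribute is written as
//   "compression\0compression\0" 01 00 00 00 <data byte>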
typedef struct {
std::string name; // less than 255 bytes long
int pixel_type;
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3];
} ChannelInfo;
typedef struct {
int min_x;
int min_y;
int max_x;
int max_y;
} Box2iInfo;
struct HeaderInfo {
std::vector<tinyexr::ChannelInfo> channels;
std::vector<EXRAttribute> attributes;
Box2iInfo data_window;
int line_order;
Box2iInfo display_window;
float screen_window_center[2];
float screen_window_width;
float pixel_aspect_ratio;
int chunk_count;
// Tiled format
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
unsigned int header_len;
int compression_type;
void clear() {
channels.clear();
attributes.clear();
data_window.min_x = 0;
data_window.min_y = 0;
data_window.max_x = 0;
data_window.max_y = 0;
line_order = 0;
display_window.min_x = 0;
display_window.min_y = 0;
display_window.max_x = 0;
display_window.max_y = 0;
screen_window_center[0] = 0.0f;
screen_window_center[1] = 0.0f;
screen_window_width = 0.0f;
pixel_aspect_ratio = 0.0f;
chunk_count = 0;
// Tiled format
tile_size_x = 0;
tile_size_y = 0;
tile_level_mode = 0;
tile_rounding_mode = 0;
header_len = 0;
compression_type = 0;
}
};
static bool ReadChannelInfo(std::vector<ChannelInfo> &channels,
const std::vector<unsigned char> &data) {
const char *p = reinterpret_cast<const char *>(&data.at(0));
for (;;) {
if ((*p) == 0) {
break;
}
ChannelInfo info;
tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) -
(p - reinterpret_cast<const char *>(data.data()));
if (data_len < 0) {
return false;
}
p = ReadString(&info.name, p, size_t(data_len));
if ((p == NULL) && (info.name.empty())) {
// Buffer overrun. Issue #51.
return false;
}
const unsigned char *data_end =
reinterpret_cast<const unsigned char *>(p) + 16;
if (data_end >= (data.data() + data.size())) {
return false;
}
memcpy(&info.pixel_type, p, sizeof(int));
p += 4;
info.p_linear = static_cast<unsigned char>(p[0]); // uchar
p += 1 + 3; // reserved: uchar[3]
memcpy(&info.x_sampling, p, sizeof(int)); // int
p += 4;
memcpy(&info.y_sampling, p, sizeof(int)); // int
p += 4;
tinyexr::swap4(&info.pixel_type);
tinyexr::swap4(&info.x_sampling);
tinyexr::swap4(&info.y_sampling);
channels.push_back(info);
}
return true;
}
static void WriteChannelInfo(std::vector<unsigned char> &data,
const std::vector<ChannelInfo> &channels) {
size_t sz = 0;
// Calculate total size.
for (size_t c = 0; c < channels.size(); c++) {
sz += strlen(channels[c].name.c_str()) + 1; // +1 for \0
sz += 16; // 4 * int
}
data.resize(sz + 1);
unsigned char *p = &data.at(0);
for (size_t c = 0; c < channels.size(); c++) {
memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str()));
p += strlen(channels[c].name.c_str());
(*p) = '\0';
p++;
int pixel_type = channels[c].pixel_type;
int x_sampling = channels[c].x_sampling;
int y_sampling = channels[c].y_sampling;
tinyexr::swap4(&pixel_type);
tinyexr::swap4(&x_sampling);
tinyexr::swap4(&y_sampling);
memcpy(p, &pixel_type, sizeof(int));
p += sizeof(int);
(*p) = channels[c].p_linear;
p += 4;
memcpy(p, &x_sampling, sizeof(int));
p += sizeof(int);
memcpy(p, &y_sampling, sizeof(int));
p += sizeof(int);
}
(*p) = '\0';
}
static void CompressZip(unsigned char *dst,
tinyexr::tinyexr_uint64 &compressedSize,
const unsigned char *src, unsigned long src_size) {
std::vector<unsigned char> tmpBuf(src_size);
//
// Apply the EXR-specific pre-process before deflating. Grabbed from OpenEXR's
// ImfZipCompressor.cpp
//
//
// Reorder the pixel data.
//
const char *srcPtr = reinterpret_cast<const char *>(src);
{
char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
const char *stop = srcPtr + src_size;
for (;;) {
if (srcPtr < stop)
*(t1++) = *(srcPtr++);
else
break;
if (srcPtr < stop)
*(t2++) = *(srcPtr++);
else
break;
}
}
//
// Predictor.
//
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + src_size;
int p = t[-1];
while (t < stop) {
int d = int(t[0]) - p + (128 + 256);
p = t[0];
t[0] = static_cast<unsigned char>(d);
++t;
}
}
#if TINYEXR_USE_MINIZ
//
// Compress the data using miniz
//
miniz::mz_ulong outSize = miniz::mz_compressBound(src_size);
int ret = miniz::mz_compress(
dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
src_size);
assert(ret == miniz::MZ_OK);
(void)ret;
compressedSize = outSize;
#else
uLong outSize = compressBound(static_cast<uLong>(src_size));
int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
src_size);
assert(ret == Z_OK);
compressedSize = outSize;
#endif
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if (compressedSize >= src_size) {
compressedSize = src_size;
memcpy(dst, src, src_size);
}
}
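// Worked example of the delta predictor above (illustrative): if the reordered
// bytes are {10, 20, 25}, encoding keeps the first byte and stores
//   20 - 10 + 384 = 394 -> 138 (mod 256) and 25 - 20 + 384 = 389 -> 133,
// giving {10, 138, 133}. The decoder below reverses this with
//   10 + 138 - 128 = 20 and 20 + 133 - 128 = 25.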
static bool DecompressZip(unsigned char *dst,
unsigned long *uncompressed_size /* inout */,
const unsigned char *src, unsigned long src_size) {
if ((*uncompressed_size) == src_size) {
// Data is not compressed(Issue 40).
memcpy(dst, src, src_size);
return true;
}
std::vector<unsigned char> tmpBuf(*uncompressed_size);
#if TINYEXR_USE_MINIZ
int ret =
miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
if (miniz::MZ_OK != ret) {
return false;
}
#else
int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
if (Z_OK != ret) {
return false;
}
#endif
//
// Apply the EXR-specific post-process after inflating. Grabbed from OpenEXR's
// ImfZipCompressor.cpp
//
// Predictor.
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);
while (t < stop) {
int d = int(t[-1]) + int(t[0]) - 128;
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// Reorder the pixel data.
{
const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
(*uncompressed_size + 1) / 2;
char *s = reinterpret_cast<char *>(dst);
char *stop = s + (*uncompressed_size);
for (;;) {
if (s < stop)
*(s++) = *(t1++);
else
break;
if (s < stop)
*(s++) = *(t2++);
else
break;
}
}
return true;
}
// RLE code from OpenEXR --------------------------------------
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;
//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//
static int rleCompress(int inLength, const char in[], signed char out[]) {
const char *inEnd = in + inLength;
const char *runStart = in;
const char *runEnd = in + 1;
signed char *outWrite = out;
while (runStart < inEnd) {
while (runEnd < inEnd && *runStart == *runEnd &&
runEnd - runStart - 1 < MAX_RUN_LENGTH) {
++runEnd;
}
if (runEnd - runStart >= MIN_RUN_LENGTH) {
//
// Compressible run
//
*outWrite++ = static_cast<char>(runEnd - runStart) - 1;
*outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
runStart = runEnd;
} else {
//
// Uncompressible run
//
while (runEnd < inEnd &&
((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
(runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
runEnd - runStart < MAX_RUN_LENGTH) {
++runEnd;
}
*outWrite++ = static_cast<char>(runStart - runEnd);
while (runStart < runEnd) {
*outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
}
}
++runEnd;
}
return static_cast<int>(outWrite - out);
}
//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//
static int rleUncompress(int inLength, int maxLength, const signed char in[],
char out[]) {
char *outStart = out;
while (inLength > 0) {
if (*in < 0) {
int count = -(static_cast<int>(*in++));
inLength -= count + 1;
// Fixes #116: Add bounds check to in buffer.
if ((0 > (maxLength -= count)) || (inLength < 0)) return 0;
memcpy(out, in, count);
out += count;
in += count;
} else {
int count = *in++;
inLength -= 2;
if (0 > (maxLength -= count + 1)) return 0;
memset(out, *reinterpret_cast<const char *>(in), count + 1);
out += count + 1;
in++;
}
}
return static_cast<int>(out - outStart);
}
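// A small round-trip sketch for rleCompress()/rleUncompress() (illustrative
// only, kept out of the build; the sample buffer is made up):
#if 0
static void rle_round_trip_example() {
  const char src[] = {1, 1, 1, 1, 1, 2, 3};  // a 5-byte run followed by literals
  signed char packed[2 * sizeof(src)];       // worst case is (n * 3) / 2, so 2x is ample
  char unpacked[sizeof(src)];
  int packed_len = rleCompress(static_cast<int>(sizeof(src)), src, packed);
  int unpacked_len = rleUncompress(packed_len, static_cast<int>(sizeof(src)),
                                   packed, unpacked);
  assert(unpacked_len == static_cast<int>(sizeof(src)));
  assert(memcmp(src, unpacked, sizeof(src)) == 0);
}
#endif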
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// End of RLE code from OpenEXR -----------------------------------
static void CompressRle(unsigned char *dst,
tinyexr::tinyexr_uint64 &compressedSize,
const unsigned char *src, unsigned long src_size) {
std::vector<unsigned char> tmpBuf(src_size);
//
// Apply the EXR-specific pre-process before RLE encoding. Grabbed from OpenEXR's
// ImfRleCompressor.cpp
//
//
// Reorder the pixel data.
//
const char *srcPtr = reinterpret_cast<const char *>(src);
{
char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
const char *stop = srcPtr + src_size;
for (;;) {
if (srcPtr < stop)
*(t1++) = *(srcPtr++);
else
break;
if (srcPtr < stop)
*(t2++) = *(srcPtr++);
else
break;
}
}
//
// Predictor.
//
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + src_size;
int p = t[-1];
while (t < stop) {
int d = int(t[0]) - p + (128 + 256);
p = t[0];
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// outSize will be (src_size * 3) / 2 at max.
int outSize = rleCompress(static_cast<int>(src_size),
reinterpret_cast<const char *>(&tmpBuf.at(0)),
reinterpret_cast<signed char *>(dst));
assert(outSize > 0);
compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if (compressedSize >= src_size) {
compressedSize = src_size;
memcpy(dst, src, src_size);
}
}
static bool DecompressRle(unsigned char *dst,
const unsigned long uncompressed_size,
const unsigned char *src, unsigned long src_size) {
if (uncompressed_size == src_size) {
// Data is not compressed(Issue 40).
memcpy(dst, src, src_size);
return true;
}
// Workaround for issue #112.
// TODO(syoyo): Add more robust out-of-bounds check in `rleUncompress`.
if (src_size <= 2) {
return false;
}
std::vector<unsigned char> tmpBuf(uncompressed_size);
int ret = rleUncompress(static_cast<int>(src_size),
static_cast<int>(uncompressed_size),
reinterpret_cast<const signed char *>(src),
reinterpret_cast<char *>(&tmpBuf.at(0)));
if (ret != static_cast<int>(uncompressed_size)) {
return false;
}
//
// Apply the EXR-specific post-process after RLE decoding. Grabbed from OpenEXR's
// ImfRleCompressor.cpp
//
// Predictor.
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;
while (t < stop) {
int d = int(t[-1]) + int(t[0]) - 128;
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// Reorder the pixel data.
{
const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
(uncompressed_size + 1) / 2;
char *s = reinterpret_cast<char *>(dst);
char *stop = s + uncompressed_size;
for (;;) {
if (s < stop)
*(s++) = *(t1++);
else
break;
if (s < stop)
*(s++) = *(t2++);
else
break;
}
}
return true;
}
#if TINYEXR_USE_PIZ
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC)
// (3 clause BSD license)
//
struct PIZChannelData {
unsigned short *start;
unsigned short *end;
int nx;
int ny;
int ys;
int size;
};
//-----------------------------------------------------------------------------
//
// 16-bit Haar Wavelet encoding and decoding
//
// The source code in this file is derived from the encoding
// and decoding routines written by Christian Rouet for his
// PIZ image file format.
//
//-----------------------------------------------------------------------------
//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//
inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
unsigned short &h) {
short as = static_cast<short>(a);
short bs = static_cast<short>(b);
short ms = (as + bs) >> 1;
short ds = as - bs;
l = static_cast<unsigned short>(ms);
h = static_cast<unsigned short>(ds);
}
inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
unsigned short &b) {
short ls = static_cast<short>(l);
short hs = static_cast<short>(h);
int hi = hs;
int ai = ls + (hi & 1) + (hi >> 1);
short as = static_cast<short>(ai);
short bs = static_cast<short>(ai - hi);
a = static_cast<unsigned short>(as);
b = static_cast<unsigned short>(bs);
}
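// Worked example (illustrative): wenc14(300, 100, l, h) yields l = 200 (the
// rounded average) and h = 200 (the difference); wdec14(200, 200, a, b)
// recovers a = 300, b = 100 exactly.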
//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//
const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
unsigned short &h) {
int ao = (a + A_OFFSET) & MOD_MASK;
int m = ((ao + b) >> 1);
int d = ao - b;
if (d < 0) m = (m + M_OFFSET) & MOD_MASK;
d &= MOD_MASK;
l = static_cast<unsigned short>(m);
h = static_cast<unsigned short>(d);
}
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
unsigned short &b) {
int m = l;
int d = h;
int bb = (m - (d >> 1)) & MOD_MASK;
int aa = (d + bb - A_OFFSET) & MOD_MASK;
b = static_cast<unsigned short>(bb);
a = static_cast<unsigned short>(aa);
}
//
// 2D Wavelet encoding:
//
static void wav2Encode(
unsigned short *in, // io: values are transformed in place
int nx, // i : x size
int ox, // i : x offset
int ny, // i : y size
int oy, // i : y offset
unsigned short mx) // i : maximum in[x][y] value
{
bool w14 = (mx < (1 << 14));
int n = (nx > ny) ? ny : nx;
int p = 1; // == 1 << level
int p2 = 2; // == 1 << (level+1)
//
// Hierarchical loop on smaller dimension n
//
while (p2 <= n) {
unsigned short *py = in;
unsigned short *ey = in + oy * (ny - p2);
int oy1 = oy * p;
int oy2 = oy * p2;
int ox1 = ox * p;
int ox2 = ox * p2;
unsigned short i00, i01, i10, i11;
//
// Y loop
//
for (; py <= ey; py += oy2) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
//
// X loop
//
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
unsigned short *p10 = px + oy1;
unsigned short *p11 = p10 + ox1;
//
// 2D wavelet encoding
//
if (w14) {
wenc14(*px, *p01, i00, i01);
wenc14(*p10, *p11, i10, i11);
wenc14(i00, i10, *px, *p10);
wenc14(i01, i11, *p01, *p11);
} else {
wenc16(*px, *p01, i00, i01);
wenc16(*p10, *p11, i10, i11);
wenc16(i00, i10, *px, *p10);
wenc16(i01, i11, *p01, *p11);
}
}
//
// Encode (1D) odd column (still in Y loop)
//
if (nx & p) {
unsigned short *p10 = px + oy1;
if (w14)
wenc14(*px, *p10, i00, *p10);
else
wenc16(*px, *p10, i00, *p10);
*px = i00;
}
}
//
// Encode (1D) odd line (must loop in X)
//
if (ny & p) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
if (w14)
wenc14(*px, *p01, i00, *p01);
else
wenc16(*px, *p01, i00, *p01);
*px = i00;
}
}
//
// Next level
//
p = p2;
p2 <<= 1;
}
}
//
// 2D Wavelet decoding:
//
static void wav2Decode(
unsigned short *in, // io: values are transformed in place
int nx, // i : x size
int ox, // i : x offset
int ny, // i : y size
int oy, // i : y offset
unsigned short mx) // i : maximum in[x][y] value
{
bool w14 = (mx < (1 << 14));
int n = (nx > ny) ? ny : nx;
int p = 1;
int p2;
//
// Search max level
//
while (p <= n) p <<= 1;
p >>= 1;
p2 = p;
p >>= 1;
//
// Hierarchical loop on smaller dimension n
//
while (p >= 1) {
unsigned short *py = in;
unsigned short *ey = in + oy * (ny - p2);
int oy1 = oy * p;
int oy2 = oy * p2;
int ox1 = ox * p;
int ox2 = ox * p2;
unsigned short i00, i01, i10, i11;
//
// Y loop
//
for (; py <= ey; py += oy2) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
//
// X loop
//
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
unsigned short *p10 = px + oy1;
unsigned short *p11 = p10 + ox1;
//
// 2D wavelet decoding
//
if (w14) {
wdec14(*px, *p10, i00, i10);
wdec14(*p01, *p11, i01, i11);
wdec14(i00, i01, *px, *p01);
wdec14(i10, i11, *p10, *p11);
} else {
wdec16(*px, *p10, i00, i10);
wdec16(*p01, *p11, i01, i11);
wdec16(i00, i01, *px, *p01);
wdec16(i10, i11, *p10, *p11);
}
}
//
// Decode (1D) odd column (still in Y loop)
//
if (nx & p) {
unsigned short *p10 = px + oy1;
if (w14)
wdec14(*px, *p10, i00, *p10);
else
wdec16(*px, *p10, i00, *p10);
*px = i00;
}
}
//
// Decode (1D) odd line (must loop in X)
//
if (ny & p) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
if (w14)
wdec14(*px, *p01, i00, *p01);
else
wdec16(*px, *p01, i00, *p01);
*px = i00;
}
}
//
// Next level
//
p2 = p;
p >>= 1;
}
}
//-----------------------------------------------------------------------------
//
// 16-bit Huffman compression and decompression.
//
// The source code in this file is derived from the 8-bit
// Huffman compression and decompression routines written
// by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------
// Adds some modification for tinyexr.
const int HUF_ENCBITS = 16; // literal (value) bit length
const int HUF_DECBITS = 14; // decoding bit size (>= 8)
const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size
const int HUF_DECSIZE = 1 << HUF_DECBITS; // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;
struct HufDec { // short code long code
//-------------------------------
unsigned int len : 8; // code length 0
unsigned int lit : 24; // lit p size
unsigned int *p; // 0 lits
};
inline long long hufLength(long long code) { return code & 63; }
inline long long hufCode(long long code) { return code >> 6; }
inline void outputBits(int nBits, long long bits, long long &c, int &lc,
char *&out) {
c <<= nBits;
lc += nBits;
c |= bits;
while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8)));
}
inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
while (lc < nBits) {
c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++));
lc += 8;
}
lc -= nBits;
return (c >> lc) & ((1 << nBits) - 1);
}
//
// ENCODING TABLE BUILDING & (UN)PACKING
//
//
// Build a "canonical" Huffman code table:
// - for each (uncompressed) symbol, hcode contains the length
// of the corresponding code (in the compressed data)
// - canonical codes are computed and stored in hcode
// - the rules for constructing canonical codes are as follows:
// * shorter codes (if filled with zeroes to the right)
// have a numerically higher value than longer codes
// * for codes with the same length, numerical values
// increase with numerical symbol values
// - because the canonical code table can be constructed from
// symbol lengths alone, the code table can be transmitted
// without sending the actual code values
// - see http://www.compressconsult.com/huffman/
//
static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
long long n[59];
//
// For each i from 0 through 58, count the
// number of different codes of length i, and
// store the count in n[i].
//
for (int i = 0; i <= 58; ++i) n[i] = 0;
for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1;
//
// For each i from 58 through 1, compute the
// numerically lowest code with length i, and
// store that code in n[i].
//
long long c = 0;
for (int i = 58; i > 0; --i) {
long long nc = ((c + n[i]) >> 1);
n[i] = c;
c = nc;
}
//
// hcode[i] contains the length, l, of the
// code for symbol i. Assign the next available
// code of length l to the symbol and store both
// l and the code in hcode[i].
//
for (int i = 0; i < HUF_ENCSIZE; ++i) {
int l = static_cast<int>(hcode[i]);
if (l > 0) hcode[i] = l | (n[l]++ << 6);
}
}
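// A minimal sketch of how the canonical table is used (illustrative only, kept
// out of the build): fill hcode[] with code *lengths*, call the function, and
// read back (code, length) pairs through hufCode()/hufLength().
#if 0
static void canonical_code_example() {
  std::vector<long long> hcode(HUF_ENCSIZE, 0);
  hcode[0] = 2;  // symbol 0: 2-bit code
  hcode[1] = 2;  // symbol 1: 2-bit code
  hcode[2] = 1;  // symbol 2: 1-bit code
  hufCanonicalCodeTable(hcode.data());
  // hufLength(hcode[i]) still reports the length; hufCode(hcode[i]) now holds
  // the canonical code assigned to symbol i.
}
#endif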
//
// Compute Huffman codes (based on frq input) and store them in frq:
// - code structure is : [63:lsb - 6:msb] | [5-0: bit length];
// - max code length is 58 bits;
// - codes outside the range [im-iM] have a null length (unused values);
// - original frequencies are destroyed;
// - encoding tables are used by hufEncode() and hufBuildDecTable();
//
struct FHeapCompare {
bool operator()(long long *a, long long *b) { return *a > *b; }
};
static void hufBuildEncTable(
long long *frq, // io: input frequencies [HUF_ENCSIZE], output table
int *im, // o: min frq index
int *iM) // o: max frq index
{
//
// This function assumes that when it is called, array frq
// indicates the frequency of all possible symbols in the data
// that are to be Huffman-encoded. (frq[i] contains the number
// of occurrences of symbol i in the data.)
//
// The loop below does three things:
//
// 1) Finds the minimum and maximum indices that point
// to non-zero entries in frq:
//
// frq[im] != 0, and frq[i] == 0 for all i < im
// frq[iM] != 0, and frq[i] == 0 for all i > iM
//
// 2) Fills array fHeap with pointers to all non-zero
// entries in frq.
//
// 3) Initializes array hlink such that hlink[i] == i
// for all array entries.
//
std::vector<int> hlink(HUF_ENCSIZE);
std::vector<long long *> fHeap(HUF_ENCSIZE);
*im = 0;
while (!frq[*im]) (*im)++;
int nf = 0;
for (int i = *im; i < HUF_ENCSIZE; i++) {
hlink[i] = i;
if (frq[i]) {
fHeap[nf] = &frq[i];
nf++;
*iM = i;
}
}
//
// Add a pseudo-symbol, with a frequency count of 1, to frq;
// adjust the fHeap and hlink array accordingly. Function
// hufEncode() uses the pseudo-symbol for run-length encoding.
//
(*iM)++;
frq[*iM] = 1;
fHeap[nf] = &frq[*iM];
nf++;
//
// Build an array, scode, such that scode[i] contains the number
// of bits assigned to symbol i. Conceptually this is done by
// constructing a tree whose leaves are the symbols with non-zero
// frequency:
//
// Make a heap that contains all symbols with a non-zero frequency,
// with the least frequent symbol on top.
//
// Repeat until only one symbol is left on the heap:
//
// Take the two least frequent symbols off the top of the heap.
// Create a new node that has first two nodes as children, and
// whose frequency is the sum of the frequencies of the first
// two nodes. Put the new node back into the heap.
//
// The last node left on the heap is the root of the tree. For each
// leaf node, the distance between the root and the leaf is the length
// of the code for the corresponding symbol.
//
// The loop below doesn't actually build the tree; instead we compute
// the distances of the leaves from the root on the fly. When a new
// node is added to the heap, then that node's descendants are linked
// into a single linear list that starts at the new node, and the code
// lengths of the descendants (that is, their distance from the root
// of the tree) are incremented by one.
//
std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
std::vector<long long> scode(HUF_ENCSIZE);
memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE);
while (nf > 1) {
//
// Find the indices, mm and m, of the two smallest non-zero frq
// values in fHeap, add the smallest frq to the second-smallest
// frq, and remove the smallest frq value from fHeap.
//
int mm = fHeap[0] - frq;
std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
--nf;
int m = fHeap[0] - frq;
std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
frq[m] += frq[mm];
std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
//
// The entries in scode are linked into lists with the
// entries in hlink serving as "next" pointers and with
// the end of a list marked by hlink[j] == j.
//
// Traverse the lists that start at scode[m] and scode[mm].
// For each element visited, increment the length of the
// corresponding code by one bit. (If we visit scode[j]
// during the traversal, then the code for symbol j becomes
// one bit longer.)
//
// Merge the lists that start at scode[m] and scode[mm]
// into a single list that starts at scode[m].
//
//
// Add a bit to all codes in the first list.
//
for (int j = m;; j = hlink[j]) {
scode[j]++;
assert(scode[j] <= 58);
if (hlink[j] == j) {
//
// Merge the two lists.
//
hlink[j] = mm;
break;
}
}
//
// Add a bit to all codes in the second list
//
for (int j = mm;; j = hlink[j]) {
scode[j]++;
assert(scode[j] <= 58);
if (hlink[j] == j) break;
}
}
//
// Build a canonical Huffman code table, replacing the code
// lengths in scode with (code, code length) pairs. Copy the
// code table from scode into frq.
//
hufCanonicalCodeTable(scode.data());
memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE);
}
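//
// Usage sketch (illustrative): the typical call sequence, as used by
// hufCompress() further below, is
//
//   std::vector<long long> freq(HUF_ENCSIZE);   // symbol frequencies
//   int im, iM;
//   hufBuildEncTable(freq.data(), &im, &iM);    // freq now holds (code, length) pairs
//   hufEncode(freq.data(), input, n, iM, out);  // iM doubles as the run-length code
//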
//
// Pack an encoding table:
// - only code lengths, not actual codes, are stored
// - runs of zeroes are compressed as follows:
//
// unpacked packed
// --------------------------------
// 1 zero 0 (6 bits)
// 2 zeroes 59
// 3 zeroes 60
// 4 zeroes 61
// 5 zeroes 62
// n zeroes (6 or more) 63 n-6 (6 + 8 bits)
//
const int SHORT_ZEROCODE_RUN = 59;
const int LONG_ZEROCODE_RUN = 63;
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;
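//
// Worked example (illustrative): a run of 3 zero lengths packs as the single
// 6-bit value 60; a run of 10 packs as 63 followed by the 8-bit value 4
// (10 - SHORTEST_LONG_RUN); LONGEST_LONG_RUN (= 261) is the largest run that
// fits in one (63, 255) pair, so longer runs are simply split.
//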
static void hufPackEncTable(
const long long *hcode, // i : encoding table [HUF_ENCSIZE]
int im, // i : min hcode index
int iM, // i : max hcode index
char **pcode) // o: ptr to packed table (updated)
{
char *p = *pcode;
long long c = 0;
int lc = 0;
for (; im <= iM; im++) {
int l = hufLength(hcode[im]);
if (l == 0) {
int zerun = 1;
while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
if (hufLength(hcode[im + 1]) > 0) break;
im++;
zerun++;
}
if (zerun >= 2) {
if (zerun >= SHORTEST_LONG_RUN) {
outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
} else {
outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
}
continue;
}
}
outputBits(6, l, c, lc, p);
}
if (lc > 0) *p++ = (unsigned char)(c << (8 - lc));
*pcode = p;
}
//
// Unpack an encoding table packed by hufPackEncTable():
//
static bool hufUnpackEncTable(
const char **pcode, // io: ptr to packed table (updated)
int ni, // i : input size (in bytes)
int im, // i : min hcode index
int iM, // i : max hcode index
long long *hcode) // o: encoding table [HUF_ENCSIZE]
{
memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);
const char *p = *pcode;
long long c = 0;
int lc = 0;
for (; im <= iM; im++) {
if (p - *pcode >= ni) {
return false;
}
long long l = hcode[im] = getBits(6, c, lc, p); // code length
if (l == (long long)LONG_ZEROCODE_RUN) {
if (p - *pcode > ni) {
return false;
}
int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;
if (im + zerun > iM + 1) {
return false;
}
while (zerun--) hcode[im++] = 0;
im--;
} else if (l >= (long long)SHORT_ZEROCODE_RUN) {
int zerun = l - SHORT_ZEROCODE_RUN + 2;
if (im + zerun > iM + 1) {
return false;
}
while (zerun--) hcode[im++] = 0;
im--;
}
}
*pcode = const_cast<char *>(p);
hufCanonicalCodeTable(hcode);
return true;
}
//
// DECODING TABLE BUILDING
//
//
// Clear a newly allocated decoding table so that it contains only zeroes.
//
static void hufClearDecTable(HufDec *hdecod) // io: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
for (int i = 0; i < HUF_DECSIZE; i++) {
hdecod[i].len = 0;
hdecod[i].lit = 0;
hdecod[i].p = NULL;
}
// memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
}
//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
//     infrequent;
// - decoding tables are used by hufDecode();
//
static bool hufBuildDecTable(const long long *hcode, // i : encoding table
int im, // i : min index in hcode
int iM, // i : max index in hcode
HufDec *hdecod) // o: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
//
// Init hashtable & loop on all codes.
// Assumes that hufClearDecTable(hdecod) has already been called.
//
for (; im <= iM; im++) {
long long c = hufCode(hcode[im]);
int l = hufLength(hcode[im]);
if (c >> l) {
//
// Error: c is supposed to be an l-bit code,
// but c contains a value that is greater
// than the largest l-bit number.
//
// invalidTableEntry();
return false;
}
if (l > HUF_DECBITS) {
//
// Long code: add a secondary entry
//
HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));
if (pl->len) {
//
// Error: a short code has already
// been stored in table entry *pl.
//
// invalidTableEntry();
return false;
}
pl->lit++;
if (pl->p) {
unsigned int *p = pl->p;
pl->p = new unsigned int[pl->lit];
for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];
delete[] p;
} else {
pl->p = new unsigned int[1];
}
pl->p[pl->lit - 1] = im;
} else if (l) {
//
// Short code: init all primary entries
//
HufDec *pl = hdecod + (c << (HUF_DECBITS - l));
for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
if (pl->len || pl->p) {
//
// Error: a short code or a long code has
// already been stored in table entry *pl.
//
// invalidTableEntry();
return false;
}
pl->len = l;
pl->lit = im;
}
}
}
return true;
}
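//
// Sketch of how the primary table is filled (assuming HUF_DECBITS == 14, its
// usual value in this file): a short code of length l occupies
// 2^(HUF_DECBITS - l) consecutive entries. E.g. the 3-bit code 0b101 fills
// indices [0b101 << 11, (0b101 << 11) + 2^11 - 1], so any 14-bit bit window
// that starts with 101 decodes the symbol with a single table access.
//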
//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//
static void hufFreeDecTable(HufDec *hdecod) // io: Decoding table
{
for (int i = 0; i < HUF_DECSIZE; i++) {
if (hdecod[i].p) {
delete[] hdecod[i].p;
hdecod[i].p = 0;
}
}
}
//
// ENCODING
//
inline void outputCode(long long code, long long &c, int &lc, char *&out) {
outputBits(hufLength(code), hufCode(code), c, lc, out);
}
inline void sendCode(long long sCode, int runCount, long long runCode,
long long &c, int &lc, char *&out) {
//
// Output a run of runCount instances of the symbol sCode.
// Output the symbols explicitly, or if that is shorter, output
// the sCode symbol once followed by a runCode symbol and runCount
// expressed as an 8-bit number.
//
if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
outputCode(sCode, c, lc, out);
outputCode(runCode, c, lc, out);
outputBits(8, runCount, c, lc, out);
} else {
while (runCount-- >= 0) outputCode(sCode, c, lc, out);
}
}
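//
// Worked example (illustrative): with a 6-bit sCode, an 8-bit runCode and
// runCount == 20, the test compares 6 + 8 + 8 = 22 bits (run form) against
// 6 * 20 = 120 bits (explicit repeats), so the run form is emitted; with
// runCount == 2 it compares 22 against 12 and writes the symbols explicitly.
// Either way the decoder reconstructs runCount + 1 instances of the symbol.
//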
//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//
static int hufEncode // return: output size (in bits)
(const long long *hcode, // i : encoding table
const unsigned short *in, // i : uncompressed input buffer
 const int ni,              // i : input buffer size (number of 16-bit values)
int rlc, // i : rl code
char *out) // o: compressed output buffer
{
char *outStart = out;
long long c = 0; // bits not yet written to out
int lc = 0; // number of valid bits in c (LSB)
int s = in[0];
int cs = 0;
//
// Loop on input values
//
for (int i = 1; i < ni; i++) {
//
// Count same values or send code
//
if (s == in[i] && cs < 255) {
cs++;
} else {
sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
cs = 0;
}
s = in[i];
}
//
// Send remaining code
//
sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
if (lc) *out = (c << (8 - lc)) & 0xff;
return (out - outStart) * 8 + lc;
}
//
// DECODING
//
//
// In order to force the compiler to inline it, getChar()
// is implemented as a macro instead of an "inline" function.
// (getCode() was originally a macro too; see the disabled
// version below -- a bounds-checked function is used instead.)
//
#define getChar(c, lc, in) \
{ \
c = (c << 8) | *(unsigned char *)(in++); \
lc += 8; \
}
#if 0
#define getCode(po, rlc, c, lc, in, out, ob, oe) \
{ \
if (po == rlc) { \
if (lc < 8) getChar(c, lc, in); \
\
lc -= 8; \
\
unsigned char cs = (c >> lc); \
\
if (out + cs > oe) return false; \
\
/* TinyEXR issue 78 */ \
unsigned short s = out[-1]; \
\
while (cs-- > 0) *out++ = s; \
} else if (out < oe) { \
*out++ = po; \
} else { \
return false; \
} \
}
#else
static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in,
const char *in_end, unsigned short *&out,
const unsigned short *ob, const unsigned short *oe) {
(void)ob;
if (po == rlc) {
if (lc < 8) {
/* TinyEXR issue 78 */
if ((in + 1) >= in_end) {
return false;
}
getChar(c, lc, in);
}
lc -= 8;
unsigned char cs = (c >> lc);
if (out + cs > oe) return false;
// Bounds check for safety
// Issue 100.
if ((out - 1) < ob) return false;
unsigned short s = out[-1];
while (cs-- > 0) *out++ = s;
} else if (out < oe) {
*out++ = po;
} else {
return false;
}
return true;
}
#endif
//
// Decode (uncompress) ni bits based on encoding & decoding tables:
//
static bool hufDecode(const long long *hcode, // i : encoding table
const HufDec *hdecod, // i : decoding table
const char *in, // i : compressed input buffer
int ni, // i : input size (in bits)
int rlc, // i : run-length code
                      int no,    // i : expected output size (number of 16-bit values)
unsigned short *out) // o: uncompressed output buffer
{
long long c = 0;
int lc = 0;
unsigned short *outb = out; // begin
unsigned short *oe = out + no; // end
const char *ie = in + (ni + 7) / 8; // input byte size
//
// Loop on input bytes
//
while (in < ie) {
getChar(c, lc, in);
//
// Access decoding table
//
while (lc >= HUF_DECBITS) {
const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];
if (pl.len) {
//
// Get short code
//
lc -= pl.len;
// std::cout << "lit = " << pl.lit << std::endl;
// std::cout << "rlc = " << rlc << std::endl;
// std::cout << "c = " << c << std::endl;
// std::cout << "lc = " << lc << std::endl;
// std::cout << "in = " << in << std::endl;
// std::cout << "out = " << out << std::endl;
// std::cout << "oe = " << oe << std::endl;
if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
return false;
}
} else {
if (!pl.p) {
return false;
}
// invalidCode(); // wrong code
//
// Search long code
//
int j;
for (j = 0; j < pl.lit; j++) {
int l = hufLength(hcode[pl.p[j]]);
while (lc < l && in < ie) // get more bits
getChar(c, lc, in);
if (lc >= l) {
if (hufCode(hcode[pl.p[j]]) ==
((c >> (lc - l)) & (((long long)(1) << l) - 1))) {
//
// Found : get long code
//
lc -= l;
if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) {
return false;
}
break;
}
}
}
if (j == pl.lit) {
return false;
// invalidCode(); // Not found
}
}
}
}
//
// Get remaining (short) codes
//
int i = (8 - ni) & 7;
c >>= i;
lc -= i;
while (lc > 0) {
const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];
if (pl.len) {
lc -= pl.len;
if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
return false;
}
} else {
return false;
// invalidCode(); // wrong (long) code
}
}
if (out - outb != no) {
return false;
}
// notEnoughData ();
return true;
}
static void countFrequencies(std::vector<long long> &freq,
const unsigned short data[/*n*/], int n) {
for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0;
for (int i = 0; i < n; ++i) ++freq[data[i]];
}
static void writeUInt(char buf[4], unsigned int i) {
unsigned char *b = (unsigned char *)buf;
b[0] = i;
b[1] = i >> 8;
b[2] = i >> 16;
b[3] = i >> 24;
}
static unsigned int readUInt(const char buf[4]) {
const unsigned char *b = (const unsigned char *)buf;
return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) |
((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000);
}
//
// EXTERNAL INTERFACE
//
static int hufCompress(const unsigned short raw[], int nRaw,
char compressed[]) {
if (nRaw == 0) return 0;
std::vector<long long> freq(HUF_ENCSIZE);
countFrequencies(freq, raw, nRaw);
int im = 0;
int iM = 0;
hufBuildEncTable(freq.data(), &im, &iM);
char *tableStart = compressed + 20;
char *tableEnd = tableStart;
hufPackEncTable(freq.data(), im, iM, &tableEnd);
int tableLength = tableEnd - tableStart;
char *dataStart = tableEnd;
int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart);
int data_length = (nBits + 7) / 8;
writeUInt(compressed, im);
writeUInt(compressed + 4, iM);
writeUInt(compressed + 8, tableLength);
writeUInt(compressed + 12, nBits);
writeUInt(compressed + 16, 0); // room for future extensions
return dataStart + data_length - compressed;
}
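//
// Layout of the buffer written by hufCompress() (derived from the writes above):
//
//   byte  0.. 3 : im          (min non-zero symbol index)
//   byte  4.. 7 : iM          (max symbol index, incl. the run-length pseudo-symbol)
//   byte  8..11 : tableLength (packed encoding table size, in bytes)
//   byte 12..15 : nBits       (size of the Huffman-coded data, in bits)
//   byte 16..19 : reserved    (written as 0)
//   byte 20..   : packed table, immediately followed by (nBits + 7) / 8 bytes
//                 of Huffman-coded data
//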
static bool hufUncompress(const char compressed[], int nCompressed,
std::vector<unsigned short> *raw) {
if (nCompressed == 0) {
// Nothing to decode; succeed only if the expected output is also empty.
return raw->size() == 0;
}
int im = readUInt(compressed);
int iM = readUInt(compressed + 4);
// int tableLength = readUInt (compressed + 8);
int nBits = readUInt(compressed + 12);
if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false;
const char *ptr = compressed + 20;
//
// Fast decoder needs at least 2x64-bits of compressed data, and
// needs to be runnable on this platform. Otherwise, fall back
// to the original decoder
//
// if (FastHufDecoder::enabled() && nBits > 128)
//{
// FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM);
// fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw);
//}
// else
{
std::vector<long long> freq(HUF_ENCSIZE);
std::vector<HufDec> hdec(HUF_DECSIZE);
hufClearDecTable(&hdec.at(0));
hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
&freq.at(0));
{
if (nBits > 8 * (nCompressed - (ptr - compressed))) {
return false;
}
hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0));
hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, raw->size(),
raw->data());
}
// catch (...)
//{
// hufFreeDecTable (hdec);
// throw;
//}
hufFreeDecTable(&hdec.at(0));
}
return true;
}
//
// Functions to compress the range of values in the pixel data
//
const int USHORT_RANGE = (1 << 16);
const int BITMAP_SIZE = (USHORT_RANGE >> 3);
static void bitmapFromData(const unsigned short data[/*nData*/], int nData,
unsigned char bitmap[BITMAP_SIZE],
unsigned short &minNonZero,
unsigned short &maxNonZero) {
for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0;
for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7));
bitmap[0] &= ~1; // zero is not explicitly stored in
// the bitmap; we assume that the
// data always contain zeroes
minNonZero = BITMAP_SIZE - 1;
maxNonZero = 0;
for (int i = 0; i < BITMAP_SIZE; ++i) {
if (bitmap[i]) {
if (minNonZero > i) minNonZero = i;
if (maxNonZero < i) maxNonZero = i;
}
}
}
static unsigned short forwardLutFromBitmap(
const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
int k = 0;
for (int i = 0; i < USHORT_RANGE; ++i) {
if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7))))
lut[i] = k++;
else
lut[i] = 0;
}
return k - 1; // maximum value stored in lut[],
} // i.e. number of ones in bitmap minus 1
static unsigned short reverseLutFromBitmap(
const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
int k = 0;
for (int i = 0; i < USHORT_RANGE; ++i) {
if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i;
}
int n = k - 1;
while (k < USHORT_RANGE) lut[k++] = 0;
return n; // maximum k where lut[k] is non-zero,
} // i.e. number of ones in bitmap minus 1
static void applyLut(const unsigned short lut[USHORT_RANGE],
unsigned short data[/*nData*/], int nData) {
for (int i = 0; i < nData; ++i) data[i] = lut[data[i]];
}
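//
// Illustrative sketch (not part of the library): if the pixel data only
// contains the values {0, 7, 300}, bitmapFromData() marks bits 7 and 300
// (zero is implicit), forwardLutFromBitmap() maps 0 -> 0, 7 -> 1, 300 -> 2
// and returns maxValue == 2, and reverseLutFromBitmap() builds the inverse
// table so that applyLut() restores the original values after decoding.
//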
#ifdef __clang__
#pragma clang diagnostic pop
#endif // __clang__
#ifdef _MSC_VER
#pragma warning(pop)
#endif
static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize,
const unsigned char *inPtr, size_t inSize,
const std::vector<ChannelInfo> &channelInfo,
int data_width, int num_lines) {
std::vector<unsigned char> bitmap(BITMAP_SIZE);
unsigned short minNonZero;
unsigned short maxNonZero;
#if !MINIZ_LITTLE_ENDIAN
// @todo { PIZ compression on BigEndian architecture. }
assert(0);
return false;
#endif
// Assume `inSize` is a multiple of 2 or 4.
std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short));
std::vector<PIZChannelData> channelData(channelInfo.size());
unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
for (size_t c = 0; c < channelData.size(); c++) {
PIZChannelData &cd = channelData[c];
cd.start = tmpBufferEnd;
cd.end = cd.start;
cd.nx = data_width;
cd.ny = num_lines;
// cd.ys = c.channel().ySampling;
size_t pixelSize = sizeof(int); // UINT and FLOAT
if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
pixelSize = sizeof(short);
}
cd.size = static_cast<int>(pixelSize / sizeof(short));
tmpBufferEnd += cd.nx * cd.ny * cd.size;
}
const unsigned char *ptr = inPtr;
for (int y = 0; y < num_lines; ++y) {
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
// if (modp (y, cd.ys) != 0)
// continue;
size_t n = static_cast<size_t>(cd.nx * cd.size);
memcpy(cd.end, ptr, n * sizeof(unsigned short));
ptr += n * sizeof(unsigned short);
cd.end += n;
}
}
bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()),
bitmap.data(), minNonZero, maxNonZero);
std::vector<unsigned short> lut(USHORT_RANGE);
unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data());
applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()));
//
// Store range compression info in _outBuffer
//
char *buf = reinterpret_cast<char *>(outPtr);
memcpy(buf, &minNonZero, sizeof(unsigned short));
buf += sizeof(unsigned short);
memcpy(buf, &maxNonZero, sizeof(unsigned short));
buf += sizeof(unsigned short);
if (minNonZero <= maxNonZero) {
memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero),
maxNonZero - minNonZero + 1);
buf += maxNonZero - minNonZero + 1;
}
//
// Apply wavelet encoding
//
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
for (int j = 0; j < cd.size; ++j) {
wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
maxValue);
}
}
//
// Apply Huffman encoding; append the result to _outBuffer
//
// 4-byte length header, then the Huffman data. Initialize the length header
// with zero and fill it in with `length` afterwards.
char *lengthPtr = buf;
int zero = 0;
memcpy(buf, &zero, sizeof(int));
buf += sizeof(int);
int length =
hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf);
memcpy(lengthPtr, &length, sizeof(int));
(*outSize) = static_cast<unsigned int>(
(reinterpret_cast<unsigned char *>(buf) - outPtr) +
static_cast<unsigned int>(length));
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if ((*outSize) >= inSize) {
(*outSize) = static_cast<unsigned int>(inSize);
memcpy(outPtr, inPtr, inSize);
}
return true;
}
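//
// Layout of a PIZ-compressed block as written by CompressPiz() (and read back
// by DecompressPiz() below):
//
//   2 bytes : minNonZero
//   2 bytes : maxNonZero
//   maxNonZero - minNonZero + 1 bytes : bitmap slice (only if minNonZero <= maxNonZero)
//   4 bytes : length of the Huffman data
//   length bytes : hufCompress() output
//
// If this ends up larger than the input, the raw input is stored instead
// (Issue 40), which DecompressPiz() detects by comparing sizes.
//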
static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr,
size_t tmpBufSize, size_t inLen, int num_channels,
const EXRChannelInfo *channels, int data_width,
int num_lines) {
if (inLen == tmpBufSize) {
// Data is not compressed (Issue 40).
memcpy(outPtr, inPtr, inLen);
return true;
}
std::vector<unsigned char> bitmap(BITMAP_SIZE);
unsigned short minNonZero;
unsigned short maxNonZero;
#if !MINIZ_LITTLE_ENDIAN
// @todo { PIZ compression on BigEndian architecture. }
assert(0);
return false;
#endif
memset(bitmap.data(), 0, BITMAP_SIZE);
const unsigned char *ptr = inPtr;
// minNonZero = *(reinterpret_cast<const unsigned short *>(ptr));
tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr));
// maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2));
tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2));
ptr += 4;
if (maxNonZero >= BITMAP_SIZE) {
return false;
}
if (minNonZero <= maxNonZero) {
memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr,
maxNonZero - minNonZero + 1);
ptr += maxNonZero - minNonZero + 1;
}
std::vector<unsigned short> lut(USHORT_RANGE);
memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE);
unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data());
//
// Huffman decoding
//
int length;
// length = *(reinterpret_cast<const int *>(ptr));
tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr));
ptr += sizeof(int);
if (size_t((ptr - inPtr) + length) > inLen) {
return false;
}
std::vector<unsigned short> tmpBuffer(tmpBufSize);
hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer);
//
// Wavelet decoding
//
std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels));
unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) {
const EXRChannelInfo &chan = channels[i];
size_t pixelSize = sizeof(int); // UINT and FLOAT
if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) {
pixelSize = sizeof(short);
}
channelData[i].start = tmpBufferEnd;
channelData[i].end = channelData[i].start;
channelData[i].nx = data_width;
channelData[i].ny = num_lines;
// channelData[i].ys = 1;
channelData[i].size = static_cast<int>(pixelSize / sizeof(short));
tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size;
}
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
for (int j = 0; j < cd.size; ++j) {
wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
maxValue);
}
}
//
// Expand the pixel data to their original range
//
applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSize));
for (int y = 0; y < num_lines; y++) {
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
// if (modp (y, cd.ys) != 0)
// continue;
size_t n = static_cast<size_t>(cd.nx * cd.size);
memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short)));
outPtr += n * sizeof(unsigned short);
cd.end += n;
}
}
return true;
}
#endif // TINYEXR_USE_PIZ
#if TINYEXR_USE_ZFP
struct ZFPCompressionParam {
double rate;
unsigned int precision;
unsigned int __pad0;
double tolerance;
int type; // TINYEXR_ZFP_COMPRESSIONTYPE_*
unsigned int __pad1;
ZFPCompressionParam() {
type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE;
rate = 2.0;
precision = 0;
tolerance = 0.0;
}
};
static bool FindZFPCompressionParam(ZFPCompressionParam *param,
const EXRAttribute *attributes,
int num_attributes, std::string *err) {
bool foundType = false;
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionType") == 0)) {
if (attributes[i].size == 1) {
param->type = static_cast<int>(attributes[i].value[0]);
foundType = true;
break;
} else {
if (err) {
(*err) +=
"zfpCompressionType attribute must be uchar(1 byte) type.\n";
}
return false;
}
}
}
if (!foundType) {
if (err) {
(*err) += "`zfpCompressionType` attribute not found.\n";
}
return false;
}
if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) &&
(attributes[i].size == 8)) {
param->rate = *(reinterpret_cast<double *>(attributes[i].value));
return true;
}
}
if (err) {
(*err) += "`zfpCompressionRate` attribute not found.\n";
}
} else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
(attributes[i].size == 4)) {
param->precision = static_cast<unsigned int>(*(reinterpret_cast<int *>(attributes[i].value)));
return true;
}
}
if (err) {
(*err) += "`zfpCompressionPrecision` attribute not found.\n";
}
} else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
(attributes[i].size == 8)) {
param->tolerance = *(reinterpret_cast<double *>(attributes[i].value));
return true;
}
}
if (err) {
(*err) += "`zfpCompressionTolerance` attribute not found.\n";
}
} else {
if (err) {
(*err) += "Unknown value specified for `zfpCompressionType`.\n";
}
}
return false;
}
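//
// Example (illustrative): a rate-based ZFP header would carry a 1-byte
// "zfpCompressionType" attribute holding TINYEXR_ZFP_COMPRESSIONTYPE_RATE and
// an 8-byte "zfpCompressionRate" attribute holding a double; precision- and
// accuracy-based headers use the 4-byte "zfpCompressionPrecision" and 8-byte
// "zfpCompressionTolerance" attributes checked above.
//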
// Assume pixel format is FLOAT for all channels.
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
size_t num_channels, const unsigned char *src,
unsigned long src_size,
const ZFPCompressionParam ¶m) {
size_t uncompressed_size =
size_t(dst_width) * size_t(dst_num_lines) * num_channels;
if (uncompressed_size == src_size) {
// Data is not compressed (Issue 40).
memcpy(dst, src, src_size);
return true;
}
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((dst_width % 4) == 0);
assert((dst_num_lines % 4) == 0);
if ((size_t(dst_width) & 3U) || (size_t(dst_num_lines) & 3U)) {
return false;
}
field =
zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
zfp_type_float, static_cast<unsigned int>(dst_width),
static_cast<unsigned int>(dst_num_lines) *
static_cast<unsigned int>(num_channels));
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2,
/* write random access */ 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
std::vector<unsigned char> buf(buf_size);
memcpy(&buf.at(0), src, src_size);
bitstream *stream = stream_open(&buf.at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_stream_rewind(zfp);
size_t image_size = size_t(dst_width) * size_t(dst_num_lines);
for (size_t c = 0; c < size_t(num_channels); c++) {
// decompress 4x4 pixel block.
for (size_t y = 0; y < size_t(dst_num_lines); y += 4) {
for (size_t x = 0; x < size_t(dst_width); x += 4) {
float fblock[16];
zfp_decode_block_float_2(zfp, fblock);
for (size_t j = 0; j < 4; j++) {
for (size_t i = 0; i < 4; i++) {
dst[c * image_size + ((y + j) * size_t(dst_width) + (x + i))] =
fblock[j * 4 + i];
}
}
}
}
}
zfp_field_free(field);
zfp_stream_close(zfp);
stream_close(stream);
return true;
}
// Assume pixel format is FLOAT for all channels.
static bool CompressZfp(std::vector<unsigned char> *outBuf,
unsigned int *outSize, const float *inPtr, int width,
int num_lines, int num_channels,
const ZFPCompressionParam ¶m) {
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((width % 4) == 0);
assert((num_lines % 4) == 0);
if ((size_t(width) & 3U) || (size_t(num_lines) & 3U)) {
return false;
}
// create input array.
field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)),
zfp_type_float, static_cast<unsigned int>(width),
static_cast<unsigned int>(num_lines * num_channels));
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
outBuf->resize(buf_size);
bitstream *stream = stream_open(&outBuf->at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_field_free(field);
size_t image_size = size_t(width) * size_t(num_lines);
for (size_t c = 0; c < size_t(num_channels); c++) {
// compress 4x4 pixel block.
for (size_t y = 0; y < size_t(num_lines); y += 4) {
for (size_t x = 0; x < size_t(width); x += 4) {
float fblock[16];
for (size_t j = 0; j < 4; j++) {
for (size_t i = 0; i < 4; i++) {
fblock[j * 4 + i] =
inPtr[c * image_size + ((y + j) * size_t(width) + (x + i))];
}
}
zfp_encode_block_float_2(zfp, fblock);
}
}
}
zfp_stream_flush(zfp);
(*outSize) = static_cast<unsigned int>(zfp_stream_compressed_size(zfp));
zfp_stream_close(zfp);
stream_close(stream);  // release the bitstream opened by stream_open() above
return true;
}
#endif
//
// -----------------------------------------------------------------
//
// TODO(syoyo): Refactor function arguments.
static bool DecodePixelData(/* out */ unsigned char **out_images,
const int *requested_pixel_types,
const unsigned char *data_ptr, size_t data_len,
int compression_type, int line_order, int width,
int height, int x_stride, int y, int line_no,
int num_lines, size_t pixel_data_size,
size_t num_attributes,
const EXRAttribute *attributes, size_t num_channels,
const EXRChannelInfo *channels,
const std::vector<size_t> &channel_offset_list) {
if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ
#if TINYEXR_USE_PIZ
if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) {
// Invalid input #90
return false;
}
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(
static_cast<size_t>(width * num_lines) * pixel_data_size));
size_t tmpBufLen = outBuf.size();
bool ret = tinyexr::DecompressPiz(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen,
data_len, static_cast<int>(num_channels), channels, width, num_lines);
if (!ret) {
return false;
}
// For PIZ_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
FP16 hf;
// hf.u = line_ptr[u];
// use `cpy` to avoid unaligned memory access when compiler's
// optimization is on.
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
size_t offset = 0;
if (line_order == 0) {
offset = (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
offset = static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
image += offset;
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(&outBuf.at(
v * pixel_data_size * static_cast<size_t>(x_stride) +
channel_offset_list[c] * static_cast<size_t>(x_stride)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
}
}
#else
assert(0 && "PIZ is enabled in this build");
return false;
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS ||
compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
assert(dstLen > 0);
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr,
static_cast<unsigned long>(data_len))) {
return false;
}
// For ZIP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
size_t offset = 0;
if (line_order == 0) {
offset = (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
offset = (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
image += offset;
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
if (dstLen == 0) {
return false;
}
if (!tinyexr::DecompressRle(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr,
static_cast<unsigned long>(data_len))) {
return false;
}
// For RLE_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
std::string e;
if (!tinyexr::FindZFPCompressionParam(&zfp_compression_param, attributes,
int(num_attributes), &e)) {
// This code path should not be reachable.
assert(0);
return false;
}
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = outBuf.size();
assert(dstLen > 0);
tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width,
num_lines, num_channels, data_ptr,
static_cast<unsigned long>(data_len),
zfp_compression_param);
// For ZFP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
#else
(void)attributes;
(void)num_attributes;
(void)num_channels;
assert(0);
return false;
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
for (size_t c = 0; c < num_channels; c++) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
const unsigned short *line_ptr =
reinterpret_cast<const unsigned short *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *outLine =
reinterpret_cast<unsigned short *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
outLine[u] = hf.u;
}
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
(data_ptr + data_len)) {
// Insufficient data size
return false;
}
for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
// address may not be aligned; use byte-wise copy for safety (Issue #76).
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
tinyexr::FP32 f32 = half_to_float(hf);
outLine[u] = f32.f;
}
} else {
assert(0);
return false;
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
const float *line_ptr = reinterpret_cast<const float *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
(data_ptr + data_len)) {
// Insufficient data size
return false;
}
for (int u = 0; u < width; u++) {
float val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
unsigned int *outLine =
reinterpret_cast<unsigned int *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
for (int u = 0; u < width; u++) {
if (reinterpret_cast<const unsigned char *>(line_ptr + u) >=
(data_ptr + data_len)) {
// Corrupted data?
return false;
}
unsigned int val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
}
}
}
}
return true;
}
static bool DecodeTiledPixelData(
unsigned char **out_images, int *width, int *height,
const int *requested_pixel_types, const unsigned char *data_ptr,
size_t data_len, int compression_type, int line_order, int data_width,
int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
int tile_size_y, size_t pixel_data_size, size_t num_attributes,
const EXRAttribute *attributes, size_t num_channels,
const EXRChannelInfo *channels,
const std::vector<size_t> &channel_offset_list) {
if (tile_size_x > data_width || tile_size_y > data_height ||
tile_size_x * tile_offset_x > data_width ||
tile_size_y * tile_offset_y > data_height) {
return false;
}
// Compute actual image size in a tile.
if ((tile_offset_x + 1) * tile_size_x >= data_width) {
(*width) = data_width - (tile_offset_x * tile_size_x);
} else {
(*width) = tile_size_x;
}
if ((tile_offset_y + 1) * tile_size_y >= data_height) {
(*height) = data_height - (tile_offset_y * tile_size_y);
} else {
(*height) = tile_size_y;
}
// Image size = tile size.
return DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len,
compression_type, line_order, (*width), tile_size_y,
/* stride */ tile_size_x, /* y */ 0, /* line_no */ 0,
(*height), pixel_data_size, num_attributes, attributes,
num_channels, channels, channel_offset_list);
}
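//
// Worked example (illustrative): for data_width == 100 and tile_size_x == 32,
// tiles at tile_offset_x == 0, 1, 2 get (*width) == 32, while the last tile at
// tile_offset_x == 3 gets (*width) == 100 - 3 * 32 == 4; DecodePixelData() is
// then invoked with the full tile_size_x as the x stride of the per-tile
// output image.
//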
static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list,
int *pixel_data_size, size_t *channel_offset,
int num_channels,
const EXRChannelInfo *channels) {
channel_offset_list->resize(static_cast<size_t>(num_channels));
(*pixel_data_size) = 0;
(*channel_offset) = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
(*channel_offset_list)[c] = (*channel_offset);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
(*pixel_data_size) += sizeof(unsigned short);
(*channel_offset) += sizeof(unsigned short);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
(*pixel_data_size) += sizeof(float);
(*channel_offset) += sizeof(float);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
(*pixel_data_size) += sizeof(unsigned int);
(*channel_offset) += sizeof(unsigned int);
} else {
// Unsupported pixel type.
return false;
}
}
return true;
}
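//
// Worked example (illustrative): for three HALF channels "B", "G", "R" the
// offsets become 0, 2, 4 and (*pixel_data_size) == 6; mixing types, e.g. a
// HALF channel followed by a FLOAT channel, gives offsets 0, 2 and a pixel
// size of 6 bytes as well.
//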
static unsigned char **AllocateImage(int num_channels,
const EXRChannelInfo *channels,
const int *requested_pixel_types,
int data_width, int data_height) {
unsigned char **images =
reinterpret_cast<unsigned char **>(static_cast<float **>(
malloc(sizeof(float *) * static_cast<size_t>(num_channels))));
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
size_t data_len =
static_cast<size_t>(data_width) * static_cast<size_t>(data_height);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
// pixel_data_size += sizeof(unsigned short);
// channel_offset += sizeof(unsigned short);
// Alloc internal image for half type.
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
images[c] =
reinterpret_cast<unsigned char *>(static_cast<unsigned short *>(
malloc(sizeof(unsigned short) * data_len)));
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
images[c] = reinterpret_cast<unsigned char *>(
static_cast<float *>(malloc(sizeof(float) * data_len)));
} else {
assert(0);
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
// pixel_data_size += sizeof(float);
// channel_offset += sizeof(float);
images[c] = reinterpret_cast<unsigned char *>(
static_cast<float *>(malloc(sizeof(float) * data_len)));
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
// pixel_data_size += sizeof(unsigned int);
// channel_offset += sizeof(unsigned int);
images[c] = reinterpret_cast<unsigned char *>(
static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len)));
} else {
assert(0);
}
}
return images;
}
#ifdef _WIN32
static inline std::wstring UTF8ToWchar(const std::string &str) {
int wstr_size =
MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), NULL, 0);
std::wstring wstr(wstr_size, 0);
MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), &wstr[0],
(int)wstr.size());
return wstr;
}
#endif
static int ParseEXRHeader(HeaderInfo *info, bool *empty_header,
const EXRVersion *version, std::string *err,
const unsigned char *buf, size_t size) {
const char *marker = reinterpret_cast<const char *>(&buf[0]);
if (empty_header) {
(*empty_header) = false;
}
if (version->multipart) {
if (size > 0 && marker[0] == '\0') {
// End of header list.
if (empty_header) {
(*empty_header) = true;
}
return TINYEXR_SUCCESS;
}
}
// According to the spec, the header of every OpenEXR file must contain at
// least the following attributes:
//
// channels chlist
// compression compression
// dataWindow box2i
// displayWindow box2i
// lineOrder lineOrder
// pixelAspectRatio float
// screenWindowCenter v2f
// screenWindowWidth float
bool has_channels = false;
bool has_compression = false;
bool has_data_window = false;
bool has_display_window = false;
bool has_line_order = false;
bool has_pixel_aspect_ratio = false;
bool has_screen_window_center = false;
bool has_screen_window_width = false;
info->data_window.min_x = 0;
info->data_window.min_y = 0;
info->data_window.max_x = 0;
info->data_window.max_y = 0;
info->line_order = 0; // @fixme
info->display_window.min_x = 0;
info->display_window.min_y = 0;
info->display_window.max_x = 0;
info->display_window.max_y = 0;
info->screen_window_center[0] = 0.0f;
info->screen_window_center[1] = 0.0f;
info->screen_window_width = -1.0f;
info->pixel_aspect_ratio = -1.0f;
info->tile_size_x = -1;
info->tile_size_y = -1;
info->tile_level_mode = -1;
info->tile_rounding_mode = -1;
info->attributes.clear();
// Read attributes
size_t orig_size = size;
for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) {
if (0 == size) {
if (err) {
(*err) += "Insufficient data size for attributes.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
} else if (marker[0] == '\0') {
size--;
break;
}
std::string attr_name;
std::string attr_type;
std::vector<unsigned char> data;
size_t marker_size;
if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
marker, size)) {
if (err) {
(*err) += "Failed to read attribute.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
marker += marker_size;
size -= marker_size;
if (version->tiled && attr_name.compare("tiles") == 0) {
unsigned int x_size, y_size;
unsigned char tile_mode;
assert(data.size() == 9);
memcpy(&x_size, &data.at(0), sizeof(int));
memcpy(&y_size, &data.at(4), sizeof(int));
tile_mode = data[8];
tinyexr::swap4(&x_size);
tinyexr::swap4(&y_size);
if (x_size > static_cast<unsigned int>(std::numeric_limits<int>::max()) ||
y_size > static_cast<unsigned int>(std::numeric_limits<int>::max())) {
if (err) {
(*err) = "Tile sizes were invalid.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
info->tile_size_x = static_cast<int>(x_size);
info->tile_size_y = static_cast<int>(y_size);
// mode = levelMode + roundingMode * 16
info->tile_level_mode = tile_mode & 0x3;
info->tile_rounding_mode = (tile_mode >> 4) & 0x1;
} else if (attr_name.compare("compression") == 0) {
bool ok = false;
if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) {
ok = true;
}
if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
ok = true;
#else
if (err) {
(*err) = "PIZ compression is not supported.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
}
if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
ok = true;
#else
if (err) {
(*err) = "ZFP compression is not supported.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
}
if (!ok) {
if (err) {
(*err) = "Unknown compression type.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
info->compression_type = static_cast<int>(data[0]);
has_compression = true;
} else if (attr_name.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
if (!ReadChannelInfo(info->channels, data)) {
if (err) {
(*err) += "Failed to parse channel info.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
if (info->channels.size() < 1) {
if (err) {
(*err) += "# of channels is zero.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
has_channels = true;
} else if (attr_name.compare("dataWindow") == 0) {
if (data.size() >= 16) {
memcpy(&info->data_window.min_x, &data.at(0), sizeof(int));
memcpy(&info->data_window.min_y, &data.at(4), sizeof(int));
memcpy(&info->data_window.max_x, &data.at(8), sizeof(int));
memcpy(&info->data_window.max_y, &data.at(12), sizeof(int));
tinyexr::swap4(&info->data_window.min_x);
tinyexr::swap4(&info->data_window.min_y);
tinyexr::swap4(&info->data_window.max_x);
tinyexr::swap4(&info->data_window.max_y);
has_data_window = true;
}
} else if (attr_name.compare("displayWindow") == 0) {
if (data.size() >= 16) {
memcpy(&info->display_window.min_x, &data.at(0), sizeof(int));
memcpy(&info->display_window.min_y, &data.at(4), sizeof(int));
memcpy(&info->display_window.max_x, &data.at(8), sizeof(int));
memcpy(&info->display_window.max_y, &data.at(12), sizeof(int));
tinyexr::swap4(&info->display_window.min_x);
tinyexr::swap4(&info->display_window.min_y);
tinyexr::swap4(&info->display_window.max_x);
tinyexr::swap4(&info->display_window.max_y);
has_display_window = true;
}
} else if (attr_name.compare("lineOrder") == 0) {
if (data.size() >= 1) {
info->line_order = static_cast<int>(data[0]);
has_line_order = true;
}
} else if (attr_name.compare("pixelAspectRatio") == 0) {
if (data.size() >= sizeof(float)) {
memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float));
tinyexr::swap4(&info->pixel_aspect_ratio);
has_pixel_aspect_ratio = true;
}
} else if (attr_name.compare("screenWindowCenter") == 0) {
if (data.size() >= 8) {
memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float));
memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float));
tinyexr::swap4(&info->screen_window_center[0]);
tinyexr::swap4(&info->screen_window_center[1]);
has_screen_window_center = true;
}
} else if (attr_name.compare("screenWindowWidth") == 0) {
if (data.size() >= sizeof(float)) {
memcpy(&info->screen_window_width, &data.at(0), sizeof(float));
tinyexr::swap4(&info->screen_window_width);
has_screen_window_width = true;
}
} else if (attr_name.compare("chunkCount") == 0) {
if (data.size() >= sizeof(int)) {
memcpy(&info->chunk_count, &data.at(0), sizeof(int));
tinyexr::swap4(&info->chunk_count);
}
} else {
// Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES)
if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
EXRAttribute attrib;
#ifdef _MSC_VER
strncpy_s(attrib.name, attr_name.c_str(), 255);
strncpy_s(attrib.type, attr_type.c_str(), 255);
#else
strncpy(attrib.name, attr_name.c_str(), 255);
strncpy(attrib.type, attr_type.c_str(), 255);
#endif
attrib.name[255] = '\0';
attrib.type[255] = '\0';
attrib.size = static_cast<int>(data.size());
attrib.value = static_cast<unsigned char *>(malloc(data.size()));
memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0),
data.size());
info->attributes.push_back(attrib);
}
}
}
// Check if required attributes exist
{
std::stringstream ss_err;
if (!has_compression) {
ss_err << "\"compression\" attribute not found in the header."
<< std::endl;
}
if (!has_channels) {
ss_err << "\"channels\" attribute not found in the header." << std::endl;
}
if (!has_line_order) {
ss_err << "\"lineOrder\" attribute not found in the header." << std::endl;
}
if (!has_display_window) {
ss_err << "\"displayWindow\" attribute not found in the header."
<< std::endl;
}
if (!has_data_window) {
ss_err << "\"dataWindow\" attribute not found in the header or invalid."
<< std::endl;
}
if (!has_pixel_aspect_ratio) {
ss_err << "\"pixelAspectRatio\" attribute not found in the header."
<< std::endl;
}
if (!has_screen_window_width) {
ss_err << "\"screenWindowWidth\" attribute not found in the header."
<< std::endl;
}
if (!has_screen_window_center) {
ss_err << "\"screenWindowCenter\" attribute not found in the header."
<< std::endl;
}
if (!(ss_err.str().empty())) {
if (err) {
(*err) += ss_err.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
}
info->header_len = static_cast<unsigned int>(orig_size - size);
return TINYEXR_SUCCESS;
}
// C++ HeaderInfo to C EXRHeader conversion.
static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) {
exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio;
exr_header->screen_window_center[0] = info.screen_window_center[0];
exr_header->screen_window_center[1] = info.screen_window_center[1];
exr_header->screen_window_width = info.screen_window_width;
exr_header->chunk_count = info.chunk_count;
exr_header->display_window.min_x = info.display_window.min_x;
exr_header->display_window.min_y = info.display_window.min_y;
exr_header->display_window.max_x = info.display_window.max_x;
exr_header->display_window.max_y = info.display_window.max_y;
exr_header->data_window.min_x = info.data_window.min_x;
exr_header->data_window.min_y = info.data_window.min_y;
exr_header->data_window.max_x = info.data_window.max_x;
exr_header->data_window.max_y = info.data_window.max_y;
exr_header->line_order = info.line_order;
exr_header->compression_type = info.compression_type;
exr_header->tile_size_x = info.tile_size_x;
exr_header->tile_size_y = info.tile_size_y;
exr_header->tile_level_mode = info.tile_level_mode;
exr_header->tile_rounding_mode = info.tile_rounding_mode;
exr_header->num_channels = static_cast<int>(info.channels.size());
exr_header->channels = static_cast<EXRChannelInfo *>(malloc(
sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
#ifdef _MSC_VER
strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#else
strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#endif
// manually add '\0' for safety.
exr_header->channels[c].name[255] = '\0';
exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
exr_header->channels[c].p_linear = info.channels[c].p_linear;
exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
exr_header->channels[c].y_sampling = info.channels[c].y_sampling;
}
exr_header->pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
exr_header->pixel_types[c] = info.channels[c].pixel_type;
}
// Initially fill with values of `pixel_types`
exr_header->requested_pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
}
exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());
if (exr_header->num_custom_attributes > 0) {
// TODO(syoyo): Report warning when # of attributes exceeds
// `TINYEXR_MAX_CUSTOM_ATTRIBUTES`
if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES;
}
exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc(
sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes)));
for (size_t i = 0; i < info.attributes.size(); i++) {
memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name,
256);
memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type,
256);
exr_header->custom_attributes[i].size = info.attributes[i].size;
// Just copy pointer
exr_header->custom_attributes[i].value = info.attributes[i].value;
}
} else {
exr_header->custom_attributes = NULL;
}
exr_header->header_len = info.header_len;
}
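// Decode all chunks (scanline blocks or tiles) listed in `offsets` into
// `exr_image`. Each offset points at a chunk header followed by
// (un)compressed pixel data; the layouts handled below are:
//   scanline chunk: 4-byte scanline number, 4-byte data size, pixel data
//   tile chunk    : 4x4-byte tile coordinates, 4-byte data size, pixel data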
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
const std::vector<tinyexr::tinyexr_uint64> &offsets,
const unsigned char *head, const size_t size,
std::string *err) {
int num_channels = exr_header->num_channels;
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
if (!FindZFPCompressionParam(&zfp_compression_param,
exr_header->custom_attributes,
int(exr_header->num_custom_attributes), err)) {
return TINYEXR_ERROR_INVALID_HEADER;
}
#endif
}
if (exr_header->data_window.max_x < exr_header->data_window.min_x ||
exr_header->data_window.max_y < exr_header->data_window.min_y) {
if (err) {
(*err) += "Invalid data window.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
int data_width =
exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
int data_height =
exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
// Do not allow too large data_width and data_height. header invalid?
{
const int threshold = 1024 * 8192; // heuristics
if ((data_width > threshold) || (data_height > threshold)) {
if (err) {
std::stringstream ss;
ss << "data_with or data_height too large. data_width: " << data_width
<< ", "
<< "data_height = " << data_height << std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
}
size_t num_blocks = offsets.size();
std::vector<size_t> channel_offset_list;
int pixel_data_size = 0;
size_t channel_offset = 0;
if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
&channel_offset, num_channels,
exr_header->channels)) {
if (err) {
(*err) += "Failed to compute channel layout.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
bool invalid_data = false; // TODO(LTE): Use atomic lock for MT safety.
if (exr_header->tiled) {
// value check
if (exr_header->tile_size_x < 0) {
if (err) {
std::stringstream ss;
ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n";
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
if (exr_header->tile_size_y < 0) {
if (err) {
std::stringstream ss;
ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n";
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
size_t num_tiles = offsets.size(); // = # of blocks
exr_image->tiles = static_cast<EXRTile *>(
calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles)));
int err_code = TINYEXR_SUCCESS;
#if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0)
std::vector<std::thread> workers;
std::atomic<size_t> tile_count(0);
int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
if (num_threads > int(num_tiles)) {
num_threads = int(num_tiles);
}
for (int t = 0; t < num_threads; t++) {
workers.emplace_back(std::thread([&]() {
size_t tile_idx = 0;
while ((tile_idx = tile_count++) < num_tiles) {
#else
for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
#endif
// Allocate memory for each tile.
exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
num_channels, exr_header->channels,
exr_header->requested_pixel_types, exr_header->tile_size_x,
exr_header->tile_size_y);
// 16 byte: tile coordinates
// 4 byte : data size
// ~ : data(uncompressed or compressed)
if (offsets[tile_idx] + sizeof(int) * 5 > size) {
// TODO(LTE): atomic
if (err) {
(*err) += "Insufficient data size.\n";
}
err_code = TINYEXR_ERROR_INVALID_DATA;
break;
}
size_t data_size =
size_t(size - (offsets[tile_idx] + sizeof(int) * 5));
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]);
int tile_coordinates[4];
memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
tinyexr::swap4(&tile_coordinates[0]);
tinyexr::swap4(&tile_coordinates[1]);
tinyexr::swap4(&tile_coordinates[2]);
tinyexr::swap4(&tile_coordinates[3]);
// @todo{ LoD }
if (tile_coordinates[2] != 0) {
err_code = TINYEXR_ERROR_UNSUPPORTED_FEATURE;
break;
}
if (tile_coordinates[3] != 0) {
err_code = TINYEXR_ERROR_UNSUPPORTED_FEATURE;
break;
}
int data_len;
memcpy(&data_len, data_ptr + 16,
sizeof(int)); // 16 = sizeof(tile_coordinates)
tinyexr::swap4(&data_len);
if (data_len < 2 || size_t(data_len) > data_size) {
// TODO(LTE): atomic
if (err) {
(*err) += "Insufficient data length.\n";
}
err_code = TINYEXR_ERROR_INVALID_DATA;
break;
}
// Move to data addr: 20 = 16 + 4;
data_ptr += 20;
bool ret = tinyexr::DecodeTiledPixelData(
exr_image->tiles[tile_idx].images,
&(exr_image->tiles[tile_idx].width),
&(exr_image->tiles[tile_idx].height),
exr_header->requested_pixel_types, data_ptr,
static_cast<size_t>(data_len), exr_header->compression_type,
exr_header->line_order, data_width, data_height,
tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x,
exr_header->tile_size_y, static_cast<size_t>(pixel_data_size),
static_cast<size_t>(exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels),
exr_header->channels, channel_offset_list);
if (!ret) {
// TODO(LTE): atomic
if (err) {
(*err) += "Failed to decode tile data.\n";
}
err_code = TINYEXR_ERROR_INVALID_DATA;
}
exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
exr_image->tiles[tile_idx].level_y = tile_coordinates[3];
#if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0)
}
}));
} // num_thread loop
for (auto &t : workers) {
t.join();
}
#else
}
#endif
if (err_code != TINYEXR_SUCCESS) {
return err_code;
}
exr_image->num_tiles = static_cast<int>(num_tiles);
} else { // scanline format
// Don't allow too large image(256GB * pixel_data_size or more). Workaround
// for #104.
size_t total_data_len =
size_t(data_width) * size_t(data_height) * size_t(num_channels);
const bool total_data_len_overflown =
sizeof(void *) == 8 ? (total_data_len >= 0x4000000000) : false;
if ((total_data_len == 0) || total_data_len_overflown) {
if (err) {
std::stringstream ss;
ss << "Image data size is zero or too large: width = " << data_width
<< ", height = " << data_height << ", channels = " << num_channels
<< std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
exr_image->images = tinyexr::AllocateImage(
num_channels, exr_header->channels, exr_header->requested_pixel_types,
data_width, data_height);
#if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0)
std::vector<std::thread> workers;
std::atomic<int> y_count(0);
int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
if (num_threads > int(num_blocks)) {
num_threads = int(num_blocks);
}
for (int t = 0; t < num_threads; t++) {
workers.emplace_back(std::thread([&]() {
int y = 0;
while ((y = y_count++) < int(num_blocks)) {
#else
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
for (int y = 0; y < static_cast<int>(num_blocks); y++) {
#endif
size_t y_idx = static_cast<size_t>(y);
if (offsets[y_idx] + sizeof(int) * 2 > size) {
invalid_data = true;
} else {
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed or compressed)
size_t data_size =
size_t(size - (offsets[y_idx] + sizeof(int) * 2));
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);
int line_no;
memcpy(&line_no, data_ptr, sizeof(int));
int data_len;
memcpy(&data_len, data_ptr + 4, sizeof(int));
tinyexr::swap4(&line_no);
tinyexr::swap4(&data_len);
if (size_t(data_len) > data_size) {
invalid_data = true;
} else if ((line_no > (2 << 20)) || (line_no < -(2 << 20))) {
// Too large value. Assume this is invalid.
// 2 << 20 = 2097152 is a heuristic threshold.
invalid_data = true;
} else if (data_len == 0) {
// TODO(syoyo): May be ok to raise the threshold for example
// `data_len < 4`
invalid_data = true;
} else {
// line_no may be negative.
int end_line_no = (std::min)(line_no + num_scanline_blocks,
(exr_header->data_window.max_y + 1));
int num_lines = end_line_no - line_no;
if (num_lines <= 0) {
invalid_data = true;
} else {
// Move to data addr: 8 = 4 + 4;
data_ptr += 8;
// Adjust line_no with data_window.min_y
// overflow check
tinyexr_int64 lno =
static_cast<tinyexr_int64>(line_no) -
static_cast<tinyexr_int64>(exr_header->data_window.min_y);
if (lno > std::numeric_limits<int>::max()) {
line_no = -1; // invalid
} else if (lno < -std::numeric_limits<int>::max()) {
line_no = -1; // invalid
} else {
line_no -= exr_header->data_window.min_y;
}
if (line_no < 0) {
invalid_data = true;
} else {
if (!tinyexr::DecodePixelData(
exr_image->images, exr_header->requested_pixel_types,
data_ptr, static_cast<size_t>(data_len),
exr_header->compression_type, exr_header->line_order,
data_width, data_height, data_width, y, line_no,
num_lines, static_cast<size_t>(pixel_data_size),
static_cast<size_t>(
exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels),
exr_header->channels, channel_offset_list)) {
invalid_data = true;
}
}
}
}
}
#if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0)
}
}));
}
for (auto &t : workers) {
t.join();
}
#else
} // omp parallel
#endif
}
if (invalid_data) {
if (err) {
(*err) += "Invalid data found when decoding pixels.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
// Overwrite `pixel_type` with `requested_pixel_type`.
{
for (int c = 0; c < exr_header->num_channels; c++) {
exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
}
}
{
exr_image->num_channels = num_channels;
exr_image->width = data_width;
exr_image->height = data_height;
}
return TINYEXR_SUCCESS;
}
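// Rebuild the scanline offset table by walking the chunks sequentially.
// Used when the stored offset table contains zero/invalid entries: each
// scanline chunk starts with a 4-byte y value and a 4-byte data length,
// so the next chunk begins `data_len + 8` bytes after the current one.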
static bool ReconstructLineOffsets(
std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n,
const unsigned char *head, const unsigned char *marker, const size_t size) {
assert(head < marker);
assert(offsets->size() == n);
for (size_t i = 0; i < n; i++) {
size_t offset = static_cast<size_t>(marker - head);
// Offset should not exceed whole EXR file/data size.
if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) {
return false;
}
int y;
unsigned int data_len;
memcpy(&y, marker, sizeof(int));
memcpy(&data_len, marker + 4, sizeof(unsigned int));
if (data_len >= size) {
return false;
}
tinyexr::swap4(&y);
tinyexr::swap4(&data_len);
(*offsets)[i] = offset;
marker += data_len + 8; // 8 = 4 bytes(y) + 4 bytes(data_len)
}
return true;
}
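// Top-level image decode: validates the data window, derives the number of
// chunks (from `chunkCount`, the tile layout, or the scanline block size),
// reads the offset table (reconstructing it if invalid), then decodes the
// pixel data via DecodeChunk().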
static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
const unsigned char *head,
const unsigned char *marker, const size_t size,
const char **err) {
if (exr_image == NULL || exr_header == NULL || head == NULL ||
marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
}
if (exr_header->data_window.max_x < exr_header->data_window.min_x ||
exr_header->data_window.max_x - exr_header->data_window.min_x ==
std::numeric_limits<int>::max()) {
// Issue 63
tinyexr::SetErrorMessage("Invalid data width value", err);
return TINYEXR_ERROR_INVALID_DATA;
}
int data_width =
exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
if (exr_header->data_window.max_y < exr_header->data_window.min_y ||
exr_header->data_window.max_y - exr_header->data_window.min_y ==
std::numeric_limits<int>::max()) {
tinyexr::SetErrorMessage("Invalid data height value", err);
return TINYEXR_ERROR_INVALID_DATA;
}
int data_height =
exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
// Do not allow too large data_width and data_height. header invalid?
{
const int threshold = 1024 * 8192; // heuristics
if (data_width > threshold) {
tinyexr::SetErrorMessage("data width too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
if (data_height > threshold) {
tinyexr::SetErrorMessage("data height too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
// Read offset tables.
size_t num_blocks = 0;
if (exr_header->chunk_count > 0) {
// Use `chunkCount` attribute.
num_blocks = static_cast<size_t>(exr_header->chunk_count);
} else if (exr_header->tiled) {
// @todo { LoD }
if (exr_header->tile_size_x > data_width || exr_header->tile_size_x < 1 ||
exr_header->tile_size_y > data_height || exr_header->tile_size_y < 1) {
tinyexr::SetErrorMessage("tile sizes are invalid.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
size_t num_x_tiles = static_cast<size_t>(data_width) /
static_cast<size_t>(exr_header->tile_size_x);
if (num_x_tiles * static_cast<size_t>(exr_header->tile_size_x) <
static_cast<size_t>(data_width)) {
num_x_tiles++;
}
size_t num_y_tiles = static_cast<size_t>(data_height) /
static_cast<size_t>(exr_header->tile_size_y);
if (num_y_tiles * static_cast<size_t>(exr_header->tile_size_y) <
static_cast<size_t>(data_height)) {
num_y_tiles++;
}
num_blocks = num_x_tiles * num_y_tiles;
} else {
num_blocks = static_cast<size_t>(data_height) /
static_cast<size_t>(num_scanline_blocks);
if (num_blocks * static_cast<size_t>(num_scanline_blocks) <
static_cast<size_t>(data_height)) {
num_blocks++;
}
}
std::vector<tinyexr::tinyexr_uint64> offsets(num_blocks);
for (size_t y = 0; y < num_blocks; y++) {
tinyexr::tinyexr_uint64 offset;
// Issue #81
if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
tinyexr::swap8(&offset);
if (offset >= size) {
tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
marker += sizeof(tinyexr::tinyexr_uint64); // = 8
offsets[y] = offset;
}
// If line offsets are invalid, we try to reconstruct them.
// See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details.
for (size_t y = 0; y < num_blocks; y++) {
if (offsets[y] <= 0) {
// TODO(syoyo) Report as warning?
// if (err) {
// stringstream ss;
// ss << "Incomplete lineOffsets." << std::endl;
// (*err) += ss.str();
//}
bool ret =
ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
if (ret) {
// OK
break;
} else {
tinyexr::SetErrorMessage(
"Cannot reconstruct lineOffset table in DecodeEXRImage.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
}
{
std::string e;
int ret = DecodeChunk(exr_image, exr_header, offsets, head, size, &e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty()) {
tinyexr::SetErrorMessage(e, err);
}
#if 1
FreeEXRImage(exr_image);
#else
// release memory(if exists)
if ((exr_header->num_channels > 0) && exr_image && exr_image->images) {
for (size_t c = 0; c < size_t(exr_header->num_channels); c++) {
if (exr_image->images[c]) {
free(exr_image->images[c]);
exr_image->images[c] = NULL;
}
}
free(exr_image->images);
exr_image->images = NULL;
}
#endif
}
return ret;
}
}
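// Collect layer names from channel names of the form "<layer>.<channel>".
// For example (hypothetical channel names), "diffuse.R", "diffuse.G" and
// "specular.R" yield the layers {"diffuse", "specular"}; channel names
// without a '.' separator belong to no layer.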
static void GetLayers(const EXRHeader &exr_header,
std::vector<std::string> &layer_names) {
// Naive implementation
// Group channels by layers
// go over all channel names, split by periods
// collect unique names
layer_names.clear();
for (int c = 0; c < exr_header.num_channels; c++) {
std::string full_name(exr_header.channels[c].name);
const size_t pos = full_name.find_last_of('.');
if (pos != std::string::npos && pos != 0 && pos + 1 < full_name.size()) {
full_name.erase(pos);
if (std::find(layer_names.begin(), layer_names.end(), full_name) ==
layer_names.end())
layer_names.push_back(full_name);
}
}
}
struct LayerChannel {
explicit LayerChannel(size_t i, std::string n) : index(i), name(n) {}
size_t index;
std::string name;
};
static void ChannelsInLayer(const EXRHeader &exr_header,
const std::string layer_name,
std::vector<LayerChannel> &channels) {
channels.clear();
for (int c = 0; c < exr_header.num_channels; c++) {
std::string ch_name(exr_header.channels[c].name);
if (layer_name.empty()) {
const size_t pos = ch_name.find_last_of('.');
if (pos != std::string::npos && pos < ch_name.size()) {
ch_name = ch_name.substr(pos + 1);
}
} else {
const size_t pos = ch_name.find(layer_name + '.');
if (pos == std::string::npos) continue;
if (pos == 0) {
ch_name = ch_name.substr(layer_name.size() + 1);
}
}
LayerChannel ch(size_t(c), ch_name);
channels.push_back(ch);
}
}
} // namespace tinyexr
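// Minimal usage sketch for EXRLayers() ("input.exr" is a hypothetical file
// name). The returned array and each name string are heap-allocated and
// should be freed by the caller:
//
//   const char **layer_names = NULL;
//   int num_layers = 0;
//   const char *err = NULL;
//   if (EXRLayers("input.exr", &layer_names, &num_layers, &err) ==
//       TINYEXR_SUCCESS) {
//     for (int i = 0; i < num_layers; i++) {
//       printf("layer: %s\n", layer_names[i]);
//       free((void *)layer_names[i]);  // allocated with strdup()/_strdup()
//     }
//     free(layer_names);
//   }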
int EXRLayers(const char *filename, const char **layer_names[], int *num_layers,
const char **err) {
EXRVersion exr_version;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
{
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
tinyexr::SetErrorMessage("Invalid EXR header.", err);
return ret;
}
if (exr_version.multipart || exr_version.non_image) {
tinyexr::SetErrorMessage(
"Loading multipart or DeepImage is not supported in LoadEXR() API",
err);
return TINYEXR_ERROR_INVALID_DATA; // @fixme.
}
}
int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
std::vector<std::string> layer_vec;
tinyexr::GetLayers(exr_header, layer_vec);
(*num_layers) = int(layer_vec.size());
(*layer_names) = static_cast<const char **>(
malloc(sizeof(const char *) * static_cast<size_t>(layer_vec.size())));
for (size_t c = 0; c < static_cast<size_t>(layer_vec.size()); c++) {
#ifdef _MSC_VER
(*layer_names)[c] = _strdup(layer_vec[c].c_str());
#else
(*layer_names)[c] = strdup(layer_vec[c].c_str());
#endif
}
FreeEXRHeader(&exr_header);
return TINYEXR_SUCCESS;
}
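// Minimal usage sketch for LoadEXR() ("input.exr" is a hypothetical file
// name). On success, *out_rgba holds width * height * 4 floats (RGBA;
// alpha = 1.0 when the file has no A channel) and must be free()d:
//
//   float *rgba = NULL;
//   int width = 0, height = 0;
//   const char *err = NULL;
//   int ret = LoadEXR(&rgba, &width, &height, "input.exr", &err);
//   if (ret != TINYEXR_SUCCESS) {
//     if (err) {
//       fprintf(stderr, "EXR load error: %s\n", err);
//     }
//   } else {
//     // ... use rgba ...
//     free(rgba);
//   }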
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
const char **err) {
return LoadEXRWithLayer(out_rgba, width, height, filename,
/* layername */ NULL, err);
}
int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
const char *filename, const char *layername,
const char **err) {
if (out_rgba == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRVersion exr_version;
EXRImage exr_image;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
InitEXRImage(&exr_image);
{
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
std::stringstream ss;
ss << "Failed to open EXR file or read version info from EXR file. code("
<< ret << ")";
tinyexr::SetErrorMessage(ss.str(), err);
return ret;
}
if (exr_version.multipart || exr_version.non_image) {
tinyexr::SetErrorMessage(
"Loading multipart or DeepImage is not supported in LoadEXR() API",
err);
return TINYEXR_ERROR_INVALID_DATA; // @fixme.
}
}
{
int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
}
// Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) {
if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
}
}
// TODO: Probably limit loading to layers (channels) selected by layer index
{
int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
}
// RGBA
int idxR = -1;
int idxG = -1;
int idxB = -1;
int idxA = -1;
std::vector<std::string> layer_names;
tinyexr::GetLayers(exr_header, layer_names);
std::vector<tinyexr::LayerChannel> channels;
tinyexr::ChannelsInLayer(
exr_header, layername == NULL ? "" : std::string(layername), channels);
if (channels.size() < 1) {
tinyexr::SetErrorMessage("Layer Not Found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_LAYER_NOT_FOUND;
}
size_t ch_count = channels.size() < 4 ? channels.size() : 4;
for (size_t c = 0; c < ch_count; c++) {
const tinyexr::LayerChannel &ch = channels[c];
if (ch.name == "R") {
idxR = int(ch.index);
} else if (ch.name == "G") {
idxG = int(ch.index);
} else if (ch.name == "B") {
idxB = int(ch.index);
} else if (ch.name == "A") {
idxA = int(ch.index);
}
}
if (channels.size() == 1) {
int chIdx = int(channels.front().index);
// Grayscale channel only.
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii = exr_image.tiles[it].offset_x *
static_cast<int>(exr_header.tile_size_x) +
i;
const int jj = exr_image.tiles[it].offset_y *
static_cast<int>(exr_header.tile_size_y) +
j;
const int idx = ii + jj * static_cast<int>(exr_image.width);
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
const float val =
reinterpret_cast<float **>(exr_image.images)[chIdx][i];
(*out_rgba)[4 * i + 0] = val;
(*out_rgba)[4 * i + 1] = val;
(*out_rgba)[4 * i + 2] = val;
(*out_rgba)[4 * i + 3] = val;
}
}
} else {
// Assume RGB(A)
if (idxR == -1) {
tinyexr::SetErrorMessage("R channel not found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxG == -1) {
tinyexr::SetErrorMessage("G channel not found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxB == -1) {
tinyexr::SetErrorMessage("B channel not found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_INVALID_DATA;
}
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[idxR][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[idxG][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[idxB][srcIdx];
if (idxA != -1) {
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[idxA][srcIdx];
} else {
(*out_rgba)[4 * idx + 3] = 1.0;
}
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
(*out_rgba)[4 * i + 0] =
reinterpret_cast<float **>(exr_image.images)[idxR][i];
(*out_rgba)[4 * i + 1] =
reinterpret_cast<float **>(exr_image.images)[idxG][i];
(*out_rgba)[4 * i + 2] =
reinterpret_cast<float **>(exr_image.images)[idxB][i];
if (idxA != -1) {
(*out_rgba)[4 * i + 3] =
reinterpret_cast<float **>(exr_image.images)[idxA][i];
} else {
(*out_rgba)[4 * i + 3] = 1.0;
}
}
}
}
(*width) = exr_image.width;
(*height) = exr_image.height;
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_SUCCESS;
}
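// IsEXR() only checks that the EXR version/magic header can be parsed from
// the file; it does not validate the header attributes or pixel data.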
int IsEXR(const char *filename) {
EXRVersion exr_version;
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
return TINYEXR_SUCCESS;
}
int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err) {
if (memory == NULL || exr_header == NULL) {
tinyexr::SetErrorMessage(
"Invalid argument. `memory` or `exr_header` argument is null in "
"ParseEXRHeaderFromMemory()",
err);
// Invalid argument
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
tinyexr::SetErrorMessage("Insufficient header/data size.\n", err);
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
size_t marker_size = size - tinyexr::kEXRVersionSize;
tinyexr::HeaderInfo info;
info.clear();
std::string err_str;
int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size);
if (ret != TINYEXR_SUCCESS) {
if (err && !err_str.empty()) {
tinyexr::SetErrorMessage(err_str, err);
}
}
ConvertHeader(exr_header, info);
// transfer `tiled` from version.
exr_header->tiled = version->tiled;
return ret;
}
int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err) {
if (out_rgba == NULL || memory == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRVersion exr_version;
EXRImage exr_image;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
int ret = ParseEXRVersionFromMemory(&exr_version, memory, size);
if (ret != TINYEXR_SUCCESS) {
std::stringstream ss;
ss << "Failed to parse EXR version. code(" << ret << ")";
tinyexr::SetErrorMessage(ss.str(), err);
return ret;
}
ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
// Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) {
if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
}
}
InitEXRImage(&exr_image);
ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
// RGBA
int idxR = -1;
int idxG = -1;
int idxB = -1;
int idxA = -1;
for (int c = 0; c < exr_header.num_channels; c++) {
if (strcmp(exr_header.channels[c].name, "R") == 0) {
idxR = c;
} else if (strcmp(exr_header.channels[c].name, "G") == 0) {
idxG = c;
} else if (strcmp(exr_header.channels[c].name, "B") == 0) {
idxB = c;
} else if (strcmp(exr_header.channels[c].name, "A") == 0) {
idxA = c;
}
}
// TODO(syoyo): Refactor to remove code duplicated from LoadEXR().
if (exr_header.num_channels == 1) {
// Grayscale channel only.
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[0][srcIdx];
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
(*out_rgba)[4 * i + 0] = val;
(*out_rgba)[4 * i + 1] = val;
(*out_rgba)[4 * i + 2] = val;
(*out_rgba)[4 * i + 3] = val;
}
}
} else {
// TODO(syoyo): Support non RGBA image.
if (idxR == -1) {
tinyexr::SetErrorMessage("R channel not found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxG == -1) {
tinyexr::SetErrorMessage("G channel not found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxB == -1) {
tinyexr::SetErrorMessage("B channel not found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_INVALID_DATA;
}
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++)
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[idxR][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[idxG][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[idxB][srcIdx];
if (idxA != -1) {
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[idxA][srcIdx];
} else {
(*out_rgba)[4 * idx + 3] = 1.0;
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
(*out_rgba)[4 * i + 0] =
reinterpret_cast<float **>(exr_image.images)[idxR][i];
(*out_rgba)[4 * i + 1] =
reinterpret_cast<float **>(exr_image.images)[idxG][i];
(*out_rgba)[4 * i + 2] =
reinterpret_cast<float **>(exr_image.images)[idxB][i];
if (idxA != -1) {
(*out_rgba)[4 * i + 3] =
reinterpret_cast<float **>(exr_image.images)[idxA][i];
} else {
(*out_rgba)[4 * i + 3] = 1.0;
}
}
}
}
(*width) = exr_image.width;
(*height) = exr_image.height;
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_SUCCESS;
}
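// Reads the whole file into memory, then decodes it with
// LoadEXRImageFromMemory(). The header must have been parsed beforehand
// (ParseEXRHeaderFromFile/FromMemory), since decoding relies on
// exr_header->header_len and the channel/compression settings.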
int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header,
const char *filename, const char **err) {
if (exr_image == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t errcode =
_wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
if (errcode != 0) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
// TODO(syoyo): return _wfopen_s error code
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "rb");
#endif
#else
fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (filesize < 16) {
fclose(fp);
tinyexr::SetErrorMessage("File size too short " + std::string(filename),
err);
return TINYEXR_ERROR_INVALID_FILE;
}
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
(void)ret;
}
return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize,
err);
}
int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header,
const unsigned char *memory, const size_t size,
const char **err) {
if (exr_image == NULL || memory == NULL ||
(size < tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory",
err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (exr_header->header_len == 0) {
tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
const unsigned char *head = memory;
const unsigned char *marker = reinterpret_cast<const unsigned char *>(
memory + exr_header->header_len +
8); // +8 for magic number + version header.
return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size,
err);
}
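// Serialize `exr_image` to a scanline EXR in memory. The layout written
// below is: 4-byte magic, 4-byte version, the attribute list, a single
// 0x00 end-of-header byte, the chunk offset table, then one chunk per
// scanline block (4-byte scanline number, 4-byte data size, pixel data).
// Returns the total size in bytes, or 0 on error; *memory_out is allocated
// with malloc() and must be free()d by the caller.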
size_t SaveEXRImageToMemory(const EXRImage *exr_image,
const EXRHeader *exr_header,
unsigned char **memory_out, const char **err) {
if (exr_image == NULL || memory_out == NULL ||
exr_header->compression_type < 0) {
tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToMemory", err);
return 0;
}
#if !TINYEXR_USE_PIZ
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
err);
return 0;
}
#endif
#if !TINYEXR_USE_ZFP
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
err);
return 0;
}
#endif
#if TINYEXR_USE_ZFP
for (size_t i = 0; i < static_cast<size_t>(exr_header->num_channels); i++) {
if (exr_header->requested_pixel_types[i] != TINYEXR_PIXELTYPE_FLOAT) {
tinyexr::SetErrorMessage("Pixel type must be FLOAT for ZFP compression",
err);
return 0;
}
}
#endif
std::vector<unsigned char> memory;
// Header
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
memory.insert(memory.end(), header, header + 4);
}
// Version, scanline.
{
char marker[] = {2, 0, 0, 0};
/* @todo
if (exr_header->tiled) {
marker[1] |= 0x2;
}
if (exr_header->long_name) {
marker[1] |= 0x4;
}
if (exr_header->non_image) {
marker[1] |= 0x8;
}
if (exr_header->multipart) {
marker[1] |= 0x10;
}
*/
memory.insert(memory.end(), marker, marker + 4);
}
int num_scanlines = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanlines = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanlines = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanlines = 16;
}
// Write attributes.
std::vector<tinyexr::ChannelInfo> channels;
{
std::vector<unsigned char> data;
for (int c = 0; c < exr_header->num_channels; c++) {
tinyexr::ChannelInfo info;
info.p_linear = 0;
info.pixel_type = exr_header->requested_pixel_types[c];
info.x_sampling = 1;
info.y_sampling = 1;
info.name = std::string(exr_header->channels[c].name);
channels.push_back(info);
}
tinyexr::WriteChannelInfo(data, channels);
tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0),
static_cast<int>(data.size()));
}
{
int comp = exr_header->compression_type;
tinyexr::swap4(&comp);
tinyexr::WriteAttributeToMemory(
&memory, "compression", "compression",
reinterpret_cast<const unsigned char *>(&comp), 1);
}
{
int data[4] = {0, 0, exr_image->width - 1, exr_image->height - 1};
tinyexr::swap4(&data[0]);
tinyexr::swap4(&data[1]);
tinyexr::swap4(&data[2]);
tinyexr::swap4(&data[3]);
tinyexr::WriteAttributeToMemory(
&memory, "dataWindow", "box2i",
reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4);
tinyexr::WriteAttributeToMemory(
&memory, "displayWindow", "box2i",
reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4);
}
{
unsigned char line_order = 0; // @fixme { read line_order from EXRHeader }
tinyexr::WriteAttributeToMemory(&memory, "lineOrder", "lineOrder",
&line_order, 1);
}
{
float aspectRatio = 1.0f;
tinyexr::swap4(&aspectRatio);
tinyexr::WriteAttributeToMemory(
&memory, "pixelAspectRatio", "float",
reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float));
}
{
float center[2] = {0.0f, 0.0f};
tinyexr::swap4(&center[0]);
tinyexr::swap4(&center[1]);
tinyexr::WriteAttributeToMemory(
&memory, "screenWindowCenter", "v2f",
reinterpret_cast<const unsigned char *>(center), 2 * sizeof(float));
}
{
float w = static_cast<float>(exr_image->width);
tinyexr::swap4(&w);
tinyexr::WriteAttributeToMemory(&memory, "screenWindowWidth", "float",
reinterpret_cast<const unsigned char *>(&w),
sizeof(float));
}
// Custom attributes
if (exr_header->num_custom_attributes > 0) {
for (int i = 0; i < exr_header->num_custom_attributes; i++) {
tinyexr::WriteAttributeToMemory(
&memory, exr_header->custom_attributes[i].name,
exr_header->custom_attributes[i].type,
reinterpret_cast<const unsigned char *>(
exr_header->custom_attributes[i].value),
exr_header->custom_attributes[i].size);
}
}
{ // end of header
unsigned char e = 0;
memory.push_back(e);
}
int num_blocks = exr_image->height / num_scanlines;
if (num_blocks * num_scanlines < exr_image->height) {
num_blocks++;
}
std::vector<tinyexr::tinyexr_uint64> offsets(static_cast<size_t>(num_blocks));
size_t headerSize = memory.size();
tinyexr::tinyexr_uint64 offset =
headerSize +
static_cast<size_t>(num_blocks) *
sizeof(
tinyexr::tinyexr_int64); // sizeof(header) + sizeof(offsetTable)
std::vector<std::vector<unsigned char> > data_list(
static_cast<size_t>(num_blocks));
std::vector<size_t> channel_offset_list(
static_cast<size_t>(exr_header->num_channels));
int pixel_data_size = 0;
size_t channel_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
channel_offset_list[c] = channel_offset;
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
pixel_data_size += sizeof(unsigned short);
channel_offset += sizeof(unsigned short);
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_FLOAT) {
pixel_data_size += sizeof(float);
channel_offset += sizeof(float);
} else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
pixel_data_size += sizeof(unsigned int);
channel_offset += sizeof(unsigned int);
} else {
assert(0);
}
}
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
// Use ZFP compression parameter from custom attributes(if such a parameter
// exists)
{
std::string e;
bool ret = tinyexr::FindZFPCompressionParam(
&zfp_compression_param, exr_header->custom_attributes,
exr_header->num_custom_attributes, &e);
if (!ret) {
// Use predefined compression parameter.
zfp_compression_param.type = 0;
zfp_compression_param.rate = 2;
}
}
#endif
// TODO(LTE): C++11 thread
// Use signed int since some OpenMP compiler doesn't allow unsigned type for
// `parallel for`
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < num_blocks; i++) {
size_t ii = static_cast<size_t>(i);
int start_y = num_scanlines * i;
int endY = (std::min)(num_scanlines * (i + 1), exr_image->height);
int h = endY - start_y;
std::vector<unsigned char> buf(
static_cast<size_t>(exr_image->width * h * pixel_data_size));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
float *line_ptr = reinterpret_cast<float *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
tinyexr::FP16 h16;
h16.u = reinterpret_cast<unsigned short **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::FP32 f32 = half_to_float(h16);
tinyexr::swap4(&f32.f);
// line_ptr[x] = f32.f;
tinyexr::cpy4(line_ptr + x, &(f32.f));
}
}
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_HALF) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&buf.at(static_cast<size_t>(pixel_data_size * y *
exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
unsigned short val = reinterpret_cast<unsigned short **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap2(&val);
// line_ptr[x] = val;
tinyexr::cpy2(line_ptr + x, &val);
}
}
} else {
assert(0);
}
} else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&buf.at(static_cast<size_t>(pixel_data_size * y *
exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
tinyexr::FP32 f32;
f32.f = reinterpret_cast<float **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::FP16 h16;
h16 = float_to_half_full(f32);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u));
// line_ptr[x] = h16.u;
tinyexr::cpy2(line_ptr + x, &(h16.u));
}
}
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_FLOAT) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
float *line_ptr = reinterpret_cast<float *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
float val = reinterpret_cast<float **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap4(&val);
// line_ptr[x] = val;
tinyexr::cpy4(line_ptr + x, &val);
}
}
} else {
assert(0);
}
} else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] * static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
unsigned int val = reinterpret_cast<unsigned int **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap4(&val);
// line_ptr[x] = val;
tinyexr::cpy4(line_ptr + x, &val);
}
}
}
}
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(buf.size());
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), buf.begin(),
buf.begin() + data_len);
} else if ((exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#if TINYEXR_USE_MINIZ
std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound(
static_cast<unsigned long>(buf.size())));
#else
std::vector<unsigned char> block(
compressBound(static_cast<uLong>(buf.size())));
#endif
tinyexr::tinyexr_uint64 outSize = block.size();
tinyexr::CompressZip(&block.at(0), outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
static_cast<unsigned long>(buf.size()));
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
// (buf.size() * 3) / 2 would be enough.
std::vector<unsigned char> block((buf.size() * 3) / 2);
tinyexr::tinyexr_uint64 outSize = block.size();
tinyexr::CompressRle(&block.at(0), outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
static_cast<unsigned long>(buf.size()));
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
unsigned int bufLen =
8192 + static_cast<unsigned int>(
2 * static_cast<unsigned int>(
buf.size())); // @fixme { compute good bound. }
std::vector<unsigned char> block(bufLen);
unsigned int outSize = static_cast<unsigned int>(block.size());
CompressPiz(&block.at(0), &outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
buf.size(), channels, exr_image->width, h);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = outSize;
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
#else
assert(0);
#endif
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
std::vector<unsigned char> block;
unsigned int outSize;
tinyexr::CompressZfp(
&block, &outSize, reinterpret_cast<const float *>(&buf.at(0)),
exr_image->width, h, exr_header->num_channels, zfp_compression_param);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = outSize;
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
#else
assert(0);
#endif
} else {
assert(0);
}
} // omp parallel
for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
offsets[i] = offset;
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i]));
offset += data_list[i].size();
}
size_t totalSize = static_cast<size_t>(offset);
{
memory.insert(
memory.end(), reinterpret_cast<unsigned char *>(&offsets.at(0)),
reinterpret_cast<unsigned char *>(&offsets.at(0)) +
sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(num_blocks));
}
if (memory.size() == 0) {
tinyexr::SetErrorMessage("Output memory size is zero", err);
return 0;
}
(*memory_out) = static_cast<unsigned char *>(malloc(totalSize));
memcpy((*memory_out), &memory.at(0), memory.size());
unsigned char *memory_ptr = *memory_out + memory.size();
for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
memcpy(memory_ptr, &data_list[i].at(0), data_list[i].size());
memory_ptr += data_list[i].size();
}
return totalSize; // OK
}
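// Convenience wrapper: serializes with SaveEXRImageToMemory() and writes the
// resulting buffer to `filename`, reporting a write error if fewer bytes are
// written than were serialized.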
int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header,
const char *filename, const char **err) {
if (exr_image == NULL || filename == NULL ||
exr_header->compression_type < 0) {
tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#if !TINYEXR_USE_PIZ
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
err);
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
#endif
#if !TINYEXR_USE_ZFP
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
err);
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
#endif
FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t errcode =
_wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb");
if (errcode != 0) {
tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "wb");
#endif
#else
fp = fopen(filename, "wb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
unsigned char *mem = NULL;
size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err);
if (mem_size == 0) {
fclose(fp);
return TINYEXR_ERROR_SERIALZATION_FAILED;
}
size_t written_size = 0;
if ((mem_size > 0) && mem) {
written_size = fwrite(mem, 1, mem_size, fp);
}
free(mem);
fclose(fp);
if (written_size != mem_size) {
tinyexr::SetErrorMessage("Cannot write a file", err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
return TINYEXR_SUCCESS;
}
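// Load a deep scanline EXR (version 2.0 with the deep-data bit set in the
// version flags). Only NONE/RLE/ZIPS/ZIP compression (plus PIZ when
// TINYEXR_USE_PIZ is enabled) is accepted further below.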
int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) {
if (deep_image == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t errcode =
_wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
if (errcode != 0) {
tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
FILE *fp = fopen(filename, "rb");
if (!fp) {
tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#endif
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (filesize == 0) {
fclose(fp);
tinyexr::SetErrorMessage("File size is zero : " + std::string(filename),
err);
return TINYEXR_ERROR_INVALID_FILE;
}
std::vector<char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
(void)ret;
}
fclose(fp);
const char *head = &buf[0];
const char *marker = &buf[0];
// Header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
if (memcmp(marker, header, 4) != 0) {
tinyexr::SetErrorMessage("Invalid magic number", err);
return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
}
marker += 4;
}
// Version, scanline.
{
// ver 2.0, scanline, deep bit on(0x800)
// 9'th, 11'th and 12'th bit must be 0: data is stored as regular scanline
// TODO: support long names(9th bit: 0x200)
// so, values must be [2, 8, 0, 0]
if (marker[0] != 2 || (marker[1] != 8) || marker[2] != 0 || marker[3] != 0) {
tinyexr::SetErrorMessage("Unsupported version or scanline", err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
marker += 4;
}
int dx = -1;
int dy = -1;
int dw = -1;
int dh = -1;
int num_scanline_blocks = 1; // 16 for ZIP compression.
int compression_type = -1;
int num_channels = -1;
std::vector<tinyexr::ChannelInfo> channels;
// Read attributes
size_t size = filesize - tinyexr::kEXRVersionSize;
for (;;) {
if (0 == size) {
return TINYEXR_ERROR_INVALID_DATA;
} else if (marker[0] == '\0') {
marker++;
size--;
break;
}
std::string attr_name;
std::string attr_type;
std::vector<unsigned char> data;
size_t marker_size;
if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
marker, size)) {
std::stringstream ss;
ss << "Failed to parse attribute\n";
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_INVALID_DATA;
}
marker += marker_size;
size -= marker_size;
if (attr_name.compare("compression") == 0) {
compression_type = data[0];
if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) {
std::stringstream ss;
ss << "Unsupported compression type : " << compression_type;
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
}
} else if (attr_name.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
if (!tinyexr::ReadChannelInfo(channels, data)) {
tinyexr::SetErrorMessage("Failed to parse channel info", err);
return TINYEXR_ERROR_INVALID_DATA;
}
num_channels = static_cast<int>(channels.size());
if (num_channels < 1) {
tinyexr::SetErrorMessage("Invalid channels format", err);
return TINYEXR_ERROR_INVALID_DATA;
}
} else if (attr_name.compare("dataWindow") == 0) {
memcpy(&dx, &data.at(0), sizeof(int));
memcpy(&dy, &data.at(4), sizeof(int));
memcpy(&dw, &data.at(8), sizeof(int));
memcpy(&dh, &data.at(12), sizeof(int));
tinyexr::swap4(&dx);
tinyexr::swap4(&dy);
tinyexr::swap4(&dw);
tinyexr::swap4(&dh);
} else if (attr_name.compare("displayWindow") == 0) {
int x;
int y;
int w;
int h;
memcpy(&x, &data.at(0), sizeof(int));
memcpy(&y, &data.at(4), sizeof(int));
memcpy(&w, &data.at(8), sizeof(int));
memcpy(&h, &data.at(12), sizeof(int));
tinyexr::swap4(&x);
tinyexr::swap4(&y);
tinyexr::swap4(&w);
tinyexr::swap4(&h);
}
}
assert(dx >= 0);
assert(dy >= 0);
assert(dw >= 0);
assert(dh >= 0);
assert(num_channels >= 1);
int data_width = dw - dx + 1;
int data_height = dh - dy + 1;
std::vector<float> image(
static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA
// Read offset tables.
int num_blocks = data_height / num_scanline_blocks;
if (num_blocks * num_scanline_blocks < data_height) {
num_blocks++;
}
std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks));
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
tinyexr::tinyexr_int64 offset;
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64));
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset));
marker += sizeof(tinyexr::tinyexr_int64); // = 8
offsets[y] = offset;
}
#if TINYEXR_USE_PIZ
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) {
#else
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#endif
// OK
} else {
tinyexr::SetErrorMessage("Unsupported compression format", err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
deep_image->image = static_cast<float ***>(
malloc(sizeof(float **) * static_cast<size_t>(num_channels)));
for (int c = 0; c < num_channels; c++) {
deep_image->image[c] = static_cast<float **>(
malloc(sizeof(float *) * static_cast<size_t>(data_height)));
}
deep_image->offset_table = static_cast<int **>(
malloc(sizeof(int *) * static_cast<size_t>(data_height)));
for (int y = 0; y < data_height; y++) {
deep_image->offset_table[y] = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(data_width)));
}
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y]);
// int: y coordinate
// int64: packed size of pixel offset table
// int64: packed size of sample data
// int64: unpacked size of sample data
// compressed pixel offset table
// compressed sample data
int line_no;
tinyexr::tinyexr_int64 packedOffsetTableSize;
tinyexr::tinyexr_int64 packedSampleDataSize;
tinyexr::tinyexr_int64 unpackedSampleDataSize;
memcpy(&line_no, data_ptr, sizeof(int));
memcpy(&packedOffsetTableSize, data_ptr + 4,
sizeof(tinyexr::tinyexr_int64));
memcpy(&packedSampleDataSize, data_ptr + 12,
sizeof(tinyexr::tinyexr_int64));
memcpy(&unpackedSampleDataSize, data_ptr + 20,
sizeof(tinyexr::tinyexr_int64));
tinyexr::swap4(&line_no);
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize));
std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width));
// decode pixel offset table.
{
unsigned long dstLen =
static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int));
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)),
&dstLen, data_ptr + 28,
static_cast<unsigned long>(packedOffsetTableSize))) {
tinyexr::SetErrorMessage("Failed to decompress pixel offset table", err);
return TINYEXR_ERROR_INVALID_DATA;
}
assert(dstLen == pixelOffsetTable.size() * sizeof(int));
for (size_t i = 0; i < static_cast<size_t>(data_width); i++) {
deep_image->offset_table[y][i] = pixelOffsetTable[i];
}
}
std::vector<unsigned char> sample_data(
static_cast<size_t>(unpackedSampleDataSize));
// decode sample data.
{
unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize);
if (dstLen) {
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen,
data_ptr + 28 + packedOffsetTableSize,
static_cast<unsigned long>(packedSampleDataSize))) {
tinyexr::SetErrorMessage("Failed to decompress sample data", err);
return TINYEXR_ERROR_INVALID_DATA;
}
assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize));
}
}
// decode sample
int sampleSize = -1;
std::vector<int> channel_offset_list(static_cast<size_t>(num_channels));
{
int channel_offset = 0;
for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) {
channel_offset_list[i] = channel_offset;
if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT
channel_offset += 4;
} else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half
channel_offset += 2;
} else if (channels[i].pixel_type ==
TINYEXR_PIXELTYPE_FLOAT) { // float
channel_offset += 4;
} else {
assert(0);
}
}
sampleSize = channel_offset;
}
assert(sampleSize >= 2);
assert(static_cast<size_t>(
pixelOffsetTable[static_cast<size_t>(data_width - 1)] *
sampleSize) == sample_data.size());
int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize;
//
// Alloc memory
//
//
// pixel data is stored as image[channels][pixel_samples]
//
{
tinyexr::tinyexr_uint64 data_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
deep_image->image[c][y] = static_cast<float *>(
malloc(sizeof(float) * static_cast<size_t>(samples_per_line)));
if (channels[c].pixel_type == 0) { // UINT
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
unsigned int ui;
unsigned int *src_ptr = reinterpret_cast<unsigned int *>(
&sample_data.at(size_t(data_offset) + x * sizeof(int)));
tinyexr::cpy4(&ui, src_ptr);
deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme
}
data_offset +=
sizeof(unsigned int) * static_cast<size_t>(samples_per_line);
} else if (channels[c].pixel_type == 1) { // half
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
tinyexr::FP16 f16;
const unsigned short *src_ptr = reinterpret_cast<unsigned short *>(
&sample_data.at(size_t(data_offset) + x * sizeof(short)));
tinyexr::cpy2(&(f16.u), src_ptr);
tinyexr::FP32 f32 = half_to_float(f16);
deep_image->image[c][y][x] = f32.f;
}
data_offset += sizeof(short) * static_cast<size_t>(samples_per_line);
} else { // float
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
float f;
const float *src_ptr = reinterpret_cast<float *>(
&sample_data.at(size_t(data_offset) + x * sizeof(float)));
tinyexr::cpy4(&f, src_ptr);
deep_image->image[c][y][x] = f;
}
data_offset += sizeof(float) * static_cast<size_t>(samples_per_line);
}
}
}
} // y
deep_image->width = data_width;
deep_image->height = data_height;
deep_image->channel_names = static_cast<const char **>(
malloc(sizeof(const char *) * static_cast<size_t>(num_channels)));
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
#ifdef _WIN32
deep_image->channel_names[c] = _strdup(channels[c].name.c_str());
#else
deep_image->channel_names[c] = strdup(channels[c].name.c_str());
#endif
}
deep_image->num_channels = num_channels;
return TINYEXR_SUCCESS;
}
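// Usage sketch (illustrative, not part of the library): a minimal walk over
// the data filled in by LoadDeepEXR() above. The file name "deep.exr" is an
// assumption; the field layout follows the code of this function
// (image[channel][scanline][sample], offset_table[y][x] = cumulative sample
// count on scanline y up to pixel x).
//
// DeepImage di;
// const char *err = NULL;
// if (LoadDeepEXR(&di, "deep.exr", &err) == TINYEXR_SUCCESS) {
//   int num_samples_line0 = di.offset_table[0][di.width - 1];
//   for (int s = 0; s < num_samples_line0; s++) {
//     float v = di.image[0][0][s];  // channel 0, scanline 0, sample s
//     (void)v;
//   }
// } else {
//   FreeEXRErrorMessage(err);  // safe even if err is NULL
// }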
void InitEXRImage(EXRImage *exr_image) {
if (exr_image == NULL) {
return;
}
exr_image->width = 0;
exr_image->height = 0;
exr_image->num_channels = 0;
exr_image->images = NULL;
exr_image->tiles = NULL;
exr_image->num_tiles = 0;
}
void FreeEXRErrorMessage(const char *msg) {
if (msg) {
free(reinterpret_cast<void *>(const_cast<char *>(msg)));
}
return;
}
void InitEXRHeader(EXRHeader *exr_header) {
if (exr_header == NULL) {
return;
}
memset(exr_header, 0, sizeof(EXRHeader));
}
int FreeEXRHeader(EXRHeader *exr_header) {
if (exr_header == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (exr_header->channels) {
free(exr_header->channels);
}
if (exr_header->pixel_types) {
free(exr_header->pixel_types);
}
if (exr_header->requested_pixel_types) {
free(exr_header->requested_pixel_types);
}
for (int i = 0; i < exr_header->num_custom_attributes; i++) {
if (exr_header->custom_attributes[i].value) {
free(exr_header->custom_attributes[i].value);
}
}
if (exr_header->custom_attributes) {
free(exr_header->custom_attributes);
}
return TINYEXR_SUCCESS;
}
int FreeEXRImage(EXRImage *exr_image) {
if (exr_image == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
for (int i = 0; i < exr_image->num_channels; i++) {
if (exr_image->images && exr_image->images[i]) {
free(exr_image->images[i]);
}
}
if (exr_image->images) {
free(exr_image->images);
}
if (exr_image->tiles) {
for (int tid = 0; tid < exr_image->num_tiles; tid++) {
for (int i = 0; i < exr_image->num_channels; i++) {
if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) {
free(exr_image->tiles[tid].images[i]);
}
}
if (exr_image->tiles[tid].images) {
free(exr_image->tiles[tid].images);
}
}
free(exr_image->tiles);
}
return TINYEXR_SUCCESS;
}
int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version,
const char *filename, const char **err) {
if (exr_header == NULL || exr_version == NULL || filename == NULL) {
tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile",
err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t errcode =
_wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
if (errcode != 0) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_INVALID_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "rb");
#endif
#else
fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
if (ret != filesize) {
tinyexr::SetErrorMessage("fread() error on " + std::string(filename),
err);
return TINYEXR_ERROR_INVALID_FILE;
}
}
return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize,
err);
}
int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers,
int *num_headers,
const EXRVersion *exr_version,
const unsigned char *memory, size_t size,
const char **err) {
if (memory == NULL || exr_headers == NULL || num_headers == NULL ||
exr_version == NULL) {
// Invalid argument
tinyexr::SetErrorMessage(
"Invalid argument for ParseEXRMultipartHeaderFromMemory", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
tinyexr::SetErrorMessage("Data size too short", err);
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
size_t marker_size = size - tinyexr::kEXRVersionSize;
std::vector<tinyexr::HeaderInfo> infos;
for (;;) {
tinyexr::HeaderInfo info;
info.clear();
std::string err_str;
bool empty_header = false;
int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str,
marker, marker_size);
if (ret != TINYEXR_SUCCESS) {
tinyexr::SetErrorMessage(err_str, err);
return ret;
}
if (empty_header) {
marker += 1; // skip '\0'
break;
}
// `chunkCount` must exist in the header.
if (info.chunk_count == 0) {
tinyexr::SetErrorMessage(
"`chunkCount' attribute is not found in the header.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
infos.push_back(info);
// move to next header.
marker += info.header_len;
marker_size -= info.header_len;
}
// allocate memory for EXRHeader and create array of EXRHeader pointers.
(*exr_headers) =
static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size()));
for (size_t i = 0; i < infos.size(); i++) {
EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader)));
ConvertHeader(exr_header, infos[i]);
// transfer `tiled` from version.
exr_header->tiled = exr_version->tiled;
(*exr_headers)[i] = exr_header;
}
(*num_headers) = static_cast<int>(infos.size());
return TINYEXR_SUCCESS;
}
int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers,
const EXRVersion *exr_version,
const char *filename, const char **err) {
if (exr_headers == NULL || num_headers == NULL || exr_version == NULL ||
filename == NULL) {
tinyexr::SetErrorMessage(
"Invalid argument for ParseEXRMultipartHeaderFromFile()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t errcode =
_wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
if (errcode != 0) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_INVALID_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "rb");
#endif
#else
fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
if (ret != filesize) {
tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err);
return TINYEXR_ERROR_INVALID_FILE;
}
}
return ParseEXRMultipartHeaderFromMemory(
exr_headers, num_headers, exr_version, &buf.at(0), filesize, err);
}
int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory,
size_t size) {
if (version == NULL || memory == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory;
// Header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
if (memcmp(marker, header, 4) != 0) {
return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
}
marker += 4;
}
version->tiled = false;
version->long_name = false;
version->non_image = false;
version->multipart = false;
// Parse version header.
{
// must be 2
if (marker[0] != 2) {
return TINYEXR_ERROR_INVALID_EXR_VERSION;
}
if (version == NULL) {
return TINYEXR_SUCCESS; // defensive; `version` was already checked above
}
version->version = 2;
if (marker[1] & 0x2) { // 9th bit
version->tiled = true;
}
if (marker[1] & 0x4) { // 10th bit
version->long_name = true;
}
if (marker[1] & 0x8) { // 11th bit
version->non_image = true; // (deep image)
}
if (marker[1] & 0x10) { // 12th bit
version->multipart = true;
}
}
return TINYEXR_SUCCESS;
}
int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) {
if (filename == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t err = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
if (err != 0) {
// TODO(syoyo): return wfopen_s error code
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "rb");
#endif
#else
fp = fopen(filename, "rb");
#endif
if (!fp) {
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t file_size;
// Compute size
fseek(fp, 0, SEEK_END);
file_size = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (file_size < tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_FILE;
}
unsigned char buf[tinyexr::kEXRVersionSize];
size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp);
fclose(fp);
if (ret != tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_FILE;
}
return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize);
}
int LoadEXRMultipartImageFromMemory(EXRImage *exr_images,
const EXRHeader **exr_headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err) {
if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
memory == NULL || (size <= tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage(
"Invalid argument for LoadEXRMultipartImageFromMemory()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
// compute total header size.
size_t total_header_size = 0;
for (unsigned int i = 0; i < num_parts; i++) {
if (exr_headers[i]->header_len == 0) {
tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
total_header_size += exr_headers[i]->header_len;
}
const char *marker = reinterpret_cast<const char *>(
memory + total_header_size + 4 +
4); // +8 for magic number and version header.
marker += 1; // Skip empty header.
// NOTE 1:
// In a multipart image, there is a 'part number' before the chunk data.
// 4 bytes : part number
// 4+ bytes : chunk
//
// NOTE 2:
// The EXR spec says 'part number' is 'unsigned long', but it is actually an
// 'unsigned int' (4 bytes) in the OpenEXR implementation...
// http://www.openexr.com/openexrfilelayout.pdf
// Load chunk offset table.
std::vector<std::vector<tinyexr::tinyexr_uint64> > chunk_offset_table_list;
for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
std::vector<tinyexr::tinyexr_uint64> offset_table(
static_cast<size_t>(exr_headers[i]->chunk_count));
for (size_t c = 0; c < offset_table.size(); c++) {
tinyexr::tinyexr_uint64 offset;
memcpy(&offset, marker, 8);
tinyexr::swap8(&offset);
if (offset >= size) {
tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
err);
return TINYEXR_ERROR_INVALID_DATA;
}
offset_table[c] = offset + 4; // +4 to skip 'part number'
marker += 8;
}
chunk_offset_table_list.push_back(offset_table);
}
// Decode image.
for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
std::vector<tinyexr::tinyexr_uint64> &offset_table =
chunk_offset_table_list[i];
// First check that the 'part number' is identical to 'i'
for (size_t c = 0; c < offset_table.size(); c++) {
const unsigned char *part_number_addr =
memory + offset_table[c] - 4; // -4 to move to 'part number' field.
unsigned int part_no;
memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4
tinyexr::swap4(&part_no);
if (part_no != i) {
tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.",
err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
std::string e;
int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_table,
memory, size, &e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty()) {
tinyexr::SetErrorMessage(e, err);
}
return ret;
}
}
return TINYEXR_SUCCESS;
}
int LoadEXRMultipartImageFromFile(EXRImage *exr_images,
const EXRHeader **exr_headers,
unsigned int num_parts, const char *filename,
const char **err) {
if (exr_images == NULL || exr_headers == NULL || num_parts == 0) {
tinyexr::SetErrorMessage(
"Invalid argument for LoadEXRMultipartImageFromFile", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t errcode =
_wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
if (errcode != 0) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "rb");
#endif
#else
fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
(void)ret;
}
return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts,
&buf.at(0), filesize, err);
}
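// Usage sketch (illustrative): a typical multipart load chains the helpers
// defined in this file. The file name "multi.exr" and the error handling are
// assumptions; the call signatures and cleanup steps follow the functions
// above (headers are malloc()ed per part, so each one is freed individually).
//
// const char *err = NULL;
// EXRVersion version;
// if (ParseEXRVersionFromFile(&version, "multi.exr") != TINYEXR_SUCCESS ||
//     !version.multipart) { /* not a multipart EXR */ }
// EXRHeader **headers = NULL;
// int num_parts = 0;
// if (ParseEXRMultipartHeaderFromFile(&headers, &num_parts, &version,
//                                     "multi.exr", &err) != TINYEXR_SUCCESS) {
//   FreeEXRErrorMessage(err);
// }
// std::vector<EXRImage> images(static_cast<size_t>(num_parts));
// for (int i = 0; i < num_parts; i++) InitEXRImage(&images[i]);
// if (LoadEXRMultipartImageFromFile(images.data(),
//                                   (const EXRHeader **)headers, num_parts,
//                                   "multi.exr", &err) == TINYEXR_SUCCESS) {
//   // images[i].images[c] now holds channel c of part i (scanline parts)
// }
// for (int i = 0; i < num_parts; i++) {
//   FreeEXRImage(&images[i]);
//   FreeEXRHeader(headers[i]);
//   free(headers[i]);
// }
// free(headers);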
int SaveEXR(const float *data, int width, int height, int components,
const int save_as_fp16, const char *outfilename, const char **err) {
if ((components == 1) || components == 3 || components == 4) {
// OK
} else {
std::stringstream ss;
ss << "Unsupported component value : " << components << std::endl;
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRHeader header;
InitEXRHeader(&header);
if ((width < 16) && (height < 16)) {
// No compression for small image.
header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE;
} else {
header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP;
}
EXRImage image;
InitEXRImage(&image);
image.num_channels = components;
std::vector<float> images[4];
if (components == 1) {
images[0].resize(static_cast<size_t>(width * height));
memcpy(images[0].data(), data, sizeof(float) * size_t(width * height));
} else {
images[0].resize(static_cast<size_t>(width * height));
images[1].resize(static_cast<size_t>(width * height));
images[2].resize(static_cast<size_t>(width * height));
images[3].resize(static_cast<size_t>(width * height));
// Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers
for (size_t i = 0; i < static_cast<size_t>(width * height); i++) {
images[0][i] = data[static_cast<size_t>(components) * i + 0];
images[1][i] = data[static_cast<size_t>(components) * i + 1];
images[2][i] = data[static_cast<size_t>(components) * i + 2];
if (components == 4) {
images[3][i] = data[static_cast<size_t>(components) * i + 3];
}
}
}
float *image_ptr[4] = {0, 0, 0, 0};
if (components == 4) {
image_ptr[0] = &(images[3].at(0)); // A
image_ptr[1] = &(images[2].at(0)); // B
image_ptr[2] = &(images[1].at(0)); // G
image_ptr[3] = &(images[0].at(0)); // R
} else if (components == 3) {
image_ptr[0] = &(images[2].at(0)); // B
image_ptr[1] = &(images[1].at(0)); // G
image_ptr[2] = &(images[0].at(0)); // R
} else if (components == 1) {
image_ptr[0] = &(images[0].at(0)); // A
}
image.images = reinterpret_cast<unsigned char **>(image_ptr);
image.width = width;
image.height = height;
header.num_channels = components;
header.channels = static_cast<EXRChannelInfo *>(malloc(
sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels)));
// Must be (A)BGR order, since most EXR viewers expect this channel order.
if (components == 4) {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "A", 255);
strncpy_s(header.channels[1].name, "B", 255);
strncpy_s(header.channels[2].name, "G", 255);
strncpy_s(header.channels[3].name, "R", 255);
#else
strncpy(header.channels[0].name, "A", 255);
strncpy(header.channels[1].name, "B", 255);
strncpy(header.channels[2].name, "G", 255);
strncpy(header.channels[3].name, "R", 255);
#endif
header.channels[0].name[strlen("A")] = '\0';
header.channels[1].name[strlen("B")] = '\0';
header.channels[2].name[strlen("G")] = '\0';
header.channels[3].name[strlen("R")] = '\0';
} else if (components == 3) {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "B", 255);
strncpy_s(header.channels[1].name, "G", 255);
strncpy_s(header.channels[2].name, "R", 255);
#else
strncpy(header.channels[0].name, "B", 255);
strncpy(header.channels[1].name, "G", 255);
strncpy(header.channels[2].name, "R", 255);
#endif
header.channels[0].name[strlen("B")] = '\0';
header.channels[1].name[strlen("G")] = '\0';
header.channels[2].name[strlen("R")] = '\0';
} else {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "A", 255);
#else
strncpy(header.channels[0].name, "A", 255);
#endif
header.channels[0].name[strlen("A")] = '\0';
}
header.pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
header.requested_pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
for (int i = 0; i < header.num_channels; i++) {
header.pixel_types[i] =
TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image
if (save_as_fp16 > 0) {
header.requested_pixel_types[i] =
TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format
} else {
header.requested_pixel_types[i] =
TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e.
// no precision reduction)
}
}
int ret = SaveEXRImageToFile(&image, &header, outfilename, err);
// free the temporary header arrays on both the success and failure paths
free(header.channels);
free(header.pixel_types);
free(header.requested_pixel_types);
return ret;
}
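// Usage sketch (illustrative): saving a small interleaved RGB buffer with the
// convenience wrapper above. "out.exr" and the buffer contents are
// assumptions; the argument order follows the SaveEXR() signature.
//
// const int w = 2, h = 2;
// float rgb[2 * 2 * 3] = {0.0f};  // RGBRGB..., row-major
// const char *err = NULL;
// int ret = SaveEXR(rgb, w, h, /*components=*/3, /*save_as_fp16=*/0,
//                   "out.exr", &err);
// if (ret != TINYEXR_SUCCESS) {
//   fprintf(stderr, "SaveEXR failed: %s\n", err ? err : "(no message)");
//   FreeEXRErrorMessage(err);
// }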
#ifdef __clang__
// zero-as-null-pointer-constant
#pragma clang diagnostic pop
#endif
#endif // TINYEXR_IMPLEMENTATION_DEFINED
#endif // TINYEXR_IMPLEMENTATION
|
lensing.c | /** @file lensing.c Documented lensing module
*
* Simon Prunet and Julien Lesgourgues, 6.12.2010
*
* This module computes the lensed temperature and polarization
* anisotropy power spectra \f$ C_l^{X}, P(k), ... \f$'s given the
* unlensed temperature, polarization and lensing potential spectra.
*
* Follows Challinor and Lewis full-sky method, astro-ph/0502425
*
* The following functions can be called from other modules (a usage sketch follows this comment):
*
* -# lensing_init() at the beginning (but after spectra_init())
* -# lensing_cl_at_l() at any time for computing Cl_lensed at any l
* -# lensing_free() at the end
*/
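/*
 * Usage sketch (illustrative, not part of CLASS): the calling sequence listed
 * above, assuming the precision, perturbation, spectra and nonlinear
 * structures (pr, pt, sp, nl below, hypothetical names) have already been
 * initialized by the earlier modules.
 *
 *   struct lensing le;
 *   double *cl;
 *   if (lensing_init(&pr, &pt, &sp, &nl, &le) == _SUCCESS_) {
 *     cl = malloc(le.lt_size * sizeof(double));
 *     if (lensing_cl_at_l(&le, 1000, cl) == _SUCCESS_)
 *       printf("lensed TT at l=1000: %e\n", cl[le.index_lt_tt]);
 *     free(cl);
 *     lensing_free(&le);
 *   }
 */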
#include "lensing.h"
#include <time.h>
/**
* Anisotropy power spectra \f$ C_l\f$'s for all types, modes and initial conditions.
* SO FAR: ONLY SCALAR
*
* This routine evaluates all the lensed \f$ C_l\f$'s at a given value of l by
* picking it in the pre-computed table. When relevant, it also
* sums over all initial conditions for each mode, and over all modes.
*
* This function can be called from whatever module at whatever time,
* provided that lensing_init() has been called before, and
* lensing_free() has not been called yet.
*
* @param ple Input: pointer to lensing structure
* @param l Input: multipole number
* @param cl_lensed Output: lensed \f$ C_l\f$'s for all types (TT, TE, EE, etc..)
* @return the error status
*/
int lensing_cl_at_l(
struct lensing * ple,
int l,
double * cl_lensed /* array with argument cl_lensed[index_ct] (must be already allocated) */
) {
int last_index;
int index_lt;
class_test(l > ple->l_lensed_max,
ple->error_message,
"you asked for lensed Cls at l=%d, they were computed only up to l=%d, you should increase l_max_scalars or decrease the precision parameter delta_l_max",l,ple->l_lensed_max);
class_call(array_interpolate_spline(ple->l,
ple->l_size,
ple->cl_lens,
ple->ddcl_lens,
ple->lt_size,
l,
&last_index,
cl_lensed,
ple->lt_size,
ple->error_message),
ple->error_message,
ple->error_message);
/* set to zero for the types such that l<l_max */
for (index_lt=0; index_lt<ple->lt_size; index_lt++)
if ((int)l > ple->l_max_lt[index_lt])
cl_lensed[index_lt]=0.;
return _SUCCESS_;
}
/**
* This routine initializes the lensing structure (in particular,
* computes table of lensed anisotropy spectra \f$ C_l^{X} \f$)
*
* @param ppr Input: pointer to precision structure
* @param ppt Input: pointer to perturbation structure (just in case, not used in current version...)
* @param psp Input: pointer to spectra structure
* @param pnl Input: pointer to nonlinear structure
* @param ple Output: pointer to initialized lensing structure
* @return the error status
*/
int lensing_init(
struct precision * ppr,
struct perturbs * ppt,
struct spectra * psp,
struct nonlinear * pnl,
struct lensing * ple
) {
/** Summary: */
/** - Define local variables */
double * mu; /* mu[index_mu]: discretized values of mu
between -1 and 1, roots of Legendre polynomial */
double * w8; /* Corresponding Gauss-Legendre quadrature weights */
double theta,delta_theta;
double ** d00; /* dmn[index_mu][index_l] */
double ** d11;
double ** d2m2;
double ** d22;
double ** d20;
double ** d1m1;
double ** d31;
double ** d40;
double ** d3m1;
double ** d3m3;
double ** d4m2;
double ** d4m4;
double * buf_dxx; /* buffer */
double * Cgl; /* Cgl[index_mu] */
double * Cgl2; /* Cgl2[index_mu] */
double * sigma2; /* sigma2[index_mu] */
double * ksi; /* ksi[index_mu] */
double * ksiX; /* ksiX[index_mu] */
double * ksip; /* ksip[index_mu] */
double * ksim; /* ksim[index_mu] */
double fac,fac1;
double X_000;
double X_p000;
double X_220;
double X_022;
double X_p022;
double X_121;
double X_132;
double X_242;
int num_mu,index_mu,icount;
int l;
double ll;
double * cl_unlensed; /* cl_unlensed[index_ct] */
double * cl_tt; /* unlensed cl, to be filled to avoid repeated calls to spectra_cl_at_l */
double * cl_te; /* unlensed cl, to be filled to avoid repeated calls to spectra_cl_at_l */
double * cl_ee; /* unlensed cl, to be filled to avoid repeated calls to spectra_cl_at_l */
double * cl_bb; /* unlensed cl, to be filled to avoid repeated calls to spectra_cl_at_l */
double * cl_pp; /* potential cl, to be filled to avoid repeated calls to spectra_cl_at_l */
double res,resX,lens;
double resp, resm, lensp, lensm;
double * sqrt1;
double * sqrt2;
double * sqrt3;
double * sqrt4;
double * sqrt5;
double ** cl_md_ic; /* array with argument
cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct] */
double ** cl_md; /* array with argument
cl_md[index_md][index_ct] */
int index_md;
/* Timing */
//double debut, fin;
//double cpu_time;
/** - check that we really want to compute at least one spectrum */
if (ple->has_lensed_cls == _FALSE_) {
if (ple->lensing_verbose > 0)
printf("No lensing requested. Lensing module skipped.\n");
return _SUCCESS_;
}
else {
if (ple->lensing_verbose > 0) {
printf("Computing lensed spectra ");
if (ppr->accurate_lensing==_TRUE_)
printf("(accurate mode)\n");
else
printf("(fast mode)\n");
}
}
/** - initialize indices and allocate some of the arrays in the
lensing structure */
class_call(lensing_indices(ppr,psp,ple),
ple->error_message,
ple->error_message);
/** - put all precision variables here; will be stored later in precision structure */
/** - Last element in \f$ \mu \f$ will be for \f$ \mu=1 \f$, needed for sigma2.
The rest will be chosen as roots of a Gauss-Legendre quadrature **/
if (ppr->accurate_lensing == _TRUE_) {
num_mu=(ple->l_unlensed_max+ppr->num_mu_minus_lmax); /* Must be even ?? CHECK */
num_mu += num_mu%2; /* Force it to be even */
} else {
/* Integrate correlation function difference on [0,pi/16] */
num_mu = (ple->l_unlensed_max * 2 )/16;
}
/** - allocate array of \f$ \mu \f$ values, as well as quadrature weights */
class_alloc(mu,
num_mu*sizeof(double),
ple->error_message);
/* Reserve last element of mu for mu=1, needed for sigma2 */
mu[num_mu-1] = 1.0;
class_alloc(w8,
(num_mu-1)*sizeof(double),
ple->error_message);
if (ppr->accurate_lensing == _TRUE_) {
//debut = omp_get_wtime();
class_call(quadrature_gauss_legendre(mu,
w8,
num_mu-1,
ppr->tol_gauss_legendre,
ple->error_message),
ple->error_message,
ple->error_message);
//fin = omp_get_wtime();
//cpu_time = (fin-debut);
//printf("time in quadrature_gauss_legendre=%4.3f s\n",cpu_time);
} else { /* Crude integration on [0,pi/16]: Riemann sum on theta */
delta_theta = _PI_/16. / (double)(num_mu-1);
for (index_mu=0;index_mu<num_mu-1;index_mu++) {
theta = (index_mu+1)*delta_theta;
mu[index_mu] = cos(theta);
w8[index_mu] = sin(theta)*delta_theta; /* We integrate on mu */
}
}
/** - Compute \f$ d^l_{mm'} (\mu) \f$*/
icount = 0;
class_alloc(d00,
num_mu*sizeof(double*),
ple->error_message);
class_alloc(d11,
num_mu*sizeof(double*),
ple->error_message);
class_alloc(d1m1,
num_mu*sizeof(double*),
ple->error_message);
class_alloc(d2m2,
num_mu*sizeof(double*),
ple->error_message);
icount += 4*num_mu*(ple->l_unlensed_max+1);
if(ple->has_te==_TRUE_) {
class_alloc(d20,
num_mu*sizeof(double*),
ple->error_message);
class_alloc(d3m1,
num_mu*sizeof(double*),
ple->error_message);
class_alloc(d4m2,
num_mu*sizeof(double*),
ple->error_message);
icount += 3*num_mu*(ple->l_unlensed_max+1);
}
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
class_alloc(d22,
num_mu*sizeof(double*),
ple->error_message);
class_alloc(d31,
num_mu*sizeof(double*),
ple->error_message);
class_alloc(d3m3,
num_mu*sizeof(double*),
ple->error_message);
class_alloc(d40,
num_mu*sizeof(double*),
ple->error_message);
class_alloc(d4m4,
num_mu*sizeof(double*),
ple->error_message);
icount += 5*num_mu*(ple->l_unlensed_max+1);
}
icount += 5*(ple->l_unlensed_max+1); /* for arrays sqrt1[l] to sqrt5[l] */
/** - Allocate main contiguous buffer **/
class_alloc(buf_dxx,
icount * sizeof(double),
ple->error_message);
icount = 0;
for (index_mu=0; index_mu<num_mu; index_mu++) {
d00[index_mu] = &(buf_dxx[icount+index_mu * (ple->l_unlensed_max+1)]);
d11[index_mu] = &(buf_dxx[icount+(index_mu+num_mu) * (ple->l_unlensed_max+1)]);
d1m1[index_mu]= &(buf_dxx[icount+(index_mu+2*num_mu) * (ple->l_unlensed_max+1)]);
d2m2[index_mu]= &(buf_dxx[icount+(index_mu+3*num_mu) * (ple->l_unlensed_max+1)]);
}
icount += 4*num_mu*(ple->l_unlensed_max+1);
if (ple->has_te==_TRUE_) {
for (index_mu=0; index_mu<num_mu; index_mu++) {
d20[index_mu] = &(buf_dxx[icount+index_mu * (ple->l_unlensed_max+1)]);
d3m1[index_mu]= &(buf_dxx[icount+(index_mu+num_mu) * (ple->l_unlensed_max+1)]);
d4m2[index_mu]= &(buf_dxx[icount+(index_mu+2*num_mu) * (ple->l_unlensed_max+1)]);
}
icount += 3*num_mu*(ple->l_unlensed_max+1);
}
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
for (index_mu=0; index_mu<num_mu; index_mu++) {
d22[index_mu] = &(buf_dxx[icount+index_mu * (ple->l_unlensed_max+1)]);
d31[index_mu] = &(buf_dxx[icount+(index_mu+num_mu) * (ple->l_unlensed_max+1)]);
d3m3[index_mu]= &(buf_dxx[icount+(index_mu+2*num_mu) * (ple->l_unlensed_max+1)]);
d40[index_mu] = &(buf_dxx[icount+(index_mu+3*num_mu) * (ple->l_unlensed_max+1)]);
d4m4[index_mu]= &(buf_dxx[icount+(index_mu+4*num_mu) * (ple->l_unlensed_max+1)]);
}
icount += 5*num_mu*(ple->l_unlensed_max+1);
}
sqrt1 = &(buf_dxx[icount]);
icount += ple->l_unlensed_max+1;
sqrt2 = &(buf_dxx[icount]);
icount += ple->l_unlensed_max+1;
sqrt3 = &(buf_dxx[icount]);
icount += ple->l_unlensed_max+1;
sqrt4 = &(buf_dxx[icount]);
icount += ple->l_unlensed_max+1;
sqrt5 = &(buf_dxx[icount]);
icount += ple->l_unlensed_max+1;
//debut = omp_get_wtime();
class_call(lensing_d00(mu,num_mu,ple->l_unlensed_max,d00),
ple->error_message,
ple->error_message);
class_call(lensing_d11(mu,num_mu,ple->l_unlensed_max,d11),
ple->error_message,
ple->error_message);
class_call(lensing_d1m1(mu,num_mu,ple->l_unlensed_max,d1m1),
ple->error_message,
ple->error_message);
class_call(lensing_d2m2(mu,num_mu,ple->l_unlensed_max,d2m2),
ple->error_message,
ple->error_message);
//fin = omp_get_wtime();
//cpu_time = (fin-debut);
//printf("time in lensing_dxx=%4.3f s\n",cpu_time);
if (ple->has_te==_TRUE_) {
class_call(lensing_d20(mu,num_mu,ple->l_unlensed_max,d20),
ple->error_message,
ple->error_message);
class_call(lensing_d3m1(mu,num_mu,ple->l_unlensed_max,d3m1),
ple->error_message,
ple->error_message);
class_call(lensing_d4m2(mu,num_mu,ple->l_unlensed_max,d4m2),
ple->error_message,
ple->error_message);
}
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
class_call(lensing_d22(mu,num_mu,ple->l_unlensed_max,d22),
ple->error_message,
ple->error_message);
class_call(lensing_d31(mu,num_mu,ple->l_unlensed_max,d31),
ple->error_message,
ple->error_message);
class_call(lensing_d3m3(mu,num_mu,ple->l_unlensed_max,d3m3),
ple->error_message,
ple->error_message);
class_call(lensing_d40(mu,num_mu,ple->l_unlensed_max,d40),
ple->error_message,
ple->error_message);
class_call(lensing_d4m4(mu,num_mu,ple->l_unlensed_max,d4m4),
ple->error_message,
ple->error_message);
}
/** - compute \f$ Cgl(\mu)\f$, \f$ Cgl2(\mu) \f$ and sigma2(\f$\mu\f$) */
class_alloc(Cgl,
num_mu*sizeof(double),
ple->error_message);
class_alloc(Cgl2,
num_mu*sizeof(double),
ple->error_message);
class_alloc(sigma2,
(num_mu-1)*sizeof(double), /* Zero separation is omitted */
ple->error_message);
class_alloc(cl_unlensed,
psp->ct_size*sizeof(double),
ple->error_message);
/** - Locally store unlensed temperature \f$ cl_{tt}\f$ and potential \f$ cl_{pp}\f$ spectra **/
class_alloc(cl_tt,
(ple->l_unlensed_max+1)*sizeof(double),
ple->error_message);
if (ple->has_te==_TRUE_) {
class_alloc(cl_te,
(ple->l_unlensed_max+1)*sizeof(double),
ple->error_message);
}
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
class_alloc(cl_ee,
(ple->l_unlensed_max+1)*sizeof(double),
ple->error_message);
class_alloc(cl_bb,
(ple->l_unlensed_max+1)*sizeof(double),
ple->error_message);
}
class_alloc(cl_pp,
(ple->l_unlensed_max+1)*sizeof(double),
ple->error_message);
class_alloc(cl_md_ic,
psp->md_size*sizeof(double *),
ple->error_message);
class_alloc(cl_md,
psp->md_size*sizeof(double *),
ple->error_message);
for (index_md = 0; index_md < psp->md_size; index_md++) {
if (psp->md_size > 1)
class_alloc(cl_md[index_md],
psp->ct_size*sizeof(double),
ple->error_message);
if (psp->ic_size[index_md] > 1)
class_alloc(cl_md_ic[index_md],
psp->ic_ic_size[index_md]*psp->ct_size*sizeof(double),
ple->error_message);
}
for (l=2; l<=ple->l_unlensed_max; l++) {
class_call(spectra_cl_at_l(psp,l,cl_unlensed,cl_md,cl_md_ic),
psp->error_message,
ple->error_message);
cl_tt[l] = cl_unlensed[ple->index_lt_tt];
cl_pp[l] = cl_unlensed[ple->index_lt_pp];
if (ple->has_te==_TRUE_) {
cl_te[l] = cl_unlensed[ple->index_lt_te];
}
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
cl_ee[l] = cl_unlensed[ple->index_lt_ee];
cl_bb[l] = cl_unlensed[ple->index_lt_bb];
}
}
for (index_md = 0; index_md < psp->md_size; index_md++) {
if (psp->md_size > 1)
free(cl_md[index_md]);
if (psp->ic_size[index_md] > 1)
free(cl_md_ic[index_md]);
}
free(cl_md_ic);
free(cl_md);
/** - Compute sigma2\f$(\mu)\f$ and Cgl2(\f$\mu\f$) **/
//debut = omp_get_wtime();
#pragma omp parallel for \
private (index_mu,l) \
schedule (static)
for (index_mu=0; index_mu<num_mu; index_mu++) {
Cgl[index_mu]=0;
Cgl2[index_mu]=0;
for (l=2; l<=ple->l_unlensed_max; l++) {
Cgl[index_mu] += (2.*l+1.)*l*(l+1.)*
cl_pp[l]*d11[index_mu][l];
Cgl2[index_mu] += (2.*l+1.)*l*(l+1.)*
cl_pp[l]*d1m1[index_mu][l];
}
Cgl[index_mu] /= 4.*_PI_;
Cgl2[index_mu] /= 4.*_PI_;
}
for (index_mu=0; index_mu<num_mu-1; index_mu++) {
/* Cgl(1.0) - Cgl(mu) */
sigma2[index_mu] = Cgl[num_mu-1] - Cgl[index_mu];
}
//fin = omp_get_wtime();
//cpu_time = (fin-debut);
//printf("time in Cgl,Cgl2,sigma2=%4.3f s\n",cpu_time);
/** - compute ksi, ksi+, ksi-, ksiX */
/** - --> ksi is for TT **/
if (ple->has_tt==_TRUE_) {
class_calloc(ksi,
(num_mu-1),
sizeof(double),
ple->error_message);
}
/** - --> ksiX is for TE **/
if (ple->has_te==_TRUE_) {
class_calloc(ksiX,
(num_mu-1),
sizeof(double),
ple->error_message);
}
/** - --> ksip, ksim for EE, BB **/
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
class_calloc(ksip,
(num_mu-1),
sizeof(double),
ple->error_message);
class_calloc(ksim,
(num_mu-1),
sizeof(double),
ple->error_message);
}
for (l=2;l<=ple->l_unlensed_max;l++) {
ll = (double)l;
sqrt1[l]=sqrt((ll+2)*(ll+1)*ll*(ll-1));
sqrt2[l]=sqrt((ll+2)*(ll-1));
sqrt3[l]=sqrt((ll+3)*(ll-2));
sqrt4[l]=sqrt((ll+4)*(ll+3)*(ll-2.)*(ll-3));
sqrt5[l]=sqrt(ll*(ll+1));
}
//debut = omp_get_wtime();
#pragma omp parallel for \
private (index_mu,l,ll,res,resX,resp,resm,lens,lensp,lensm, \
fac,fac1,X_000,X_p000,X_220,X_022,X_p022,X_121,X_132,X_242) \
schedule (static)
for (index_mu=0;index_mu<num_mu-1;index_mu++) {
for (l=2;l<=ple->l_unlensed_max;l++) {
ll = (double)l;
fac = ll*(ll+1)/4.;
fac1 = (2*ll+1)/(4.*_PI_);
/* In the following we will keep terms of the form (sigma2)^k*(Cgl2)^m
with k+m <= 2 */
X_000 = exp(-fac*sigma2[index_mu]);
X_p000 = -fac*X_000;
/* X_220 = 0.25*sqrt1[l] * exp(-(fac-0.5)*sigma2[index_mu]); */
X_220 = 0.25*sqrt1[l] * X_000; /* Order 0 */
/* the next 5 assignments are not needed, but they avoid the compiler warning 'may be used uninitialized' */
X_242=0.;
X_132=0.;
X_121=0.;
X_p022=0.;
X_022=0.;
if (ple->has_te==_TRUE_ || ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
/* X_022 = exp(-(fac-1.)*sigma2[index_mu]); */
X_022 = X_000 * (1+sigma2[index_mu]*(1+0.5*sigma2[index_mu])); /* Order 2 */
X_p022 = (fac-1.)*X_022;
/* X_242 = 0.25*sqrt4[l] * exp(-(fac-5./2.)*sigma2[index_mu]); */
X_242 = 0.25*sqrt4[l] * X_000; /* Order 0 */
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
/* X_121 = - 0.5*sqrt2[l] * exp(-(fac-2./3.)*sigma2[index_mu]);
X_132 = - 0.5*sqrt3[l] * exp(-(fac-5./3.)*sigma2[index_mu]); */
X_121 = -0.5*sqrt2[l] * X_000 * (1+2./3.*sigma2[index_mu]); /* Order 1 */
X_132 = -0.5*sqrt3[l] * X_000 * (1+5./3.*sigma2[index_mu]); /* Order 1 */
}
}
if (ple->has_tt==_TRUE_) {
res = fac1*cl_tt[l];
lens = (X_000*X_000*d00[index_mu][l] +
X_p000*X_p000*d1m1[index_mu][l]
*Cgl2[index_mu]*8./(ll*(ll+1)) +
(X_p000*X_p000*d00[index_mu][l] +
X_220*X_220*d2m2[index_mu][l])
*Cgl2[index_mu]*Cgl2[index_mu]);
if (ppr->accurate_lensing == _FALSE_) {
/* Remove unlensed correlation function */
lens -= d00[index_mu][l];
}
res *= lens;
ksi[index_mu] += res;
}
if (ple->has_te==_TRUE_) {
resX = fac1*cl_te[l];
lens = ( X_022*X_000*d20[index_mu][l] +
Cgl2[index_mu]*2.*X_p000/sqrt5[l] *
(X_121*d11[index_mu][l] + X_132*d3m1[index_mu][l]) +
0.5 * Cgl2[index_mu] * Cgl2[index_mu] *
( ( 2.*X_p022*X_p000+X_220*X_220 ) *
d20[index_mu][l] + X_220*X_242*d4m2[index_mu][l] ) );
if (ppr->accurate_lensing == _FALSE_) {
lens -= d20[index_mu][l];
}
resX *= lens;
ksiX[index_mu] += resX;
}
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
resp = fac1*(cl_ee[l]+cl_bb[l]);
resm = fac1*(cl_ee[l]-cl_bb[l]);
lensp = ( X_022*X_022*d22[index_mu][l] +
2.*Cgl2[index_mu]*X_132*X_121*d31[index_mu][l] +
Cgl2[index_mu]*Cgl2[index_mu] *
( X_p022*X_p022*d22[index_mu][l] +
X_242*X_220*d40[index_mu][l] ) );
lensm = ( X_022*X_022*d2m2[index_mu][l] +
Cgl2[index_mu] *
( X_121*X_121*d1m1[index_mu][l] +
X_132*X_132*d3m3[index_mu][l] ) +
0.5 * Cgl2[index_mu] * Cgl2[index_mu] *
( 2.*X_p022*X_p022*d2m2[index_mu][l] +
X_220*X_220*d00[index_mu][l] +
X_242*X_242*d4m4[index_mu][l] ) );
if (ppr->accurate_lensing == _FALSE_) {
lensp -= d22[index_mu][l];
lensm -= d2m2[index_mu][l];
}
resp *= lensp;
resm *= lensm;
ksip[index_mu] += resp;
ksim[index_mu] += resm;
}
}
}
//fin = omp_get_wtime();
//cpu_time = (fin-debut);
//printf("time in ksi=%4.3f s\n",cpu_time);
/** - compute lensed \f$ C_l\f$'s by integration */
//debut = omp_get_wtime();
if (ple->has_tt==_TRUE_) {
class_call(lensing_lensed_cl_tt(ksi,d00,w8,num_mu-1,ple),
ple->error_message,
ple->error_message);
if (ppr->accurate_lensing == _FALSE_) {
class_call(lensing_addback_cl_tt(ple,cl_tt),
ple->error_message,
ple->error_message);
}
}
if (ple->has_te==_TRUE_) {
class_call(lensing_lensed_cl_te(ksiX,d20,w8,num_mu-1,ple),
ple->error_message,
ple->error_message);
if (ppr->accurate_lensing == _FALSE_) {
class_call(lensing_addback_cl_te(ple,cl_te),
ple->error_message,
ple->error_message);
}
}
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
class_call(lensing_lensed_cl_ee_bb(ksip,ksim,d22,d2m2,w8,num_mu-1,ple),
ple->error_message,
ple->error_message);
if (ppr->accurate_lensing == _FALSE_) {
class_call(lensing_addback_cl_ee_bb(ple,cl_ee,cl_bb),
ple->error_message,
ple->error_message);
}
}
//fin=omp_get_wtime();
//cpu_time = (fin-debut);
//printf("time in final lensing computation=%4.3f s\n",cpu_time);
/** - spline computed \f$ C_l\f$'s in view of interpolation */
class_call(array_spline_table_lines(ple->l,
ple->l_size,
ple->cl_lens,
ple->lt_size,
ple->ddcl_lens,
_SPLINE_EST_DERIV_,
ple->error_message),
ple->error_message,
ple->error_message);
/** - Free lots of stuff **/
free(buf_dxx);
free(d00);
free(d11);
free(d1m1);
free(d2m2);
if (ple->has_te==_TRUE_) {
free(d20);
free(d3m1);
free(d4m2);
}
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
free(d22);
free(d31);
free(d3m3);
free(d40);
free(d4m4);
}
if (ple->has_tt==_TRUE_)
free(ksi);
if (ple->has_te==_TRUE_)
free(ksiX);
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
free(ksip);
free(ksim);
}
free(Cgl);
free(Cgl2);
free(sigma2);
free(mu);
free(w8);
free(cl_unlensed);
free(cl_tt);
if (ple->has_te==_TRUE_)
free(cl_te);
if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
free(cl_ee);
free(cl_bb);
}
free(cl_pp);
/** - Exit **/
return _SUCCESS_;
}
/**
* This routine frees all the memory space allocated by lensing_init().
*
* To be called at the end of each run, only when no further calls to
* lensing_cl_at_l() are needed.
*
* @param ple Input: pointer to lensing structure (which fields must be freed)
* @return the error status
*/
int lensing_free(
struct lensing * ple
) {
if (ple->has_lensed_cls == _TRUE_) {
free(ple->l);
free(ple->cl_lens);
free(ple->ddcl_lens);
free(ple->l_max_lt);
}
return _SUCCESS_;
}
/**
* This routine defines indices and allocates tables in the lensing structure
*
* @param ppr Input: pointer to precision structure
* @param psp Input: pointer to spectra structure
* @param ple Input/output: pointer to lensing structure
* @return the error status
*/
int lensing_indices(
struct precision * ppr,
struct spectra * psp,
struct lensing * ple
){
int index_l;
double ** cl_md_ic; /* array with argument
cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct] */
double ** cl_md; /* array with argument
cl_md[index_md][index_ct] */
int index_md;
int index_lt;
/* indices of all Cl types (lensed and unlensed) */
if (psp->has_tt == _TRUE_) {
ple->has_tt = _TRUE_;
ple->index_lt_tt=psp->index_ct_tt;
}
else {
ple->has_tt = _FALSE_;
}
if (psp->has_ee == _TRUE_) {
ple->has_ee = _TRUE_;
ple->index_lt_ee=psp->index_ct_ee;
}
else {
ple->has_ee = _FALSE_;
}
if (psp->has_te == _TRUE_) {
ple->has_te = _TRUE_;
ple->index_lt_te=psp->index_ct_te;
}
else {
ple->has_te = _FALSE_;
}
if (psp->has_bb == _TRUE_) {
ple->has_bb = _TRUE_;
ple->index_lt_bb=psp->index_ct_bb;
}
else {
ple->has_bb = _FALSE_;
}
if (psp->has_pp == _TRUE_) {
ple->has_pp = _TRUE_;
ple->index_lt_pp=psp->index_ct_pp;
}
else {
ple->has_pp = _FALSE_;
}
if (psp->has_tp == _TRUE_) {
ple->has_tp = _TRUE_;
ple->index_lt_tp=psp->index_ct_tp;
}
else {
ple->has_tp = _FALSE_;
}
if (psp->has_dd == _TRUE_) {
ple->has_dd = _TRUE_;
ple->index_lt_dd=psp->index_ct_dd;
}
else {
ple->has_dd = _FALSE_;
}
if (psp->has_td == _TRUE_) {
ple->has_td = _TRUE_;
ple->index_lt_td=psp->index_ct_td;
}
else {
ple->has_td = _FALSE_;
}
if (psp->has_ll == _TRUE_) {
ple->has_ll = _TRUE_;
ple->index_lt_ll=psp->index_ct_ll;
}
else {
ple->has_ll = _FALSE_;
}
if (psp->has_tl == _TRUE_) {
ple->has_tl = _TRUE_;
ple->index_lt_tl=psp->index_ct_tl;
}
else {
ple->has_tl = _FALSE_;
}
ple->lt_size = psp->ct_size;
/* number of multipoles */
ple->l_unlensed_max = psp->l_max_tot;
ple->l_lensed_max = ple->l_unlensed_max - ppr->delta_l_max;
for (index_l=0; (index_l < psp->l_size_max) && (psp->l[index_l] <= ple->l_lensed_max); index_l++);
if (index_l < psp->l_size_max) index_l++; /* one more point in order to be able to interpolate till ple->l_lensed_max */
ple->l_size = index_l+1;
class_alloc(ple->l,ple->l_size*sizeof(double),ple->error_message);
for (index_l=0; index_l < ple->l_size; index_l++) {
ple->l[index_l] = psp->l[index_l];
}
/* allocate table where results will be stored */
class_alloc(ple->cl_lens,
ple->l_size*ple->lt_size*sizeof(double),
ple->error_message);
class_alloc(ple->ddcl_lens,
ple->l_size*ple->lt_size*sizeof(double),
ple->error_message);
/* fill with unlensed cls */
class_alloc(cl_md_ic,
psp->md_size*sizeof(double *),
ple->error_message);
class_alloc(cl_md,
psp->md_size*sizeof(double *),
ple->error_message);
for (index_md = 0; index_md < psp->md_size; index_md++) {
if (psp->md_size > 1)
class_alloc(cl_md[index_md],
psp->ct_size*sizeof(double),
ple->error_message);
if (psp->ic_size[index_md] > 1)
class_alloc(cl_md_ic[index_md],
psp->ic_ic_size[index_md]*psp->ct_size*sizeof(double),
ple->error_message);
}
for (index_l=0; index_l<ple->l_size; index_l++) {
class_call(spectra_cl_at_l(psp,ple->l[index_l],&(ple->cl_lens[index_l*ple->lt_size]),cl_md,cl_md_ic),
psp->error_message,
ple->error_message);
}
for (index_md = 0; index_md < psp->md_size; index_md++) {
if (psp->md_size > 1)
free(cl_md[index_md]);
if (psp->ic_size[index_md] > 1)
free(cl_md_ic[index_md]);
}
free(cl_md_ic);
free(cl_md);
/* we want to output Cl_lensed up to the same l_max as Cl_unlensed
(even if a number delta_l_max of extra values of l have been used
internally for more accurate results). Notable exception to the
above rule: ClBB_lensed(scalars) must be output at least up to the same l_max as
ClEE_unlensed(scalars) (since ClBB_unlensed is null for scalars)
*/
class_alloc(ple->l_max_lt,ple->lt_size*sizeof(double),ple->error_message);
for (index_lt = 0; index_lt < ple->lt_size; index_lt++) {
ple->l_max_lt[index_lt]=0.;
for (index_md = 0; index_md < psp->md_size; index_md++) {
ple->l_max_lt[index_lt]=MAX(ple->l_max_lt[index_lt],psp->l_max_ct[index_md][index_lt]);
if ((ple->has_bb == _TRUE_) && (ple->has_ee == _TRUE_) && (index_lt == ple->index_lt_bb)) {
ple->l_max_lt[index_lt]=MAX(ple->l_max_lt[index_lt],psp->l_max_ct[index_md][ple->index_lt_ee]);
}
}
}
return _SUCCESS_;
}
/**
* This routine computes the lensed power spectra by Gaussian quadrature
*
* @param ksi Input: Lensed correlation function (ksi[index_mu])
* @param d00 Input: Legendre polynomials (\f$ d^l_{00}\f$[index_mu][l])
* @param w8 Input: Legendre quadrature weights (w8[index_mu])
* @param nmu Input: Number of quadrature points (0<=index_mu<nmu)
* @param ple Input/output: Pointer to the lensing structure
* @return the error status
*/
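/*
 * Sketch of the relation implemented below (inferred from the code): the
 * lensed TT spectrum is the Legendre transform of the lensed correlation
 * function, evaluated by Gauss-Legendre quadrature,
 *
 * \f$ C_l^{\rm TT, lensed} = 2\pi \int_{-1}^{1} \xi(\mu)\, d^l_{00}(\mu)\, d\mu
 * \simeq 2\pi \sum_i w_i\, \xi(\mu_i)\, d^l_{00}(\mu_i) \f$
 *
 * with nodes \f$ \mu_i \f$ and weights \f$ w_i \f$ computed in lensing_init().
 */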
int lensing_lensed_cl_tt(
double *ksi,
double **d00,
double *w8,
int nmu,
struct lensing * ple
) {
double cle;
int imu;
int index_l;
/** Integration by Gauss-Legendre quadrature. **/
#pragma omp parallel for \
private (imu,index_l,cle) \
schedule (static)
for(index_l=0; index_l<ple->l_size; index_l++){
cle=0;
for (imu=0;imu<nmu;imu++) {
cle += ksi[imu]*d00[imu][(int)ple->l[index_l]]*w8[imu]; /* loop could be optimized */
}
ple->cl_lens[index_l*ple->lt_size+ple->index_lt_tt]=cle*2.0*_PI_;
}
return _SUCCESS_;
}
/**
* This routine adds back the unlensed \f$ cl_{tt}\f$ power spectrum
* Used in case of fast (and BB inaccurate) integration of
* correlation functions.
*
* @param ple Input/output: Pointer to the lensing structure
* @param cl_tt Input: Array of unlensed power spectrum
* @return the error status
*/
int lensing_addback_cl_tt(
struct lensing * ple,
double *cl_tt) {
int index_l, l;
for (index_l=0; index_l<ple->l_size; index_l++) {
l = (int)ple->l[index_l];
ple->cl_lens[index_l*ple->lt_size+ple->index_lt_tt] += cl_tt[l];
}
return _SUCCESS_;
}
/**
* This routine computes the lensed power spectra by Gaussian quadrature
*
* @param ksiX Input: Lensed correlation function (ksiX[index_mu])
* @param d20 Input: Wigner d-function (\f$ d^l_{20}\f$[index_mu][l])
* @param w8 Input: Legendre quadrature weights (w8[index_mu])
* @param nmu Input: Number of quadrature points (0<=index_mu<nmu)
* @param ple Input/output: Pointer to the lensing structure
* @return the error status
*/
int lensing_lensed_cl_te(
double *ksiX,
double **d20,
double *w8,
int nmu,
struct lensing * ple
) {
double clte;
int imu;
int index_l;
/** Integration by Gauss-Legendre quadrature. **/
#pragma omp parallel for \
private (imu,index_l,clte) \
schedule (static)
for(index_l=0; index_l < ple->l_size; index_l++){
clte=0;
for (imu=0;imu<nmu;imu++) {
clte += ksiX[imu]*d20[imu][(int)ple->l[index_l]]*w8[imu]; /* loop could be optimized */
}
ple->cl_lens[index_l*ple->lt_size+ple->index_lt_te]=clte*2.0*_PI_;
}
return _SUCCESS_;
}
/**
* This routine adds back the unlensed \f$ cl_{te}\f$ power spectrum
* Used in case of fast (and BB inaccurate) integration of
* correlation functions.
*
* @param ple Input/output: Pointer to the lensing structure
* @param cl_te Input: Array of unlensed power spectrum
* @return the error status
*/
int lensing_addback_cl_te(
struct lensing * ple,
double *cl_te) {
int index_l, l;
for (index_l=0; index_l<ple->l_size; index_l++) {
l = (int)ple->l[index_l];
ple->cl_lens[index_l*ple->lt_size+ple->index_lt_te] += cl_te[l];
}
return _SUCCESS_;
}
/**
* This routine computes the lensed power spectra by Gaussian quadrature
*
* @param ksip Input: Lensed correlation function (ksi+[index_mu])
* @param ksim Input: Lensed correlation function (ksi-[index_mu])
* @param d22 Input: Wigner d-function (\f$ d^l_{22}\f$[index_mu][l])
* @param d2m2 Input: Wigner d-function (\f$ d^l_{2-2}\f$[index_mu][l])
* @param w8 Input: Legendre quadrature weights (w8[index_mu])
* @param nmu Input: Number of quadrature points (0<=index_mu<nmu)
* @param ple Input/output: Pointer to the lensing structure
* @return the error status
*/
int lensing_lensed_cl_ee_bb(
double *ksip,
double *ksim,
double **d22,
double **d2m2,
double *w8,
int nmu,
struct lensing * ple
) {
double clp, clm;
int imu;
int index_l;
/** Integration by Gauss-Legendre quadrature. **/
#pragma omp parallel for \
private (imu,index_l,clp,clm) \
schedule (static)
for(index_l=0; index_l < ple->l_size; index_l++){
clp=0; clm=0;
for (imu=0;imu<nmu;imu++) {
clp += ksip[imu]*d22[imu][(int)ple->l[index_l]]*w8[imu]; /* loop could be optimized */
clm += ksim[imu]*d2m2[imu][(int)ple->l[index_l]]*w8[imu]; /* loop could be optimized */
}
ple->cl_lens[index_l*ple->lt_size+ple->index_lt_ee]=(clp+clm)*_PI_;
ple->cl_lens[index_l*ple->lt_size+ple->index_lt_bb]=(clp-clm)*_PI_;
}
return _SUCCESS_;
}
/**
* This routine adds back the unlensed \f$ cl_{ee}\f$, \f$ cl_{bb}\f$ power spectra
* Used in case of fast (and BB inaccurate) integration of
* correlation functions.
*
* @param ple Input/output: Pointer to the lensing structure
* @param cl_ee Input: Array of unlensed power spectrum
* @param cl_bb Input: Array of unlensed power spectrum
* @return the error status
*/
int lensing_addback_cl_ee_bb(
struct lensing * ple,
double * cl_ee,
double * cl_bb) {
int index_l, l;
for (index_l=0; index_l<ple->l_size; index_l++) {
l = (int)ple->l[index_l];
ple->cl_lens[index_l*ple->lt_size+ple->index_lt_ee] += cl_ee[l];
ple->cl_lens[index_l*ple->lt_size+ple->index_lt_bb] += cl_bb[l];
}
return _SUCCESS_;
}
/**
* This routine computes the d00 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d00 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
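/*
 * Sketch of the recurrence implemented below (inferred from the code): with
 * \f$ d^l_{00}(\mu) = P_l(\mu) \f$ and the rescaled quantity
 * \f$ \tilde{d}^l_{00} \equiv \sqrt{(2l+1)/2}\, d^l_{00} \f$, the standard
 * Legendre three-term recurrence becomes
 *
 * \f$ \tilde{d}^{l+1}_{00} = \sqrt{\tfrac{2l+3}{2l+1}}\,\tfrac{2l+1}{l+1}\,\mu\,\tilde{d}^l_{00}
 * - \sqrt{\tfrac{2l+3}{2l-1}}\,\tfrac{l}{l+1}\,\tilde{d}^{l-1}_{00} \f$
 *
 * fac1 and fac2 hold these two coefficients, and fac3 converts the result
 * back to the unscaled \f$ d^{l+1}_{00} \f$.
 */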
int lensing_d00(
double * mu,
int num_mu,
int lmax,
double ** d00
) {
double ll, dlm1, dl, dlp1;
int index_mu, l;
double *fac1, *fac2, *fac3;
ErrorMsg erreur;
class_alloc(fac1,lmax*sizeof(double),erreur);
class_alloc(fac2,lmax*sizeof(double),erreur);
class_alloc(fac3,lmax*sizeof(double),erreur);
for (l=1; l<lmax; l++) {
ll = (double) l;
fac1[l] = sqrt((2*ll+3)/(2*ll+1))*(2*ll+1)/(ll+1);
fac2[l] = sqrt((2*ll+3)/(2*ll-1))*ll/(ll+1);
fac3[l] = sqrt(2./(2*ll+3));
}
#pragma omp parallel for \
private (index_mu,dlm1,dl,dlp1,l,ll) \
schedule (static)
for (index_mu=0;index_mu<num_mu;index_mu++) {
dlm1=1.0/sqrt(2.); /* l=0 */
d00[index_mu][0]=dlm1*sqrt(2.);
dl=mu[index_mu] * sqrt(3./2.); /*l=1*/
d00[index_mu][1]=dl*sqrt(2./3.);
for(l=1;l<lmax;l++){
ll=(double) l;
/* sqrt((2l+1)/2)*d00 recurrence, supposed to be more stable */
dlp1 = fac1[l]*mu[index_mu]*dl - fac2[l]*dlm1;
d00[index_mu][l+1] = dlp1 * fac3[l];
dlm1 = dl;
dl = dlp1;
}
}
free(fac1); free(fac2); free(fac3);
return _SUCCESS_;
}
/**
* This routine computes the d11 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d11 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
int lensing_d11(
double * mu,
int num_mu,
int lmax,
double ** d11
) {
double ll, dlm1, dl, dlp1;
int index_mu, l;
double *fac1, *fac2, *fac3, *fac4;
ErrorMsg erreur;
class_alloc(fac1,lmax*sizeof(double),erreur);
class_alloc(fac2,lmax*sizeof(double),erreur);
class_alloc(fac3,lmax*sizeof(double),erreur);
class_alloc(fac4,lmax*sizeof(double),erreur);
for (l=2;l<lmax;l++) {
ll = (double) l;
fac1[l] = sqrt((2*ll+3)/(2*ll+1))*(ll+1)*(2*ll+1)/(ll*(ll+2));
fac2[l] = 1.0/(ll*(ll+1.));
fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-1)*(ll+1)/(ll*(ll+2))*(ll+1)/ll;
fac4[l] = sqrt(2./(2*ll+3));
}
#pragma omp parallel for \
private (index_mu,dlm1,dl,dlp1,l,ll) \
schedule (static)
for (index_mu=0;index_mu<num_mu;index_mu++) {
d11[index_mu][0]=0;
dlm1=(1.0+mu[index_mu])/2. * sqrt(3./2.); /*l=1*/
d11[index_mu][1]=dlm1 * sqrt(2./3.);
dl=(1.0+mu[index_mu])/2.*(2.0*mu[index_mu]-1.0) * sqrt(5./2.); /*l=2*/
d11[index_mu][2] = dl * sqrt(2./5.);
for(l=2;l<lmax;l++){
ll=(double) l;
/* recurrence on sqrt((2l+1)/2)*d11, kept in scaled form for numerical stability */
dlp1 = fac1[l]*(mu[index_mu]-fac2[l])*dl - fac3[l]*dlm1;
d11[index_mu][l+1] = dlp1 * fac4[l];
dlm1 = dl;
dl = dlp1;
}
}
free(fac1); free(fac2); free(fac3); free(fac4);
return _SUCCESS_;
}
/**
* This routine computes the d1m1 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d1m1 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
int lensing_d1m1(
double * mu,
int num_mu,
int lmax,
double ** d1m1
) {
double ll, dlm1, dl, dlp1;
int index_mu, l;
double *fac1, *fac2, *fac3, *fac4;
ErrorMsg erreur;
class_alloc(fac1,lmax*sizeof(double),erreur);
class_alloc(fac2,lmax*sizeof(double),erreur);
class_alloc(fac3,lmax*sizeof(double),erreur);
class_alloc(fac4,lmax*sizeof(double),erreur);
for (l=2;l<lmax;l++) {
ll = (double) l;
fac1[l] = sqrt((2*ll+3)/(2*ll+1))*(ll+1)*(2*ll+1)/(ll*(ll+2));
fac2[l] = 1.0/(ll*(ll+1.));
fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-1)*(ll+1)/(ll*(ll+2))*(ll+1)/ll;
fac4[l] = sqrt(2./(2*ll+3));
}
#pragma omp parallel for \
private (index_mu,dlm1,dl,dlp1,l,ll) \
schedule (static)
for (index_mu=0;index_mu<num_mu;index_mu++) {
d1m1[index_mu][0]=0;
dlm1=(1.0-mu[index_mu])/2. * sqrt(3./2.); /*l=1*/
d1m1[index_mu][1]=dlm1 * sqrt(2./3.);
dl=(1.0-mu[index_mu])/2.*(2.0*mu[index_mu]+1.0) * sqrt(5./2.); /*l=2*/
d1m1[index_mu][2] = dl * sqrt(2./5.);
for(l=2;l<lmax;l++){
ll=(double) l;
/* recurrence on sqrt((2l+1)/2)*d1m1, kept in scaled form for numerical stability */
dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
d1m1[index_mu][l+1] = dlp1 * fac4[l];
dlm1 = dl;
dl = dlp1;
}
}
free(fac1); free(fac2); free(fac3); free(fac4);
return _SUCCESS_;
}
/**
* This routine computes the d2m2 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d2m2 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
int lensing_d2m2(
double * mu,
int num_mu,
int lmax,
double ** d2m2
) {
double ll, dlm1, dl, dlp1;
int index_mu, l;
double *fac1, *fac2, *fac3, *fac4;
ErrorMsg erreur;
class_alloc(fac1,lmax*sizeof(double),erreur);
class_alloc(fac2,lmax*sizeof(double),erreur);
class_alloc(fac3,lmax*sizeof(double),erreur);
class_alloc(fac4,lmax*sizeof(double),erreur);
for (l=2;l<lmax;l++) {
ll = (double) l;
fac1[l] = sqrt((2*ll+3)/(2*ll+1))*(ll+1)*(2*ll+1)/((ll-1)*(ll+3));
fac2[l] = 4.0/(ll*(ll+1));
fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-2)*(ll+2)/((ll-1)*(ll+3))*(ll+1)/ll;
fac4[l] = sqrt(2./(2*ll+3));
}
#pragma omp parallel for \
private (index_mu,dlm1,dl,dlp1,l,ll) \
schedule (static)
for (index_mu=0;index_mu<num_mu;index_mu++) {
d2m2[index_mu][0]=0;
dlm1=0.; /*l=1*/
d2m2[index_mu][1]=0;
dl=(1.0-mu[index_mu])*(1.0-mu[index_mu])/4. * sqrt(5./2.); /*l=2*/
d2m2[index_mu][2] = dl * sqrt(2./5.);
for(l=2;l<lmax;l++){
ll=(double) l;
/* recurrence on sqrt((2l+1)/2)*d2m2, kept in scaled form for numerical stability */
dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
d2m2[index_mu][l+1] = dlp1 * fac4[l];
dlm1 = dl;
dl = dlp1;
}
}
free(fac1); free(fac2); free(fac3); free(fac4);
return _SUCCESS_;
}
/**
* This routine computes the d22 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d22 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
int lensing_d22(
double * mu,
int num_mu,
int lmax,
double ** d22
) {
double ll, dlm1, dl, dlp1;
int index_mu, l;
double *fac1, *fac2, *fac3, *fac4;
ErrorMsg erreur;
class_alloc(fac1,lmax*sizeof(double),erreur);
class_alloc(fac2,lmax*sizeof(double),erreur);
class_alloc(fac3,lmax*sizeof(double),erreur);
class_alloc(fac4,lmax*sizeof(double),erreur);
for (l=2;l<lmax;l++) {
ll = (double) l;
fac1[l] = sqrt((2*ll+3)/(2*ll+1))*(ll+1)*(2*ll+1)/((ll-1)*(ll+3));
fac2[l] = 4.0/(ll*(ll+1));
fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-2)*(ll+2)/((ll-1)*(ll+3))*(ll+1)/ll;
fac4[l] = sqrt(2./(2*ll+3));
}
#pragma omp parallel for \
private (index_mu,dlm1,dl,dlp1,l,ll) \
schedule (static)
for (index_mu=0;index_mu<num_mu;index_mu++) {
d22[index_mu][0]=0;
dlm1=0.; /*l=1*/
d22[index_mu][1]=0;
dl=(1.0+mu[index_mu])*(1.0+mu[index_mu])/4. * sqrt(5./2.); /*l=2*/
d22[index_mu][2] = dl * sqrt(2./5.);
for(l=2;l<lmax;l++){
ll=(double) l;
/* recurrence on sqrt((2l+1)/2)*d22, kept in scaled form for numerical stability */
dlp1 = fac1[l]*(mu[index_mu]-fac2[l])*dl - fac3[l]*dlm1;
d22[index_mu][l+1] = dlp1 * fac4[l];
dlm1 = dl;
dl = dlp1;
}
}
free(fac1); free(fac2); free(fac3); free(fac4);
return _SUCCESS_;
}
/**
* This routine computes the d20 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d20 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
int lensing_d20(
double * mu,
int num_mu,
int lmax,
double ** d20
) {
double ll, dlm1, dl, dlp1;
int index_mu, l;
double *fac1, *fac3, *fac4;
ErrorMsg erreur;
class_alloc(fac1,lmax*sizeof(double),erreur);
class_alloc(fac3,lmax*sizeof(double),erreur);
class_alloc(fac4,lmax*sizeof(double),erreur);
for (l=2;l<lmax;l++) {
ll = (double) l;
fac1[l] = sqrt((2*ll+3)*(2*ll+1)/((ll-1)*(ll+3)));
fac3[l] = sqrt((2*ll+3)*(ll-2)*(ll+2)/((2*ll-1)*(ll-1)*(ll+3)));
fac4[l] = sqrt(2./(2*ll+3));
}
#pragma omp parallel for \
private (index_mu,dlm1,dl,dlp1,l,ll) \
schedule (static)
for (index_mu=0;index_mu<num_mu;index_mu++) {
d20[index_mu][0]=0;
dlm1=0.; /*l=1*/
d20[index_mu][1]=0;
dl=sqrt(15.)/4.*(1-mu[index_mu]*mu[index_mu]); /*l=2*/
d20[index_mu][2] = dl * sqrt(2./5.);
for(l=2;l<lmax;l++){
ll=(double) l;
/* recurrence on sqrt((2l+1)/2)*d20, kept in scaled form for numerical stability */
dlp1 = fac1[l]*mu[index_mu]*dl - fac3[l]*dlm1;
d20[index_mu][l+1] = dlp1 * fac4[l];
dlm1 = dl;
dl = dlp1;
}
}
free(fac1); free(fac3); free(fac4);
return _SUCCESS_;
}
/**
* This routine computes the d31 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d31 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
int lensing_d31(
double * mu,
int num_mu,
int lmax,
double ** d31
) {
double ll, dlm1, dl, dlp1;
int index_mu, l;
double *fac1, *fac2, *fac3, *fac4;
ErrorMsg erreur;
class_alloc(fac1,lmax*sizeof(double),erreur);
class_alloc(fac2,lmax*sizeof(double),erreur);
class_alloc(fac3,lmax*sizeof(double),erreur);
class_alloc(fac4,lmax*sizeof(double),erreur);
for (l=3;l<lmax;l++) {
ll = (double) l;
fac1[l] = sqrt((2*ll+3)*(2*ll+1)/((ll-2)*(ll+4)*ll*(ll+2))) * (ll+1);
fac2[l] = 3.0/(ll*(ll+1));
fac3[l] = sqrt((2*ll+3)/(2*ll-1)*(ll-3)*(ll+3)*(ll-1)*(ll+1)/((ll-2)*(ll+4)*ll*(ll+2)))*(ll+1)/ll;
fac4[l] = sqrt(2./(2*ll+3));
}
#pragma omp parallel for \
private (index_mu,dlm1,dl,dlp1,l,ll) \
schedule (static)
for (index_mu=0;index_mu<num_mu;index_mu++) {
d31[index_mu][0]=0;
d31[index_mu][1]=0;
dlm1=0.; /*l=2*/
d31[index_mu][2]=0;
dl=sqrt(105./2.)*(1+mu[index_mu])*(1+mu[index_mu])*(1-mu[index_mu])/8.; /*l=3*/
d31[index_mu][3] = dl * sqrt(2./7.);
for(l=3;l<lmax;l++){
ll=(double) l;
/* recurrence on sqrt((2l+1)/2)*d31, kept in scaled form for numerical stability */
dlp1 = fac1[l]*(mu[index_mu]-fac2[l])*dl - fac3[l]*dlm1;
d31[index_mu][l+1] = dlp1 * fac4[l];
dlm1 = dl;
dl = dlp1;
}
}
free(fac1); free(fac2); free(fac3); free(fac4);
return _SUCCESS_;
}
/**
* This routine computes the d3m1 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d3m1 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
int lensing_d3m1(
double * mu,
int num_mu,
int lmax,
double ** d3m1
) {
double ll, dlm1, dl, dlp1;
int index_mu, l;
double *fac1, *fac2, *fac3, *fac4;
ErrorMsg erreur;
class_alloc(fac1,lmax*sizeof(double),erreur);
class_alloc(fac2,lmax*sizeof(double),erreur);
class_alloc(fac3,lmax*sizeof(double),erreur);
class_alloc(fac4,lmax*sizeof(double),erreur);
for (l=3;l<lmax;l++) {
ll = (double) l;
fac1[l] = sqrt((2*ll+3)*(2*ll+1)/((ll-2)*(ll+4)*ll*(ll+2))) * (ll+1);
fac2[l] = 3.0/(ll*(ll+1));
fac3[l] = sqrt((2*ll+3)/(2*ll-1)*(ll-3)*(ll+3)*(ll-1)*(ll+1)/((ll-2)*(ll+4)*ll*(ll+2)))*(ll+1)/ll;
fac4[l] = sqrt(2./(2*ll+3));
}
#pragma omp parallel for \
private (index_mu,dlm1,dl,dlp1,l,ll) \
schedule (static)
for (index_mu=0;index_mu<num_mu;index_mu++) {
d3m1[index_mu][0]=0;
d3m1[index_mu][1]=0;
dlm1=0.; /*l=2*/
d3m1[index_mu][2]=0;
dl=sqrt(105./2.)*(1+mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])/8.; /*l=3*/
d3m1[index_mu][3] = dl * sqrt(2./7.);
for(l=3;l<lmax;l++){
ll=(double) l;
/* recurrence on sqrt((2l+1)/2)*d3m1, kept in scaled form for numerical stability */
dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
d3m1[index_mu][l+1] = dlp1 * fac4[l];
dlm1 = dl;
dl = dlp1;
}
}
free(fac1); free(fac2); free(fac3); free(fac4);
return _SUCCESS_;
}
/**
* This routine computes the d3m3 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d3m3 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
int lensing_d3m3(
double * mu,
int num_mu,
int lmax,
double ** d3m3
) {
double ll, dlm1, dl, dlp1;
int index_mu, l;
double *fac1, *fac2, *fac3, *fac4;
ErrorMsg erreur;
class_alloc(fac1,lmax*sizeof(double),erreur);
class_alloc(fac2,lmax*sizeof(double),erreur);
class_alloc(fac3,lmax*sizeof(double),erreur);
class_alloc(fac4,lmax*sizeof(double),erreur);
for (l=3;l<lmax;l++) {
ll = (double) l;
fac1[l] = sqrt((2*ll+3)*(2*ll+1))*(ll+1)/((ll-2)*(ll+4));
fac2[l] = 9.0/(ll*(ll+1));
fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-3)*(ll+3)*(ll+1)/((ll-2)*(ll+4)*ll);
fac4[l] = sqrt(2./(2*ll+3));
}
#pragma omp parallel for \
private (index_mu,dlm1,dl,dlp1,l,ll) \
schedule (static)
for (index_mu=0;index_mu<num_mu;index_mu++) {
d3m3[index_mu][0]=0;
d3m3[index_mu][1]=0;
dlm1=0.; /*l=2*/
d3m3[index_mu][2]=0;
dl=sqrt(7./2.)*(1-mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])/8.; /*l=3*/
d3m3[index_mu][3] = dl * sqrt(2./7.);
for(l=3;l<lmax;l++){
ll=(double) l;
/* recurrence on sqrt((2l+1)/2)*d3m3, kept in scaled form for numerical stability */
dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
d3m3[index_mu][l+1] = dlp1 * fac4[l];
dlm1 = dl;
dl = dlp1;
}
}
free(fac1); free(fac2); free(fac3); free(fac4);
return _SUCCESS_;
}
/**
* This routine computes the d40 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d40 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
int lensing_d40(
double * mu,
int num_mu,
int lmax,
double ** d40
) {
double ll, dlm1, dl, dlp1;
int index_mu, l;
double *fac1, *fac3, *fac4;
ErrorMsg erreur;
class_alloc(fac1,lmax*sizeof(double),erreur);
class_alloc(fac3,lmax*sizeof(double),erreur);
class_alloc(fac4,lmax*sizeof(double),erreur);
for (l=4;l<lmax;l++) {
ll = (double) l;
fac1[l] = sqrt((2*ll+3)*(2*ll+1)/((ll-3)*(ll+5)));
fac3[l] = sqrt((2*ll+3)*(ll-4)*(ll+4)/((2*ll-1)*(ll-3)*(ll+5)));
fac4[l] = sqrt(2./(2*ll+3));
}
#pragma omp parallel for \
private (index_mu,dlm1,dl,dlp1,l,ll) \
schedule (static)
for (index_mu=0;index_mu<num_mu;index_mu++) {
d40[index_mu][0]=0;
d40[index_mu][1]=0;
d40[index_mu][2]=0;
dlm1=0.; /*l=3*/
d40[index_mu][3]=0;
dl=sqrt(315.)*(1+mu[index_mu])*(1+mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])/16.; /*l=4*/
d40[index_mu][4] = dl * sqrt(2./9.);
for(l=4;l<lmax;l++){
ll=(double) l;
/* recurrence on sqrt((2l+1)/2)*d40, kept in scaled form for numerical stability */
dlp1 = fac1[l]*mu[index_mu]*dl - fac3[l]*dlm1;
d40[index_mu][l+1] = dlp1 * fac4[l];
dlm1 = dl;
dl = dlp1;
}
}
free(fac1); free(fac3); free(fac4);
return _SUCCESS_;
}
/**
* This routine computes the d4m2 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d4m2 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
int lensing_d4m2(
double * mu,
int num_mu,
int lmax,
double ** d4m2
) {
double ll, dlm1, dl, dlp1;
int index_mu, l;
double *fac1, *fac2, *fac3, *fac4;
ErrorMsg erreur;
class_alloc(fac1,lmax*sizeof(double),erreur);
class_alloc(fac2,lmax*sizeof(double),erreur);
class_alloc(fac3,lmax*sizeof(double),erreur);
class_alloc(fac4,lmax*sizeof(double),erreur);
for (l=4;l<lmax;l++) {
ll = (double) l;
fac1[l] = sqrt((2*ll+3)*(2*ll+1)/((ll-3)*(ll+5)*(ll-1)*(ll+3))) * (ll+1.);
fac2[l] = 8./(ll*(ll+1));
fac3[l] = sqrt((2*ll+3)*(ll-4)*(ll+4)*(ll-2)*(ll+2)/((2*ll-1)*(ll-3)*(ll+5)*(ll-1)*(ll+3)))*(ll+1)/ll;
fac4[l] = sqrt(2./(2*ll+3));
}
#pragma omp parallel for \
private (index_mu,dlm1,dl,dlp1,l,ll) \
schedule (static)
for (index_mu=0;index_mu<num_mu;index_mu++) {
d4m2[index_mu][0]=0;
d4m2[index_mu][1]=0;
d4m2[index_mu][2]=0;
dlm1=0.; /*l=3*/
d4m2[index_mu][3]=0;
dl=sqrt(126.)*(1+mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])/16.; /*l=4*/
d4m2[index_mu][4] = dl * sqrt(2./9.);
for(l=4;l<lmax;l++){
ll=(double) l;
/* recurrence on sqrt((2l+1)/2)*d4m2, kept in scaled form for numerical stability */
dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
d4m2[index_mu][l+1] = dlp1 * fac4[l];
dlm1 = dl;
dl = dlp1;
}
}
free(fac1); free(fac2); free(fac3); free(fac4);
return _SUCCESS_;
}
/**
* This routine computes the d4m4 term
*
* @param mu Input: Vector of cos(beta) values
* @param num_mu Input: Number of cos(beta) values
* @param lmax Input: maximum multipole
* @param d4m4 Input/output: Result is stored here
*
* Wigner d-functions, computed by recurrence
* actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
* Formulae from Kostelec & Rockmore 2003
**/
int lensing_d4m4(
double * mu,
int num_mu,
int lmax,
double ** d4m4
) {
double ll, dlm1, dl, dlp1;
int index_mu, l;
double *fac1, *fac2, *fac3, *fac4;
ErrorMsg erreur;
class_alloc(fac1,lmax*sizeof(double),erreur);
class_alloc(fac2,lmax*sizeof(double),erreur);
class_alloc(fac3,lmax*sizeof(double),erreur);
class_alloc(fac4,lmax*sizeof(double),erreur);
for (l=4;l<lmax;l++) {
ll = (double) l;
fac1[l] = sqrt((2*ll+3)*(2*ll+1))*(ll+1)/((ll-3)*(ll+5));
fac2[l] = 16./(ll*(ll+1));
fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-4)*(ll+4)*(ll+1)/((ll-3)*(ll+5)*ll);
fac4[l] = sqrt(2./(2*ll+3));
}
#pragma omp parallel for \
private (index_mu,dlm1,dl,dlp1,l,ll) \
schedule (static)
for (index_mu=0;index_mu<num_mu;index_mu++) {
d4m4[index_mu][0]=0;
d4m4[index_mu][1]=0;
d4m4[index_mu][2]=0;
dlm1=0.; /*l=3*/
d4m4[index_mu][3]=0;
dl=sqrt(9./2.)*(1-mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])/16.; /*l=4*/
d4m4[index_mu][4] = dl * sqrt(2./9.);
for(l=4;l<lmax;l++){
ll=(double) l;
/* recurrence on sqrt((2l+1)/2)*d4m4, kept in scaled form for numerical stability */
dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
d4m4[index_mu][l+1] = dlp1 * fac4[l];
dlm1 = dl;
dl = dlp1;
}
}
free(fac1); free(fac2); free(fac3); free(fac4);
return _SUCCESS_;
}
|
GB_binop__bxnor_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bxnor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_01__bxnor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__bxnor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_03__bxnor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bxnor_uint8)
// A*D function (colscale): GB (_AxD__bxnor_uint8)
// D*A function (rowscale): GB (_DxB__bxnor_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__bxnor_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__bxnor_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxnor_uint8)
// C=scalar+B GB (_bind1st__bxnor_uint8)
// C=scalar+B' GB (_bind1st_tran__bxnor_uint8)
// C=A+scalar GB (_bind2nd__bxnor_uint8)
// C=A'+scalar GB (_bind2nd_tran__bxnor_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = ~((aij) ^ (bij))
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ~((x) ^ (y)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXNOR || GxB_NO_UINT8 || GxB_NO_BXNOR_UINT8)
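// Worked example of the BXNOR operator on uint8 operands (illustrative only):
//   aij = 0x0F, bij = 0xF0  ->  ~(0x0F ^ 0xF0) = ~0xFF = 0x00 (after truncation to 8 bits)
//   aij = 0xAA, bij = 0xAA  ->  ~(0xAA ^ 0xAA) = ~0x00 = 0xFF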
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__bxnor_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bxnor_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bxnor_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__bxnor_uint8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__bxnor_uint8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bxnor_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__bxnor_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bxnor_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__bxnor_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bxnor_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bxnor_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = ~((x) ^ (bij)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bxnor_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = ~((aij) ^ (y)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ~((x) ^ (aij)) ; \
}
GrB_Info GB (_bind1st_tran__bxnor_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ~((aij) ^ (y)) ; \
}
GrB_Info GB (_bind2nd_tran__bxnor_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
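/* Worked example: x = {5 s, 200000 us}, y = {3 s, 800000 us}
 * -> result = {1 s, 400000 us}, return value 0 (difference is non-negative). */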
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx = 34, Ny = 34, Nz = 34, Nt = 10;  /* fallback sizes (arbitrary defaults), used only when not supplied on the command line */
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including a trailing sentinel (-1) marking the end of the list
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 32;
tile_size[1] = 32;
tile_size[2] = 4;
tile_size[3] = 512;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 7
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] +
coef[1][i][j][k] * A[t%2][i-1][j ][k ] +
coef[2][i][j][k] * A[t%2][i ][j-1][k ] +
coef[3][i][j][k] * A[t%2][i ][j ][k-1] +
coef[4][i][j][k] * A[t%2][i+1][j ][k ] +
coef[5][i][j][k] * A[t%2][i ][j+1][k ] +
coef[6][i][j][k] * A[t%2][i ][j ][k+1];
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
station_stat.c | #include "station_stat.h"
void station_stat_init(station_stat_t* station_stat)
{
for (int i = 0; i < 2; i++) {
station_stat->num_door_opening[i] = 0;
station_stat->total_wait_time[i] = 0;
station_stat->min_wait_time[i] = 0;
station_stat->max_wait_time[i] = 0;
station_stat->last_closed_time[i] = 0;
}
}
void station_stat_open_door(station_stat_t* station_stat, int current_time, int duration, bool forward)
{
#pragma omp critical
{
int idx = forward ? STATION_STAT_FORWARD : STATION_STAT_REVERSE;
int time_taken = current_time - station_stat->last_closed_time[idx];
if (station_stat->num_door_opening[idx] == 0) {
// Do not count waiting time of first train
} else {
station_stat->total_wait_time[idx] += time_taken;
if (station_stat->num_door_opening[idx] == 1) {
station_stat->min_wait_time[idx] = time_taken;
station_stat->max_wait_time[idx] = time_taken;
} else {
station_stat->min_wait_time[idx] = min(station_stat->min_wait_time[idx], time_taken);
station_stat->max_wait_time[idx] = max(station_stat->max_wait_time[idx], time_taken);
}
}
station_stat->last_closed_time[idx] = current_time + duration;
station_stat->num_door_opening[idx]++;
}
}
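/* Minimal usage sketch (illustrative, not part of the simulator): several
 * threads reporting forward-direction door openings for one station. The
 * times and the surrounding loop are made up for this example; the critical
 * section above is what makes the concurrent updates safe. */
#ifdef STATION_STAT_USAGE_SKETCH
static void station_stat_usage_sketch(void)
{
    station_stat_t s;
    station_stat_init(&s);
    #pragma omp parallel for
    for (int t = 0; t < 8; t++) {
        /* a hypothetical train opens the door for 2 ticks at time 10*t */
        station_stat_open_door(&s, 10 * t, 2, true);
    }
}
#endif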
|
omp_simd_safelen.c | //Variable examples of using simd directives
void foo (int n, double *a, double* b)
{
for (int i=0; i<n; i++)
a[i]=b[i];
}
void foo2 (int n, double *a, double* b)
{
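/* safelen(16): the programmer asserts that executing up to 16 iterations
   concurrently with SIMD instructions is safe for this loop. */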
#pragma omp simd safelen(16)
for (int i=0; i<n; i++)
a[i]=b[i];
}
void foo3 (int n, double *a, double* b)
{
int j=0;
for (int i=0; i<n; i++,j++)
{
a[i]=b[i]+j;
}
}
void foo32 (int n, double *a, double* b)
{
int j=0, k=0;
for (int i=0; i<n; i++,j++,k++)
{
a[i]=b[i]+j+k;
}
}
void foo33 (int n, double *a, double* b)
{
int j=0, k=0;
for (int i=0; i<n; i++,j++,k++)
{
a[i]=b[i]+j+k;
}
}
void fooAligned (int n, double *a, double* b)
{
int j=0, k=0;
for (int i=0; i<n; i++,j++,k++)
{
a[i]=b[i]+j+k;
}
}
void fooAligned2 (int n, double *a, double* b)
{
int j=0, k=0;
for (int i=0; i<n; i++,j++,k++)
{
a[i]=b[i]+j+k;
}
}
double work( double *a, double *b, int n )
{
int i;
double tmp, sum;
sum = 0.0;
for (i = 0; i < n; i++) {
tmp = a[i] + b[i];
sum += tmp;
}
return sum;
}
#define N 45
int a[N], b[N], c[N];
void foo4(int i, double* P)
{
int j;
for (i = 0; i < 999; ++i) {
j = P[i];
}
}
void work2( double **a, double **b, double **c, int n )
{
int i, j;
double tmp;
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
tmp = a[i][j] + b[i][j];
c[i][j] = tmp;
}
}
}
void work3( double **a, double **b, double **c, int n )
{
int i, j;
double tmp;
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
tmp = a[i][j] + b[i][j];
c[i][j] = tmp;
}
}
}
// declare simd can show up several times!
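// (A `#pragma omp declare simd` directive would normally precede the function
//  here so that SIMD-callable versions of bar are generated; it appears to have
//  been stripped from this reduced example.)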
float bar(int * p) {
*p = *p +10;
return *p;
}
// declare simd can show up several times!
float bar2(int * p) {
*p = *p +10;
return *p;
}
|
bucle-for.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int main(int argc, char **argv) {
int i, n = 9;
if(argc < 2) {
fprintf(stderr,"\n[ERROR] - Missing number of iterations \n");
exit(-1);
}
n = atoi(argv[1]);
#pragma omp parallel
{
#pragma omp for
for (i=0; i<n; i++)
printf("thread %d ejecuta la iteración %d del bucle\n",
omp_get_thread_num(),i);
}
return(0);
}
|
GeneralMatrixMatrix.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H
namespace Eigen {
namespace internal {
template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;
/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static EIGEN_STRONG_INLINE void run(
Index rows, Index cols, Index depth,
const LhsScalar* lhs, Index lhsStride,
const RhsScalar* rhs, Index rhsStride,
ResScalar* res, Index resStride,
ResScalar alpha,
level3_blocking<RhsScalar,LhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
// transpose the product such that the result is column major
general_matrix_matrix_product<Index,
RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
ColMajor>
::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
}
};
/* Specialization for a col-major destination matrix
* => Blocking algorithm following Goto's paper */
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{
typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static void run(Index rows, Index cols, Index depth,
const LhsScalar* _lhs, Index lhsStride,
const RhsScalar* _rhs, Index rhsStride,
ResScalar* res, Index resStride,
ResScalar alpha,
level3_blocking<LhsScalar,RhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
//Index nc = blocking.nc(); // cache block size along the N direction
gemm_pack_lhs<LhsScalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<RhsScalar, Index, Traits::nr, RhsStorageOrder> pack_rhs;
gebp_kernel<LhsScalar, RhsScalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;
#ifdef EIGEN_HAS_OPENMP
if(info)
{
// this is the parallel version!
Index tid = omp_get_thread_num();
Index threads = omp_get_num_threads();
std::size_t sizeA = kc*mc;
std::size_t sizeW = kc*Traits::WorkSpaceFactor;
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, 0);
ei_declare_aligned_stack_constructed_variable(RhsScalar, w, sizeW, 0);
RhsScalar* blockB = blocking.blockB();
eigen_internal_assert(blockB!=0);
// For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
for(Index k=0; k<depth; k+=kc)
{
const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'
// In order to reduce the chance that a thread has to wait for the other,
// let's start by packing A'.
pack_lhs(blockA, &lhs(0,k), lhsStride, actual_kc, mc);
// Pack B_k to B' in a parallel fashion:
// each thread packs the sub block B_k,j to B'_j where j is the thread id.
// However, before copying to B'_j, we have to make sure that no other thread is still using it,
// i.e., we test that info[tid].users equals 0.
// Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
while(info[tid].users!=0) {}
info[tid].users += threads;
pack_rhs(blockB+info[tid].rhs_start*actual_kc, &rhs(k,info[tid].rhs_start), rhsStride, actual_kc, info[tid].rhs_length);
// Notify the other threads that the part B'_j is ready to go.
info[tid].sync = k;
// Computes C_i += A' * B' per B'_j
for(Index shift=0; shift<threads; ++shift)
{
Index j = (tid+shift)%threads;
// At this point we have to make sure that B'_j has been updated by the thread j,
// we use testAndSetOrdered to mimic a volatile access.
// However, no need to wait for the B' part which has been updated by the current thread!
if(shift>0)
while(info[j].sync!=k) {}
gebp(res+info[j].rhs_start*resStride, resStride, blockA, blockB+info[j].rhs_start*actual_kc, mc, actual_kc, info[j].rhs_length, alpha, -1,-1,0,0, w);
}
// Then keep going as usual with the remaining A'
for(Index i=mc; i<rows; i+=mc)
{
const Index actual_mc = (std::min)(i+mc,rows)-i;
// pack A_i,k to A'
pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc);
// C_i += A' * B'
gebp(res+i, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1,-1,0,0, w);
}
// Release all the sub blocks B'_j of B' for the current thread,
// i.e., we simply decrement the number of users by 1
for(Index j=0; j<threads; ++j)
{
#pragma omp atomic
#if defined(_MSC_VER) && _MSC_VER >= 1900
(info[j].users) -= 1;
#else
--(info[j].users);
#endif
}
}
}
else
#endif // EIGEN_HAS_OPENMP
{
EIGEN_UNUSED_VARIABLE(info);
// this is the sequential version!
std::size_t sizeA = kc*mc;
std::size_t sizeB = kc*cols;
std::size_t sizeW = kc*Traits::WorkSpaceFactor;
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockW, sizeW, blocking.blockW());
// For each horizontal panel of the rhs, and corresponding panel of the lhs...
// (==GEMM_VAR1)
for(Index k2=0; k2<depth; k2+=kc)
{
const Index actual_kc = (std::min)(k2+kc,depth)-k2;
// OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
// => Pack rhs's panel into a sequential chunk of memory (L2 caching)
// Note that this panel will be read as many times as the number of blocks in the lhs's
// vertical panel which is, in practice, a very low number.
pack_rhs(blockB, &rhs(k2,0), rhsStride, actual_kc, cols);
// For each mc x kc block of the lhs's vertical panel...
// (==GEPP_VAR1)
for(Index i2=0; i2<rows; i2+=mc)
{
const Index actual_mc = (std::min)(i2+mc,rows)-i2;
// We pack the lhs's block into a sequential chunk of memory (L1 caching)
// Note that this block will be read a very high number of times, which is equal to the number of
// micro vertical panel of the large rhs's panel (e.g., cols/4 times).
pack_lhs(blockA, &lhs(i2,k2), lhsStride, actual_kc, actual_mc);
// Everything is packed, we can now call the block * panel kernel:
gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1, -1, 0, 0, blockW);
}
}
}
}
};
/*********************************************************************************
* Specialization of GeneralProduct<> for "large" GEMM, i.e.,
* implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/
template<typename Lhs, typename Rhs>
struct traits<GeneralProduct<Lhs,Rhs,GemmProduct> >
: traits<ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs> >
{};
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha,
BlockingType& blocking)
: m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
{}
void initParallelSession() const
{
m_blocking.allocateB();
}
void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
{
if(cols==-1)
cols = m_rhs.cols();
Gemm::run(rows, cols, m_lhs.cols(),
/*(const Scalar*)*/&m_lhs.coeffRef(row,0), m_lhs.outerStride(),
/*(const Scalar*)*/&m_rhs.coeffRef(0,col), m_rhs.outerStride(),
(Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
m_actualAlpha, m_blocking, info);
}
protected:
const Lhs& m_lhs;
const Rhs& m_rhs;
Dest& m_dest;
Scalar m_actualAlpha;
BlockingType& m_blocking;
};
template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
typedef _LhsScalar LhsScalar;
typedef _RhsScalar RhsScalar;
protected:
LhsScalar* m_blockA;
RhsScalar* m_blockB;
RhsScalar* m_blockW;
DenseIndex m_mc;
DenseIndex m_nc;
DenseIndex m_kc;
public:
level3_blocking()
: m_blockA(0), m_blockB(0), m_blockW(0), m_mc(0), m_nc(0), m_kc(0)
{}
inline DenseIndex mc() const { return m_mc; }
inline DenseIndex nc() const { return m_nc; }
inline DenseIndex kc() const { return m_kc; }
inline LhsScalar* blockA() { return m_blockA; }
inline RhsScalar* blockB() { return m_blockB; }
inline RhsScalar* blockW() { return m_blockW; }
};
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
enum {
Transpose = StorageOrder==RowMajor,
ActualRows = Transpose ? MaxCols : MaxRows,
ActualCols = Transpose ? MaxRows : MaxCols
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
enum {
SizeA = ActualRows * MaxDepth,
SizeB = ActualCols * MaxDepth,
SizeW = MaxDepth * Traits::WorkSpaceFactor
};
EIGEN_ALIGN16 LhsScalar m_staticA[SizeA];
EIGEN_ALIGN16 RhsScalar m_staticB[SizeB];
EIGEN_ALIGN16 RhsScalar m_staticW[SizeW];
public:
gemm_blocking_space(DenseIndex /*rows*/, DenseIndex /*cols*/, DenseIndex /*depth*/)
{
this->m_mc = ActualRows;
this->m_nc = ActualCols;
this->m_kc = MaxDepth;
this->m_blockA = m_staticA;
this->m_blockB = m_staticB;
this->m_blockW = m_staticW;
}
inline void allocateA() {}
inline void allocateB() {}
inline void allocateW() {}
inline void allocateAll() {}
};
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
enum {
Transpose = StorageOrder==RowMajor
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
DenseIndex m_sizeA;
DenseIndex m_sizeB;
DenseIndex m_sizeW;
public:
gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth)
{
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc);
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
m_sizeW = this->m_kc*Traits::WorkSpaceFactor;
}
void allocateA()
{
if(this->m_blockA==0)
this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
}
void allocateB()
{
if(this->m_blockB==0)
this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
}
void allocateW()
{
if(this->m_blockW==0)
this->m_blockW = aligned_new<RhsScalar>(m_sizeW);
}
void allocateAll()
{
allocateA();
allocateB();
allocateW();
}
~gemm_blocking_space()
{
aligned_delete(this->m_blockA, m_sizeA);
aligned_delete(this->m_blockB, m_sizeB);
aligned_delete(this->m_blockW, m_sizeW);
}
};
} // end namespace internal
template<typename Lhs, typename Rhs>
class GeneralProduct<Lhs, Rhs, GemmProduct>
: public ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs>
{
enum {
MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
};
public:
EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)
typedef typename Lhs::Scalar LhsScalar;
typedef typename Rhs::Scalar RhsScalar;
typedef Scalar ResScalar;
GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
{
typedef internal::scalar_product_op<LhsScalar,RhsScalar> BinOp;
EIGEN_CHECK_BINARY_COMPATIBILIY(BinOp,LhsScalar,RhsScalar);
}
template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
{
eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());
typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(m_lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(m_rhs);
Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
* RhsBlasTraits::extractScalarFactor(m_rhs);
typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;
typedef internal::gemm_functor<
Scalar, Index,
internal::general_matrix_matrix_product<
Index,
LhsScalar, (_ActualLhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
RhsScalar, (_ActualRhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
_ActualLhsType, _ActualRhsType, Dest, BlockingType> GemmFunctor;
BlockingType blocking(dst.rows(), dst.cols(), lhs.cols());
internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), this->rows(), this->cols(), Dest::Flags&RowMajorBit);
}
};
} // end namespace Eigen
#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
|
4099.c | // this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c' as parsed by frontend compiler rose
void kernel_heat_3d(int tsteps, int n, double A[200 + 0][200 + 0][200 + 0], double B[200 + 0][200 + 0][200 + 0]) {
int t12;
int t10;
int t8;
int t6;
int t4;
int t2;
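/* Two Jacobi sweeps per time step (A -> B, then B -> A). The first two spatial
   dimensions are tiled in blocks of 16, the outer tile loop is parallelized
   with OpenMP, and the innermost dimension is left untiled.
   Note: the tsteps argument is unused; the time loop is hard-coded to 1000. */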
for (t2 = 1; t2 <= 1000; t2 += 1) {
#pragma omp parallel for private(t4,t6,t8,t10,t12)
for (t4 = 1; t4 <= n - 2; t4 += 16)
for (t6 = t4; t6 <= (t4 + 15 < n - 2 ? t4 + 15 : n - 2); t6 += 1)
for (t8 = 1; t8 <= n - 2; t8 += 16)
for (t10 = t8; t10 <= (t8 + 15 < n - 2 ? t8 + 15 : n - 2); t10 += 1)
for (t12 = 1; t12 <= n - 2; t12 += 1)
B[t6][t10][t12] = 0.125 * (A[t6 + 1][t10][t12] - 2 * A[t6][t10][t12] + A[t6 - 1][t10][t12]) + 0.125 * (A[t6][t10 + 1][t12] - 2 * A[t6][t10][t12] + A[t6][t10 - 1][t12]) + 0.125 * (A[t6][t10][t12 + 1] - 2 * A[t6][t10][t12] + A[t6][t10][t12 - 1]) + A[t6][t10][t12];
#pragma omp parallel for private(t4,t6,t8,t10,t12)
for (t4 = 1; t4 <= n - 2; t4 += 16)
for (t6 = t4; t6 <= (t4 + 15 < n - 2 ? t4 + 15 : n - 2); t6 += 1)
for (t8 = 1; t8 <= n - 2; t8 += 16)
for (t10 = t8; t10 <= (t8 + 15 < n - 2 ? t8 + 15 : n - 2); t10 += 1)
for (t12 = 1; t12 <= n - 2; t12 += 1)
A[t6][t10][t12] = 0.125 * (B[t6 + 1][t10][t12] - 2 * B[t6][t10][t12] + B[t6 - 1][t10][t12]) + 0.125 * (B[t6][t10 + 1][t12] - 2 * B[t6][t10][t12] + B[t6][t10 - 1][t12]) + 0.125 * (B[t6][t10][t12 + 1] - 2 * B[t6][t10][t12] + B[t6][t10][t12 - 1]) + B[t6][t10][t12];
}
}
|
passageMarker.c | /*
Copyright 2007, 2008 Daniel Zerbino (zerbino@ebi.ac.uk)
This file is part of Velvet.
Velvet is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
Velvet is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Velvet; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdlib.h>
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "globals.h"
#include "allocArray.h"
#include "graph.h"
#include "recycleBin.h"
#include "passageMarker.h"
#include "tightString.h"
#include "utility.h"
typedef struct passage_st PassageMarker;
struct passage_st {
struct node_st *node;
PassageMarkerI nextInNode;
PassageMarkerI previousInNode;
PassageMarkerI twinMarker;
PassageMarkerI nextInSequence;
IDnum start;
IDnum finishOffset;
IDnum sequenceID;
boolean status;
} ATTRIBUTE_PACKED;
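/* Each marker is paired with a twin that represents the same read traversed in
 * the opposite direction: nextInNode/previousInNode chain the markers attached
 * to a node, while nextInSequence follows the read itself (see setNextInNode
 * and getPreviousInSequence below, which keep the two linked lists and the
 * twin pointers consistent). */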
static AllocArray *markerMemory = NULL;
DECLARE_FAST_ACCESSORS (PM, PassageMarker, markerMemory)
static RecycleBin *listMemory = NULL;
static const int LISTBLOCKSIZE = 10000;
PassageMarkerI allocatePassageMarker()
{
if (markerMemory == NULL)
markerMemory =
newAllocArray (sizeof(PassageMarker), "PassageMarker");
return allocArrayAllocate (markerMemory);
}
static void deallocatePassageMarker(PassageMarkerI marker)
{
allocArrayFree(markerMemory, marker);
}
PassageMarkerList *allocatePassageMarkerList()
{
if (listMemory == NULL)
listMemory =
newRecycleBin(sizeof(PassageMarkerList),
LISTBLOCKSIZE);
return (PassageMarkerList *) allocatePointer(listMemory);
}
void deallocatePassageMarkerList(PassageMarkerList * marker)
{
deallocatePointer(listMemory, marker);
}
void setNextInSequence(PassageMarkerI previous, PassageMarkerI next)
{
if (previous == NULL_IDX)
return;
PM_FI2P (previous)->nextInSequence = next;
}
void extractPassageMarker(PassageMarkerI marker)
{
PassageMarker *twin;
PassageMarker *markerVal;
if (marker == NULL_IDX)
return;
markerVal = PM_FI2P (marker);
if (markerVal->node == NULL_IDX)
return;
if (markerVal->previousInNode == marker)
setMarker(markerVal->node, markerVal->nextInNode);
else
setNextInNode(markerVal->previousInNode, markerVal->nextInNode);
markerVal->previousInNode = NULL_IDX;
markerVal->nextInNode = NULL_IDX;
markerVal->node = NULL_IDX;
twin = PM_FI2P (markerVal->twinMarker);
twin->nextInNode = NULL_IDX;
twin->previousInNode = NULL_IDX;
twin->node = NULL_IDX;
}
void destroyPassageMarker(PassageMarkerI marker)
{
PassageMarker *markerVal;
PassageMarker *twinVal;
PassageMarkerI twin;
if (marker == NULL_IDX)
return;
markerVal = PM_FI2P (marker);
twin = markerVal->twinMarker;
extractPassageMarker(marker);
if (markerVal->nextInSequence != NULL_IDX
&& PM_FI2P (PM_FI2P (markerVal->nextInSequence)->twinMarker)->nextInSequence == twin)
PM_FI2P (PM_FI2P (markerVal->nextInSequence)->twinMarker)->nextInSequence = NULL_IDX;
twinVal = PM_FI2P (twin);
if (twinVal->nextInSequence != NULL_IDX
&& PM_FI2P (PM_FI2P (twinVal->nextInSequence)->twinMarker)->nextInSequence == marker)
PM_FI2P (PM_FI2P (twinVal->nextInSequence)->twinMarker)->nextInSequence = NULL_IDX;
deallocatePassageMarker(twin);
deallocatePassageMarker(marker);
//velvetLog("Done destroying passage marker\n");
}
void destroyAllPassageMarkers()
{
if (markerMemory != NULL)
destroyAllocArray(markerMemory);
if (listMemory != NULL)
destroyRecycleBin(listMemory);
}
void setPreviousInSequence(PassageMarkerI previous, PassageMarkerI next)
{
if (next == NULL_IDX)
return;
else if (previous == NULL_IDX)
PM_FI2P (PM_FI2P (next)->twinMarker)->nextInSequence = NULL_IDX;
else
PM_FI2P (PM_FI2P (next)->twinMarker)->nextInSequence = PM_FI2P (previous)->twinMarker;
}
void disconnectNextPassageMarker(PassageMarkerI marker, Graph * graph)
{
PassageMarkerI middle = getNextInSequence(marker);
PassageMarkerI next = getNextInSequence(middle);
setPreviousInSequence(marker, next);
concatenatePassageMarkers(marker, middle);
setNextInSequence(middle, NULL_IDX);
setPreviousInSequence(NULL_IDX, middle);
}
void deleteNextPassageMarker(PassageMarkerI marker, Graph * graph)
{
PassageMarkerI middle = getNextInSequence(marker);
PassageMarkerI next = getNextInSequence(middle);
setPreviousInSequence(marker, next);
setNextInSequence(marker, next);
setNextInSequence(middle, NULL_IDX);
setPreviousInSequence(NULL_IDX, middle);
}
PassageMarkerI getNextInNode(PassageMarkerI marker)
{
if (marker == NULL_IDX)
return NULL_IDX;
return PM_FI2P (marker)->nextInNode;
}
void setNextInNode(PassageMarkerI marker, PassageMarkerI next)
{
PassageMarker *markerVal;
// DEBUG
if (next == marker || next == getTwinMarker(marker))
abort();
if (marker == NULL_IDX)
return;
markerVal = PM_FI2P (marker);
if (next == NULL_IDX) {
markerVal->nextInNode = NULL_IDX;
PM_FI2P (markerVal->twinMarker)->nextInNode = NULL_IDX;
} else {
PassageMarker *nextVal;
if (markerVal->twinMarker == NULL_IDX) {
velvetLog("Dead marker in node %li %li\n",
(long) getNodeID(getNode(marker)),
(long) getPassageMarkerSequenceID(marker));
abort();
}
nextVal = PM_FI2P (next);
markerVal->nextInNode = next;
PM_FI2P (markerVal->twinMarker)->nextInNode = nextVal->twinMarker;
nextVal->previousInNode = marker;
PM_FI2P (nextVal->twinMarker)->previousInNode = markerVal->twinMarker;
}
}
void setTopOfTheNode(PassageMarkerI marker)
{
if (marker == NULL_IDX)
return;
PM_FI2P (marker)->previousInNode = marker;
}
PassageMarkerI getNextInSequence(PassageMarkerI marker)
{
if (marker == NULL_IDX)
return NULL_IDX;
return PM_FI2P (marker)->nextInSequence;
}
PassageMarkerI getPreviousInSequence(PassageMarkerI marker)
{
PassageMarker *twinVal;
if (marker == NULL_IDX)
return NULL_IDX;
twinVal = PM_FI2P (PM_FI2P (marker)->twinMarker);
if (twinVal->nextInSequence == NULL_IDX)
return NULL_IDX;
return PM_FI2P (twinVal->nextInSequence)->twinMarker;
}
void
connectPassageMarkers(PassageMarkerI previous, PassageMarkerI next,
Graph * graph)
{
if (previous != NULL_IDX)
setNextInSequence(previous, next);
if (next != NULL_IDX)
setPreviousInSequence(previous, next);
}
char *readPassageMarker(PassageMarkerI marker)
{
PassageMarker *markerVal;
char *s = mallocOrExit(100, char);
if (marker == NULL_IDX)
return s;
markerVal = PM_FI2P (marker);
sprintf(s, "MARKER %ld (%lld -> %lld):", (long) markerVal->sequenceID,
(long long) markerVal->start, (long long) getPassageMarkerFinish(marker));
if (getPreviousInSequence(marker) == NULL_IDX)
sprintf(s, "%s START -> %ld", s,
(long) getNodeID(getNode(marker)));
else
sprintf(s, "%s %ld -> %ld", s,
(long) getNodeID(getNode(getPreviousInSequence(marker))),
(long) getNodeID(getNode(marker)));
if (getNextInSequence(marker) == NULL_IDX)
sprintf(s, "%s -> FINISH", s);
else
sprintf(s, "%s -> %ld ", s,
(long) getNodeID(getNode(getNextInSequence(marker))));
return s;
}
PassageMarkerI addPassageMarker(IDnum sequenceID, Coordinate start,
Node * node)
{
PassageMarkerI marker = allocatePassageMarker();
PassageMarkerI twinMarker = allocatePassageMarker();
PassageMarker *markerVal;
PassageMarker *twinVal;
markerVal = PM_FI2P (marker);
twinVal = PM_FI2P (twinMarker);
markerVal->sequenceID = sequenceID;
markerVal->start = start;
markerVal->node = node;
markerVal->nextInSequence = NULL_IDX;
markerVal->finishOffset = 0;
markerVal->twinMarker = twinMarker;
markerVal->status = false;
twinVal->sequenceID = -sequenceID;
twinVal->start = start + getNodeLength(node);
twinVal->node = getTwinNode(node);
twinVal->nextInSequence = NULL_IDX;
twinVal->finishOffset = 0;
twinVal->twinMarker = marker;
twinVal->status = false;
setNextInNode(marker, getMarker(node));
setMarker(node, marker);
return marker;
}
PassageMarkerList *copyPassageMarkerList(PassageMarkerList * list)
{
PassageMarkerList *copy;
PassageMarkerList *result = NULL;
PassageMarkerList *pointer;
if (list == NULL)
return NULL;
for (pointer = list; pointer != NULL; pointer = pointer->next) {
copy = allocatePassageMarkerList();
copy->marker = pointer->marker;
copy->next = result;
result = copy;
}
return result;
}
void incrementFinishOffset(PassageMarkerI marker, Coordinate offset)
{
PM_FI2P (marker)->finishOffset += offset;
}
void incrementStartOffset(PassageMarkerI marker, Coordinate offset)
{
PM_FI2P (PM_FI2P (marker)->twinMarker)->finishOffset += offset;
}
Coordinate getFinishOffset(PassageMarkerI marker)
{
return PM_FI2P (marker)->finishOffset;
}
void setFinishOffset(PassageMarkerI marker, Coordinate offset)
{
PM_FI2P (marker)->finishOffset = offset;
}
Coordinate getStartOffset(PassageMarkerI marker)
{
return PM_FI2P (PM_FI2P (marker)->twinMarker)->finishOffset;
}
void setStartOffset(PassageMarkerI marker, Coordinate offset)
{
PM_FI2P (PM_FI2P (marker)->twinMarker)->finishOffset = offset;
}
void transposePassageMarker(PassageMarkerI marker, Node * node)
{
PassageMarker *markerVal;
PassageMarker *twinMarkerVal;
markerVal = PM_FI2P (marker);
twinMarkerVal = PM_FI2P (markerVal->twinMarker);
insertPassageMarker(marker, node);
markerVal->node = node;
insertPassageMarker(markerVal->twinMarker, getTwinNode(node));
twinMarkerVal->node = getTwinNode(node);
}
PassageMarkerI getTwinMarker(PassageMarkerI marker)
{
return PM_FI2P (marker)->twinMarker;
}
IDnum getPassageMarkerSequenceID(PassageMarkerI marker)
{
return PM_FI2P (marker)->sequenceID;
}
IDnum getAbsolutePassMarkerSeqID(PassageMarkerI marker)
{
IDnum ID = PM_FI2P (marker)->sequenceID;
if (ID > 0)
return ID;
else
return -ID;
}
Node *getNode(PassageMarkerI marker)
{
if (marker == NULL_IDX)
return NULL;
return PM_FI2P (marker)->node;
}
void concatenatePassageMarkers(PassageMarkerI marker,
PassageMarkerI next)
{
PassageMarker *markerVal;
PassageMarker *nextVal;
if (marker == NULL_IDX || next == NULL_IDX)
return;
markerVal = PM_FI2P (marker);
nextVal = PM_FI2P (next);
markerVal->finishOffset = nextVal->finishOffset;
PM_FI2P (markerVal->twinMarker)->start = PM_FI2P (nextVal->twinMarker)->start;
markerVal->nextInSequence = nextVal->nextInSequence;
}
boolean getPassageMarkerStatus(PassageMarkerI marker)
{
return PM_FI2P (marker)->status;
}
void setPassageMarkerStatus(PassageMarkerI marker, boolean status)
{
PassageMarker *markerVal = PM_FI2P (marker);
markerVal->status = status;
PM_FI2P (markerVal->twinMarker)->status = status;
}
boolean isDestinationToMarker(PassageMarkerI marker, Node * node)
{
PassageMarker *markerVal = PM_FI2P (marker);
if (markerVal->nextInSequence == NULL_IDX)
return false;
return PM_FI2P (markerVal->nextInSequence)->node == node;
}
boolean isTerminal(PassageMarkerI marker)
{
PassageMarker *markerVal;
if (marker == NULL_IDX)
return false;
markerVal = PM_FI2P (marker);
return markerVal->nextInSequence == NULL_IDX;
}
boolean isInitial(PassageMarkerI marker)
{
PassageMarker *markerVal;
if (marker == NULL_IDX)
return false;
markerVal = PM_FI2P (marker);
if (markerVal->twinMarker == NULL_IDX) {
velvetLog("Unpaired marker seq %ld start %lld node %ld\n",
(long) markerVal->sequenceID, (long long) markerVal->start,
(long) getNodeID(markerVal->node));
velvetLog("SNAFU\n");
abort();
}
return PM_FI2P (markerVal->twinMarker)->nextInSequence == NULL_IDX;
}
Coordinate getPassageMarkerStart(PassageMarkerI marker)
{
return PM_FI2P (marker)->start;
}
void setPassageMarkerStart(PassageMarkerI marker, Coordinate start)
{
PM_FI2P (marker)->start = start;
}
Coordinate getPassageMarkerFinish(PassageMarkerI marker)
{
/* the twin's start is this marker's finish; -10 marks an uncertain coordinate */
return PM_FI2P (PM_FI2P (marker)->twinMarker)->start;
}
void setPassageMarkerFinish(PassageMarkerI marker, Coordinate finish)
{
/* -10 is the sentinel for an uncertain coordinate and is stored as-is */
PM_FI2P (PM_FI2P (marker)->twinMarker)->start = finish;
}
Coordinate getPassageMarkerLength(PassageMarkerI marker)
{
PassageMarker *markerVal;
PassageMarker *twinMarkerVal;
markerVal = PM_FI2P (marker);
twinMarkerVal = PM_FI2P (markerVal->twinMarker);
if (markerVal->start == -10 || twinMarkerVal->start == -10)
return 0;
else if (markerVal->sequenceID > 0)
return twinMarkerVal->start - markerVal->start;
else
return markerVal->start - twinMarkerVal->start;
}
int passageMarkerDirection(PassageMarkerI marker)
{
if (PM_FI2P (marker)->sequenceID > 0)
return 1;
else
return -1;
}
PassageMarkerI addUncertainPassageMarker(IDnum sequenceID, Node * node)
{
PassageMarkerI marker = allocatePassageMarker();
PassageMarkerI twinMarker = allocatePassageMarker();
PassageMarker *markerVal = PM_FI2P (marker);
PassageMarker *twinMarkerVal = PM_FI2P (twinMarker);
markerVal->sequenceID = sequenceID;
markerVal->start = -10;
markerVal->node = node;
markerVal->nextInSequence = NULL_IDX;
markerVal->finishOffset = 0;
markerVal->twinMarker = twinMarker;
markerVal->status = false;
twinMarkerVal->sequenceID = -sequenceID;
twinMarkerVal->start = -10;
if (node == NULL)
twinMarkerVal->node = NULL;
else
twinMarkerVal->node = getTwinNode(node);
twinMarkerVal->nextInSequence = NULL_IDX;
twinMarkerVal->finishOffset = 0;
twinMarkerVal->twinMarker = marker;
twinMarkerVal->status = false;
if (node != NULL) {
setNextInNode(marker, getMarker(node));
setMarker(node, marker);
}
return marker;
}
PassageMarkerList *newPassageMarkerList(PassageMarkerI marker,
PassageMarkerList * next)
{
PassageMarkerList *list = allocatePassageMarkerList();
list->marker = marker;
list->next = next;
return list;
}
PassageMarkerI newPassageMarker(IDnum seqID, Coordinate start,
Coordinate finish, Coordinate startOffset,
Coordinate finishOffset)
{
PassageMarkerI marker;
PassageMarkerI twinMarker;
PassageMarker *markerVal;
PassageMarker *twinMarkerVal;
#ifdef _OPENMP
#pragma omp critical
{
#endif
marker = allocatePassageMarker();
twinMarker = allocatePassageMarker();
#ifdef _OPENMP
}
#endif
markerVal = PM_FI2P (marker);
twinMarkerVal = PM_FI2P (twinMarker);
// velvetLog("Values %d\t%d\t%d\t%d\t%d\n", seqID, start, finish, startOffset, finishOffset);
markerVal->sequenceID = seqID;
markerVal->node = NULL;
markerVal->nextInSequence = NULL_IDX;
markerVal->twinMarker = twinMarker;
markerVal->nextInNode = NULL_IDX;
markerVal->status = false;
twinMarkerVal->sequenceID = -seqID;
twinMarkerVal->node = NULL;
twinMarkerVal->nextInSequence = NULL_IDX;
twinMarkerVal->twinMarker = marker;
twinMarkerVal->nextInNode = NULL_IDX;
twinMarkerVal->status = false;
setPassageMarkerStart(marker, start);
setPassageMarkerFinish(marker, finish);
setStartOffset(marker, startOffset);
setFinishOffset(marker, finishOffset);
if (getPassageMarkerLength(marker) < 0) {
velvetLog("Negative marker %ld %lld %lld %lld\n",
(long) getPassageMarkerSequenceID(marker),
(long long) getPassageMarkerStart(marker),
(long long) getPassageMarkerFinish(marker),
(long long) getPassageMarkerLength(marker));
abort();
}
return marker;
}
void exportMarker(FILE * outfile, PassageMarkerI marker,
TightString * sequences, int WORDLENGTH)
{
PassageMarker *markerVal = PM_FI2P (marker);
PassageMarkerI current;
if (markerVal->sequenceID > 0) {
if (!isInitial(marker)) {
return;
}
current = marker;
} else {
if (!isTerminal(marker)) {
return;
}
current = markerVal->twinMarker;
}
velvetFprintf(outfile, "SEQ\t%li\n", (long) PM_FI2P (current)->sequenceID);
for (; current != NULL_IDX; current = PM_FI2P (current)->nextInSequence) {
velvetFprintf(outfile, "%ld\t%lld\t%lld\t%lld\t%lld",
(long) getNodeID(PM_FI2P (current)->node), (long long) getStartOffset(current),
(long long) getPassageMarkerStart(current),
(long long) getPassageMarkerFinish(current),
(long long) getFinishOffset(current));
velvetFprintf(outfile, "\n");
}
}
|
omp-sumof-elements.c |
/*****************************************************************************
Example 1.4 : omp-sumof-elements.c
Objective : Write an OpenMP program to find Sum Of Elements
of One-Dimensional real array.
This example demonstrates the use of OpenMP
Parallel For Directive And Critical Section.
Input : Size of an array
Number of threads
Output : Sum of array elements
Created : Aug 2011
Author : RarchK
*********************************************************************************/
#include<stdio.h>
#include<omp.h>
#include<stdlib.h>
/* Main Program */
int main(int argc, char **argv)
{
double *Array, *Check, serial_sum, sum;
int array_size, i, Noofthreads;
printf("\n\t\t---------------------------------------------------------------------------");
printf("\n\t\t Email : RarchK");
printf("\n\t\t---------------------------------------------------------------------------");
printf("\n\t\t Objective :Find the Sum of elements of one-dimensional real array. ");
printf("\n\t\t OpenMP Parallel for directive and Critical Section are used ");
printf("\n\t\t..........................................................................\n");
/* Checking for command line arguments */
if( argc != 3 ){
printf("\t\t Very Few Arguments\n ");
printf("\t\t Syntax : exec <Threads> <array-size>\n");
exit(-1);
}
Noofthreads=atoi(argv[1]);
if ((Noofthreads!=1) && (Noofthreads!=2) && (Noofthreads!=4) && (Noofthreads!=8) && (Noofthreads!= 16) ) {
printf("\n Number of threads should be 1,2,4,8 or 16 for the execution of program. \n\n");
exit(-1);
}
array_size=atoi(argv[2]);
/*printf("\n\t\t Enter the size of the array\n");
scanf("%d", &array_size); */
/* Array Size should be positive integer */
if (array_size <= 0) {
printf("\n\t\t Array Size Should Be Of Positive Value ");
exit(1);
}
printf("\n\t\t Threads : %d ",Noofthreads);
printf("\n\t\t Array Size : %d ",array_size);
/* Dynamic Memory Allocation */
Array = (double *) malloc(sizeof(double) * array_size);
Check = (double *) malloc(sizeof(double) * array_size);
/* Array Elements Initialization */
for (i = 0; i < array_size; i++) {
Array[i] = i * 5;
Check[i] = Array[i];
}
sum=0.0;
/* set the number of threads */
omp_set_num_threads(Noofthreads);
/* OpenMP Parallel For Directive And Critical Section */
#pragma omp parallel for
for (i = 0; i < array_size; i++)
{
/* printf("the thread num and its iteration is %d %d \n",omp_get_thread_num(),i); */
#pragma omp critical
sum = sum + Array[i];
} /* End of parallel region */
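/*
* Note: the critical section above serialises every accumulation. A minimal
* alternative sketch (not part of the original example) keeps a private
* partial sum per thread via an OpenMP reduction clause:
*
* double red_sum = 0.0;
* #pragma omp parallel for reduction(+:red_sum)
* for (i = 0; i < array_size; i++)
* red_sum = red_sum + Array[i];
*/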
serial_sum = 0.0;
/* Serial Calculation */
for (i = 0; i < array_size; i++)
serial_sum = serial_sum + Check[i];
if (serial_sum == sum)
printf("\n\n\t\t The Serial And Parallel Sums Are Equal\n");
else {
printf("\n\\nt\t The Serial And Parallel Sums Are UnEqual\n");
exit(1);
}
/* Freeing Memory */
free(Check);
free(Array);
printf("\n\t\t The SumOfElements Of The Array Using OpenMP Directives Is %lf\n", sum);
printf("\t\t The SumOfElements Of The Array By Serial Calculation Is %lf\n\n", serial_sum);
printf("\n\t\t..........................................................................\n");
}
|
reduction-3.c | #include <omp.h>
#include <stdlib.h>
int
main (void)
{
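/* Combined-reduction check: a single construct seeds i, d and k, the loop
below folds one per-thread contribution into the +, * and & reductions, and
the final values are compared against the expected totals; any mismatch sets
a bit in j and the program aborts. */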
int i = 0, j = 0, k = ~0, l;
double d = 1.0;
#pragma omp parallel num_threads(4)
{
#pragma omp single
{
i = 16;
k ^= (1 << 16);
d += 32.0;
}
#pragma omp for reduction(+:i) reduction(*:d) reduction(&:k) nowait
for (l = 0; l < 4; l++)
{
if (omp_get_num_threads () == 4 && (i != 0 || d != 1.0 || k != ~0))
#pragma omp atomic
j |= 1;
if (l == omp_get_thread_num ())
{
i = omp_get_thread_num ();
d = i + 1;
k = ~(1 << (2 * i));
}
}
if (omp_get_num_threads () == 4)
{
#pragma omp barrier
if (i != (16 + 0 + 1 + 2 + 3))
#pragma omp atomic
j |= 2;
if (d != (33.0 * 1.0 * 2.0 * 3.0 * 4.0))
#pragma omp atomic
j |= 4;
if (k != (~0 ^ 0x55 ^ (1 << 16)))
#pragma omp atomic
j |= 8;
}
}
if (j)
abort ();
return 0;
}
|
rc.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include "common.h"
/** arbitrary maximum number of rules */
#define MAXRULES 10
/** represents a rule:
greater and lower are the colour components being compared;
factor controls how strongly one of the components is grown */
typedef struct {
RC_RGB_t greater, lower;
float factor;
} rule_t;
/** the rules */
static rule_t rules[MAXRULES];
static size_t nrules = 0;
/** used to add a new rule */
void recolour_addRule(
RC_RGB_t greater,
RC_RGB_t lower,
float factor)
{
if(nrules < MAXRULES) {
rules[nrules].greater = greater;
rules[nrules].lower = lower;
rules[nrules].factor = factor;
nrules++;
} else {
fprintf(stderr, "Too many rules!\n");
abort();
}
}
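/* Usage sketch (hypothetical colour constants; the actual RC_RGB_t values are
defined in common.h): grow the red component wherever it already exceeds
blue, by a factor of 1.5:
recolour_addRule(RC_RED, RC_BLUE, 1.5f);
*/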
static inline void _process(pixel_t pin, pixel_t* p)
{
size_t i = 0;
*p = pin;
for(i = 0; i < nrules; ++i) {
rule_t rule = rules[i];
// if the factor is >1, raise the greater component
// else lower the lower component by 1/factor amount
if(rule.factor >= 1.0) {
uint8_t greater = GET(*p, rule.greater);
uint8_t lower = GET(pin, rule.lower);
if(greater > lower) {
float diff = rule.factor * (float)(greater - lower);
//printf(" from %d", GET(pin, rule.greater));
GET(*p, rule.greater) = (uint8_t)SUP(lower + diff, 255.f);
//printf(" to %d by %f\n", GET(*p, rule.greater), diff);
}
} else {
uint8_t greater = GET(pin, rule.greater);
uint8_t lower = GET(*p, rule.lower);
if(greater > lower) {
float diff = (1.f / rule.factor) * (float)(greater - lower);
GET(*p, rule.lower) = (uint8_t)INF(greater - diff, 0.f);
}
}
}
}
typedef struct {
size_t i;
img_t in, out;
} tdata_t;
static void _tprocess(void* data)
{
tdata_t* mydata = (tdata_t*)data;
size_t j;
for(j = 0; j < mydata->in.w; ++j) {
_process(A(mydata->in, mydata->i, j), &A(mydata->out, mydata->i, j));
}
free(mydata);
}
/** apply a colour transformation based on relationships between colour
components (RGB);
rules are added beforehand with recolour_addRule */
img_t recolour(img_t const img)
{
img_t ret = { img.w, img.h, (pixel_t*)malloc(img.w * img.h * sizeof(pixel_t)) };
size_t i;
#pragma omp parallel for
for(i = 0; i < img.h; ++i) {
tdata_t* data = (tdata_t*)malloc(sizeof(tdata_t));
data->i = i;
data->in = img;
data->out = ret;
_tprocess(data);
}
return ret;
}
|
common.h | #ifndef LIGHTGBM_UTILS_COMMON_FUN_H_
#define LIGHTGBM_UTILS_COMMON_FUN_H_
#include <LightGBM/utils/log.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <cstdio>
#include <string>
#include <vector>
#include <sstream>
#include <cstdint>
#include <algorithm>
#include <cmath>
#include <functional>
#include <memory>
#include <iterator>
#include <type_traits>
#include <iomanip>
#include <limits>
namespace LightGBM {
namespace Common {
inline char tolower(char in) {
if (in <= 'Z' && in >= 'A')
return in - ('Z' - 'z');
return in;
}
inline static std::string& Trim(std::string& str) {
if (str.empty()) {
return str;
}
str.erase(str.find_last_not_of(" \f\n\r\t\v") + 1);
str.erase(0, str.find_first_not_of(" \f\n\r\t\v"));
return str;
}
inline static std::string& RemoveQuotationSymbol(std::string& str) {
if (str.empty()) {
return str;
}
str.erase(str.find_last_not_of("'\"") + 1);
str.erase(0, str.find_first_not_of("'\""));
return str;
}
inline static bool StartsWith(const std::string& str, const std::string prefix) {
if (str.substr(0, prefix.size()) == prefix) {
return true;
} else {
return false;
}
}
inline static std::vector<std::string> Split(const char* c_str, char delimiter) {
std::vector<std::string> ret;
std::string str(c_str);
size_t i = 0;
size_t pos = 0;
while (pos < str.length()) {
if (str[pos] == delimiter) {
if (i < pos) {
ret.push_back(str.substr(i, pos - i));
}
++pos;
i = pos;
} else {
++pos;
}
}
if (i < pos) {
ret.push_back(str.substr(i));
}
return ret;
}
inline static std::vector<std::string> SplitLines(const char* c_str) {
std::vector<std::string> ret;
std::string str(c_str);
size_t i = 0;
size_t pos = 0;
while (pos < str.length()) {
if (str[pos] == '\n' || str[pos] == '\r') {
if (i < pos) {
ret.push_back(str.substr(i, pos - i));
}
// skip the line endings
while (str[pos] == '\n' || str[pos] == '\r') ++pos;
// new begin
i = pos;
} else {
++pos;
}
}
if (i < pos) {
ret.push_back(str.substr(i));
}
return ret;
}
inline static std::vector<std::string> Split(const char* c_str, const char* delimiters) {
std::vector<std::string> ret;
std::string str(c_str);
size_t i = 0;
size_t pos = 0;
while (pos < str.length()) {
bool met_delimiters = false;
for (int j = 0; delimiters[j] != '\0'; ++j) {
if (str[pos] == delimiters[j]) {
met_delimiters = true;
break;
}
}
if (met_delimiters) {
if (i < pos) {
ret.push_back(str.substr(i, pos - i));
}
++pos;
i = pos;
} else {
++pos;
}
}
if (i < pos) {
ret.push_back(str.substr(i));
}
return ret;
}
inline static std::string FindFromLines(const std::vector<std::string>& lines, const char* key_word) {
for (auto& line : lines) {
size_t find_pos = line.find(key_word);
if (find_pos != std::string::npos) {
return line;
}
}
return "";
}
inline static const char* Atoi(const char* p, int* out) {
int sign, value;
while (*p == ' ') {
++p;
}
sign = 1;
if (*p == '-') {
sign = -1;
++p;
} else if (*p == '+') {
++p;
}
for (value = 0; *p >= '0' && *p <= '9'; ++p) {
value = value * 10 + (*p - '0');
}
*out = sign * value;
while (*p == ' ') {
++p;
}
return p;
}
inline static const char* Atof(const char* p, double* out) {
int frac;
double sign, value, scale;
*out = NAN;
// Skip leading white space, if any.
while (*p == ' ') {
++p;
}
// Get sign, if any.
sign = 1.0;
if (*p == '-') {
sign = -1.0;
++p;
} else if (*p == '+') {
++p;
}
// is a number
if ((*p >= '0' && *p <= '9') || *p == '.' || *p == 'e' || *p == 'E') {
// Get digits before decimal point or exponent, if any.
for (value = 0.0; *p >= '0' && *p <= '9'; ++p) {
value = value * 10.0 + (*p - '0');
}
// Get digits after decimal point, if any.
if (*p == '.') {
double pow10 = 10.0;
++p;
while (*p >= '0' && *p <= '9') {
value += (*p - '0') / pow10;
pow10 *= 10.0;
++p;
}
}
// Handle exponent, if any.
frac = 0;
scale = 1.0;
if ((*p == 'e') || (*p == 'E')) {
uint32_t expon;
// Get sign of exponent, if any.
++p;
if (*p == '-') {
frac = 1;
++p;
} else if (*p == '+') {
++p;
}
// Get digits of exponent, if any.
for (expon = 0; *p >= '0' && *p <= '9'; ++p) {
expon = expon * 10 + (*p - '0');
}
if (expon > 308) expon = 308;
// Calculate scaling factor.
while (expon >= 50) { scale *= 1E50; expon -= 50; }
while (expon >= 8) { scale *= 1E8; expon -= 8; }
while (expon > 0) { scale *= 10.0; expon -= 1; }
}
// Return signed and scaled floating point result.
*out = sign * (frac ? (value / scale) : (value * scale));
} else {
size_t cnt = 0;
while (*(p + cnt) != '\0' && *(p + cnt) != ' '
&& *(p + cnt) != '\t' && *(p + cnt) != ','
&& *(p + cnt) != '\n' && *(p + cnt) != '\r'
&& *(p + cnt) != ':') {
++cnt;
}
if (cnt > 0) {
std::string tmp_str(p, cnt);
std::transform(tmp_str.begin(), tmp_str.end(), tmp_str.begin(), Common::tolower);
if (tmp_str == std::string("na") || tmp_str == std::string("nan")) {
*out = NAN;
} else if (tmp_str == std::string("inf") || tmp_str == std::string("infinity")) {
*out = sign * 1e308;
} else {
Log::Fatal("Unknown token %s in data file", tmp_str.c_str());
}
p += cnt;
}
}
while (*p == ' ') {
++p;
}
return p;
}
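// Worked example for Atof above: parsing "3.5e2 x" leaves *out == 350.0 and
// returns a pointer to 'x' (digits, fraction and exponent are handled in turn,
// then trailing spaces are skipped).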
inline bool AtoiAndCheck(const char* p, int* out) {
const char* after = Atoi(p, out);
if (*after != '\0') {
return false;
}
return true;
}
inline bool AtofAndCheck(const char* p, double* out) {
const char* after = Atof(p, out);
if (*after != '\0') {
return false;
}
return true;
}
inline static const char* SkipSpaceAndTab(const char* p) {
while (*p == ' ' || *p == '\t') {
++p;
}
return p;
}
inline static const char* SkipReturn(const char* p) {
while (*p == '\n' || *p == '\r' || *p == ' ') {
++p;
}
return p;
}
template<typename T, typename T2>
inline static std::vector<T2> ArrayCast(const std::vector<T>& arr) {
std::vector<T2> ret;
for (size_t i = 0; i < arr.size(); ++i) {
ret.push_back(static_cast<T2>(arr[i]));
}
return ret;
}
template<typename T>
inline static std::string ArrayToString(const std::vector<T>& arr, char delimiter) {
if (arr.empty()) {
return std::string("");
}
std::stringstream str_buf;
str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
str_buf << arr[0];
for (size_t i = 1; i < arr.size(); ++i) {
str_buf << delimiter;
str_buf << arr[i];
}
return str_buf.str();
}
template<typename T>
inline static std::string ArrayToString(const std::vector<T>& arr, size_t n, char delimiter) {
if (arr.empty() || n == 0) {
return std::string("");
}
std::stringstream str_buf;
str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
str_buf << arr[0];
for (size_t i = 1; i < std::min(n, arr.size()); ++i) {
str_buf << delimiter;
str_buf << arr[i];
}
return str_buf.str();
}
template<typename T, bool is_float>
struct __StringToTHelper {
T operator()(const std::string& str) const {
return static_cast<T>(std::stoll(str));
}
};
template<typename T>
struct __StringToTHelper<T, true> {
T operator()(const std::string& str) const {
return static_cast<T>(std::stod(str));
}
};
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, char delimiter, size_t n) {
if (n == 0) {
return std::vector<T>();
}
std::vector<std::string> strs = Split(str.c_str(), delimiter);
if (strs.size() != n) {
Log::Fatal("StringToArray error, size doesn't match.");
}
std::vector<T> ret(n);
__StringToTHelper<T, std::is_floating_point<T>::value> helper;
for (size_t i = 0; i < n; ++i) {
ret[i] = helper(strs[i]);
}
return ret;
}
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, char delimiter) {
std::vector<std::string> strs = Split(str.c_str(), delimiter);
std::vector<T> ret;
ret.reserve(strs.size());
__StringToTHelper<T, std::is_floating_point<T>::value> helper;
for (const auto& s : strs) {
ret.push_back(helper(s));
}
return ret;
}
template<typename T>
inline static std::string Join(const std::vector<T>& strs, const char* delimiter) {
if (strs.empty()) {
return std::string("");
}
std::stringstream str_buf;
str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
str_buf << strs[0];
for (size_t i = 1; i < strs.size(); ++i) {
str_buf << delimiter;
str_buf << strs[i];
}
return str_buf.str();
}
template<typename T>
inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter) {
if (end <= start) {
return std::string("");
}
start = std::min(start, static_cast<size_t>(strs.size()) - 1);
end = std::min(end, static_cast<size_t>(strs.size()));
std::stringstream str_buf;
str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
str_buf << strs[start];
for (size_t i = start + 1; i < end; ++i) {
str_buf << delimiter;
str_buf << strs[i];
}
return str_buf.str();
}
static inline int64_t Pow2RoundUp(int64_t x) {
int64_t t = 1;
for (int i = 0; i < 64; ++i) {
if (t >= x) {
return t;
}
t <<= 1;
}
return 0;
}
/*!
* \brief Do inplace softmax transformation on p_rec
* \param p_rec The input/output vector of the values.
*/
inline void Softmax(std::vector<double>* p_rec) {
std::vector<double> &rec = *p_rec;
double wmax = rec[0];
for (size_t i = 1; i < rec.size(); ++i) {
wmax = std::max(rec[i], wmax);
}
double wsum = 0.0f;
for (size_t i = 0; i < rec.size(); ++i) {
rec[i] = std::exp(rec[i] - wmax);
wsum += rec[i];
}
for (size_t i = 0; i < rec.size(); ++i) {
rec[i] /= static_cast<double>(wsum);
}
}
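// Example: Softmax on {1.0, 1.0, 1.0} yields {1/3, 1/3, 1/3}; subtracting wmax
// before exponentiating only guards against overflow and does not change the
// result.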
inline void Softmax(const double* input, double* output, int len) {
double wmax = input[0];
for (int i = 1; i < len; ++i) {
wmax = std::max(input[i], wmax);
}
double wsum = 0.0f;
for (int i = 0; i < len; ++i) {
output[i] = std::exp(input[i] - wmax);
wsum += output[i];
}
for (int i = 0; i < len; ++i) {
output[i] /= static_cast<double>(wsum);
}
}
template<typename T>
std::vector<const T*> ConstPtrInVectorWrapper(const std::vector<std::unique_ptr<T>>& input) {
std::vector<const T*> ret;
for (size_t i = 0; i < input.size(); ++i) {
ret.push_back(input.at(i).get());
}
return ret;
}
template<typename T1, typename T2>
inline void SortForPair(std::vector<T1>& keys, std::vector<T2>& values, size_t start, bool is_reverse = false) {
std::vector<std::pair<T1, T2>> arr;
for (size_t i = start; i < keys.size(); ++i) {
arr.emplace_back(keys[i], values[i]);
}
if (!is_reverse) {
std::sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) {
return a.first < b.first;
});
} else {
std::sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) {
return a.first > b.first;
});
}
for (size_t i = start; i < arr.size(); ++i) {
keys[i] = arr[i].first;
values[i] = arr[i].second;
}
}
/*
* approximate hessians of absolute loss with Gaussian function
* cf. https://en.wikipedia.org/wiki/Gaussian_function
*
* y is a prediction.
* t means true target.
* g means gradient.
* eta is a parameter to control the width of Gaussian function.
* w means weights.
*/
inline static double ApproximateHessianWithGaussian(const double y, const double t, const double g,
const double eta, const double w=1.0f) {
const double diff = y - t;
const double pi = 4.0 * std::atan(1.0);
const double x = std::fabs(diff);
const double a = 2.0 * std::fabs(g) * w; // difference of two first derivatives, (zero to inf) and (zero to -inf).
const double b = 0.0;
const double c = std::max((std::fabs(y) + std::fabs(t)) * eta, 1.0e-10);
return w * std::exp(-(x - b) * (x - b) / (2.0 * c * c)) * a / (c * std::sqrt(2 * pi));
}
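// For absolute (L1) loss the gradient g is +/-1, so a = 2w and the surrogate
// hessian above is a Gaussian bump of width c centred at y == t: it is largest
// when the prediction matches the target and decays as |y - t| grows.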
template <typename T>
inline static std::vector<T*> Vector2Ptr(std::vector<std::vector<T>>& data) {
std::vector<T*> ptr(data.size());
for (size_t i = 0; i < data.size(); ++i) {
ptr[i] = data[i].data();
}
return ptr;
}
template <typename T>
inline static std::vector<int> VectorSize(const std::vector<std::vector<T>>& data) {
std::vector<int> ret(data.size());
for (size_t i = 0; i < data.size(); ++i) {
ret[i] = static_cast<int>(data[i].size());
}
return ret;
}
inline static double AvoidInf(double x) {
if (x >= 1e300) {
return 1e300;
} else if(x <= -1e300) {
return -1e300;
} else {
return x;
}
}
template<class _Iter> inline
static typename std::iterator_traits<_Iter>::value_type* IteratorValType(_Iter) {
return (0);
}
template<class _RanIt, class _Pr, class _VTRanIt> inline
static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred, _VTRanIt*) {
size_t len = _Last - _First;
const size_t kMinInnerLen = 1024;
int num_threads = 1;
#pragma omp parallel
#pragma omp master
{
num_threads = omp_get_num_threads();
}
if (len <= kMinInnerLen || num_threads <= 1) {
std::sort(_First, _Last, _Pred);
return;
}
size_t inner_size = (len + num_threads - 1) / num_threads;
inner_size = std::max(inner_size, kMinInnerLen);
num_threads = static_cast<int>((len + inner_size - 1) / inner_size);
#pragma omp parallel for schedule(static,1)
for (int i = 0; i < num_threads; ++i) {
size_t left = inner_size*i;
size_t right = left + inner_size;
right = std::min(right, len);
if (right > left) {
std::sort(_First + left, _First + right, _Pred);
}
}
// Buffer for merge.
std::vector<_VTRanIt> temp_buf(len);
_RanIt buf = temp_buf.begin();
size_t s = inner_size;
// Recursive merge
while (s < len) {
int loop_size = static_cast<int>((len + s * 2 - 1) / (s * 2));
#pragma omp parallel for schedule(static,1)
for (int i = 0; i < loop_size; ++i) {
size_t left = i * 2 * s;
size_t mid = left + s;
size_t right = mid + s;
right = std::min(len, right);
if (mid >= right) { continue; }
std::copy(_First + left, _First + mid, buf + left);
std::merge(buf + left, buf + mid, _First + mid, _First + right, _First + left, _Pred);
}
s *= 2;
}
}
template<class _RanIt, class _Pr> inline
static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred) {
return ParallelSort(_First, _Last, _Pred, IteratorValType(_First));
}
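// Minimal usage sketch (illustrative data only): sort a vector with the
// default comparator; ParallelSort falls back to std::sort for short ranges
// or a single thread.
//   std::vector<int> v = {5, 1, 4, 2, 3};
//   Common::ParallelSort(v.begin(), v.end(), std::less<int>());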
// Check that all y[] are in interval [ymin, ymax] (end points included); throws error if not
inline void check_elements_interval_closed(const float *y, float ymin, float ymax, int ny, const char *callername) {
for (int i = 0; i < ny; ++i) {
if (y[i] < ymin || y[i] > ymax) {
Log::Fatal("[%s]: does not tolerate element [#%i = %f] outside [%f, %f]", callername, i, y[i], ymin, ymax);
}
}
}
// One-pass scan over array w with nw elements: find min, max and sum of elements;
// this is useful for checking weight requirements.
inline void obtain_min_max_sum(const float *w, int nw, float *mi, float *ma, double *su) {
float minw = w[0];
float maxw = w[0];
double sumw = static_cast<double>(w[0]);
for (int i = 1; i < nw; ++i) {
sumw += w[i];
if (w[i] < minw) minw = w[i];
if (w[i] > maxw) maxw = w[i];
}
if (mi != nullptr) *mi = minw;
if (ma != nullptr) *ma = maxw;
if (su != nullptr) *su = sumw;
}
template<class T>
inline std::vector<uint32_t> ConstructBitset(const T* vals, int n) {
std::vector<uint32_t> ret;
for (int i = 0; i < n; ++i) {
int i1 = vals[i] / 32;
int i2 = vals[i] % 32;
if (static_cast<int>(ret.size()) < i1 + 1) {
ret.resize(i1 + 1, 0);
}
ret[i1] |= (1 << i2);
}
return ret;
}
template<class T>
inline bool FindInBitset(const uint32_t* bits, int n, T pos) {
int i1 = pos / 32;
if (i1 >= n) {
return false;
}
int i2 = pos % 32;
return (bits[i1] >> i2) & 1;
}
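// Worked example: ConstructBitset on vals = {1, 35} produces {0x2, 0x8}
// (bit 1 of word 0 and bit 3 of word 1); FindInBitset(bits, 2, 35) then
// returns true while FindInBitset(bits, 2, 2) returns false.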
} // namespace Common
} // namespace LightGBM
#endif // LightGBM_UTILS_COMMON_FUN_H_
|
bounds.c | /*---------------------------------------------------------------------------------
BOUNDS.C
-Implements physical boundary conditions
-Ensure no inflow at radial boundaries
-Ensure radial mass flux at radial boundaries is zero
-B2 flux at X1 and X3 faces at polar boundaries is reflected for ghost zones
-All X2 fluxes at polar boundaries are zeroed
---------------------------------------------------------------------------------*/
#include "decs.h"
// Sanity checks: grid dimensions, supported boundary conditions
#if N2 > 1 && N2 < NG
#error "N2 must be >= NG"
#endif
#if X1L_BOUND != PERIODIC && X1L_BOUND != OUTFLOW
#error "Unsupported X1L_BOUND"
#endif
#if X1R_BOUND != PERIODIC && X1R_BOUND != OUTFLOW && X1R_BOUND != USER
#error "Unsupported X1R_BOUND"
#endif
#if X2L_BOUND != PERIODIC && X2L_BOUND != OUTFLOW && X2L_BOUND != POLAR
#error "Unsupported X2L_BOUND"
#endif
#if X2R_BOUND != PERIODIC && X2R_BOUND != OUTFLOW && X2R_BOUND != POLAR
#error "Unsupported X2R_BOUND"
#endif
void inflow_check(struct GridGeom *G, struct FluidState *S, int i, int j, int type);
// Apply boundary conditions along X1 and X2
void set_bounds(struct GridGeom *G, struct FluidState *S)
{
timer_start(TIMER_BOUND);
#if !INTEL_WORKAROUND
#pragma omp parallel for
#endif
JLOOP
{
ISLOOP(-NG, -1)
{
#if N1 < NG
int iactive = NG;
PLOOP S->P[ip][j][i] = S->P[ip][j][iactive];
pflag[j][i] = pflag[j][iactive];
#elif X1L_BOUND == OUTFLOW
int iz = 0 + NG;
PLOOP S->P[ip][j][i] = S->P[ip][j][iz];
pflag[j][i] = pflag[j][iz];
double rescale = G->gdet[CENT][j][iz]/G->gdet[CENT][j][i];
S->P[B1][j][i] *= rescale;
S->P[B2][j][i] *= rescale;
S->P[B3][j][i] *= rescale;
#elif X1L_BOUND == PERIODIC
int iz = N1 + i;
PLOOP S->P[ip][j][i] = S->P[ip][j][iz];
pflag[j][i] = pflag[j][iz];
#endif
}
}
#if METRIC == MKS
if(X1L_INFLOW == 0)
{
// Make sure there is no inflow at the inner boundary
#if !INTEL_WORKAROUND
#pragma omp parallel for
#endif
JLOOP
ISLOOP(-NG, -1)
inflow_check(G, S, i, j, 0);
}
#endif
#if !INTEL_WORKAROUND
#pragma omp parallel for
#endif
JLOOP
{
ISLOOP(N1, N1 - 1 + NG)
{
#if N1 < NG
int iactive = N1 - 1 + NG;
PLOOP S->P[ip][j][i] = S->P[ip][j][iactive];
pflag[j][i] = pflag[j][iactive];
#elif X1R_BOUND == OUTFLOW
int iz = N1 - 1 + NG;
PLOOP S->P[ip][j][i] = S->P[ip][j][iz];
pflag[j][i] = pflag[j][iz];
double rescale = G->gdet[CENT][j][iz]/G->gdet[CENT][j][i];
S->P[B1][j][i] *= rescale;
S->P[B2][j][i] *= rescale;
S->P[B3][j][i] *= rescale;
#elif X1R_BOUND == USER
bound_gas_prob_x1r(i, j, S->P, G);
#elif X1R_BOUND == PERIODIC
int iz = i - N1;
PLOOP S->P[ip][j][i] = S->P[ip][j][iz];
pflag[j][i] = pflag[j][iz];
#endif
}
}
#if METRIC == MKS
if(X1R_INFLOW == 0)
{
// Make sure there is no inflow at the outer boundary
#if !INTEL_WORKAROUND
#pragma omp parallel for
#endif
JLOOP
ISLOOP(N1, N1 - 1 + NG)
inflow_check(G, S, i, j, 1);
}
#endif
#if !INTEL_WORKAROUND
#pragma omp parallel for
#endif
ILOOPALL
{
JSLOOP(-NG, -1)
{
#if N2 < NG
int jactive = NG;
PLOOP S->P[ip][j][i] = S->P[ip][jactive][i];
pflag[j][i] = pflag[jactive][i];
#elif X2L_BOUND == OUTFLOW
int jz = 0 + NG ;
PLOOP S->P[ip][j][i] = S->P[ip][jz][i];
pflag[j][i] = pflag[jz][i];
#elif X2L_BOUND == POLAR
// Reflect the zone past NG by NG-j
int jrefl = NG + (NG - j) - 1;
PLOOP S->P[ip][j][i] = S->P[ip][jrefl][i];
pflag[j][i] = pflag[jrefl][i];
S->P[U2][j][i] *= -1.;
S->P[B2][j][i] *= -1.;
#elif X2L_BOUND == PERIODIC
int jz = N2 + j;
PLOOP S->P[ip][j][i] = S->P[ip][jz][i];
pflag[j][i] = pflag[jz][i];
#endif
}
}
#if !INTEL_WORKAROUND
#pragma omp parallel for
#endif
ILOOPALL
{
JSLOOP(N2, N2-1+NG)
{
#if N2 < NG
int jactive = N2 - 1 + NG;
PLOOP S->P[ip][j][i] = S->P[ip][jactive][i];
pflag[j][i] = pflag[jactive][i];
#elif X2R_BOUND == OUTFLOW
int jz = N2 - 1 + NG;
PLOOP S->P[ip][j][i] = S->P[ip][jz][i];
pflag[j][i] = pflag[jz][i];
#elif X2R_BOUND == POLAR
// As j grows beyond N2+NG, reflect the zone that far previous
int jrefl = (N2 + NG) + (N2 + NG - j) - 1;
PLOOP S->P[ip][j][i] = S->P[ip][jrefl][i];
pflag[j][i] = pflag[jrefl][i];
S->P[U2][j][i] *= -1.;
S->P[B2][j][i] *= -1.;
#elif X2R_BOUND == PERIODIC
int jz = j - N2;
PLOOP S->P[ip][j][i] = S->P[ip][jz][i];
pflag[j][i] = pflag[jz][i];
#endif
}
}
timer_stop(TIMER_BOUND);
}
#if METRIC == MKS
void inflow_check(struct GridGeom *G, struct FluidState *S, int i, int j, int type)
{
double alpha, beta1, vsq;
ucon_calc(G, S, i, j, CENT);
if (((S->ucon[1][j][i] > 0.) && (type == 0)) ||
((S->ucon[1][j][i] < 0.) && (type == 1)))
{
// Find gamma and remove it from the primitive velocities
double gamma = mhd_gamma_calc(G, S, i, j, CENT);
S->P[U1][j][i] /= gamma;
S->P[U2][j][i] /= gamma;
S->P[U3][j][i] /= gamma;
alpha = G->lapse[CENT][j][i];
beta1 = G->gcon[CENT][0][1][j][i]*alpha*alpha;
// Reset radial velocity so radial 4-velocity is zero
S->P[U1][j][i] = beta1/alpha;
// Now find new gamma and put it back in
vsq = 0.;
for (int mu = 1; mu < NDIM; mu++)
{
for (int nu = 1; nu < NDIM; nu++)
{
vsq += G->gcov[CENT][mu][nu][j][i]*S->P[U1+mu-1][j][i]*S->P[U1+nu-1][j][i];
}
}
if (fabs(vsq) < 1.e-13)
vsq = 1.e-13;
if (vsq >= 1.)
vsq = 1. - 1./(GAMMAMAX*GAMMAMAX);
gamma = 1./sqrt(1. - vsq);
S->P[U1][j][i] *= gamma;
S->P[U2][j][i] *= gamma;
S->P[U3][j][i] *= gamma;
}
}
void fix_flux(struct FluidFlux *F)
{
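/* As described in the header comment: clamp the radial mass flux so nothing
flows in through the X1 boundaries, then reflect the B2 flux and zero all
X2 fluxes in the ghost zones at the two polar boundaries. */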
if (X1L_INFLOW == 0)
{
#if !INTEL_WORKAROUND
#pragma omp parallel for
#endif
JLOOPALL
F->X1[RHO][j][0+NG] = MY_MIN(F->X1[RHO][j][0+NG], 0.);
}
if (X1R_INFLOW == 0)
{
#if !INTEL_WORKAROUND
#pragma omp parallel for
#endif
JLOOPALL
F->X1[RHO][j][N1+NG] = MY_MAX(F->X1[RHO][j][N1+NG], 0.);
}
#if !INTEL_WORKAROUND
#pragma omp parallel for
#endif
ILOOPALL
{
F->X1[B2][-1+NG][i] = -F->X1[B2][0+NG][i];
PLOOP F->X2[ip][0+NG][i] = 0.;
}
#if !INTEL_WORKAROUND
#pragma omp parallel for
#endif
ILOOPALL
{
F->X1[B2][N2+NG][i] = -F->X1[B2][N2-1+NG][i];
PLOOP F->X2[ip][N2+NG][i] = 0.;
}
}
#endif // METRIC
|
knapsack.c | /**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */
/**********************************************************************************************/
/*
* Original code from the Cilk project
*
* Copyright (c) 2000 Massachusetts Institute of Technology
* Copyright (c) 2000 Matteo Frigo
*/
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <string.h>
#include "app-desc.h"
#include "bots.h"
int best_so_far;
int number_of_tasks;
#pragma omp threadprivate(number_of_tasks)
int compare(struct item *a, struct item *b)
{
double c = ((double) a->value / a->weight) -
((double) b->value / b->weight);
if (c > 0) return -1;
if (c < 0) return 1;
return 0;
}
int read_input(const char *filename, struct item *items, int *capacity, int *n)
{
int i;
FILE *f;
if (filename == NULL) filename = "\0";
f = fopen(filename, "r");
if (f == NULL) {
fprintf(stderr, "open_input(\"%s\") failed\n", filename);
return -1;
}
/* format of the input: #items capacity value1 weight1 ... */
fscanf(f, "%d", n);
fscanf(f, "%d", capacity);
for (i = 0; i < *n; ++i)
fscanf(f, "%d %d", &items[i].value, &items[i].weight);
fclose(f);
/* sort the items on decreasing order of value/weight */
/* cilk2c is fascist in dealing with pointers, whence the ugly cast */
qsort(items, *n, sizeof(struct item), (int (*)(const void *, const void *)) compare);
return 0;
}
/*
* return the optimal solution for n items (first is e) and
* capacity c. Value so far is v.
*/
#if defined(IF_CUTOFF)
void knapsack_par(struct item *e, int c, int n, int v, int *sol, int l)
{
int with, without, best;
double ub;
number_of_tasks++;
/* base case: full knapsack or no items */
if (c < 0)
{
*sol = INT_MIN;
return;
}
/* feasible solution, with value v */
if (n == 0 || c == 0)
{
*sol = v;
return;
}
ub = (double) v + c * e->value / e->weight;
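/* ub is the fractional-relaxation bound: items are sorted by value/weight in
read_input, so filling the remaining capacity c at the current item's ratio
bounds every completion of this branch from above and lets it be pruned
against best_so_far. */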
if (ub < best_so_far) {
/* prune ! */
*sol = INT_MIN;
return;
}
/*
* compute the best solution without the current item in the knapsack
*/
#pragma omp task untied firstprivate(e,c,n,v,l) shared(without) if (l < bots_cutoff_value)
knapsack_par(e + 1, c, n - 1, v, &without,l+1);
/* compute the best solution with the current item in the knapsack */
#pragma omp task untied firstprivate(e,c,n,v,l) shared(with) if (l < bots_cutoff_value)
knapsack_par(e + 1, c - e->weight, n - 1, v + e->value, &with,l+1);
#pragma omp taskwait
best = with > without ? with : without;
/*
* notice the race condition here. The program is still
* correct, in the sense that the best solution so far
* is at least best_so_far. Moreover best_so_far gets updated
* when returning, so eventually it should get the right
* value. The program is highly non-deterministic.
*/
if (best > best_so_far) best_so_far = best;
*sol = best;
}
#elif defined (MANUAL_CUTOFF)
void knapsack_par(struct item *e, int c, int n, int v, int *sol, int l)
{
int with, without, best;
double ub;
number_of_tasks++;
/* base case: full knapsack or no items */
if (c < 0)
{
*sol = INT_MIN;
return;
}
/* feasible solution, with value v */
if (n == 0 || c == 0)
{
*sol = v;
return;
}
ub = (double) v + c * e->value / e->weight;
if (ub < best_so_far) {
/* prune ! */
*sol = INT_MIN;
return;
}
if (l < bots_cutoff_value)
{
/* compute the best solution without the current item in the knapsack */
#pragma omp task untied firstprivate(e,c,n,v,l) shared(without)
knapsack_par(e + 1, c, n - 1, v, &without,l+1);
/* compute the best solution with the current item in the knapsack */
#pragma omp task untied firstprivate(e,c,n,v,l) shared(with)
knapsack_par(e + 1, c - e->weight, n - 1, v + e->value, &with,l+1);
#pragma omp taskwait
}
else
{
/* compute the best solution without the current item in the knapsack */
knapsack_seq(e + 1, c, n - 1, v, &without);
/* compute the best solution with the current item in the knapsack */
knapsack_seq(e + 1, c - e->weight, n - 1, v + e->value, &with);
}
best = with > without ? with : without;
/*
* notice the race condition here. The program is still
* correct, in the sense that the best solution so far
* is at least best_so_far. Moreover best_so_far gets updated
* when returning, so eventually it should get the right
* value. The program is highly non-deterministic.
*/
if (best > best_so_far) best_so_far = best;
*sol = best;
}
#else
void knapsack_par(struct item *e, int c, int n, int v, int *sol, int l)
{
int with, without, best;
double ub;
number_of_tasks++;
/* base case: full knapsack or no items */
if (c < 0)
{
*sol = INT_MIN;
return;
}
/* feasible solution, with value v */
if (n == 0 || c == 0)
{
*sol = v;
return;
}
ub = (double) v + c * e->value / e->weight;
if (ub < best_so_far) {
/* prune ! */
*sol = INT_MIN;
return;
}
/*
* compute the best solution without the current item in the knapsack
*/
#pragma omp task untied firstprivate(e,c,n,v,l) shared(without)
knapsack_par(e + 1, c, n - 1, v, &without,l+1);
/* compute the best solution with the current item in the knapsack */
#pragma omp task untied firstprivate(e,c,n,v,l) shared(with)
knapsack_par(e + 1, c - e->weight, n - 1, v + e->value, &with,l+1);
#pragma omp taskwait
best = with > without ? with : without;
/*
* notice the race condition here. The program is still
* correct, in the sense that the best solution so far
* is at least best_so_far. Moreover best_so_far gets updated
* when returning, so eventually it should get the right
* value. The program is highly non-deterministic.
*/
if (best > best_so_far) best_so_far = best;
*sol = best;
}
#endif
void knapsack_seq(struct item *e, int c, int n, int v, int *sol)
{
int with, without, best;
double ub;
number_of_tasks++;
/* base case: full knapsack or no items */
if (c < 0)
{
*sol = INT_MIN;
return;
}
/* feasible solution, with value v */
if (n == 0 || c == 0)
{
*sol = v;
return;
}
ub = (double) v + c * e->value / e->weight;
if (ub < best_so_far) {
/* prune ! */
*sol = INT_MIN;
return;
}
/*
* compute the best solution without the current item in the knapsack
*/
knapsack_seq(e + 1, c, n - 1, v, &without);
/* compute the best solution with the current item in the knapsack */
knapsack_seq(e + 1, c - e->weight, n - 1, v + e->value, &with);
best = with > without ? with : without;
/*
* notice the race condition here. The program is still
* correct, in the sense that the best solution so far
* is at least best_so_far. Moreover best_so_far gets updated
* when returning, so eventually it should get the right
* value. The program is highly non-deterministic.
*/
if (best > best_so_far) best_so_far = best;
*sol = best;
}
void knapsack_main_par (struct item *e, int c, int n, int v, int *sol)
{
best_so_far = INT_MIN;
#pragma omp parallel
{
number_of_tasks = 0;
#pragma omp single
#pragma omp task untied
{
knapsack_par(e, c, n, 0, sol, 0);
}
#pragma omp critical
bots_number_of_tasks += number_of_tasks;
}
if (bots_verbose_mode) printf("Best value for parallel execution is %d\n\n", *sol);
}
void knapsack_main_seq (struct item *e, int c, int n, int v, int *sol)
{
best_so_far = INT_MIN;
number_of_tasks = 0;
knapsack_seq(e, c, n, 0, sol);
if (bots_verbose_mode) printf("Best value for sequential execution is %d\n\n", *sol);
}
int knapsack_check (int sol_seq, int sol_par)
{
if (sol_seq == sol_par) return BOTS_RESULT_SUCCESSFUL;
else return BOTS_RESULT_UNSUCCESSFUL;
}
|
m_area_auto_covariance.h | //
// Created by Harold on 2021/6/28.
//
#ifndef M_MATH_M_AREA_AUTO_COVARIANCE_H
#define M_MATH_M_AREA_AUTO_COVARIANCE_H
#include <opencv2/core.hpp>
#include <omp.h>
namespace M_MATH {
template<typename T>
cv::Mat_<T> AreaAutoCovariance(cv::Mat_<T> const& I) {
static_assert(std::is_floating_point<T>::value, "T should be floating point");
auto ny = I.rows;
auto nx = I.cols;
cv::Mat_<T> AACV(ny, nx, T{});
#pragma omp parallel for collapse(2)
for (auto ty = 0; ty < ny; ty++)
for (auto tx = 0; tx < nx; tx++) {
auto tmp = T{};
for (auto j = 0; j < ny - ty; j++)
for (auto i = 0; i < nx - tx; i++)
if (j + ty - 1 < 0 || i + tx - 1 < 0) continue;
else tmp = tmp + I.at<T>(j, i) * I.at<T>(j + ty - 1, i + tx - 1); // I(-1, -1) is invalid, which means AACV row_0 and col_0 are invalid
AACV.at<T>(ty, tx) = tmp;
}
AACV /= (nx * ny);
// translate to remove invalid row_0 and col_0
//cv::Mat trans_mat = (cv::Mat_<T>(2,3) << 1, 0, -1, 0, 1, -1);
//cv::warpAffine(AACV, AACV, trans_mat, AACV.size());
// copy to remove invalid row_0 and col_0
AACV(cv::Rect(1, 1, I.cols - 1, I.rows - 1)).copyTo(AACV(cv::Rect(0, 0, I.cols - 1, I.rows - 1)));
AACV.row(I.rows - 1) = 0;
AACV.col(I.cols - 1) = 0;
return AACV;
}
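// Usage sketch (illustrative input only): compute the areal auto-covariance
// of a random double-precision height map.
//   cv::Mat_<double> img(64, 64);
//   cv::randu(img, cv::Scalar(0.0), cv::Scalar(1.0));
//   auto acv = AreaAutoCovariance(img);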
}
#endif //M_MATH_M_AREA_AUTO_COVARIANCE_H |
default_solvers.c | GET_CELL_MODEL_DATA(init_cell_model_data) {
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
SOLVE_MODEL_ODES(solve_model_odes_cpu) {
uint32_t sv_id;
size_t num_cells_to_solve = ode_solver->num_cells_to_solve;
uint32_t * cells_to_solve = ode_solver->cells_to_solve;
real *sv = ode_solver->sv;
real dt = ode_solver->min_dt;
uint32_t num_steps = ode_solver->num_steps;
bool adpt = ode_solver->adaptive;
#pragma omp parallel for private(sv_id)
for (uint32_t i = 0; i < num_cells_to_solve; i++) {
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = i;
if(adpt) {
solve_forward_euler_cpu_adpt(sv + (sv_id * NEQ), stim_currents[i], current_t + dt, sv_id, ode_solver);
}
else {
for (int j = 0; j < num_steps; ++j) {
solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
}
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
real rY[NEQ], rDY[NEQ];
for(int i = 0; i < NEQ; i++)
rY[i] = sv[i];
RHS_cpu(rY, rDY, stim_current, dt);
for(int i = 0; i < NEQ; i++)
sv[i] = dt * rDY[i] + rY[i];
}
void solve_forward_euler_cpu_adpt(real *sv, real stim_curr, real final_time, int sv_id, struct ode_solver *solver) {
const real _beta_safety_ = 0.8;
int numEDO = NEQ;
real rDY[numEDO];
real _tolerances_[numEDO];
real _aux_tol = 0.0;
// initializes the variables
solver->ode_previous_dt[sv_id] = solver->ode_dt[sv_id];
real edos_old_aux_[numEDO];
real edos_new_euler_[numEDO];
real *_k1__ = (real *)malloc(sizeof(real) * numEDO);
real *_k2__ = (real *)malloc(sizeof(real) * numEDO);
real *_k_aux__;
real *dt = &solver->ode_dt[sv_id];
real *time_new = &solver->ode_time_new[sv_id];
real *previous_dt = &solver->ode_previous_dt[sv_id];
if(*time_new + *dt > final_time) {
*dt = final_time - *time_new;
}
RHS_cpu(sv, rDY, stim_curr, *dt);
*time_new += *dt;
for(int i = 0; i < numEDO; i++) {
_k1__[i] = rDY[i];
}
const real rel_tol = solver->rel_tol;
const real abs_tol = solver->abs_tol;
const real __tiny_ = pow(abs_tol, 2.0);
real min_dt = solver->min_dt;
real max_dt = solver->max_dt;
while(1) {
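/* Each pass below takes an explicit Euler step with slope k1, re-evaluates
the right-hand side at the predicted state to get k2, and uses
|dt/2 * (k1 - k2)| scaled by the mixed tolerance (the Euler-vs-Heun
discrepancy) as the local error that drives the step-size adaptation. */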
for(int i = 0; i < numEDO; i++) {
// stores the old variables in a vector
edos_old_aux_[i] = sv[i];
// computes euler method
edos_new_euler_[i] = _k1__[i] * *dt + edos_old_aux_[i];
// steps ahead to compute the rk2 method
sv[i] = edos_new_euler_[i];
}
*time_new += *dt;
RHS_cpu(sv, rDY, stim_curr, *dt);
*time_new -= *dt; // step back
double greatestError = 0.0, auxError = 0.0;
for(int i = 0; i < numEDO; i++) {
// stores the new evaluation
_k2__[i] = rDY[i];
_aux_tol = fabs(edos_new_euler_[i]) * rel_tol;
_tolerances_[i] = (abs_tol > _aux_tol) ? abs_tol : _aux_tol;
// finds the greatest error between the steps
auxError = fabs(((*dt / 2.0) * (_k1__[i] - _k2__[i])) / _tolerances_[i]);
greatestError = (auxError > greatestError) ? auxError : greatestError;
}
/// adapt the time step
greatestError += __tiny_;
*previous_dt = *dt;
/// adapt the time step
*dt = _beta_safety_ * (*dt) * sqrt(1.0f / greatestError);
if(*dt < min_dt) {
*dt = min_dt;
} else if(*dt > max_dt) {
*dt = max_dt;
}
if(*time_new + *dt > final_time) {
*dt = final_time - *time_new;
}
// it doesn't accept the solution
if(greatestError >= 1.0f && *dt > min_dt) {
// restore the old values to do it again
for(int i = 0; i < numEDO; i++) {
sv[i] = edos_old_aux_[i];
}
// throw the results away and compute again
} else {
// it accepts the solutions
if(greatestError >= 1.0) {
printf("Accepting solution with error > %lf \n", greatestError);
}
_k_aux__ = _k2__;
_k2__ = _k1__;
_k1__ = _k_aux__;
// it steps the method ahead, with euler solution
for(int i = 0; i < numEDO; i++) {
sv[i] = edos_new_euler_[i];
}
if(*time_new + *previous_dt >= final_time) {
if(final_time == *time_new) {
break;
} else if(*time_new < final_time) {
*dt = *previous_dt = final_time - *time_new;
*time_new += *previous_dt;
break;
}
} else {
*time_new += *previous_dt;
}
}
}
free(_k1__);
free(_k2__);
}
|
icd3d.c |
#include <math.h>
#include <stdio.h>
#include <time.h>
#include <omp.h>
#include "icd3d.h"
#include "allocate.h"
void ICDStep3DCone(struct Sino *sino, struct Image *img, struct SysMatrix *A, struct ICDInfo3DCone *icdInfo, struct ReconParams *reconParams, struct ReconAux *reconAux)
{
/**
* Updates one voxel. Voxel change is stored in icdInfo->Delta_xj.
*/
/**
* Compute forward model term of theta1 and theta2:
*
* theta1_f = -e^t W A_{*,j}
* theta2_f = A_{*,j}^t W A _{*,j}
*/
computeTheta1Theta2ForwardTerm(sino, A, icdInfo, reconParams);
/**
* Compute prior model term of theta1 and theta2:
*
*/
if(reconParams->priorWeight_QGGMRF >= 0)
computeTheta1Theta2PriorTermQGGMRF(icdInfo, reconParams);
if(reconParams->priorWeight_proxMap >= 0)
computeTheta1Theta2PriorTermProxMap(icdInfo, reconParams);
computeDeltaXjAndUpdate(icdInfo, reconParams, img, reconAux);
updateErrorSinogram(sino, A, icdInfo);
}
void prepareICDInfo(long int j_x, long int j_y, long int j_z, struct ICDInfo3DCone *icdInfo, struct Image *img, struct ReconAux *reconAux, struct ReconParams *reconParams)
{
icdInfo->old_xj = img->vox[index_3D(j_x,j_y,j_z,img->params.N_y,img->params.N_z)];
if(reconParams->priorWeight_proxMap >= 0)
icdInfo->proxMapInput_j = img->proxMapInput[index_3D(j_x,j_y,j_z,img->params.N_y,img->params.N_z)];
icdInfo->j_x = j_x;
icdInfo->j_y = j_y;
icdInfo->j_z = j_z;
extractNeighbors(icdInfo, img, reconParams);
icdInfo->theta1_f = 0;
icdInfo->theta2_f = 0;
icdInfo->theta1_p_QGGMRF = 0;
icdInfo->theta2_p_QGGMRF = 0;
icdInfo->theta1_p_proxMap = 0;
icdInfo->theta2_p_proxMap = 0;
}
void extractNeighbors(struct ICDInfo3DCone *icdInfo, struct Image *img, struct ReconParams *reconParams)
{
long int j_x, j_y, j_z;
long int N_x, N_y, N_z;
long int PLx, MIx;
long int PLy, MIy;
long int PLz, MIz;
j_x = icdInfo->j_x;
j_y = icdInfo->j_y;
j_z = icdInfo->j_z;
N_x = img->params.N_x;
N_y = img->params.N_y;
N_z = img->params.N_z;
/**
* Use reflective boundary conditions to find the indices of the neighbors
*/
PLx = (j_x == N_x-1) ? N_x-2 : j_x+1;
PLy = (j_y == N_y-1) ? N_y-2 : j_y+1;
PLz = (j_z == N_z-1) ? N_z-2 : j_z+1;
MIx = (j_x == 0) ? 1 : j_x-1;
MIy = (j_y == 0) ? 1 : j_y-1;
MIz = (j_z == 0) ? 1 : j_z-1;
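/* e.g. with N_x = 4: j_x = 3 reflects forward to PLx = 2, and j_x = 0
reflects backward to MIx = 1, so the stencil never leaves the volume */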
/**
* Compute the neighbor pixel values
*
* Note that all the pixels of the first half of the arrays
* have a corresponding pixel in the second half of the array
* that is on the spacially opposite side.
* Example: neighborsFace[0] opposite of neighborsFace[3]
*/
if (reconParams->bFace>=0)
{
/* Face Neighbors (primal) */
//icdInfo->neighborsFace[0] = img->vox[PLx][j_y][j_z];
//icdInfo->neighborsFace[1] = img->vox[j_x][PLy][j_z];
//icdInfo->neighborsFace[2] = img->vox[j_x][j_y][PLz];
icdInfo->neighborsFace[0] = img->vox[index_3D(PLx,j_y,j_z,img->params.N_y,img->params.N_z)];
icdInfo->neighborsFace[1] = img->vox[index_3D(j_x,PLy,j_z,img->params.N_y,img->params.N_z)];
icdInfo->neighborsFace[2] = img->vox[index_3D(j_x,j_y,PLz,img->params.N_y,img->params.N_z)];
/* Face Neighbors (opposite) */
//icdInfo->neighborsFace[3] = img->vox[MIx][j_y][j_z];
//icdInfo->neighborsFace[4] = img->vox[j_x][MIy][j_z];
//icdInfo->neighborsFace[5] = img->vox[j_x][j_y][MIz];
icdInfo->neighborsFace[3] = img->vox[index_3D(MIx,j_y,j_z,img->params.N_y,img->params.N_z)];
icdInfo->neighborsFace[4] = img->vox[index_3D(j_x,MIy,j_z,img->params.N_y,img->params.N_z)];
icdInfo->neighborsFace[5] = img->vox[index_3D(j_x,j_y,MIz,img->params.N_y,img->params.N_z)];
}
if (reconParams->bEdge>=0)
{
/* Edge Neighbors (primal) */
//icdInfo->neighborsEdge[ 0] = img->vox[j_x][PLy][PLz];
//icdInfo->neighborsEdge[ 1] = img->vox[j_x][PLy][MIz];
//icdInfo->neighborsEdge[ 2] = img->vox[PLx][j_y][PLz];
//icdInfo->neighborsEdge[ 3] = img->vox[PLx][j_y][MIz];
//icdInfo->neighborsEdge[ 4] = img->vox[PLx][PLy][j_z];
//icdInfo->neighborsEdge[ 5] = img->vox[PLx][MIy][j_z];
icdInfo->neighborsEdge[0] = img->vox[index_3D(j_x,PLy,PLz,img->params.N_y,img->params.N_z)];
icdInfo->neighborsEdge[1] = img->vox[index_3D(j_x,PLy,MIz,img->params.N_y,img->params.N_z)];
icdInfo->neighborsEdge[2] = img->vox[index_3D(PLx,j_y,PLz,img->params.N_y,img->params.N_z)];
icdInfo->neighborsEdge[3] = img->vox[index_3D(PLx,j_y,MIz,img->params.N_y,img->params.N_z)];
icdInfo->neighborsEdge[4] = img->vox[index_3D(PLx,PLy,j_z,img->params.N_y,img->params.N_z)];
icdInfo->neighborsEdge[5] = img->vox[index_3D(PLx,MIy,j_z,img->params.N_y,img->params.N_z)];
/* Edge Neighbors (opposite) */
//icdInfo->neighborsEdge[ 6] = img->vox[j_x][MIy][MIz];
//icdInfo->neighborsEdge[ 7] = img->vox[j_x][MIy][PLz];
//icdInfo->neighborsEdge[ 8] = img->vox[MIx][j_y][MIz];
//icdInfo->neighborsEdge[ 9] = img->vox[MIx][j_y][PLz];
//icdInfo->neighborsEdge[10] = img->vox[MIx][MIy][j_z];
//icdInfo->neighborsEdge[11] = img->vox[MIx][PLy][j_z];
icdInfo->neighborsEdge[6] = img->vox[index_3D(j_x,MIy,MIz,img->params.N_y,img->params.N_z)];
icdInfo->neighborsEdge[7] = img->vox[index_3D(j_x,MIy,PLz,img->params.N_y,img->params.N_z)];
icdInfo->neighborsEdge[8] = img->vox[index_3D(MIx,j_y,MIz,img->params.N_y,img->params.N_z)];
icdInfo->neighborsEdge[9] = img->vox[index_3D(MIx,j_y,PLz,img->params.N_y,img->params.N_z)];
icdInfo->neighborsEdge[10] = img->vox[index_3D(MIx,MIy,j_z,img->params.N_y,img->params.N_z)];
icdInfo->neighborsEdge[11] = img->vox[index_3D(MIx,PLy,j_z,img->params.N_y,img->params.N_z)];
}
if (reconParams->bVertex>=0)
{
/* Vertex Neighbors (primal) */
//icdInfo->neighborsVertex[0] = img->vox[PLx][PLy][PLz];
//icdInfo->neighborsVertex[1] = img->vox[PLx][PLy][MIz];
//icdInfo->neighborsVertex[2] = img->vox[PLx][MIy][PLz];
//icdInfo->neighborsVertex[3] = img->vox[PLx][MIy][MIz];
icdInfo->neighborsVertex[0] = img->vox[index_3D(PLx,PLy,PLz,img->params.N_y,img->params.N_z)];
icdInfo->neighborsVertex[1] = img->vox[index_3D(PLx,PLy,MIz,img->params.N_y,img->params.N_z)];
icdInfo->neighborsVertex[2] = img->vox[index_3D(PLx,MIy,PLz,img->params.N_y,img->params.N_z)];
icdInfo->neighborsVertex[3] = img->vox[index_3D(PLx,MIy,MIz,img->params.N_y,img->params.N_z)];
/* Vertex Neighbors (opposite) */
//icdInfo->neighborsVertex[4] = img->vox[MIx][MIy][MIz];
//icdInfo->neighborsVertex[5] = img->vox[MIx][MIy][PLz];
//icdInfo->neighborsVertex[6] = img->vox[MIx][PLy][MIz];
//icdInfo->neighborsVertex[7] = img->vox[MIx][PLy][PLz];
icdInfo->neighborsVertex[4] = img->vox[index_3D(MIx,MIy,MIz,img->params.N_y,img->params.N_z)];
icdInfo->neighborsVertex[5] = img->vox[index_3D(MIx,MIy,PLz,img->params.N_y,img->params.N_z)];
icdInfo->neighborsVertex[6] = img->vox[index_3D(MIx,PLy,MIz,img->params.N_y,img->params.N_z)];
icdInfo->neighborsVertex[7] = img->vox[index_3D(MIx,PLy,PLz,img->params.N_y,img->params.N_z)];
}
}
void computeTheta1Theta2ForwardTerm(struct Sino *sino, struct SysMatrix *A, struct ICDInfo3DCone *icdInfo, struct ReconParams *reconParams)
{
/**
* Compute forward model term of theta1 and theta2:
*
* theta1_f = -e^t W A_{*,j}
* theta2_f = A_{*,j}^t W A _{*,j}
*/
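/**
* Notation (reader's note): e here is the error sinogram maintained by
* updateErrorSinogram, W is the diagonal weight matrix stored in sino->wgt
* (rescaled below by weightScaler_value), and A_{*,j} is the j-th column of
* the system matrix, assembled on the fly from the separable factors B and C.
*/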
long int i_beta, i_v, i_w;
long int j_x, j_y, j_z, j_u;
float B_ij, A_ij;
j_x = icdInfo->j_x;
j_y = icdInfo->j_y;
j_z = icdInfo->j_z;
for (i_beta = 0; i_beta < sino->params.N_beta; ++i_beta)
{
j_u = A->j_u[j_x][j_y][i_beta];
for (i_v = A->i_vstart[j_x][j_y][i_beta]; i_v < A->i_vstart[j_x][j_y][i_beta]+A->i_vstride[j_x][j_y][i_beta]; ++i_v)
{
B_ij = A->B_ij_scaler * A->B[j_x][j_y][i_beta*A->i_vstride_max + i_v-A->i_vstart[j_x][j_y][i_beta]];
for (i_w = A->i_wstart[j_u][j_z]; i_w < A->i_wstart[j_u][j_z]+A->i_wstride[j_u][j_z]; ++i_w)
{
A_ij = B_ij * A->C_ij_scaler * A->C[j_u][j_z*A->i_wstride_max + i_w-A->i_wstart[j_u][j_z]];
icdInfo->theta1_f -=
sino->e[index_3D(i_beta,i_v,i_w,sino->params.N_dv,sino->params.N_dw)]
* sino->wgt[index_3D(i_beta,i_v,i_w,sino->params.N_dv,sino->params.N_dw)]
* A_ij;
icdInfo->theta2_f +=
A_ij
* sino->wgt[index_3D(i_beta,i_v,i_w,sino->params.N_dv,sino->params.N_dw)]
* A_ij;
}
}
}
if(strcmp(reconParams->weightScaler_domain,"spatiallyInvariant") == 0)
{
icdInfo->theta1_f /= sino->params.weightScaler_value;
icdInfo->theta2_f /= sino->params.weightScaler_value;
}
else
{
fprintf(stderr, "ERROR in computeTheta1Theta2ForwardTerm: can't recongnize weightScaler_domain.\n");
exit(-1);
}
}
void computeTheta1Theta2PriorTermQGGMRF(struct ICDInfo3DCone *icdInfo, struct ReconParams *reconParams)
{
/**
* Compute prior model term of theta1 and theta2:
*
* theta1_p_QGGMRF = sum 2 b_{j,r} * surrCoeff(x_j - x_r) * (x_j - x_r)
* {r E ∂j}
*
* theta2_p_QGGMRF = sum 2 b_{j,r} * surrCoeff(x_j - x_r)
* {r E ∂j}
*/
int i;
float delta, surrogateCoeff;
float sum1Face = 0;
float sum1Edge = 0;
float sum1Vertex = 0;
float sum2Face = 0;
float sum2Edge = 0;
float sum2Vertex = 0;
if (reconParams->bFace>=0)
{
for (i = 0; i < 6; ++i)
{
delta = icdInfo->old_xj - icdInfo->neighborsFace[i];
surrogateCoeff = surrogateCoeffQGGMRF(delta, reconParams);
sum1Face += surrogateCoeff * delta;
sum2Face += surrogateCoeff;
}
}
if (reconParams->bEdge>=0)
{
for (i = 0; i < 12; ++i)
{
delta = icdInfo->old_xj - icdInfo->neighborsEdge[i];
surrogateCoeff = surrogateCoeffQGGMRF(delta, reconParams);
sum1Edge += surrogateCoeff * delta;
sum2Edge += surrogateCoeff;
}
}
if (reconParams->bVertex>=0)
{
for (i = 0; i < 8; ++i)
{
delta = icdInfo->old_xj - icdInfo->neighborsVertex[i];
surrogateCoeff = surrogateCoeffQGGMRF(delta, reconParams);
sum1Vertex += surrogateCoeff * delta;
sum2Vertex += surrogateCoeff;
}
}
icdInfo->theta1_p_QGGMRF = 2 * reconParams->bFace * sum1Face
+ 2 * reconParams->bEdge * sum1Edge
+ 2 * reconParams->bVertex * sum1Vertex;
icdInfo->theta2_p_QGGMRF = 2 * reconParams->bFace * sum2Face
+ 2 * reconParams->bEdge * sum2Edge
+ 2 * reconParams->bVertex * sum2Vertex;
}
void computeTheta1Theta2PriorTermProxMap(struct ICDInfo3DCone *icdInfo, struct ReconParams *reconParams)
{
/**
* theta1_p_proxMap = (x_j - ~x_j) / (sigma_lambda^2)
*
*
* theta2_p_proxMap = 1 / (sigma_lambda^2)
*
*/
icdInfo->theta1_p_proxMap = (icdInfo->old_xj - icdInfo->proxMapInput_j) / (reconParams->sigma_lambda * reconParams->sigma_lambda);
icdInfo->theta2_p_proxMap = 1.0 / (reconParams->sigma_lambda * reconParams->sigma_lambda);
}
float surrogateCoeffQGGMRF(float Delta, struct ReconParams *reconParams)
{
/**
* / rho'(Delta) / (2 Delta) if Delta != 0
* surrCoeff(Delta) = {
* \ rho''(0) / 2 if Delta = 0
*/
float p, q, T, sigmaX, qmp;
float num, denom, temp;
p = reconParams->p;
q = reconParams->q;
T = reconParams->T;
sigmaX = reconParams->sigmaX;
qmp = q - p;
if(fabs(Delta) < 1e-5)
{
/**
* rho''(0) 1
* -------- = -----------------
* 2 p sigmaX^q T^(q-p)
*/
return 1.0 / ( p * pow(sigmaX, q) * pow(T, qmp) );
}
else /* Delta != 0 */
{
/**
* rho'(Delta) |Delta|^(p-2) # (q/p + #)
* ----------- = ------------- ------------
* 2 Delta 2 sigmaX^p (1 + #)^2
*
* where | Delta |^(q-p)
* # = |--------|
* |T sigmaX|
*/
temp = pow(fabs(Delta / (T*sigmaX)), qmp); /* this is the # from above */
num = pow(fabs(Delta), p-2) * temp * (q/p + temp);
denom = 2 * pow(sigmaX,p) * (1.0 + temp) * (1.0 + temp);
return num / denom;
}
}
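/**
* Consistency check (informal): for q = 2 the two branches above agree as
* |Delta| -> 0. With # = |Delta/(T sigmaX)|^(q-p), the general expression
* tends to (q/(2p)) |Delta|^(q-2) / (sigmaX^q T^(q-p)), which for q = 2
* equals 1 / (p sigmaX^q T^(q-p)), the value returned by the small-|Delta|
* branch.
*/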
void updateErrorSinogram(struct Sino *sino, struct SysMatrix *A, struct ICDInfo3DCone *icdInfo)
{
/**
* Update error sinogram
*
* e <- e - A_{*,j} * Delta_xj
*/
long int i_beta, i_v, i_w;
long int j_x, j_y, j_z, j_u;
float B_ij;
j_x = icdInfo->j_x;
j_y = icdInfo->j_y;
j_z = icdInfo->j_z;
for (i_beta = 0; i_beta < sino->params.N_beta; ++i_beta)
{
j_u = A->j_u[j_x][j_y][i_beta];
for (i_v = A->i_vstart[j_x][j_y][i_beta]; i_v < A->i_vstart[j_x][j_y][i_beta]+A->i_vstride[j_x][j_y][i_beta]; ++i_v)
{
B_ij = A->B_ij_scaler * A->B[j_x][j_y][i_beta*A->i_vstride_max + i_v-A->i_vstart[j_x][j_y][i_beta]];
for (i_w = A->i_wstart[j_u][j_z]; i_w < A->i_wstart[j_u][j_z]+A->i_wstride[j_u][j_z]; ++i_w)
{
sino->e[index_3D(i_beta,i_v,i_w,sino->params.N_dv,sino->params.N_dw)] -=
B_ij
* A->C_ij_scaler * A->C[j_u][j_z*A->i_wstride_max + i_w-A->i_wstart[j_u][j_z]]
* icdInfo->Delta_xj;
}
}
}
}
void updateIterationStats(struct ReconAux *reconAux, struct ICDInfo3DCone *icdInfo, struct Image *img)
{
reconAux->TotalValueChange += fabs(icdInfo->Delta_xj);
//reconAux->TotalVoxelValue += _MAX_(img->vox[icdInfo->j_x][icdInfo->j_y][icdInfo->j_z], icdInfo->old_xj);
reconAux->TotalVoxelValue += _MAX_(img->vox[index_3D(icdInfo->j_x,icdInfo->j_y,icdInfo->j_z,img->params.N_y,img->params.N_z)], icdInfo->old_xj);
reconAux->NumUpdatedVoxels++;
}
void resetIterationStats(struct ReconAux *reconAux)
{
reconAux->TotalValueChange = 0;
reconAux->TotalVoxelValue = 0;
reconAux->NumUpdatedVoxels = 0;
}
void RandomAux_ShuffleOrderXYZ(struct RandomAux *aux, struct ImageParams *params)
{
fprintf(stdout, "zipline mode 0\n");
shuffleLongIntArray(aux->orderXYZ, params->N_x * params->N_y * params->N_z);
}
void indexExtraction3D(long int j_xyz, long int *j_x, long int N_x, long int *j_y, long int N_y, long int *j_z, long int N_z)
{
/* j_xyz = j_z + N_z j_y + N_z N_y j_x */
long int j_temp;
j_temp = j_xyz; /* Now, j_temp = j_z + N_z j_y + N_z N_y j_x */
*j_z = j_temp % N_z;
j_temp = (j_temp-*j_z) / N_z; /* Now, j_temp = j_y + N_y j_x */
*j_y = j_temp % N_y;
j_temp = (j_temp-*j_y) / N_y; /* Now, j_temp = j_x */
*j_x = j_temp;
return;
}
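/**
* Worked example (hypothetical values): with N_x = 2, N_y = 3, N_z = 4, the
* flat index j_xyz = 23 (= 3 + 4*2 + 4*3*1) decomposes as
*     j_z = 23 % 4 = 3,
*     j_y = ((23-3)/4) % 3 = 5 % 3 = 2,
*     j_x = (5-2)/3 = 1,
* i.e. the inverse of the index_3D() convention used throughout this file:
*     long int jx, jy, jz;
*     indexExtraction3D(23, &jx, 2, &jy, 3, &jz, 4);   // -> jx = 1, jy = 2, jz = 3
*/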
float MAPCost3D(struct Sino *sino, struct Image *img, struct ReconParams *reconParams)
{
/**
* Computes MAP cost function
*/
float cost;
// Initialize cost with forward model cost
cost = MAPCostForward(sino);
// if prior is used, add prior cost
if(reconParams->priorWeight_QGGMRF >= 0)
cost += MAPCostPrior_QGGMRF(img, reconParams);
// if proximal map is used, add proximal map cost
if(reconParams->priorWeight_proxMap >= 0)
cost += MAPCostPrior_ProxMap(img, reconParams);
return cost;
}
float MAPCostForward(struct Sino *sino)
{
/**
* ForwardCost = 1/2 ||e||^{2}_{W}
*/
long int i_beta, i_v, i_w;
float cost;
cost = 0;
for (i_beta = 0; i_beta < sino->params.N_beta; ++i_beta)
{
for (i_v = 0; i_v < sino->params.N_dv; ++i_v)
{
for (i_w = 0; i_w < sino->params.N_dw; ++i_w)
{
cost += sino->e[index_3D(i_beta,i_v,i_w,sino->params.N_dv,sino->params.N_dw)]
* sino->wgt[index_3D(i_beta,i_v,i_w,sino->params.N_dv,sino->params.N_dw)]
* sino->e[index_3D(i_beta,i_v,i_w,sino->params.N_dv,sino->params.N_dw)];
}
}
}
return cost / (2.0 * sino->params.weightScaler_value);
}
float MAPCostPrior_QGGMRF(struct Image *img, struct ReconParams *reconParams)
{
/**
* cost = sum b_{s,r} rho(x_s-x_r)
* {s,r} E P
*/
long int j_x, j_y, j_z;
struct ICDInfo3DCone icdInfo;
float cost;
float temp;
cost = 0;
for (j_x = 0; j_x < img->params.N_x; ++j_x)
{
for (j_y = 0; j_y < img->params.N_y; ++j_y)
for (j_z = 0; j_z < img->params.N_z; ++j_z)
{
/**
* Prepare icdInfo
*/
icdInfo.j_x = j_x;
icdInfo.j_y = j_y;
icdInfo.j_z = j_z;
extractNeighbors(&icdInfo, img, reconParams);
icdInfo.old_xj = img->vox[index_3D(j_x,j_y,j_z,img->params.N_y,img->params.N_z)];
temp = MAPCostPrior_QGGMRFSingleVoxel_HalfNeighborhood(&icdInfo, reconParams);
cost += temp;
}
}
return cost * reconParams->priorWeight_QGGMRF;
}
float MAPCostPrior_ProxMap(struct Image *img, struct ReconParams *reconParams)
{
/**
* Compute proximal mapping prior cost
* 1 || ||2
* cost += ---------------- || x - x~ ||
* 2 sigma_lambda^2 || ||2
*
*/
long int j_x, j_y, j_z;
float cost, diff_voxel;
cost = 0;
for (j_x = 0; j_x < img->params.N_x; ++j_x)
{
for (j_y = 0; j_y < img->params.N_y; ++j_y)
{
for (j_z = 0; j_z < img->params.N_z; ++j_z)
{
//diff_voxel = img->vox[j_x][j_y][j_z] - img->proxMapInput[j_x][j_y][j_z];
diff_voxel = img->vox[index_3D(j_x,j_y,j_z,img->params.N_y,img->params.N_z)] - img->proxMapInput[index_3D(j_x,j_y,j_z,img->params.N_y,img->params.N_z)];
cost += diff_voxel*diff_voxel*isInsideMask(j_x, j_y, img->params.N_x, img->params.N_y);
}
}
}
cost /= 2 * reconParams->sigma_lambda * reconParams->sigma_lambda;
return cost;
}
float MAPCostPrior_QGGMRFSingleVoxel_HalfNeighborhood(struct ICDInfo3DCone *icdInfo, struct ReconParams *reconParams)
{
/**
* Compute prior model term of theta1 and theta2:
*
* cost += sum b_{j,r} * rho(x_j - x_r)
* {r E ∂j^half}
*
*/
int i;
float sum1Face, sum1Edge, sum1Vertex;
sum1Face = 0;
sum1Edge = 0;
sum1Vertex = 0;
if (reconParams->bFace>=0)
for (i = 0; i < 3; ++i) /* Note: only use first half of the neighbors */
sum1Face += QGGMRFPotential(icdInfo->old_xj - icdInfo->neighborsFace[i], reconParams);
if (reconParams->bEdge>=0)
for (i = 0; i < 6; ++i) /* Note: only use first half of the neighbors */
sum1Edge += QGGMRFPotential(icdInfo->old_xj - icdInfo->neighborsEdge[i], reconParams);
if (reconParams->bVertex>=0)
for (i = 0; i < 4; ++i) /* Note: only use first half of the neighbors */
sum1Vertex += QGGMRFPotential(icdInfo->old_xj - icdInfo->neighborsVertex[i], reconParams);
return reconParams->bFace * sum1Face
+ reconParams->bEdge * sum1Edge
+ reconParams->bVertex * sum1Vertex;
}
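/**
* Why only half the neighborhood: extractNeighbors stores spatially opposite
* neighbors in the second half of each array (e.g. neighborsFace[0] vs.
* neighborsFace[3]), so summing over the first half counts each clique {s,r}
* exactly once when this function is accumulated over all voxels in
* MAPCostPrior_QGGMRF.
*/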
/* the potential function of the QGGMRF prior model. p << q <= 2 */
float QGGMRFPotential(float delta, struct ReconParams *reconParams)
{
float p, q, T, sigmaX;
float temp, GGMRF_Pot;
p = reconParams->p;
q = reconParams->q;
T = reconParams->T;
sigmaX = reconParams->sigmaX;
GGMRF_Pot = pow(fabs(delta),p)/(p*pow(sigmaX,p));
temp = pow(fabs(delta/(T*sigmaX)), q-p);
return ( GGMRF_Pot * temp/(1.0+temp) );
}
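/**
* Behavior sketch: for |delta| << T*sigmaX the factor temp is small and
* rho(delta) ~ |delta|^q / (p sigmaX^q T^(q-p)) (approximately quadratic for
* q = 2); for |delta| >> T*sigmaX, temp/(1+temp) -> 1 and rho(delta)
* approaches the GGMRF potential |delta|^p / (p sigmaX^p), which gives the
* edge-preserving behavior of the QGGMRF prior.
*/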
void partialZipline_computeStartStopIndex(long int *j_z_start, long int *j_z_stop, long int indexZiplines, long int numVoxelsPerZipline, long int N_z)
{
*j_z_start = indexZiplines*numVoxelsPerZipline;
*j_z_stop = _MIN_(*j_z_start+numVoxelsPerZipline-1, N_z-1);
}
int partialZipline_computeZiplineIndex(long int j_z, long int numVoxelsPerZipline)
{
return floor(j_z / numVoxelsPerZipline);
}
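/**
* Worked example (hypothetical values): with numVoxelsPerZipline = 8 and
* N_z = 20, indexZiplines = 2 gives j_z_start = 16 and
* j_z_stop = min(23, 19) = 19, and any j_z in [16, 19] maps back to
* zipline index floor(j_z/8) = 2.
*/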
void prepareICDInfoRandGroup(long int j_x, long int j_y, struct RandomZiplineAux *randomZiplineAux, struct ICDInfo3DCone *icdInfo, struct Image *img, struct ReconParams *reconParams, struct ReconAux *reconAux)
{
/* j = j_y + N_y j_x */
long int j_z, k_M;
long int j_z_start, j_z_stop;
long int indexZiplines;
k_M = 0;
for (indexZiplines = 0; indexZiplines < reconParams->numZiplines; ++indexZiplines)
{
if (!reconAux->NHICD_isPartialUpdateActive || reconAux->NHICD_isPartialZiplineHot[indexZiplines])
{
partialZipline_computeStartStopIndex(&j_z_start, &j_z_stop, indexZiplines, reconParams->numVoxelsPerZipline, img->params.N_z);
for (j_z = j_z_start; j_z <= j_z_stop; ++j_z)
{
if(randomZiplineAux->groupIndex[j_x][j_y][j_z] == randomZiplineAux->k_G)
{
prepareICDInfo(j_x, j_y, j_z, &icdInfo[k_M], img, reconAux, reconParams);
/* Increment k_M. After loop terminates k_M = No. members */
k_M++;
}
}
}
}
randomZiplineAux->N_M = k_M;
}
void computeDeltaXjAndUpdate(struct ICDInfo3DCone *icdInfo, struct ReconParams *reconParams, struct Image *img, struct ReconAux *reconAux)
{
/**
* Compute voxel increment Delta_xj.
* Delta_xj >= -x_j accomplishes positivity constraint:
*
* Delta_xj = clip{ -theta1/theta2, [-x_j, inf) }
*/
float theta1, theta2;
theta1 = icdInfo->theta1_f + reconParams->priorWeight_QGGMRF*icdInfo->theta1_p_QGGMRF + reconParams->priorWeight_proxMap*icdInfo->theta1_p_proxMap;
theta2 = icdInfo->theta2_f + reconParams->priorWeight_QGGMRF*icdInfo->theta2_p_QGGMRF + reconParams->priorWeight_proxMap*icdInfo->theta2_p_proxMap;
if (theta2 != 0)
{
icdInfo->Delta_xj = -theta1/theta2;
if(reconParams->is_positivity_constraint)
icdInfo->Delta_xj = _MAX_(icdInfo->Delta_xj, -icdInfo->old_xj);
}
else
{
icdInfo->Delta_xj = _MAX_(icdInfo->old_xj, 0);
}
if(icdInfo->Delta_xj != icdInfo->Delta_xj) /* NaN check: print diagnostic info */
{
printf("theta1_f = %e\n", icdInfo->theta1_f);
printf("theta2_f = %e\n", icdInfo->theta2_f);
printf("theta1_p_QGGMRF = %e\n", icdInfo->theta1_p_QGGMRF);
printf("theta2_p_QGGMRF = %e\n", icdInfo->theta2_p_QGGMRF);
printf("theta1_p_proxMap = %e\n", icdInfo->theta1_p_proxMap);
printf("theta2_p_proxMap = %e\n", icdInfo->theta2_p_proxMap);
printf("theta2 = %e\n", theta2);
printf("theta2 = %e\n", theta2);
printf("-t1/t2 = %e\n", -theta1/theta2);
printf("Delta_xj = %e\n", icdInfo->Delta_xj);
printf("------------------------\n");
}
/**
* Update voxel:
*
* x_j <- x_j + Delta_xj
*/
//img->vox[icdInfo->j_x][icdInfo->j_y][icdInfo->j_z] += icdInfo->Delta_xj;
img->vox[index_3D(icdInfo->j_x,icdInfo->j_y,icdInfo->j_z,img->params.N_y,img->params.N_z)] += icdInfo->Delta_xj;
}
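/**
* Worked example (hypothetical values): with theta1 = 4, theta2 = 2 and
* old_xj = 1, the unconstrained update is Delta_xj = -theta1/theta2 = -2;
* with the positivity constraint it is clipped to max(-2, -old_xj) = -1,
* so the voxel becomes x_j = 1 + (-1) = 0.
*/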
void computeDeltaXjAndUpdateGroup(struct ICDInfo3DCone *icdInfo, struct RandomZiplineAux *randomZiplineAux, struct ReconParams *reconParams, struct Image *img, struct ReconAux *reconAux)
{
long int N_M, k_M;
struct ICDInfo3DCone *info;
N_M = randomZiplineAux->N_M;
for (k_M = 0; k_M < N_M; ++k_M)
{
info = &icdInfo[k_M];
computeDeltaXjAndUpdate(info, reconParams, img, reconAux);
}
}
void updateIterationStatsGroup(struct ReconAux *reconAux, struct ICDInfo3DCone *icdInfoArray, struct RandomZiplineAux *randomZiplineAux, struct Image *img, struct ReconParams *reconParams)
{
long int N_M, k_M;
float absDelta, totValue;
struct ICDInfo3DCone *icdInfo;
long int j_x, j_y, j_z;
long int indexZiplines;
j_x = icdInfoArray[0].j_x;
j_y = icdInfoArray[0].j_y;
N_M = randomZiplineAux->N_M;
for (k_M = 0; k_M < N_M; ++k_M)
{
icdInfo = &icdInfoArray[k_M];
j_z = icdInfo->j_z;
indexZiplines = partialZipline_computeZiplineIndex(j_z, reconParams->numVoxelsPerZipline);
absDelta = fabs(icdInfo->Delta_xj);
//totValue = _MAX_(img->vox[j_x][j_y][j_z], icdInfo->old_xj);
totValue = _MAX_(img->vox[index_3D(j_x,j_y,j_z,img->params.N_y,img->params.N_z)], icdInfo->old_xj);
reconAux->TotalValueChange += absDelta;
reconAux->TotalVoxelValue += totValue;
reconAux->NumUpdatedVoxels++;
reconAux->NHICD_numUpdatedVoxels[indexZiplines]++;
reconAux->NHICD_totalValueChange[indexZiplines] += absDelta;
}
}
void disp_iterationInfo(struct ReconAux *reconAux, struct ReconParams *reconParams, int itNumber, int MaxIterations, float cost, float relUpdate, float stopThresholdChange, float weightScaler_value, float voxelsPerSecond, float ticToc_iteration, float weightedNormSquared_e, float ratioUpdated, float totalEquits)
{
printf("************************** Iteration %-2d (max. %d) **************************\n", itNumber, MaxIterations);
printf("* Cost = %-10.10e\n", cost);
printf("* Rel. Update = %-10.10e %% (threshold = %-10.10e %%)\n", relUpdate*100, stopThresholdChange*100);
printf("* RWFE = ||e||_W/||y||_W = %-10.10e %% (threshold = %-10.10e %%)\n", reconAux->relativeWeightedForwardError*100, reconParams->stopThesholdRWFE_pct);
printf("* RUFE = ||e|| / ||y|| = %-10.10e %% (threshold = %-10.10e %%)\n", reconAux->relativeUnweightedForwardError*100, reconParams->stopThesholdRUFE_pct);
printf("* ----------------------------------------------------------------------------\n");
printf("* 1/M ||e||^2_W = %-10.10e = 1/%-10.10f\n", weightedNormSquared_e, 1/weightedNormSquared_e);
printf("* weightScaler_value = %-10.10e = 1/%-10.10f\n", weightScaler_value, 1/weightScaler_value);
printf("* ----------------------------------------------------------------------------\n");
printf("* voxelsPerSecond = %-10.10e \n", voxelsPerSecond);
printf("* time icd update = %-10.10e s\n", ticToc_iteration);
printf("* ratioUpdated = %-10.10e %%\n", ratioUpdated*100);
printf("* totalEquits = %-10.10e \n", totalEquits);
printf("******************************************************************************\n\n");
}
float computeRelUpdate(struct ReconAux *reconAux, struct ReconParams *reconParams, struct Image *img)
{
float relUpdate;
float AvgValueChange, AvgVoxelValue;
float scaler;
int subsampleFactor = 10; /* when set to 1 this is completely accurate; the user can adjust this to some extent */
if(reconAux->NumUpdatedVoxels>0)
{
AvgValueChange = reconAux->TotalValueChange / reconAux->NumUpdatedVoxels;
AvgVoxelValue = reconAux->TotalVoxelValue / reconAux->NumUpdatedVoxels;
}
else
{
AvgValueChange = 0;
AvgVoxelValue = 0;
}
if(AvgVoxelValue>0)
{
/* [relativeChangeMode] 'meanImage' or 'fixedScaler' or 'percentile' */
if (strcmp(reconParams->relativeChangeMode, "meanImage")==0)
{
relUpdate = AvgValueChange / AvgVoxelValue;
}
else if (strcmp(reconParams->relativeChangeMode, "fixedScaler")==0)
{
relUpdate = AvgValueChange / reconParams->relativeChangeScaler;
}
else if (strcmp(reconParams->relativeChangeMode, "percentile")==0)
{
//scaler = prctile_copyFast(&img->vox[0][0][0], img->params.N_x*img->params.N_y*img->params.N_z, reconParams->relativeChangePercentile, subsampleFactor);
scaler = prctile_copyFast(&img->vox[0], img->params.N_x*img->params.N_y*img->params.N_z, reconParams->relativeChangePercentile, subsampleFactor);
relUpdate = AvgValueChange / scaler;
}
else
{
printf("Error: relativeChangeMode unknown\n");
exit(-1);
}
}
else
{
relUpdate = 0;
}
return relUpdate;
}
/* * * * * * * * * * * * parallel * * * * * * * * * * * * **/
void prepareParallelAux(struct ParallelAux *parallelAux, long int N_M_max)
{
int numThreads;
#pragma omp parallel
{
#pragma omp master
{
parallelAux->numThreads = numThreads = omp_get_num_threads();
}
}
parallelAux->N_M_max = N_M_max;
parallelAux->partialTheta = (struct PartialTheta**) multialloc(sizeof(struct PartialTheta), 2, numThreads, N_M_max);
parallelAux->j_u = mget_spc(numThreads, sizeof(long int));
parallelAux->i_v = mget_spc(numThreads, sizeof(long int));
parallelAux->B_ij = mget_spc(numThreads, sizeof(float));
parallelAux->k_M = mget_spc(numThreads, sizeof(long int));
parallelAux->j_z = mget_spc(numThreads, sizeof(long int));
parallelAux->i_w = mget_spc(numThreads, sizeof(long int));
parallelAux->A_ij = mget_spc(numThreads, sizeof(float));
}
void freeParallelAux(struct ParallelAux *parallelAux)
{
multifree((void**)parallelAux->partialTheta, 2);
free((void*)parallelAux->j_u);
free((void*)parallelAux->i_v);
free((void*)parallelAux->B_ij);
free((void*)parallelAux->k_M);
free((void*)parallelAux->j_z);
free((void*)parallelAux->i_w);
free((void*)parallelAux->A_ij);
}
void ICDStep3DConeGroup(struct Sino *sino, struct Image *img, struct SysMatrix *A, struct ICDInfo3DCone *icdInfo, struct ReconParams *reconParams, struct RandomZiplineAux *randomZiplineAux, struct ParallelAux *parallelAux, struct ReconAux *reconAux)
{
if (randomZiplineAux->N_M>0)
{
computeTheta1Theta2ForwardTermGroup(sino, A, icdInfo, randomZiplineAux, parallelAux, reconParams);
if(reconParams->priorWeight_QGGMRF >= 0)
computeTheta1Theta2PriorTermQGGMRFGroup(icdInfo, reconParams, randomZiplineAux);
if(reconParams->priorWeight_proxMap >= 0)
computeTheta1Theta2PriorTermProxMapGroup(icdInfo, reconParams, randomZiplineAux);
computeDeltaXjAndUpdateGroup(icdInfo, randomZiplineAux, reconParams, img, reconAux);
updateErrorSinogramGroup(sino, A, icdInfo, randomZiplineAux);
}
}
void computeTheta1Theta2ForwardTermGroup(struct Sino *sino, struct SysMatrix *A, struct ICDInfo3DCone *icdInfo, struct RandomZiplineAux *randomZiplineAux, struct ParallelAux *parallelAux, struct ReconParams *reconParams)
{
/**
* Compute forward model term of theta1 and theta2 for all members:
*
* theta1_f = -e^t W A_{*,j}
* theta2_f = A_{*,j}^t W A _{*,j}
*/
long int i_beta, i_v, i_w;
long int j_x, j_y, j_z, j_u;
float B_ij, A_ij;
long int N_M, k_M;
int threadID;
N_M = randomZiplineAux->N_M;
j_x = (icdInfo[0]).j_x;
j_y = (icdInfo[0]).j_y;
for (threadID = 0; threadID < parallelAux->numThreads; ++threadID)
{
for (k_M = 0; k_M < N_M; ++k_M)
{
parallelAux->partialTheta[threadID][k_M].t1 = 0;
parallelAux->partialTheta[threadID][k_M].t2 = 0;
}
}
#pragma omp parallel private(threadID, j_u, i_v, B_ij, k_M, j_z, i_w, A_ij)
{
threadID = omp_get_thread_num();
#pragma omp for
for (i_beta = 0; i_beta < sino->params.N_beta; ++i_beta)
{
j_u = A->j_u[j_x][j_y][i_beta];
for (i_v = A->i_vstart[j_x][j_y][i_beta]; i_v < A->i_vstart[j_x][j_y][i_beta]+A->i_vstride[j_x][j_y][i_beta]; ++i_v)
{
B_ij = A->B_ij_scaler * A->B[j_x][j_y][i_beta*A->i_vstride_max + i_v-A->i_vstart[j_x][j_y][i_beta]];
/* Loop through all the members along zip line */
for (k_M = 0; k_M < N_M; ++k_M)
{
j_z = icdInfo[k_M].j_z;
for (i_w = A->i_wstart[j_u][j_z]; i_w < A->i_wstart[j_u][j_z]+A->i_wstride[j_u][j_z]; ++i_w)
{
A_ij = B_ij * A->C_ij_scaler * A->C[j_u][j_z*A->i_wstride_max + i_w-A->i_wstart[j_u][j_z]];
parallelAux->partialTheta[threadID][k_M].t1 -=
sino->e[index_3D(i_beta,i_v,i_w,sino->params.N_dv,sino->params.N_dw)]
* sino->wgt[index_3D(i_beta,i_v,i_w,sino->params.N_dv,sino->params.N_dw)]
* A_ij;
parallelAux->partialTheta[threadID][k_M].t2 +=
A_ij
* sino->wgt[index_3D(i_beta,i_v,i_w,sino->params.N_dv,sino->params.N_dw)]
* A_ij;
}
}
}
}
}
for (threadID = 0; threadID < parallelAux->numThreads; ++threadID)
{
for (k_M = 0; k_M < N_M; ++k_M)
{
icdInfo[k_M].theta1_f += parallelAux->partialTheta[threadID][k_M].t1;
icdInfo[k_M].theta2_f += parallelAux->partialTheta[threadID][k_M].t2;
}
}
if(strcmp(reconParams->weightScaler_domain,"spatiallyInvariant") == 0)
{
for (k_M = 0; k_M < N_M; ++k_M)
{
icdInfo[k_M].theta1_f /= sino->params.weightScaler_value;
icdInfo[k_M].theta2_f /= sino->params.weightScaler_value;
}
}
else
{
fprintf(stderr, "ERROR in computeTheta1Theta2ForwardTerm: can't recongnize weightScaler_domain.\n");
exit(-1);
}
}
void computeTheta1Theta2PriorTermQGGMRFGroup(struct ICDInfo3DCone *icdInfo, struct ReconParams *reconParams, struct RandomZiplineAux *randomZiplineAux)
{
long int N_M, k_M;
N_M = randomZiplineAux->N_M;
#pragma omp parallel for
for (k_M = 0; k_M < N_M; ++k_M)
{
computeTheta1Theta2PriorTermQGGMRF(&icdInfo[k_M], reconParams);
}
}
void updateErrorSinogramGroup(struct Sino *sino, struct SysMatrix *A, struct ICDInfo3DCone *icdInfo, struct RandomZiplineAux *randomZiplineAux)
{
/**
* Update error sinogram
*
* e <- e - A_{*,j} * Delta_xj
*/
long int N_M, k_M;
long int i_beta, i_v, i_w;
long int j_x, j_y, j_z, j_u;
float B_ij;
N_M = randomZiplineAux->N_M;
j_x = icdInfo[0].j_x;
j_y = icdInfo[0].j_y;
#pragma omp parallel for private(j_u, i_v, B_ij, k_M, j_z, i_w)
for (i_beta = 0; i_beta < sino->params.N_beta; ++i_beta)
{
j_u = A->j_u[j_x][j_y][i_beta];
for (i_v = A->i_vstart[j_x][j_y][i_beta]; i_v < A->i_vstart[j_x][j_y][i_beta]+A->i_vstride[j_x][j_y][i_beta]; ++i_v)
{
B_ij = A->B_ij_scaler * A->B[j_x][j_y][i_beta*A->i_vstride_max + i_v-A->i_vstart[j_x][j_y][i_beta]];
for (k_M = 0; k_M < N_M; ++k_M)
{
j_z = icdInfo[k_M].j_z;
for (i_w = A->i_wstart[j_u][j_z]; i_w < A->i_wstart[j_u][j_z]+A->i_wstride[j_u][j_z]; ++i_w)
{
sino->e[index_3D(i_beta,i_v,i_w,sino->params.N_dv,sino->params.N_dw)] -=
B_ij
* A->C_ij_scaler * A->C[j_u][j_z*A->i_wstride_max + i_w-A->i_wstart[j_u][j_z]]
* icdInfo[k_M].Delta_xj;
}
}
}
}
}
void computeTheta1Theta2PriorTermProxMapGroup(struct ICDInfo3DCone *icdInfo, struct ReconParams *reconParams, struct RandomZiplineAux *randomZiplineAux)
{
long int N_M, k_M;
N_M = randomZiplineAux->N_M;
for (k_M = 0; k_M < N_M; ++k_M)
{
icdInfo[k_M].theta1_p_proxMap = (icdInfo[k_M].old_xj - icdInfo[k_M].proxMapInput_j) / (reconParams->sigma_lambda * reconParams->sigma_lambda);
icdInfo[k_M].theta2_p_proxMap = 1.0 / (reconParams->sigma_lambda * reconParams->sigma_lambda);
}
}
/* * * * * * * * * * * * time aux ICD * * * * * * * * * * * * **/
void speedAuxICD_reset(struct SpeedAuxICD *speedAuxICD)
{
speedAuxICD->numberUpdatedVoxels = 0;
speedAuxICD->tic = omp_get_wtime();
speedAuxICD->toc = -1.0;
speedAuxICD->voxelsPerSecond = -1.0;
}
void speedAuxICD_update(struct SpeedAuxICD *speedAuxICD, long int incrementNumber)
{
speedAuxICD->numberUpdatedVoxels += incrementNumber;
}
void speedAuxICD_computeSpeed(struct SpeedAuxICD *speedAuxICD)
{
if (speedAuxICD->numberUpdatedVoxels > 0)
{
speedAuxICD->toc = omp_get_wtime();
speedAuxICD->voxelsPerSecond = ((float)speedAuxICD->numberUpdatedVoxels) / (speedAuxICD->toc - speedAuxICD->tic);
}
else
{
speedAuxICD->voxelsPerSecond = 0;
}
}
/* * * * * * * * * * * * NHICD * * * * * * * * * * * * **/
int NHICD_isVoxelHot(struct ReconParams *reconParams, struct Image *img, long int j_x, long int j_y, long int j_z, float lastChangeThreshold)
{
if(img->lastChange[j_x][j_y][j_z] > lastChangeThreshold)
return 1;
if(bernoulli(reconParams->NHICD_random/100)==1)
return 1;
return 0;
}
int NHICD_activatePartialUpdate(struct ReconParams *reconParams, float relativeWeightedForwardError)
{
if (relativeWeightedForwardError*100<reconParams->NHICD_ThresholdAllVoxels_ErrorPercent && strcmp(reconParams->NHICD_Mode, "off")!=0)
return 1;
else
return 0;
}
int NHICD_checkPartialZiplineHot(struct ReconAux *reconAux, long int j_x, long int j_y, long int indexZiplines, struct Image *img)
{
if (reconAux->NHICD_isPartialUpdateActive)
{
if (img->lastChange[j_x][j_y][indexZiplines]>=reconAux->lastChangeThreshold || img->timeToChange[j_x][j_y][indexZiplines]==0)
{
return 1;
}
else
{
img->timeToChange[j_x][j_y][indexZiplines] = _MAX_(img->timeToChange[j_x][j_y][indexZiplines]-1, 0);
return 0;
}
}
else
{
return 1;
}
}
void NHICD_checkPartialZiplinesHot(struct ReconAux *reconAux, long int j_x, long int j_y, struct ReconParams *reconParams, struct Image *img)
{
long int indexZiplines;
for (indexZiplines = 0; indexZiplines < reconParams->numZiplines; ++indexZiplines)
{
reconAux->NHICD_isPartialZiplineHot[indexZiplines] = NHICD_checkPartialZiplineHot(reconAux, j_x, j_y, indexZiplines, img);
reconAux->NHICD_numUpdatedVoxels[indexZiplines] = 0;
reconAux->NHICD_totalValueChange[indexZiplines] = 0;
}
}
void updateNHICDStats(struct ReconAux *reconAux, long int j_x, long int j_y, struct Image *img, struct ReconParams *reconParams)
{
long int jj_x, jj_y, jj_x_min, jj_y_min, jj_x_max, jj_y_max;
float avgChange;
float mean_timeToChange;
long int sigma_timeToChange;
long int indexZiplines;
float w_self = 1;
float w_past = 0.5;
float w_neighbors = 0.5;
mean_timeToChange = 100.0/reconParams->NHICD_random-1;
sigma_timeToChange = round(mean_timeToChange*0.5);
jj_x_min = _MAX_(j_x-1, 0);
jj_y_min = _MAX_(j_y-1, 0);
jj_x_max = _MIN_(j_x+1, img->params.N_x-1);
jj_y_max = _MIN_(j_y+1, img->params.N_y-1);
for (indexZiplines = 0; indexZiplines < reconParams->numZiplines; ++indexZiplines)
{
if (reconAux->NHICD_isPartialZiplineHot[indexZiplines])
{
avgChange = reconAux->NHICD_numUpdatedVoxels[indexZiplines] > 0 ? reconAux->NHICD_totalValueChange[indexZiplines]/reconAux->NHICD_numUpdatedVoxels[indexZiplines] : 0;
img->lastChange[j_x][j_y][indexZiplines] = w_past * img->lastChange[j_x][j_y][indexZiplines] + w_self * avgChange;
for (jj_x = jj_x_min; jj_x <= jj_x_max; ++jj_x)
{
for (jj_y = jj_y_min; jj_y <= jj_y_max; ++jj_y)
{
img->lastChange[jj_x][jj_y][indexZiplines] += w_neighbors * reconAux->NHICD_neighborFilter[1+jj_x-j_x][1+jj_y-j_y] * avgChange;
}
}
img->timeToChange[j_x][j_y][indexZiplines] = almostUniformIntegerRV(mean_timeToChange, sigma_timeToChange);
}
}
}
|
analyze.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% AAA N N AAA L Y Y ZZZZZ EEEEE %
% A A NN N A A L Y Y ZZ E %
% AAAAA N N N AAAAA L Y ZZZ EEE %
% A A N NN A A L Y ZZ E %
% A A N N A A LLLLL Y ZZZZZ EEEEE %
% %
% Analyze An Image %
% %
% Software Design %
% Bill Corbis %
% December 1998 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
*/
/*
Include declarations.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <assert.h>
#include <math.h>
#include "magick/MagickCore.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% a n a l y z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% analyzeImage() computes the brightness and saturation mean, standard
% deviation, kurtosis and skewness and stores these values as attributes
% of the image.
%
% The format of the analyzeImage method is:
%
% size_t analyzeImage(Image **images,const int argc,
% const char **argv,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the address of a structure of type Image.
%
% o argc: Specifies a pointer to an integer describing the number of
% elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
% o exception: return any errors or warnings in this structure.
%
*/
ModuleExport size_t analyzeImage(Image **images,const int argc,
const char **argv,ExceptionInfo *exception)
{
char
text[MaxTextExtent];
double
area,
brightness,
brightness_mean,
brightness_standard_deviation,
brightness_kurtosis,
brightness_skewness,
brightness_sum_x,
brightness_sum_x2,
brightness_sum_x3,
brightness_sum_x4,
hue,
saturation,
saturation_mean,
saturation_standard_deviation,
saturation_kurtosis,
saturation_skewness,
saturation_sum_x,
saturation_sum_x2,
saturation_sum_x3,
saturation_sum_x4;
Image
*image;
assert(images != (Image **) NULL);
assert(*images != (Image *) NULL);
assert((*images)->signature == MagickSignature);
(void) argc;
(void) argv;
image=(*images);
for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
{
CacheView
*image_view;
ssize_t
y;
MagickBooleanType
status;
brightness_sum_x=0.0;
brightness_sum_x2=0.0;
brightness_sum_x3=0.0;
brightness_sum_x4=0.0;
brightness_mean=0.0;
brightness_standard_deviation=0.0;
brightness_kurtosis=0.0;
brightness_skewness=0.0;
saturation_sum_x=0.0;
saturation_sum_x2=0.0;
saturation_sum_x3=0.0;
saturation_sum_x4=0.0;
saturation_mean=0.0;
saturation_standard_deviation=0.0;
saturation_kurtosis=0.0;
saturation_skewness=0.0;
area=0.0;
status=MagickTrue;
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ConvertRGBToHSB(p->red,p->green,p->blue,&hue,&saturation,&brightness);
brightness*=QuantumRange;
brightness_sum_x+=brightness;
brightness_sum_x2+=brightness*brightness;
brightness_sum_x3+=brightness*brightness*brightness;
brightness_sum_x4+=brightness*brightness*brightness*brightness;
saturation*=QuantumRange;
saturation_sum_x+=saturation;
saturation_sum_x2+=saturation*saturation;
saturation_sum_x3+=saturation*saturation*saturation;
saturation_sum_x4+=saturation*saturation*saturation*saturation;
area++;
p++;
}
}
image_view=DestroyCacheView(image_view);
if (area <= 0.0)
break;
brightness_mean=brightness_sum_x/area;
(void) FormatMagickString(text,MaxTextExtent,"%g",brightness_mean);
(void) SetImageProperty(image,"filter:brightness:mean",text);
brightness_standard_deviation=sqrt(brightness_sum_x2/area-(brightness_sum_x/
area*brightness_sum_x/area));
(void) FormatMagickString(text,MaxTextExtent,"%g",
brightness_standard_deviation);
(void) SetImageProperty(image,"filter:brightness:standard-deviation",text);
if (brightness_standard_deviation != 0)
brightness_kurtosis=(brightness_sum_x4/area-4.0*brightness_mean*
brightness_sum_x3/area+6.0*brightness_mean*brightness_mean*
brightness_sum_x2/area-3.0*brightness_mean*brightness_mean*
brightness_mean*brightness_mean)/(brightness_standard_deviation*
brightness_standard_deviation*brightness_standard_deviation*
brightness_standard_deviation)-3.0;
(void) FormatMagickString(text,MaxTextExtent,"%g",brightness_kurtosis);
(void) SetImageProperty(image,"filter:brightness:kurtosis",text);
if (brightness_standard_deviation != 0)
brightness_skewness=(brightness_sum_x3/area-3.0*brightness_mean*
brightness_sum_x2/area+2.0*brightness_mean*brightness_mean*
brightness_mean)/(brightness_standard_deviation*
brightness_standard_deviation*brightness_standard_deviation);
(void) FormatMagickString(text,MaxTextExtent,"%g",brightness_skewness);
(void) SetImageProperty(image,"filter:brightness:skewness",text);
saturation_mean=saturation_sum_x/area;
(void) FormatMagickString(text,MaxTextExtent,"%g",saturation_mean);
(void) SetImageProperty(image,"filter:saturation:mean",text);
saturation_standard_deviation=sqrt(saturation_sum_x2/area-(saturation_sum_x/
area*saturation_sum_x/area));
(void) FormatMagickString(text,MaxTextExtent,"%g",
saturation_standard_deviation);
(void) SetImageProperty(image,"filter:saturation:standard-deviation",text);
if (saturation_standard_deviation != 0)
saturation_kurtosis=(saturation_sum_x4/area-4.0*saturation_mean*
saturation_sum_x3/area+6.0*saturation_mean*saturation_mean*
saturation_sum_x2/area-3.0*saturation_mean*saturation_mean*
saturation_mean*saturation_mean)/(saturation_standard_deviation*
saturation_standard_deviation*saturation_standard_deviation*
saturation_standard_deviation)-3.0;
(void) FormatMagickString(text,MaxTextExtent,"%g",saturation_kurtosis);
(void) SetImageProperty(image,"filter:saturation:kurtosis",text);
if (saturation_standard_deviation != 0)
saturation_skewness=(saturation_sum_x3/area-3.0*saturation_mean*
saturation_sum_x2/area+2.0*saturation_mean*saturation_mean*
saturation_mean)/(saturation_standard_deviation*
saturation_standard_deviation*saturation_standard_deviation);
(void) FormatMagickString(text,MaxTextExtent,"%g",saturation_skewness);
(void) SetImageProperty(image,"filter:saturation:skewness",text);
}
return(MagickImageFilterSignature);
}
|
omp_ex_16.c | #include <stdio.h>
#include <omp.h>
/*
MIT License
Copyright (c) 2019 NOUREDDINE DAGHBOUDJ
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
unsigned int a = 90;
#pragma omp threadprivate(a) /* threadprivate for a file-scope variable must appear at file scope, after its declaration */
int main()
{
printf("Before a = %i\n", a);
#pragma omp parallel
{
a += 10 + omp_get_thread_num();
printf("Inside a = %i\n", a);
}
printf("After a = %i\n", a);
return 0;
}
|
21_omp_task_struct.c | // clang-format off
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -S 2>&1 | %filecheck %s
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %opt -O2 -S | %apply-typeart -typeart-alloca -call-filter -S 2>&1 | %filecheck %s --check-prefix=CHECK-opt
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -S | %filecheck %s --check-prefix=check-inst
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %opt -O2 -S | %apply-typeart -typeart-alloca -call-filter -S | %filecheck %s --check-prefix=check-inst
// REQUIRES: openmp
// clang-format on
extern void MPI_call(void*);
typedef struct {
int x;
float y;
} X;
void foo() {
// check-inst: define {{.*}} @foo
// check-inst: %x = alloca
// check-inst: %0 = bitcast %struct.X* %x to i8*
// check-inst: call void @__typeart_alloc_stack(i8* %0, i32 {{[0-9]+}}, i64 1)
X x;
#pragma omp parallel
{
#pragma omp task
{ MPI_call(&x); }
}
}
// FIXME one alloca is of the anon struct detected as OMP task struct related (need refinement of condition?)
// The Pattern: a = alloca struct; b = task_alloc; mem_cpy a to b;
// CHECK: TypeArtPass [Heap & Stack]
// CHECK-NEXT: Malloc : 0
// CHECK-NEXT: Free : 0
// CHECK-NEXT: Alloca : 2
// CHECK-NEXT: Global : 0
// CHECK-opt: TypeArtPass [Heap & Stack]
// CHECK-opt-NEXT: Malloc : 0
// CHECK-opt-NEXT: Free : 0
// CHECK-opt-NEXT: Alloca : 1
// CHECK-opt-NEXT: Global : 0 |
omp_parallel_for_lastprivate.c | <ompts:test>
<ompts:testdescription>Test which checks the omp parallel for lastprivate directive.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp parallel for lastprivate</ompts:directive>
<ompts:dependences>omp parallel for reduction,omp parallel for private</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include "omp_testsuite.h"
int <ompts:testcode:functionname>omp_parallel_for_lastprivate</ompts:testcode:functionname>(FILE * logFile){
<ompts:orphan:vars>
int sum;
int i;
int i0;
</ompts:orphan:vars>
sum =0;
i0 = -1;
int known_sum;
#pragma omp parallel for reduction(+:sum) schedule(static,7) private(i) <ompts:check>lastprivate(i0)</ompts:check><ompts:crosscheck>private(i0)</ompts:crosscheck>
<ompts:orphan>
for (i = 1; i <= LOOPCOUNT; i++)
{
sum = sum + i;
i0 = i;
} /*end of for*/
/* end of parallel*/
</ompts:orphan>
known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
return ((known_sum == sum) && (i0 == LOOPCOUNT));
} /* end of check_parallel_for_lastprivate */
</ompts:testcode>
</ompts:test>
|
SpaceFrame v4.2.h | #include <Windows.h>
#include <ctype.h>
#include <iostream>
#include <math.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
using namespace std;
class SpaceFrame
{
private:
double EPS;
double MAXTS;
double MAXLV;
int TNN; // total number of nodes
int NFIN; // number of fixed nodes
int NFRN; // number of free nodes
int NOR; // number of rods
int NOL; // number of loads
int NOS; // number of sections
struct Node // parameters of nodes
{
double XCN; // X coordinate of nodes
double YCN; // Y coordinate of nodes
double ZCN; // Z coordinate of nodes
};
Node *nodes; // parameters of nodes
struct Rod // parameters of rods
{
int ENR; // the end node number of rods
int BNR; // the beginning node number of rods
double ELASTIC; // elastic modulus
double SHEAR; // shear modulus
double AREA; // area
double IMY; // inertia moment of Y axis
double IMZ; // inertia moment of Z axis
double THETA; // theta the deflection angle of main inertia axis
double LCS[4]; // the length, sine and cosine of rods
double RFE[6]; // the reaction force of the end node
};
Rod *rods; // parameters of rods
struct Load // parameters of loads
{
int NRL; // the number of rods with load
int PLI; // the plane of the load's in
int KOL; // the kind of load
double VOL; // the value of load
double DLB; // the distance between load and the beginning node
};
Load *loads; // parameters of loads
struct Section // parameters of sections
{
int NRS; // the number of rod with section
double DSB; // the distance between section and the beginning node
double IFS[6]; // the internal force in the section
};
Section *sections; // parameters of sections
double *TotalStiffness; // total stiffness
double *LoadVector; // load vector
double *Displacement; // displacement of nodes
int *IV; // the location of diagonal element
int NSI; // size of the packed (skyline) total stiffness array
int MAXIBDW; // half bandwidth
bool ProgressBar; // open progress bar
bool Parallel; // open parallel
// calculate the length sine and cosine of rods
bool sfLCosSin()
{
for (int k = 0; k < NOR; k++)
{
int i = rods[k].BNR - 1, j = rods[k].ENR - 1; // index of beginning and end nodes of rods
rods[k].LCS[1] = nodes[j].XCN - nodes[i].XCN;
rods[k].LCS[2] = nodes[j].YCN - nodes[i].YCN;
rods[k].LCS[3] = nodes[j].ZCN - nodes[i].ZCN;
rods[k].LCS[0] = sqrt(rods[k].LCS[1] * rods[k].LCS[1] + rods[k].LCS[2] * rods[k].LCS[2] + rods[k].LCS[3] * rods[k].LCS[3]);
if (rods[k].LCS[0] < EPS) // if the length of rod is too small, then return error
{
sfPrintError(8);
return 1;
}
rods[k].LCS[1] = rods[k].LCS[1] / rods[k].LCS[0];
rods[k].LCS[2] = rods[k].LCS[2] / rods[k].LCS[0];
rods[k].LCS[3] = rods[k].LCS[3] / rods[k].LCS[0];
}
return 0;
}
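// Worked example (hypothetical values): for a rod running from node (0,0,0)
// to node (3,0,4), the stored values are LCS[0] = 5 (length) and direction
// cosines LCS[1..3] = {0.6, 0.0, 0.8}.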
// allocate total stiffness matrix, load vector and displacement vector
bool sfAllocate()
{
int it = 0, mm = 0, dof = 6 * NFRN;
int *peribdw = new int[TNN](); // bandwidth per line in total stiffness matrix
IV = new int[dof]();
for (int i = 0; i < NOR; i++) // for each rod
{
if (rods[i].BNR > NFIN)
{
mm = rods[i].ENR - rods[i].BNR; // bandwidth is end number minus begin number
if (mm > peribdw[rods[i].ENR - 1])
peribdw[rods[i].ENR - 1] = mm; // find the maximum bandwidth per line
}
}
for (int i = NFIN; i < TNN; i++) // for each line in total stiffness matrix
{
if (peribdw[i] > MAXIBDW) // find the maximum
MAXIBDW = peribdw[i];
for (int j = 1; j <= 6; j++)
{
it = it + 1;
if (it == 1)
IV[it - 1] = 6 * peribdw[i] + j;
else
IV[it - 1] = IV[it - 2] + 6 * peribdw[i] + j;
}
}
MAXIBDW = 6 * MAXIBDW + 5;
NSI = IV[dof - 1];
delete[] peribdw;
TotalStiffness = new double[NSI](); // allocate memory for total stiffness matrix
LoadVector = new double[dof](); // allocate memory for load vector
Displacement = new double[dof](); // allocate memory for displacement vector
return 0;
}
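// Storage note: TotalStiffness holds the lower triangle in packed (skyline)
// form. IV[i] is the 1-based position of diagonal entry i in that packed
// array, which is why the solvers below read the diagonal as
// TotalStiffness[IV[i] - 1] and an off-diagonal (i > j) as
// TotalStiffness[IV[i] - i + j - 1] whenever it lies inside the stored band.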
// build total stiffness matrix
bool sfBuildTotalStiff() // ts is total stiffness matrix
{
double us[36] = {0}; // unit stiffness matrix
int p[2] = {0}; // p is a temporary vector for the i0/j0 node offsets
for (int k = 0; k < NOR; k++)
{
p[0] = 6 * (rods[k].BNR - NFIN - 1); // match the displacement with nodes
p[1] = 6 * (rods[k].ENR - NFIN - 1);
for (int i = 0; i < 2; i++)
{
if (p[i] >= 0) // determine free node
{
if (sfBuildUnitStiff(k, i + 1, us)) // build unit stiffness matrix
{
sfPrintError(7);
return 1;
}
for (int m = 0; m < 6; m++)
for (int n = 0; n <= m; n++)
TotalStiffness[IV[(p[i] + m)] + (p[i] + n) - (p[i] + m) - 1] += us[m * 6 + n]; // superpose
}
}
if (p[0] >= 0 && p[1] >= 0)
{
if (sfBuildUnitStiff(k, 3 + 1, us)) // build unit stiffness matrix
{
sfPrintError(7);
return 1;
}
for (int m = 0; m < 6; m++)
for (int n = 0; n < 6; n++)
TotalStiffness[IV[(p[1] + m)] + (p[0] + n) - (p[1] + m) - 1] += us[m * 6 + n]; // superpose
}
}
for (int i = 0; i < NSI; i++)
if (fabs(TotalStiffness[i]) > MAXTS)
MAXTS = TotalStiffness[i];
return 0;
}
// build unit stiffness matrix
bool sfBuildUnitStiff(int k, int flag, double *us) // k is the number of rods, flag is the index of matrix parts, us is the unit stiffness matrix
{
if (k < 0)
{
sfPrintError(16);
return 0;
}
if (flag < 1 || flag > 4)
{
sfPrintError(16);
return 0;
}
if (us == NULL)
{
sfPrintError(16);
return 0;
}
double rd[36] = {0}, t[36] = {0}, c[36] = {0}, tmp = 0; // rd is local stiffness matrix, t is transpose matrix, c is a temporary matrix
memset(us, 0, 36 * sizeof(double));
if (sfBuildLocalStiff(k, flag, rd)) // build local stiffness matrix
{
sfPrintError(9);
return 1;
}
if (sfBuildTrans(k, t)) // build transpose matrix
{
sfPrintError(10);
return 1;
}
for (int i = 0; i < 6; i++) // transpose matrix times local stiffness matrix, store the result in c
for (int m = 0; m < 6; m++)
{
tmp = t[i * 6 + m];
for (int j = 0; j < 6; j++)
c[i * 6 + j] += tmp * rd[m * 6 + j];
}
for (int i = 0; i < 6; i++) // c times the transposition of transpose matrix, store the result in unit stiff
for (int j = 0; j < 6; j++)
for (int m = 0; m < 6; m++)
us[i * 6 + j] += c[i * 6 + m] * t[j * 6 + m];
return 0;
}
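// In matrix form the loops above compute us = T * rd * T^T, i.e. the local
// (rod-frame) stiffness block rd is rotated into global coordinates by the
// transformation matrix t built in sfBuildTrans.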
// build local stiffness matrix
bool sfBuildLocalStiff(int k, int flag, double *rd) // k is the number of rods, flag is the number of matrix
{
if (k < 0)
{
sfPrintError(17);
return 0;
}
if (flag < 0 || flag > 4)
{
sfPrintError(17);
return 0;
}
if (rd == NULL)
{
sfPrintError(17);
return 0;
}
double a = 0, b = 0, c = 0, d = 0, e = 0, f = 0, g = 0, h = 0, l = rods[k].LCS[0];
a = rods[k].ELASTIC * rods[k].AREA / l; // EA/l
b = rods[k].SHEAR * (rods[k].IMY + rods[k].IMZ) / l; // GJ(p)/l
c = 4 * rods[k].ELASTIC * rods[k].IMY / l; // 4EJ(y)/l
d = c / 2 * 3 / l; // 6EJ(y)/l/l
e = 2 * d / l; // 12EJ(y)/l/l/l
f = 4 * rods[k].ELASTIC * rods[k].IMZ / l; // 4EJ(z)/l
g = f / 2 * 3 / l; // 6EJ(Z)/l/l
h = 2 * g / l; // 12EJ(z)/l/l/l
switch (flag)
{
case 1: // k11
rd[0 * 6 + 0] = a;
rd[1 * 6 + 1] = h;
rd[1 * 6 + 5] = rd[5 * 6 + 1] = g;
rd[2 * 6 + 2] = e;
rd[2 * 6 + 4] = rd[4 * 6 + 2] = -d;
rd[3 * 6 + 3] = b;
rd[4 * 6 + 4] = c;
rd[5 * 6 + 5] = f;
break;
case 2: // k22
rd[0 * 6 + 0] = a;
rd[1 * 6 + 1] = h;
rd[1 * 6 + 5] = rd[5 * 6 + 1] = -g;
rd[2 * 6 + 2] = e;
rd[2 * 6 + 4] = rd[4 * 6 + 2] = d;
rd[3 * 6 + 3] = b;
rd[4 * 6 + 4] = c;
rd[5 * 6 + 5] = f;
break;
case 3: // k12
rd[0 * 6 + 0] = -a;
rd[1 * 6 + 1] = -h;
rd[1 * 6 + 5] = g;
rd[5 * 6 + 1] = -g;
rd[2 * 6 + 2] = -e;
rd[2 * 6 + 4] = -d;
rd[4 * 6 + 2] = d;
rd[3 * 6 + 3] = -b;
rd[4 * 6 + 4] = c / 2;
rd[5 * 6 + 5] = f / 2;
break;
case 4: // k21
rd[0 * 6 + 0] = -a;
rd[1 * 6 + 1] = -h;
rd[1 * 6 + 5] = -g;
rd[5 * 6 + 1] = g;
rd[2 * 6 + 2] = -e;
rd[2 * 6 + 4] = d;
rd[4 * 6 + 2] = -d;
rd[3 * 6 + 3] = -b;
rd[4 * 6 + 4] = c / 2;
rd[5 * 6 + 5] = f / 2;
break;
default:
break;
}
return 0;
}
// build transpose matrix
bool sfBuildTrans(int k, double *t) // k is the number of rods, t is transpose matrix
{
if (k < 0)
{
sfPrintError(18);
return 0;
}
if (t == NULL)
{
sfPrintError(18);
return 0;
}
double coa = 0, cob = 0, coc = 0, sic = 0, sit = 0, cot = 0, m = 0, n = 0; // co means cosine, si means sine, m and n are temporary variables
memset(t, 0, 36 * sizeof(double));
coa = rods[k].LCS[1]; // cosine alpha
cob = rods[k].LCS[2]; // cosine beta
coc = rods[k].LCS[3]; // cosine gamma
sit = sin(rods[k].THETA); // sine theta
cot = cos(rods[k].THETA); // cosine theta
if (fabs(coc - 1) < EPS) // vertical(z axis positive direction) rods' transpose matrix
{
t[2 * 6 + 0] = t[5 * 6 + 3] = 1;
t[0 * 6 + 1] = t[3 * 6 + 4] = t[1 * 6 + 2] = t[4 * 6 + 5] = sit;
t[1 * 6 + 1] = t[4 * 6 + 4] = cot;
t[0 * 6 + 2] = t[3 * 6 + 5] = -cot;
}
else if (fabs(coc + 1) < EPS) // vertical(z axis negative direction) rods' transpose matrix
{
t[2 * 6 + 0] = t[5 * 6 + 3] = -1;
t[0 * 6 + 1] = t[3 * 6 + 4] = sit;
t[1 * 6 + 2] = t[4 * 6 + 5] = -sit;
t[1 * 6 + 1] = t[4 * 6 + 4] = t[0 * 6 + 2] = t[3 * 6 + 5] = cot;
}
else
{
sic = sqrt(1 - coc * coc); // sine gamma
m = coa * coc; // cosine alpha times cosine gamma
n = cob * coc; // cosine beta times cosine gamma
t[0 * 6 + 0] = t[3 * 6 + 3] = coa;
t[1 * 6 + 0] = t[4 * 6 + 3] = cob;
t[2 * 6 + 0] = t[5 * 6 + 3] = coc;
t[0 * 6 + 1] = t[3 * 6 + 4] = (cob * sit - m * cot) / sic;
t[1 * 6 + 1] = t[4 * 6 + 4] = -(n * cot + coa * sit) / sic;
t[2 * 6 + 1] = t[5 * 6 + 4] = cot * sic;
t[0 * 6 + 2] = t[3 * 6 + 5] = (m * sit + cob * cot) / sic;
t[1 * 6 + 2] = t[4 * 6 + 5] = (n * sit - coa * cot) / sic;
t[2 * 6 + 2] = t[5 * 6 + 5] = -sit * sic;
}
return 0;
}
// build load vector
bool sfBuildLoadVector(double *lv) // lv is the load vector
{
if (lv == 0)
{
sfPrintError(19);
return 0;
}
int rod = 0, p[2] = {0}; // rod is the rod index, p is a temporary vector for i0/j0
double rf[12] = {0}, t[36] = {0}; // rf is the reaction force matrix, t is the transpose matrix
for (int i = 0; i < NOL; i++)
{
rod = loads[i].NRL - 1; // the number of rods with load
memset(rf, 0, 12 * sizeof(double)); // zero clearing
if (sfReactionForce(i, &rf[0 * 6], &rf[1 * 6])) // calculate reaction force
{
sfPrintError(11);
return 1;
}
for (int j = 0; j < 6; j++) // add reaction force to RFE
rods[rod].RFE[j] += rf[1 * 6 + j];
if (sfBuildTrans(rod, t)) // build transpose matrix
{
sfPrintError(10);
return 1;
}
p[0] = 6 * (rods[rod].BNR - NFIN - 1); // match the displacement with nodes
p[1] = 6 * (rods[rod].ENR - NFIN - 1);
for (int j = 0; j < 2; j++) // add reaction force to load vector
{
if (p[j] >= 0) // determine free node
for (int m = 0; m < 6; m++)
for (int n = 0; n < 6; n++)
lv[p[j] + m] -= t[m * 6 + n] * rf[j * 6 + n];
}
}
for (int i = 0; i < 6 * NFRN; i++)
if (fabs(lv[i]) > MAXLV)
MAXLV = lv[i];
return 0;
}
// calculate reaction force
bool sfReactionForce(int i, double *rfb, double *rfe) // i is the index of the load, rfb and rfe are the reaction forces at the beginning and end of the rod
{
if (i < 0)
{
sfPrintError(20);
return 0;
}
if (rfb == NULL)
{
sfPrintError(20);
return 0;
}
if (rfe == NULL)
{
sfPrintError(20);
return 0;
}
double ra = 0, rb = 0, a = 0, b = 0, q = loads[i].VOL, xq = loads[i].DLB; // ra, rb, a and b are intermediate variables
int rod = loads[i].NRL - 1, pm = loads[i].PLI, t = 0; // rod is the number of rods
if (pm == 0) // load is in XY plane
t = -1; // the bending moment in the support-reaction equations is positive clockwise; convert it to be positive about the coordinate axis
else if (pm == 1) // load is in XZ plane
t = 1; // the bending moment in the support-reaction equations is positive clockwise; convert it to be positive about the coordinate axis
ra = loads[i].DLB / rods[rod].LCS[0]; // x(q) / L
rb = 1 - ra; // 1 - x(q) / L
switch (loads[i].KOL)
{
case 1: // vertical concentrating load
a = rb * rb;
rfb[pm + 1] = -q * rb * (1 + ra - 2 * ra * ra);
rfe[pm + 1] = -q - rfb[pm + 1];
rfb[5 - pm] = t * q * rb * ra * (rods[rod].LCS[0] - xq);
rfe[5 - pm] = -t * q * ra * rb * xq;
break;
case 2: // vertical uniform load
a = q * xq;
b = a * xq / 12;
rfb[pm + 1] = -a * (1 + 0.5 * ra * ra * ra - ra * ra);
rfe[pm + 1] = -a - rfb[pm + 1];
rfb[5 - pm] = t * b * (6 - 8 * ra + 3 * ra * ra);
rfe[5 - pm] = -t * b * (4 * ra - 3 * ra * ra);
break;
case 3: // axial concentrating force when PLI == 0, torque when PLI ==1
rfb[3 * pm] = -q * rb;
rfe[3 * pm] = -q * ra;
break;
case 4: // axial uniform load
a = q * xq;
rfe[3 * pm] = -a * ra / 2;
rfb[3 * pm] = -a - rfe[3 * pm];
break;
case 5: // vertical triangle distributed load
a = q * xq / 2;
b = -0.4 * ra * ra;
rfb[pm + 1] = -2 * a * (0.5 - 0.75 * ra * ra + 0.4 * ra * ra * ra);
rfe[pm + 1] = -a - rfb[pm + 1];
rfb[5 - pm] = t * a * (2.0 / 3 + b - ra); /* use 2.0/3 to avoid integer division */
rfe[5 - pm] = -t * a * (0.5 * ra + b);
break;
case 6: // concentrating bending moment
rfb[2 - pm] = t * 6 * q * rb * ra / rods[rod].LCS[0];
rfe[2 - pm] = -rfb[2 - pm];
rfb[pm + 4] = t * q * rb * (-1 + 3 * ra);
rfe[pm + 4] = t * q * ra * (2 - 3 * ra);
break;
case 7: // uniform temperature rise
rfb[0] = q * xq * rods[rod].ELASTIC * rods[rod].AREA;
rfe[0] = -rfb[0];
break;
case 8: // different temperature rise
if (pm == 0)
a = rods[rod].IMZ;
else if (pm == 1)
a = rods[rod].IMY;
rfb[5 - pm] = t * q * 2 * rods[rod].ELASTIC * a * xq;
rfe[5 - pm] = -rfb[5 - pm];
break;
default:
break;
}
return 0;
}
// solve equation of matrix by conjugate gradient
bool sfConjugateGradient(double *A, double *b, double *x, int N)
{
if (A == NULL)
{
sfPrintError(12);
return 1;
}
else if (b == NULL)
{
sfPrintError(12);
return 1;
}
else if (x == NULL)
{
sfPrintError(12);
return 1;
}
else if (N == 0)
{
sfPrintError(12);
return 1;
}
double *r = NULL, *p = NULL, *z = NULL;
double gamma = 0, gamma_new = 0, gamma_new_sqrt = 0, alpha = 0, beta = 0;
int percent = 0, percent_new = 0;
if (ProgressBar)
{
printf("\rSolving equation [ 0%% ][ ]");
}
r = (double *)malloc(N * sizeof(double));
memset(r, 0, N * sizeof(double));
p = (double *)malloc(N * sizeof(double));
memset(p, 0, N * sizeof(double));
z = (double *)malloc(N * sizeof(double));
memset(z, 0, N * sizeof(double));
for (int i = 0; i < NSI; i++)
A[i] = A[i] / MAXTS;
for (int i = 0; i < N; i++)
b[i] = b[i] / MAXLV;
// x = [0 ... 0]
// r = b - A * x
// p = r
// gamma = r' * r
gamma = 0.0;
for (int i = 0; i < N; ++i)
{
x[i] = 0.0;
r[i] = b[i];
p[i] = r[i];
gamma += r[i] * r[i];
}
for (int n = 0; 1; ++n)
{
// z = A * p
for (int i = 0; i < N; i++)
{
z[i] = 0.0;
for (int j = 0; j < N; j++)
{
if (i == j)
{
z[i] += A[IV[i] - 1] * p[j];
}
else if (j > i)
{
if ((IV[j] - j + i) > IV[j - 1])
z[i] += A[IV[j] - j + i - 1] * p[j];
else
z[i] += 0;
}
else if (i > j)
{
if ((IV[i] - i + j) > IV[i - 1])
z[i] += A[IV[i] - i + j - 1] * p[j];
else
z[i] += 0;
}
}
}
// alpha = gamma / (p' * z)
alpha = 0.0;
for (int i = 0; i < N; ++i)
alpha += p[i] * z[i];
alpha = gamma / alpha;
// x = x + alpha * p
// r = r - alpha * z
// gamma_new = r' * r
gamma_new = 0.0;
for (int i = 0; i < N; ++i)
{
x[i] += alpha * p[i];
r[i] -= alpha * z[i];
gamma_new += r[i] * r[i];
}
gamma_new_sqrt = sqrt(gamma_new);
if (gamma_new_sqrt < EPS)
break;
if (ProgressBar)
{
percent_new = (int)((1 - log10(gamma_new_sqrt * 1e15) / 16) * 100);
if (percent_new > percent)
{
percent = percent_new;
printf("\rSolving equation ");
for (int i = 0; i <= 4; i++)
if (i <= n % 4)
printf(".");
else
printf(" ");
printf("[ %d%% ]", percent_new);
printf("[");
for (int i = 0; i < 49; i++)
if (i < percent / 2)
printf("=");
else
printf(" ");
printf("]");
}
else
{
printf("\rSolving equation ");
for (int i = 0; i <= 4; i++)
if (i <= n % 4)
printf(".");
else
printf(" ");
}
}
beta = gamma_new / gamma;
// p = r + (gamma_new / gamma) * p;
for (int i = 0; i < N; ++i)
p[i] = r[i] + beta * p[i];
// gamma = gamma_new
gamma = gamma_new;
}
for (int i = 0; i < NSI; i++)
A[i] = A[i] * MAXTS;
for (int i = 0; i < N; i++)
b[i] = b[i] * MAXLV;
for (int i = 0; i < N; i++)
x[i] = x[i] * MAXLV / MAXTS;
if (ProgressBar)
{
printf("\rSolving equation done [ 100%% ][=================================================]\n");
}
free(r);
free(p);
free(z);
return 0;
}
// solve the system of equations by the conjugate gradient method (OpenMP parallel version)
bool sfConjugateGradientPar(double *A, double *b, double *x, int N)
{
if (A == NULL)
{
sfPrintError(12);
return 1;
}
else if (b == NULL)
{
sfPrintError(12);
return 1;
}
else if (x == NULL)
{
sfPrintError(12);
return 1;
}
else if (N == 0)
{
sfPrintError(12);
return 1;
}
double *r = NULL, *p = NULL, *z = NULL;
double gamma = 0, gamma_new = 0, gamma_new_sqrt = 0, alpha = 0, beta = 0;
int percent = 0, percent_new = 0;
if (ProgressBar)
{
printf("\rSolving equation [ 0%% ][ ]");
}
r = (double *)malloc(N * sizeof(double));
memset(r, 0, N * sizeof(double));
p = (double *)malloc(N * sizeof(double));
memset(p, 0, N * sizeof(double));
z = (double *)malloc(N * sizeof(double));
memset(z, 0, N * sizeof(double));
for (int i = 0; i < NSI; i++)
A[i] = A[i] / MAXTS;
for (int i = 0; i < N; i++)
b[i] = b[i] / MAXLV;
// x = [0 ... 0]
// r = b - A * x
// p = r
// gamma = r' * r
gamma = 0.0;
#pragma omp parallel for reduction(+ \
: gamma)
for (int i = 0; i < N; ++i)
{
x[i] = 0.0;
r[i] = b[i];
p[i] = r[i];
gamma += r[i] * r[i];
}
for (int n = 0; true; ++n)
{
// z = A * p
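// Each entry z[i] depends only on row i of A and on p, so the matrix-vector product is
// parallelized over i; the dot products below use OpenMP reduction clauses so that partial
// sums are combined safely across threads.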
#pragma omp parallel for
for (int i = 0; i < N; i++)
{
z[i] = 0.0;
for (int j = 0; j < N; j++)
{
if (i == j)
{
z[i] += A[IV[i] - 1] * p[j];
}
else if (j > i)
{
if ((IV[j] - j + i) > IV[j - 1])
z[i] += A[IV[j] - j + i - 1] * p[j];
else
z[i] += 0;
}
else if (i > j)
{
if ((IV[i] - i + j) > IV[i - 1])
z[i] += A[IV[i] - i + j - 1] * p[j];
else
z[i] += 0;
}
}
}
// alpha = gamma / (p' * z)
alpha = 0.0;
#pragma omp parallel for reduction(+ \
: alpha)
for (int i = 0; i < N; ++i)
alpha += p[i] * z[i];
alpha = gamma / alpha;
// x = x + alpha * p
// r = r - alpha * z
// gamma_new = r' * r
gamma_new = 0.0;
#pragma omp parallel for reduction(+ \
: gamma_new)
for (int i = 0; i < N; ++i)
{
x[i] += alpha * p[i];
r[i] -= alpha * z[i];
gamma_new += r[i] * r[i];
}
gamma_new_sqrt = sqrt(gamma_new);
if (gamma_new_sqrt < EPS)
break;
if (ProgressBar)
{
percent_new = (int)((1 - log10(gamma_new_sqrt * 1e15) / 16) * 100);
if (percent_new > percent)
{
percent = percent_new;
printf("\rSolving equation ");
for (int i = 0; i <= 4; i++)
if (i <= n % 4)
printf(".");
else
printf(" ");
printf("[ %d%% ]", percent_new);
printf("[");
for (int i = 0; i < 49; i++)
if (i < percent / 2)
printf("=");
else
printf(" ");
printf("]");
}
else
{
printf("\rSolving equation ");
for (int i = 0; i <= 4; i++)
if (i <= n % 4)
printf(".");
else
printf(" ");
}
}
beta = gamma_new / gamma;
// p = r + (gamma_new / gamma) * p;
#pragma omp parallel for
for (int i = 0; i < N; ++i)
p[i] = r[i] + beta * p[i];
// gamma = gamma_new
gamma = gamma_new;
}
for (int i = 0; i < NSI; i++)
A[i] = A[i] * MAXTS;
for (int i = 0; i < N; i++)
b[i] = b[i] * MAXLV;
for (int i = 0; i < N; i++)
x[i] = x[i] * MAXLV / MAXTS;
if (ProgressBar)
{
printf("\rSolving equation done [ 100%% ][=================================================]\n");
}
free(r);
free(p);
free(z);
return 0;
}
// calculate internal force of rods
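// The section forces are assembled by superposition: the contribution of the rod's fixed-end
// reactions (RFE), plus the forces a cantilever of the same span would carry under the loads
// applied to this rod (sfCtlInternalForce), plus the end forces produced by the computed
// nodal displacements (sfDisplacementForce).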
bool sfInternalForce(int mm, int k, double xp) // mm is the section index, k is the actual rod number, xp is the distance between the section and the beginning of the rod
{
if (mm < 0)
{
sfPrintError(21);
return 1;
}
if (k < 0)
{
sfPrintError(21);
return 1;
}
double tf[6] = {0}; // tf is a temporary variable
sections[mm].IFS[0] = +rods[k - 1].RFE[0]; // calculate internal force cause by reaction force at the end of rods
sections[mm].IFS[1] = -rods[k - 1].RFE[1];
sections[mm].IFS[2] = -rods[k - 1].RFE[2];
sections[mm].IFS[3] = +rods[k - 1].RFE[3];
sections[mm].IFS[4] = -rods[k - 1].RFE[4] + rods[k - 1].RFE[2] * (rods[k - 1].LCS[0] - xp);
sections[mm].IFS[5] = +rods[k - 1].RFE[5] + rods[k - 1].RFE[1] * (rods[k - 1].LCS[0] - xp);
for (int i = 0; i < NOL; i++) // for every load
if (loads[i].NRL == k) // if load is on rod k
{
memset(tf, 0, 6 * sizeof(double)); // zero clear tf
if (sfCtlInternalForce(i, xp, tf)) // calculate internal force of cantilever beam
{
sfPrintError(13);
return 1;
}
for (int j = 0; j < 6; j++) // add internal force of cantilever into IFR
sections[mm].IFS[j] += tf[j];
}
if (sfDisplacementForce(k, tf)) // calculate end force
{
sfPrintError(14);
return 1;
}
sections[mm].IFS[0] -= tf[0]; // calculate section force cause by end force
sections[mm].IFS[1] += tf[1];
sections[mm].IFS[2] += tf[2];
sections[mm].IFS[3] -= tf[3];
sections[mm].IFS[4] += tf[4] + tf[2] * xp;
sections[mm].IFS[5] -= tf[5] - tf[1] * xp;
return 0;
}
// calculate internal force of cantilever beam
bool sfCtlInternalForce(int i, double xp, double *tf) // i is the load number, xp is the distance between the section and the beginning of the rod, tf is the internal force
{
if (i < 0)
{
sfPrintError(22);
return 1;
}
if (tf == NULL)
{
sfPrintError(22);
return 1;
}
double xq = loads[i].DLB, t = xq - xp, r = xp / xq, q = loads[i].VOL; // t and r are temporary variables
int e = loads[i].PLI;
switch (loads[i].KOL) // calculate section force according to kind of loads
{
case 1:
if (xp < xq)
{
tf[e + 1] = -q;
tf[5 - e] = q * t;
}
break;
case 2:
if (xp < xq)
{
tf[e + 1] = -q * t;
tf[5 - e] = 0.5 * q * t * t;
}
break;
case 3:
if (xp < xq)
tf[3 * e] = q;
break;
case 4:
if (xp < xq)
tf[3 * e] = q * t;
break;
case 5:
if (xp < xq)
{
tf[e + 1] = -q * (1 + r) * t / 2;
tf[5 - e] = q * t * t * (2 + r) / 6;
}
break;
case 6:
if (xp < xq)
tf[e + 4] = (2 * e - 1) * q;
break;
case 7: // a temperature change doesn't generate internal forces on a cantilever beam
break;
case 8:
break;
default:
break;
}
return 0;
}
// calculate rod end forces caused by nodal displacements
bool sfDisplacementForce(int k, double *tref) // k is the actual rod number, tref receives the end forces of the rod
{
if (k < 1)
{
sfPrintError(23);
return 1;
}
if (tref == NULL)
{
sfPrintError(23);
return 1;
}
int p[2] = {0}; // p holds the global DOF offsets of the two end nodes
double rd[36] = {0}, rdb[36] = {0}, t[36] = {0}; // rd is the local stiffness block, rdb a work array, t the transformation matrix
memset(tref, 0, 6 * sizeof(double));
if (sfBuildTrans(k - 1, t)) // calculate transpose matrix
{
sfPrintError(10);
return 1;
}
p[0] = 6 * (rods[k - 1].BNR - NFIN - 1); // match the displacements with the nodes
p[1] = 6 * (rods[k - 1].ENR - NFIN - 1);
for (int i = 0; i < 2; i++)
{
if (p[i] >= 0) // determine free node
{
if (sfBuildLocalStiff(k - 1, 2 * i + 1, rd)) // build unit stiffness matrix
{
sfPrintError(9);
return 1;
}
memset(rdb, 0, 36 * sizeof(double)); // zero clean rdb
for (int j = 0; j < 6; j++) // rd times transposition of transpose matrix
for (int m = 0; m < 6; m++)
for (int n = 0; n < 6; n++)
rdb[j * 6 + m] += rd[j * 6 + n] * t[m * 6 + n];
for (int j = 0; j < 6; j++) // rdb times DON
for (int m = 0; m < 6; m++)
tref[j] += rdb[j * 6 + m] * Displacement[p[i] + m];
}
else // fixed node
for (int j = 0; j < 3; j++)
tref[j] += 0;
}
return 0;
}
// print"----------------------------------------"
bool sfPrintLine()
{
printf("--------------------------------------------------------------------------\n");
return 0;
}
// print"****************************************"
bool sfPrintLine2()
{
printf("**************************************************************************\n");
return 0;
}
// print error
bool sfPrintError(int error)
{
printf("ERROR:\t");
switch (error)
{
case 1:
printf("Data input failed!\n");
break;
case 2:
printf("Building total stiffness matrix failed!\n");
break;
case 3:
printf("Building load vector failed!\n");
break;
case 4:
printf("Solving equation failed!\n");
break;
case 5:
printf("Calculating internal force failed!\n");
break;
case 6:
printf("Calculating length, cosine and sine failed!\n");
break;
case 7:
printf("Building unit stiffness matrix failed!\n");
break;
case 8:
printf("The length of a rod is too small!\n");
break;
case 9:
printf("Building local stiffness matrix filed!\n");
break;
case 10:
printf("Building transpose matrix failed!\n");
break;
case 11:
printf("Calculating reaction force failed!\n");
break;
case 12:
printf("There is something wrong in the equation!\n");
break;
case 13:
printf("calculating internal force of cantilever beam failed!\n");
break;
case 14:
printf("Calculating end force failed!\n");
break;
case 15:
printf("Allocating total stiffness matrix failed!\n");
break;
case 16:
printf("There is something wrong in building unit stiffness matrix!\n");
break;
case 17:
printf("There is something wrong in building local stiffness matrix!\n");
break;
case 18:
printf("There is something wrong in building transpose matrix failed!\n");
break;
case 19:
printf("There is something wrong in building load vector!\n");
break;
case 20:
printf("There is something wrong in calculating reaction force!\n");
break;
case 21:
printf("There is something wrong in calculating internal force!\n");
break;
case 22:
printf("There is something wrong in calculating internal force of cantilever!\n");
break;
case 23:
printf("There is something wrong in calculating internal force of displacement!\n");
break;
case 24:
printf("!\n");
break;
case 25:
printf("!\n");
break;
default:
break;
}
printf("There is at least one error in your file, please check it and try it one more time.\n");
return 0;
}
public:
SpaceFrame();
SpaceFrame(SpaceFrame &);
~SpaceFrame();
// read data from .csv
bool sfInput();
// calculate
bool sfCalculate(bool, bool);
// output data
bool sfOutput();
// create circular structure
bool sfCircularStructure(int, int, int);
};
SpaceFrame::SpaceFrame()
{
EPS = 1e-15;
MAXTS = 0;
MAXLV = 0;
TNN = 0; // total number of nodes
NFIN = 0; // number of fixed nodes
NFRN = 0; // number of free nodes
NOR = 0; // number of rods
NOL = 0; // number of loads
NOS = 0; // number of sections
nodes = NULL; // parameters of nodes
rods = NULL; // parameters of rods
loads = NULL; // parameters of loads
sections = NULL; // parameters of sections
TotalStiffness = NULL; // total stiffness
LoadVector = NULL; // load vector
Displacement = NULL; // the displacement of nodes
IV = NULL; // the location of diagonal element
NSI = 0; // upper limit
MAXIBDW = 0; // half bandwidth
ProgressBar = 1; // open progress bar
Parallel = 1; // open parallel
}
SpaceFrame::SpaceFrame(SpaceFrame &Frame)
{
EPS = Frame.EPS;
MAXTS = Frame.MAXTS;
MAXLV = Frame.MAXLV;
TNN = Frame.TNN;
NFIN = Frame.NFIN;
NFRN = Frame.NFRN;
NOR = Frame.NOR;
NOL = Frame.NOL;
NOS = Frame.NOS;
nodes = new Node[TNN]();
if (Frame.nodes != NULL)
memcpy(nodes, Frame.nodes, TNN * sizeof(Node));
rods = new Rod[NOR]();
if (Frame.rods != NULL)
memcpy(rods, Frame.rods, NOR * sizeof(Rod));
loads = new Load[NOL]();
if (Frame.loads != NULL)
memcpy(loads, Frame.loads, NOL * sizeof(Load));
sections = new Section[NOS]();
if (Frame.sections != NULL)
memcpy(sections, Frame.sections, NOS * sizeof(Section));
int dof = 6 * NFRN;
IV = new int[dof]();
if (Frame.IV != NULL)
memcpy(IV, Frame.IV, dof * sizeof(int));
NSI = Frame.NSI;
MAXIBDW = Frame.MAXIBDW;
TotalStiffness = new double[NSI]();
if (Frame.TotalStiffness != NULL)
memcpy(TotalStiffness, Frame.TotalStiffness, NSI * sizeof(double));
LoadVector = new double[dof]();
if (Frame.LoadVector != NULL)
memcpy(LoadVector, Frame.LoadVector, dof * sizeof(double));
Displacement = new double[dof]();
if (Frame.Displacement != NULL)
memcpy(Displacement, Frame.Displacement, dof * sizeof(double));
ProgressBar = Frame.ProgressBar;
Parallel = Frame.Parallel;
}
SpaceFrame::~SpaceFrame()
{
delete[] nodes;
nodes = NULL;
delete[] rods;
rods = NULL;
delete[] loads;
loads = NULL;
delete[] sections;
sections = NULL;
delete[] TotalStiffness;
TotalStiffness = NULL;
delete[] LoadVector;
LoadVector = NULL;
delete[] Displacement;
Displacement = NULL;
delete[] IV;
IV = NULL;
}
bool SpaceFrame::sfInput()
{
FILE *fp = NULL; // Define the file pointer
char *line = 0, *data = 0; // Define the line string and separated string
char temporSpace[1000000]; // Temporary storage space for one line of the file
int rowIndex = 0, columnIndex = 0; // Reset the number of rows to zero, reset the number of columns to zero
const char DIVIDE[] = ","; // Set the separator as a ','
if ((fp = fopen("source&result/sf_test.csv", "r")) == NULL) // Abort if the file cannot be opened
{
printf("There is no such file!");
return 0;
}
fseek(fp, 0L, SEEK_SET); // Move the file pointer to the beginning of the file
while ((line = fgets(temporSpace, sizeof(temporSpace), fp)) != NULL) // The loop continues when the end of the file is not read
{
data = strtok(line, DIVIDE); // Split strings with a ',' as a separator
while (data != NULL) // Read the data of each row
{
if (strcmp(data, "END") == 0) // When the keyword 'END' is read, the reading process will be shut down
{
fclose(fp); // Close the file
fp = NULL; // Reset the file pointer
cout << "Inputting data succeeded!\n";
return 0;
}
if (columnIndex++ == 0) // Skip the saving of the first column
{
data = strtok(NULL, DIVIDE); // Reset data
continue;
}
switch (rowIndex) // Store variables of each column in different ways
{
case 0:
break;
case 1:
if (columnIndex == 2)
TNN = atoi(data);
break;
case 2:
if (columnIndex == 2)
NFIN = atoi(data);
NFRN = TNN - NFIN;
break;
case 3:
if (columnIndex == 2)
NOR = atoi(data);
break;
case 4:
if (columnIndex == 2)
NOL = atoi(data);
break;
case 5:
if (columnIndex == 2)
{
NOS = atoi(data);
if (nodes != NULL)
this->~SpaceFrame();
nodes = new Node[TNN]();
rods = new Rod[NOR]();
loads = new Load[NOL]();
sections = new Section[NOS]();
}
break;
case 6:
if (columnIndex - 2 < TNN)
nodes[columnIndex - 2].XCN = atof(data);
break;
case 7:
if (columnIndex - 2 < TNN)
nodes[columnIndex - 2].YCN = atof(data);
break;
case 8:
if (columnIndex - 2 < TNN)
nodes[columnIndex - 2].ZCN = atof(data);
break;
case 9:
if (columnIndex - 2 < NOR)
rods[columnIndex - 2].BNR = atoi(data);
break;
case 10:
if (columnIndex - 2 < NOR)
rods[columnIndex - 2].ENR = atoi(data);
break;
case 11:
if (columnIndex - 2 < NOR)
rods[columnIndex - 2].ELASTIC = atof(data);
break;
case 12:
if (columnIndex - 2 < NOR)
rods[columnIndex - 2].SHEAR = atof(data);
break;
case 13:
if (columnIndex - 2 < NOR)
rods[columnIndex - 2].AREA = atof(data);
break;
case 14:
if (columnIndex - 2 < NOR)
rods[columnIndex - 2].IMY = atof(data);
break;
case 15:
if (columnIndex - 2 < NOR)
rods[columnIndex - 2].IMZ = atof(data);
break;
case 16:
if (columnIndex - 2 < NOR)
rods[columnIndex - 2].THETA = atof(data);
break;
case 17:
if (columnIndex - 2 < NOL)
loads[columnIndex - 2].NRL = atoi(data);
break;
case 18:
if (columnIndex - 2 < NOL)
loads[columnIndex - 2].PLI = atoi(data);
break;
case 19:
if (columnIndex - 2 < NOL)
loads[columnIndex - 2].KOL = atoi(data);
break;
case 20:
if (columnIndex - 2 < NOL)
loads[columnIndex - 2].VOL = atof(data);
break;
case 21:
if (columnIndex - 2 < NOL)
loads[columnIndex - 2].DLB = atof(data);
break;
case 22:
if (columnIndex - 2 < NOS)
sections[columnIndex - 2].NRS = atoi(data);
break;
case 23:
if (columnIndex - 2 < NOS)
sections[columnIndex - 2].DSB = atof(data);
break;
} // input finished
data = strtok(NULL, DIVIDE); // Reset data
}
rowIndex++; // RowIndex steps forward once
columnIndex = 0; // Reset columnIndex
}
fclose(fp); // Close the file
fp = NULL; // Reset the file pointer
return 0;
}
bool SpaceFrame::sfOutput()
{
if (false) // console
{
printf("\t\t\tCalculation Of Space Rigid Frame\n");
sfPrintLine();
printf("\t\tTNN = %d\t\t\tNFIN = %d\n\t\tNFRN = %d\t\tNOR = %d\n", TNN, NFIN, NFRN, NOR);
printf("\t\tNOL = %d\t\t\tNOS = %d\n", NOL, NOS);
sfPrintLine();
printf("NUMBER OF NODES Coordinate-X Coordinate-Y Coordinate-Z\n");
for (int i = 0; i < TNN; i++)
printf("%15d%15.7f%15.7f%15.7f\n", i + 1, nodes[i].XCN, nodes[i].YCN, nodes[i].ZCN);
sfPrintLine();
printf("NUMBER OF NODES LEFT NODES RIGHT NODES Elastic modulus Shear modulus Area Inertia moment Y axis Inertia moment Z axis\n");
for (int i = 0; i < NOR; i++)
printf("%15d%15d%15d%15.0f%15.0f%11.4f%16.5f%23.5f\n", i + 1, rods[i].BNR, rods[i].ENR, rods[i].ELASTIC, rods[i].SHEAR, rods[i].AREA, rods[i].IMY, rods[i].IMZ);
sfPrintLine();
printf("NUMBER OF SECTIONS PLI DLB\n");
for (int i = 0; i < NOS; i++)
printf("%15d%15d%15.7f\n", i + 1, sections[i].NRS, sections[i].DSB);
sfPrintLine();
printf("Calculating......\nThe results are as follows: \n");
sfPrintLine();
printf("NUMBER OF NODES Displacement-X Displacement-Y Displacement-Z Diversion-X Diversion-Y Diversion-Z\n");
for (int i = NFIN; i < TNN; i++)
printf("%15d%15.7f%15.7f%15.7f%15.7f%15.7f%15.7f\n", i + 1, Displacement[6 * (i - NFIN)], Displacement[6 * (i - NFIN) + 1], Displacement[6 * (i - NFIN) + 2], Displacement[6 * (i - NFIN) + 3], Displacement[6 * (i - NFIN) + 4], Displacement[6 * (i - NFIN) + 5]);
sfPrintLine();
printf("NUMBER OF SECTIONS Axial force-X Shear force-Y Shear force-Z Torque-X Bending moment-Y Bending moment-Z\n");
for (int i = 0; i < NOS; i++)
printf("%15d%15.7f%15.7f%15.7f%15.7f%15.7f%15.7f\n", i + 1, sections[i].IFS[0], sections[i].IFS[1], sections[i].IFS[2], sections[i].IFS[3], sections[i].IFS[4], sections[i].IFS[5]);
}
if (true) // file
{
FILE *fp = NULL;
fp = fopen("source&result/sfResultClass.csv", "w");
fprintf(fp, "TITLE\n");
fprintf(fp, "TNN,%d\nNFIN,%d\nNFRN,%d\nNOR,%d\nNOL,%d\nNOS,%d", TNN, NFIN, NFRN, NOR, NOL, NOS);
// ------------NODES-------------------------------------------------
fprintf(fp, "\nNODES,");
for (int i = 0; i < TNN; i++)
fprintf(fp, "%d,", i + 1);
fprintf(fp, "\nCON,");
for (int i = 0; i < TNN; i++)
fprintf(fp, "(%f %f %f),", nodes[i].XCN, nodes[i].YCN, nodes[i].ZCN);
// ------------RODS-------------------------------------------------
fprintf(fp, "\nRODS,");
for (int i = 0; i < NOR; i++)
fprintf(fp, "%d,", i + 1);
fprintf(fp, "\nBNR->ENR,");
for (int i = 0; i < NOR; i++)
fprintf(fp, "p%d -> p%d,", rods[i].BNR, rods[i].ENR);
fprintf(fp, "\nELASTIC,");
for (int i = 0; i < NOR; i++)
fprintf(fp, "%f,", rods[i].ELASTIC);
fprintf(fp, "\nSHEAR,");
for (int i = 0; i < NOR; i++)
fprintf(fp, "%f,", rods[i].SHEAR);
fprintf(fp, "\nAREA,");
for (int i = 0; i < NOR; i++)
fprintf(fp, "%f,", rods[i].AREA);
fprintf(fp, "\nIMY,");
for (int i = 0; i < NOR; i++)
fprintf(fp, "%f,", rods[i].IMY);
fprintf(fp, "\nIMZ,");
for (int i = 0; i < NOR; i++)
fprintf(fp, "%f,", rods[i].IMZ);
fprintf(fp, "\nTHETA,");
for (int i = 0; i < NOR; i++)
fprintf(fp, "%f,", rods[i].THETA);
// ------------LOADS-------------------------------------------------
fprintf(fp, "\nLOADS,");
for (int i = 0; i < NOL; i++)
fprintf(fp, "%d,", i + 1);
fprintf(fp, "\nPLI,");
for (int i = 0; i < NOL; i++)
fprintf(fp, "%d,", loads[i].PLI);
fprintf(fp, "\nNRL,");
for (int i = 0; i < NOL; i++)
fprintf(fp, "%d,", loads[i].NRL);
fprintf(fp, "\nKOL,");
for (int i = 0; i < NOL; i++)
fprintf(fp, "%d,", loads[i].KOL);
fprintf(fp, "\nVOL,");
for (int i = 0; i < NOL; i++)
fprintf(fp, "%f,", loads[i].VOL);
fprintf(fp, "\nDLB,");
for (int i = 0; i < NOL; i++)
fprintf(fp, "%f,", loads[i].DLB);
// -----------SECTIONS-------------------------------------------------
fprintf(fp, "\nNOS,");
for (int i = 0; i < NOS; i++)
fprintf(fp, "%d,", i + 1);
fprintf(fp, "\nNRS,");
for (int i = 0; i < NOS; i++)
fprintf(fp, "%d,", sections[i].NRS);
fprintf(fp, "\nDSB,");
for (int i = 0; i < NOS; i++)
fprintf(fp, "%f,", sections[i].DSB);
// -----------RESULTS OF NODES-----------------------------------------
fprintf(fp, "\nNFRN,");
for (int i = 0; i < NFRN; i++)
fprintf(fp, "x%d,y%d,z%d,", i + NFIN, i + NFIN, i + NFIN);
fprintf(fp, "\nDISPLACEMENT,");
for (int i = 0; i < NFRN; i++)
fprintf(fp, "%f,%f,%f,", Displacement[6 * i], Displacement[6 * i + 1], Displacement[6 * i + 2]);
fprintf(fp, "\nDIVERSION,");
for (int i = 0; i < NFRN; i++)
fprintf(fp, "%f,%f,%f,", Displacement[6 * i + 3], Displacement[6 * i + 4], Displacement[6 * i + 5]);
// -----------RESULTS OF SECTIONS--------------------------------------
fprintf(fp, "\nNOS,");
for (int i = 0; i < NOS; i++)
fprintf(fp, "x%d(AXIAL),y%d(SHEAR),z%d(SHEAR),", i + NOS + 1, i + NOS + 1, i + NOS + 1);
fprintf(fp, "\nAXIAL&SHEAR FORCE,");
for (int i = 0; i < NOS; i++)
fprintf(fp, "%f,%f,%f,", sections[i].IFS[0], sections[i].IFS[1], sections[i].IFS[2]);
fprintf(fp, "\nTORQUE&BENDING MOMENT,");
for (int i = 0; i < NOS; i++)
fprintf(fp, "%f,%f,%f,", sections[i].IFS[3], sections[i].IFS[4], sections[i].IFS[5]);
fclose(fp);
}
return 0;
}
bool SpaceFrame::sfCalculate(bool parallel = true, bool progress_bar = true)
{
ProgressBar = progress_bar, Parallel = parallel;
if (sfLCosSin()) // calculate the length, cosine and sine of all rods
{
sfPrintError(6);
return 1;
}
else
printf("Calculating length, cosine and sine succeed!\n");
if (sfAllocate())
{
sfPrintError(15);
return 1;
}
else
printf("Allocating Variable Bandwith Matrix succeed!\n");
if (sfBuildTotalStiff()) // build total stiffness matrix
{
sfPrintError(2);
return 1;
}
else
printf("Building total stiffness matrix succeeded!\n");
if (sfBuildLoadVector(LoadVector)) // build load stiffness vector
{
sfPrintError(3);
return 1;
}
else
printf("Building load vector succeeded!\n");
if (Parallel)
{
if (sfConjugateGradientPar(TotalStiffness, LoadVector, Displacement, 6 * NFRN)) // solve matrix equation
{
sfPrintError(4);
return 1;
}
else
printf("Solving equation succeeded!\n");
}
else
{
if (sfConjugateGradient(TotalStiffness, LoadVector, Displacement, 6 * NFRN)) // solve matrix equation
{
sfPrintError(4);
return 1;
}
else
printf("Solving equation succeeded!\n");
}
for (int i = 0; i < NOS; i++)
if (sfInternalForce(i, sections[i].NRS, sections[i].DSB)) // calculate the internal force of each rods
{
sfPrintError(5);
return 1;
}
cout << "Outputing data succeed!\n";
return 0;
}
bool SpaceFrame::sfCircularStructure(int m, int n, int l)
{
FILE *fp = 0;
fp = fopen("source&result/sf_test.csv", "w");
fprintf(fp, "Stress Test, degree of freedom is %d,\n", ((m + 1) * (n + 1) * (l + 1) - (m + 1) * (n + 1)) * 6);
fprintf(fp, "TNN,%d,\n", (m + 1) * (n + 1) * (l + 1));
fprintf(fp, "NFIN,%d,\n", (m + 1) * (n + 1));
int nor = ((2 * m + 1) * (2 * n + 1) - m * n) * l;
fprintf(fp, "NOR,%d,\n", nor);
fprintf(fp, "NOL,%d,\n", (m + 1) * (n + 1));
fprintf(fp, "NOS,%d,\n", (m + 1) * (n + 1));
fprintf(fp, "XCN,");
for (int i = 0; i < l + 1; i++)
for (int j = 0; j < n + 1; j++)
for (int k = 0; k < m + 1; k++)
fprintf(fp, "%d,", k);
fprintf(fp, "\n");
fprintf(fp, "YCN,");
for (int i = 0; i < l + 1; i++)
for (int j = 0; j < n + 1; j++)
for (int k = 0; k < m + 1; k++)
fprintf(fp, "%d,", j);
fprintf(fp, "\n");
fprintf(fp, "ZCN,");
for (int i = 0; i < l + 1; i++)
for (int j = 0; j < n + 1; j++)
for (int k = 0; k < m + 1; k++)
fprintf(fp, "%d,", i);
fprintf(fp, "\n");
fprintf(fp, "BNR,");
for (int i = 0; i < l; i++)
{
for (int j = 0; j < (m + 1) * (n + 1); j++)
fprintf(fp, "%d,", j + 1 + i * (m + 1) * (n + 1));
for (int j = 0; j < (m + 1) * n; j++)
fprintf(fp, "%d,", j + 1 + (i + 1) * (m + 1) * (n + 1));
for (int j = 0; j < n + 1; j++)
for (int k = 0; k < m; k++)
fprintf(fp, "%d,", k + 1 + j * (m + 1) + (i + 1) * (m + 1) * (n + 1));
}
fprintf(fp, "\n");
fprintf(fp, "ENR,");
for (int i = 0; i < l; i++)
{
for (int j = 0; j < (m + 1) * (n + 1); j++)
fprintf(fp, "%d,", j + 1 + (i + 1) * (m + 1) * (n + 1));
for (int j = 0; j < (m + 1) * n; j++)
fprintf(fp, "%d,", j + 1 + m + 1 + (i + 1) * (m + 1) * (n + 1));
for (int j = 0; j < n + 1; j++)
for (int k = 0; k < m; k++)
fprintf(fp, "%d,", k + 2 + j * (m + 1) + (i + 1) * (m + 1) * (n + 1));
}
fprintf(fp, "\n");
fprintf(fp, "ELASTIC,");
for (int i = 0; i < nor; i++)
{
fprintf(fp, "%d,", 210000000 + 100000 * (rand() % 1000));
}
fprintf(fp, "\n");
fprintf(fp, "SHEAR,");
for (int i = 0; i < nor; i++)
{
fprintf(fp, "%d,", 80769000);
}
fprintf(fp, "\n");
fprintf(fp, "AREA,");
for (int i = 0; i < nor; i++)
{
fprintf(fp, "%f,", 0.007854);
}
fprintf(fp, "\n");
fprintf(fp, "IMY,");
for (int i = 0; i < nor; i++)
{
fprintf(fp, "%.10f,", 0.0000040001 + 0.0000000001 * (rand() % 10000));
}
fprintf(fp, "\n");
fprintf(fp, "IMZ,");
for (int i = 0; i < nor; i++)
{
fprintf(fp, "%.10f,", 0.0000040001 + 0.0000000001 * (rand() % 10000));
}
fprintf(fp, "\n");
fprintf(fp, "THETA,");
for (int i = 0; i < nor; i++)
{
fprintf(fp, "%d,", 0);
}
fprintf(fp, "\n");
fprintf(fp, "NRL,");
for (int i = 0; i < (m + 1) * (n + 1); i++)
{
fprintf(fp, "%d,", i + 1 + ((2 * m + 1) * (2 * n + 1) - m * n) * (l - 1));
}
fprintf(fp, "\n");
fprintf(fp, "PLI,");
for (int i = 0; i < (m + 1) * (n + 1); i++)
{
fprintf(fp, "%d,", 0);
}
fprintf(fp, "\n");
fprintf(fp, "KOL,");
for (int i = 0; i < (m + 1) * (n + 1); i++)
{
fprintf(fp, "%d,", 3);
}
fprintf(fp, "\n");
fprintf(fp, "VOL,");
for (int i = 0; i < (m + 1) * (n + 1); i++)
{
fprintf(fp, "%d,", 1000 + rand() % 1000);
}
fprintf(fp, "\n");
fprintf(fp, "DLB,");
for (int i = 0; i < (m + 1) * (n + 1); i++)
{
fprintf(fp, "%d,", 1);
}
fprintf(fp, "\n");
fprintf(fp, "NRS,");
for (int i = 0; i < (m + 1) * (n + 1); i++)
{
fprintf(fp, "%d,", i + 1);
}
fprintf(fp, "\n");
fprintf(fp, "DSB,");
for (int i = 0; i < (m + 1) * (n + 1); i++)
{
fprintf(fp, "%f,", 0.5);
}
fprintf(fp, "\nEND,");
fclose(fp);
fp = NULL;
return 0;
}
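// A minimal usage sketch (not part of the original source, added for illustration): judging from
// the public interface above, a driver would presumably look like the commented code below. The
// file paths come from the function bodies; the arguments of sfCircularStructure (grid size m x n
// and number of storeys l) and everything else here are assumptions.
//
// int main()
// {
//     SpaceFrame frame;
//     frame.sfCircularStructure(4, 4, 3); // optionally generate a stress-test model in sf_test.csv
//     if (frame.sfInput())                // read the model from source&result/sf_test.csv (0 means success)
//         return 1;
//     frame.sfCalculate(true, true);      // parallel solver, progress bar enabled
//     frame.sfOutput();                   // write results to source&result/sfResultClass.csv
//     return 0;
// }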
|
pr68128-1.c | /* PR tree-optimization/68128 */
/* { dg-do compile } */
/* { dg-options "-Ofast -fopenmp -fdump-tree-vect-details" } */
/* { dg-additional-options "-mavx" { target i?86-*-* x86_64-*-* } } */
/* Make sure the following loop is vectorized even when not using
firstprivate variables for scalar vars that are not modified
in the parallel region. */
void
foo (float *u, float v, float w, float x, float y, float z, float t)
{
int i, j, k, l;
float a, *b, c, s, e;
#pragma omp parallel for private (i, j, k, l, a, b, c, s, e)
for (j = 0; j < 1024; j++)
{
k = j * 64;
l = j * 64 + 63;
a = v + j * w;
b = u + j * 64;
for (i = k; i <= l; i++, b++, a += w)
{
c = a * a + y;
s = (1.f - c * x) * (1.f - c * x);
e = t * (1 / __builtin_sqrtf (c)) * s;
*b += (c < z ? e : 0);
}
}
}
/* { dg-final { scan-tree-dump "note: vectorized 1 loops in function" "vect" { target i?86-*-* x86_64-*-* } } } */
|
heat.c | /*********************************************************************************/
/* */
/* Animation of heat equation in a planar domain */
/* */
/* N. Berglund, May 2021 */
/* */
/* Feel free to reuse, but if doing so it would be nice to drop a */
/* line to nils.berglund@univ-orleans.fr - Thanks! */
/* */
/* compile with */
/* gcc -o heat heat.c */
/* -L/usr/X11R6/lib -ltiff -lm -lGL -lGLU -lX11 -lXmu -lglut -O3 -fopenmp */
/* */
/* To make a video, set MOVIE to 1 and create subfolder tif_heat */
/* It may be possible to increase parameter PAUSE */
/* */
/* create movie using */
/* ffmpeg -i wave.%05d.tif -vcodec libx264 wave.mp4 */
/* */
/*********************************************************************************/
/*********************************************************************************/
/* */
/* NB: The algorithm used to simulate the heat equation is highly parallelizable */
/* One could make it much faster by using a GPU */
/* */
/*********************************************************************************/
#include <math.h>
#include <string.h>
#include <GL/glut.h>
#include <GL/glu.h>
#include <unistd.h>
#include <sys/types.h>
#include <tiffio.h> /* Sam Leffler's libtiff library. */
#include <omp.h>
#define MOVIE 0 /* set to 1 to generate movie */
/* General geometrical parameters */
#define WINWIDTH 1280 /* window width */
#define WINHEIGHT 720 /* window height */
#define NX 1280 /* number of grid points on x axis */
#define NY 720 /* number of grid points on y axis */
// #define NX 640 /* number of grid points on x axis */
// #define NY 360 /* number of grid points on y axis */
/* setting NX to WINWIDTH and NY to WINHEIGHT increases resolution */
/* but will multiply run time by 4 */
// #define XMIN -2.0
// #define XMAX 2.0 /* x interval */
#define XMIN -2.5
#define XMAX 1.5 /* x interval */
#define YMIN -1.125
#define YMAX 1.125 /* y interval for 9/16 aspect ratio */
#define JULIA_SCALE 0.5 /* scaling for Julia sets */
/* Choice of the billiard table */
#define B_DOMAIN 26 /* choice of domain shape, see list in global_pdes.c */
#define CIRCLE_PATTERN 0 /* pattern of circles, see list in global_pdes.c */
#define P_PERCOL 0.25 /* probability of having a circle in C_RAND_PERCOL arrangement */
#define NPOISSON 300 /* number of points for Poisson C_RAND_POISSON arrangement */
#define RANDOM_POLY_ANGLE 0 /* set to 1 to randomize angle of polygons */
#define LAMBDA -1.0 /* parameter controlling the dimensions of domain */
#define MU 0.1 /* parameter controlling the dimensions of domain */
#define NPOLY 6 /* number of sides of polygon */
#define APOLY 1.0 /* angle by which to turn polygon, in units of Pi/2 */
#define MDEPTH 5 /* depth of computation of Menger gasket */
#define MRATIO 5 /* ratio defining Menger gasket */
#define MANDELLEVEL 1000 /* iteration level for Mandelbrot set */
#define MANDELLIMIT 10.0 /* limit value for approximation of Mandelbrot set */
#define FOCI 1 /* set to 1 to draw focal points of ellipse */
#define NGRIDX 15 /* number of grid point for grid of disks */
#define NGRIDY 20 /* number of grid point for grid of disks */
#define X_SHOOTER -0.2
#define Y_SHOOTER -0.6
#define X_TARGET 0.4
#define Y_TARGET 0.7 /* shooter and target positions in laser fight */
#define ISO_XSHIFT_LEFT -1.65
#define ISO_XSHIFT_RIGHT 0.4
#define ISO_YSHIFT_LEFT -0.05
#define ISO_YSHIFT_RIGHT -0.05
#define ISO_SCALE 0.85 /* coordinates for isospectral billiards */
/* You can add more billiard tables by adapting the functions */
/* xy_in_billiard and draw_billiard in sub_wave.c */
/* Physical parameters of heat equation */
// #define DT 0.00001
#define DT 0.000004
// #define DT 0.000002
// #define DT 0.00000002
// #define DT 0.000000005
#define VISCOSITY 10.0
#define T_OUT 2.0 /* outside temperature */
#define T_IN 0.0 /* inside temperature */
// #define T_OUT 0.0 /* outside temperature */
// #define T_IN 2.0 /* inside temperature */
#define SPEED 0.0 /* speed of drift to the right */
/* Boundary conditions, see list in global_pdes.c */
#define B_COND 1
/* Parameters for length and speed of simulation */
#define NSTEPS 1000 /* number of frames of movie */
#define NVID 50 /* number of iterations between images displayed on screen */
// #define NVID 100 /* number of iterations between images displayed on screen */
#define NSEG 100 /* number of segments of boundary */
#define BOUNDARY_WIDTH 1 /* width of billiard boundary */
#define PAUSE 100 /* number of frames after which to pause */
#define PSLEEP 1 /* sleep time during pause */
#define SLEEP1 2 /* initial sleeping time */
#define SLEEP2 1 /* final sleeping time */
/* For debugging purposes only */
#define FLOOR 0 /* set to 1 to limit wave amplitude to VMAX */
#define VMAX 10.0 /* max value of wave amplitude */
/* Field representation */
#define FIELD_REP 1
#define F_INTENSITY 0 /* color represents intensity */
#define F_GRADIENT 1 /* color represents norm of gradient */
#define DRAW_FIELD_LINES 1 /* set to 1 to draw field lines */
#define FIELD_LINE_WIDTH 1 /* width of field lines */
#define N_FIELD_LINES 120 /* number of field lines */
#define FIELD_LINE_FACTOR 120 /* factor controlling precision when computing origin of field lines */
/* Color schemes, see list in global_pdes.c */
#define COLOR_PALETTE 10 /* Color palette, see list in global_pdes.c */
#define BLACK 1 /* black background */
#define COLOR_SCHEME 1 /* choice of color scheme */
#define SCALE 0 /* set to 1 to adjust color scheme to variance of field */
// #define SLOPE 0.1 /* sensitivity of color on wave amplitude */
#define SLOPE 0.2 /* sensitivity of color on wave amplitude */
#define ATTENUATION 0.0 /* exponential attenuation coefficient of contrast with time */
#define E_SCALE 100.0 /* scaling factor for energy representation */
#define COLORHUE 260 /* initial hue of water color for scheme C_LUM */
#define COLORDRIFT 0.0 /* how much the color hue drifts during the whole simulation */
#define LUMMEAN 0.5 /* amplitude of luminosity variation for scheme C_LUM */
#define LUMAMP 0.3 /* amplitude of luminosity variation for scheme C_LUM */
// #define HUEMEAN 180.0 /* mean value of hue for color scheme C_HUE */
// #define HUEAMP -180.0 /* amplitude of variation of hue for color scheme C_HUE */
#define HUEMEAN 359.0 /* mean value of hue for color scheme C_HUE */
#define HUEAMP -359.0 /* amplitude of variation of hue for color scheme C_HUE */
// #define HUEMEAN 270.0 /* mean value of hue for color scheme C_HUE */
// #define HUEAMP -130.0 /* amplitude of variation of hue for color scheme C_HUE */
#define DRAW_COLOR_SCHEME 0 /* set to 1 to plot the color scheme */
#define COLORBAR_RANGE 2.0 /* scale of color scheme bar */
#define COLORBAR_RANGE_B 12.0 /* scale of color scheme bar for 2nd part */
#define ROTATE_COLOR_SCHEME 0 /* set to 1 to draw color scheme horizontally */
#include "global_pdes.c"
#include "sub_wave.c"
double courant2; /* Courant parameter squared */
double dx2; /* spatial step size squared */
double intstep; /* integration step */
double intstep1; /* integration step used in absorbing boundary conditions */
void init_gaussian(double x, double y, double mean, double amplitude, double scalex,
double *phi[NX], short int * xy_in[NX])
/* initialise field with gaussian at position (x,y) */
{
int i, j, in;
double xy[2], dist2, module, phase, scale2;
scale2 = scalex*scalex;
printf("Initialising field\n");
for (i=0; i<NX; i++)
for (j=0; j<NY; j++)
{
ij_to_xy(i, j, xy);
xy_in[i][j] = xy_in_billiard(xy[0],xy[1]);
in = xy_in[i][j];
if (in == 1)
{
dist2 = (xy[0]-x)*(xy[0]-x) + (xy[1]-y)*(xy[1]-y);
module = amplitude*exp(-dist2/scale2);
if (module < 1.0e-15) module = 1.0e-15;
phi[i][j] = mean + module/scalex;
} /* boundary temperatures */
else if (in >= 2) phi[i][j] = T_IN*pow(0.75, (double)(in-2));
// else if (in >= 2) phi[i][j] = T_IN*pow(1.0 - 0.5*(double)(in-2), (double)(in-2));
// else if (in >= 2) phi[i][j] = T_IN*(1.0 - (double)(in-2)/((double)MDEPTH))*(1.0 - (double)(in-2)/((double)MDEPTH));
else phi[i][j] = T_OUT;
}
}
void init_julia_set(double *phi[NX], short int * xy_in[NX])
/* change Julia set boundary condition */
{
int i, j, in;
double xy[2], dist2, module, phase, scale2;
// printf("Changing Julia set\n");
for (i=0; i<NX; i++)
for (j=0; j<NY; j++)
{
ij_to_xy(i, j, xy);
xy_in[i][j] = xy_in_billiard(xy[0],xy[1]);
in = xy_in[i][j];
if (in >= 2) phi[i][j] = T_IN;
}
}
/*********************/
/* animation part */
/*********************/
void compute_gradient(double *phi[NX], double *nablax[NX], double *nablay[NX])
/* compute the gradient of the field */
{
int i, j, iplus, iminus, jplus, jminus;
double dx;
dx = (XMAX-XMIN)/((double)NX);
for (i=0; i<NX; i++)
for (j=0; j<NY; j++)
{
iplus = i+1; if (iplus == NX) iplus = NX-1;
iminus = i-1; if (iminus == -1) iminus = 0;
jplus = j+1; if (jplus == NY) jplus = NY-1;
jminus = j-1; if (jminus == -1) jminus = 0;
nablax[i][j] = (phi[iplus][j] - phi[iminus][j])/dx;
nablay[i][j] = (phi[i][jplus] - phi[i][jminus])/dx;
}
}
void draw_field_line(double x, double y, short int *xy_in[NX], double *nablax[NX],
double *nablay[NX], double delta, int nsteps)
/* draw a field line of the gradient, starting in (x,y) */
{
double x1, y1, x2, y2, pos[2], nabx, naby, norm2, norm;
int i = 0, ij[2], cont = 1;
glColor3f(1.0, 1.0, 1.0);
// glColor3f(0.0, 0.0, 0.0);
glLineWidth(FIELD_LINE_WIDTH);
x1 = x;
y1 = y;
// printf("Drawing field line \n");
glEnable(GL_LINE_SMOOTH);
glBegin(GL_LINE_STRIP);
xy_to_pos(x1, y1, pos);
glVertex2d(pos[0], pos[1]);
i = 0;
while ((cont)&&(i < nsteps))
{
xy_to_ij(x1, y1, ij);
if (ij[0] < 0) ij[0] = 0;
if (ij[0] > NX-1) ij[0] = NX-1;
if (ij[1] < 0) ij[1] = 0;
if (ij[1] > NY-1) ij[1] = NY-1;
nabx = nablax[ij[0]][ij[1]];
naby = nablay[ij[0]][ij[1]];
norm2 = nabx*nabx + naby*naby;
if (norm2 > 1.0e-14)
{
/* avoid too large step size */
if (norm2 < 1.0e-9) norm2 = 1.0e-9;
norm = sqrt(norm2);
x1 = x1 + delta*nabx/norm;
y1 = y1 + delta*naby/norm;
}
else cont = 0;
if (!xy_in[ij[0]][ij[1]]) cont = 0;
/* stop if the boundary is hit */
// if (xy_in[ij[0]][ij[1]] != 1) cont = 0;
// printf("x1 = %.3lg \t y1 = %.3lg \n", x1, y1);
xy_to_pos(x1, y1, pos);
glVertex2d(pos[0], pos[1]);
i++;
}
glEnd();
}
void draw_wave(double *phi[NX], short int *xy_in[NX], double scale, int time)
/* draw the field */
{
int i, j, iplus, iminus, jplus, jminus, ij[2], counter = 0;
static int first = 1;
double rgb[3], xy[2], x1, y1, x2, y2, dx, value, angle, dangle, intens, deltaintens, sum = 0.0;
double *nablax[NX], *nablay[NX];
static double linex[N_FIELD_LINES*FIELD_LINE_FACTOR], liney[N_FIELD_LINES*FIELD_LINE_FACTOR], distance[N_FIELD_LINES*FIELD_LINE_FACTOR], integral[N_FIELD_LINES*FIELD_LINE_FACTOR + 1];
for (i=0; i<NX; i++)
{
nablax[i] = (double *)malloc(NY*sizeof(double));
nablay[i] = (double *)malloc(NY*sizeof(double));
}
/* compute the gradient */
compute_gradient(phi, nablax, nablay);
/* compute the position of origins of field lines */
if ((first)&&(DRAW_FIELD_LINES))
{
first = 0;
printf("computing linex\n");
x1 = LAMBDA + MU*1.01;
y1 = 1.0;
linex[0] = x1;
liney[0] = y1;
dangle = DPI/((double)(N_FIELD_LINES*FIELD_LINE_FACTOR));
for (i = 1; i < N_FIELD_LINES*FIELD_LINE_FACTOR; i++)
{
angle = (double)i*dangle;
x2 = LAMBDA + MU*1.01*cos(angle);
y2 = 0.5 + MU*1.01*sin(angle);
linex[i] = x2;
liney[i] = y2;
distance[i-1] = module2(x2-x1,y2-y1);
x1 = x2;
y1 = y2;
}
distance[N_FIELD_LINES*FIELD_LINE_FACTOR - 1] = module2(x2- 0.99*LAMBDA,y2);
// distance[N_FIELD_LINES*FIELD_LINE_FACTOR - 1] = module2(x2-LAMBDA,y2-0.5);
}
dx = (XMAX-XMIN)/((double)NX);
glBegin(GL_QUADS);
for (i=0; i<NX; i++)
for (j=0; j<NY; j++)
{
if (FIELD_REP == F_INTENSITY) value = phi[i][j];
else if (FIELD_REP == F_GRADIENT)
{
value = module2(nablax[i][j], nablay[i][j]);
}
if (xy_in[i][j] == 1)
{
color_scheme(COLOR_SCHEME, value, scale, time, rgb);
glColor3f(rgb[0], rgb[1], rgb[2]);
}
else glColor3f(0.0, 0.0, 0.0);
glVertex2i(i, j);
glVertex2i(i+1, j);
glVertex2i(i+1, j+1);
glVertex2i(i, j+1);
}
glEnd ();
/* draw a field line */
if (DRAW_FIELD_LINES)
{
/* compute gradient norm along boundary and its integral */
for (i = 0; i < N_FIELD_LINES*FIELD_LINE_FACTOR; i++)
{
xy_to_ij(linex[i], liney[i], ij);
intens = module2(nablax[ij[0]][ij[1]], nablay[ij[0]][ij[1]])*distance[i];
if (i > 0) integral[i] = integral[i-1] + intens;
else integral[i] = intens;
}
deltaintens = integral[N_FIELD_LINES*FIELD_LINE_FACTOR-1]/((double)N_FIELD_LINES);
// printf("delta = %.5lg\n", deltaintens);
i = 0;
draw_field_line(linex[0], liney[0], xy_in, nablax, nablay, 0.00002, 100000);
for (j = 1; j < N_FIELD_LINES+1; j++)
{
while ((integral[i] <= j*deltaintens)&&(i < N_FIELD_LINES*FIELD_LINE_FACTOR)) i++;
draw_field_line(linex[i], liney[i], xy_in, nablax, nablay, 0.00002, 100000);
counter++;
}
printf("%i lines\n", counter);
}
for (i=0; i<NX; i++)
{
free(nablax[i]);
free(nablay[i]);
}
}
void evolve_wave_half(double *phi_in[NX], double *phi_out[NX], short int *xy_in[NX])
/* time step of field evolution */
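/* The update below is an explicit finite-difference step of the heat equation: at interior points
phi_out = phi + intstep*(discrete Laplacian of phi) with an optional drift term along x controlled
by SPEED, where intstep = DT/(dx*dx*VISCOSITY) is set in animation() */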
{
int i, j, iplus, iminus, jplus, jminus;
double delta1, delta2, x, y;
#pragma omp parallel for private(i,j,iplus,iminus,jplus,jminus,delta1,delta2,x,y)
for (i=0; i<NX; i++){
for (j=0; j<NY; j++){
if (xy_in[i][j] == 1){
/* discretized Laplacian depending on boundary conditions */
if ((B_COND == BC_DIRICHLET)||(B_COND == BC_ABSORBING))
{
iplus = (i+1); if (iplus == NX) iplus = NX-1;
iminus = (i-1); if (iminus == -1) iminus = 0;
jplus = (j+1); if (jplus == NY) jplus = NY-1;
jminus = (j-1); if (jminus == -1) jminus = 0;
}
else if (B_COND == BC_PERIODIC)
{
iplus = (i+1) % NX;
iminus = (i-1) % NX;
if (iminus < 0) iminus += NX;
jplus = (j+1) % NY;
jminus = (j-1) % NY;
if (jminus < 0) jminus += NY;
}
delta1 = phi_in[iplus][j] + phi_in[iminus][j] + phi_in[i][jplus] + phi_in[i][jminus] - 4.0*phi_in[i][j];
x = phi_in[i][j];
/* evolve phi */
if (B_COND != BC_ABSORBING)
{
phi_out[i][j] = x + intstep*(delta1 - SPEED*(phi_in[iplus][j] - phi_in[i][j]));
}
else /* case of absorbing b.c. - this is only an approximation of the correct implementation */
{
/* in the bulk */
if ((i>0)&&(i<NX-1)&&(j>0)&&(j<NY-1))
{
/* note: delta2 is declared but never assigned in this file (carried over from the
wave-equation code), so the absorbing boundary option is effectively unusable here */
phi_out[i][j] = x - intstep*delta2;
}
/* right border */
else if (i==NX-1)
{
phi_out[i][j] = x - intstep1*(x - phi_in[i-1][j]);
}
/* upper border */
else if (j==NY-1)
{
phi_out[i][j] = x - intstep1*(x - phi_in[i][j-1]);
}
/* left border */
else if (i==0)
{
phi_out[i][j] = x - intstep1*(x - phi_in[1][j]);
}
/* lower border */
else if (j==0)
{
phi_out[i][j] = x - intstep1*(x - phi_in[i][1]);
}
}
if (FLOOR)
{
if (phi_out[i][j] > VMAX) phi_out[i][j] = VMAX;
if (phi_out[i][j] < -VMAX) phi_out[i][j] = -VMAX;
}
}
}
}
// printf("phi(0,0) = %.3lg, psi(0,0) = %.3lg\n", phi[NX/2][NY/2], psi[NX/2][NY/2]);
}
void evolve_wave(double *phi[NX], double *phi_tmp[NX], short int *xy_in[NX])
/* time step of field evolution */
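/* the two half steps alternate phi and phi_tmp as source and destination, so after a full call
the updated field is back in phi and phi_tmp only serves as scratch storage */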
{
evolve_wave_half(phi, phi_tmp, xy_in);
evolve_wave_half(phi_tmp, phi, xy_in);
}
double compute_variance(double *phi[NX], short int * xy_in[NX])
/* compute the variance (total probability) of the field */
{
int i, j, n = 0;
double variance = 0.0;
for (i=1; i<NX; i++)
for (j=1; j<NY; j++)
{
if (xy_in[i][j])
{
n++;
variance += phi[i][j]*phi[i][j];
}
}
if (n==0) n=1;
return(variance/(double)n);
}
void renormalise_field(double *phi[NX], short int * xy_in[NX], double variance)
/* renormalise variance of field */
{
int i, j;
double stdv;
stdv = sqrt(variance);
for (i=1; i<NX; i++)
for (j=1; j<NY; j++)
{
if (xy_in[i][j])
{
phi[i][j] = phi[i][j]/stdv;
}
}
}
void print_level(int level)
{
double pos[2];
char message[50];
glColor3f(1.0, 1.0, 1.0);
sprintf(message, "Level %i", level);
xy_to_pos(XMIN + 0.1, YMAX - 0.2, pos);
write_text(pos[0], pos[1], message);
}
void print_Julia_parameters()
{
double pos[2];
char message[50];
glColor3f(1.0, 1.0, 1.0);
if (julia_y >= 0.0) sprintf(message, "c = %.5f + %.5f i", julia_x, julia_y);
else sprintf(message, "c = %.5f %.5f i", julia_x, julia_y);
xy_to_pos(XMIN + 0.1, YMAX - 0.2, pos);
write_text(pos[0], pos[1], message);
}
void set_Julia_parameters(int time, double *phi[NX], short int *xy_in[NX])
{
double jangle, cosj, sinj, radius = 0.15;
jangle = (double)time*DPI/(double)NSTEPS;
// jangle = (double)time*0.001;
// jangle = (double)time*0.0001;
cosj = cos(jangle);
sinj = sin(jangle);
julia_x = -0.9 + radius*cosj;
julia_y = radius*sinj;
init_julia_set(phi, xy_in);
printf("Julia set parameters : i = %i, angle = %.5lg, cx = %.5lg, cy = %.5lg \n", time, jangle, julia_x, julia_y);
}
void set_Julia_parameters_cardioid(int time, double *phi[NX], short int *xy_in[NX])
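/* the formulas below trace the main cardioid of the Mandelbrot set, c(t) = e^{it}/2 - e^{2it}/4,
whose real and imaginary parts expand to the julia_x and julia_y expressions used here; jangle
sweeps the parameter t slowly with time and yshift adds a small vertical oscillation */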
{
double jangle, cosj, sinj, yshift;
jangle = pow(1.05 + (double)time*0.00003, 0.333);
yshift = 0.02*sin((double)time*PID*0.002);
// jangle = pow(1.0 + (double)time*0.00003, 0.333);
// jangle = pow(0.05 + (double)time*0.00003, 0.333);
// jangle = pow(0.1 + (double)time*0.00001, 0.333);
// yshift = 0.04*sin((double)time*PID*0.002);
cosj = cos(jangle);
sinj = sin(jangle);
julia_x = 0.5*(cosj*(1.0 - 0.5*cosj) + 0.5*sinj*sinj);
julia_y = 0.5*sinj*(1.0-cosj) + yshift;
// julia_x = 0.5*(cosj*(1.0 - 0.5*cosj) + 0.5*sinj*sinj);
// julia_y = 0.5*sinj*(1.0-cosj);
init_julia_set(phi, xy_in);
printf("Julia set parameters : i = %i, angle = %.5lg, cx = %.5lg, cy = %.5lg \n", time, jangle, julia_x, julia_y);
}
void animation()
{
double time, scale, dx, var, jangle, cosj, sinj;
double *phi[NX], *phi_tmp[NX];
short int *xy_in[NX];
int i, j, s;
/* Since NX and NY are big, it seemed wiser to use some memory allocation here */
for (i=0; i<NX; i++)
{
phi[i] = (double *)malloc(NY*sizeof(double));
phi_tmp[i] = (double *)malloc(NY*sizeof(double));
xy_in[i] = (short int *)malloc(NY*sizeof(short int));
}
npolyline = init_polyline(MDEPTH, polyline);
for (i=0; i<npolyline; i++) printf("vertex %i: (%.3f, %.3f)\n", i, polyline[i].x, polyline[i].y);
dx = (XMAX-XMIN)/((double)NX);
intstep = DT/(dx*dx*VISCOSITY);
intstep1 = DT/(dx*VISCOSITY);
// julia_x = 0.1;
// julia_y = 0.6;
// set_Julia_parameters(0, phi, xy_in);
printf("Integration step %.3lg\n", intstep);
/* initialize the temperature field with a gaussian */
init_gaussian(-1.0, 0.0, 0.1, 0.0, 0.01, phi, xy_in);
// init_gaussian(x, y, mean, amplitude, scalex, phi, xy_in)
if (SCALE)
{
var = compute_variance(phi, xy_in);
scale = sqrt(1.0 + var);
renormalise_field(phi, xy_in, var);
}
blank();
glColor3f(0.0, 0.0, 0.0);
glutSwapBuffers();
draw_wave(phi, xy_in, 1.0, 0);
draw_billiard();
// print_Julia_parameters(i);
// print_level(MDEPTH);
glutSwapBuffers();
sleep(SLEEP1);
if (MOVIE) for (i=0; i<SLEEP1*25; i++) save_frame();
for (i=0; i<=NSTEPS; i++)
{
/* compute the variance of the field to adjust color scheme */
/* the color depends on the field divided by sqrt(1 + variance) */
if (SCALE)
{
var = compute_variance(phi, xy_in);
scale = sqrt(1.0 + var);
// printf("Norm: %5lg\t Scaling factor: %5lg\n", var, scale);
renormalise_field(phi, xy_in, var);
}
else scale = 1.0;
draw_wave(phi, xy_in, scale, i);
for (j=0; j<NVID; j++) evolve_wave(phi, phi_tmp, xy_in);
draw_billiard();
// print_level(MDEPTH);
// print_Julia_parameters(i);
glutSwapBuffers();
/* modify Julia set */
// set_Julia_parameters(i, phi, xy_in);
if (MOVIE)
{
save_frame();
/* it seems that saving too many files too fast can cause trouble with the file system */
/* so this is to make a pause from time to time - parameter PAUSE may need adjusting */
if (i % PAUSE == PAUSE - 1)
{
printf("Making a short pause\n");
sleep(PSLEEP);
s = system("mv wave*.tif tif_heat/");
}
}
}
if (MOVIE)
{
for (i=0; i<20; i++) save_frame();
s = system("mv wave*.tif tif_heat/");
}
for (i=0; i<NX; i++)
{
free(phi[i]);
free(phi_tmp[i]);
}
}
void display(void)
{
glPushMatrix();
blank();
glutSwapBuffers();
blank();
glutSwapBuffers();
animation();
sleep(SLEEP2);
glPopMatrix();
glutDestroyWindow(glutGetWindow());
}
int main(int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);
glutInitWindowSize(WINWIDTH,WINHEIGHT);
glutCreateWindow("Heat equation in a planar domain");
init();
glutDisplayFunc(display);
glutMainLoop();
return 0;
}
|
pi-v11.c | /*
* Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x)
* between 0 and 1.
*
* parallel version using OpenMP
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h> /* OpenMP */
#if _DEBUG_
#define _DEBUG_ 1
#else
#define _DEBUG_ 0
#include "extrae_user_events.h"
#define PROGRAM 1000
#define PI_COMPUTATION 1
#define END 0
#endif
int main(int argc, char *argv[]) {
double x, sum=0.0, pi=0.0;
#if _DEBUG_
double start,end;
#endif
int i;
const char Usage[] = "Usage: pi <num_steps> (try 1000000000)\n";
if (argc < 2) {
fprintf(stderr, Usage);
exit(1);
}
int num_steps = atoi(argv[1]);
double step = 1.0/(double) num_steps;
#if _DEBUG_
start= omp_get_wtime();
#else
Extrae_event (PROGRAM, PI_COMPUTATION);
#endif
/* do computation -- using all available threads */
// WARNING : correct code
#pragma omp parallel private(i,x) reduction(+:sum)
{
#if _DEBUG_
int id = omp_get_thread_num();
#endif
#pragma omp for schedule(guided,10)
for (i=0; i < num_steps; i++) {
x = (i+0.5)*step;
sum += 4.0/(1.0+x*x);
#if _DEBUG_
printf("thread id:%d it:%d\n",id,i);
#endif
}
}
pi = step * sum;
#if _DEBUG_
end = omp_get_wtime();
printf("Wall clock execution time = %.9f seconds\n", end-start);
#else
Extrae_event (PROGRAM, END);
#endif
/* print results */
printf("Value of pi = %12.10f\n", pi);
return EXIT_SUCCESS;
}
|
GxB_Monoid_terminal.c | //------------------------------------------------------------------------------
// GxB_Monoid_terminal: return the terminal of a monoid (if any)
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB.h"
GrB_Info GxB_Monoid_terminal // return the monoid terminal
(
bool *has_terminal, // true if the monoid has a terminal value
void *terminal, // returns the terminal of the monoid,
// unmodified if has_terminal is false
GrB_Monoid monoid // monoid to query
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GB_WHERE1 ("GxB_Monoid_terminal (&has_terminal, &terminal, monoid)") ;
GB_RETURN_IF_NULL (has_terminal) ;
GB_RETURN_IF_NULL (terminal) ;
GB_RETURN_IF_NULL_OR_FAULTY (monoid) ;
ASSERT_MONOID_OK (monoid, "monoid for terminal", GB0) ;
//--------------------------------------------------------------------------
// return the terminal
//--------------------------------------------------------------------------
(*has_terminal) = (monoid->terminal != NULL) ;
if (*has_terminal)
{
memcpy (terminal, monoid->terminal, monoid->op->ztype->size) ;
}
#pragma omp flush
return (GrB_SUCCESS) ;
}
|
GB_unaryop__identity_bool_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_bool_uint64
// op(A') function: GB_tran__identity_bool_uint64
// C type: bool
// A type: uint64_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_bool_uint64
(
bool *restrict Cx,
const uint64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_bool_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
nbody_tools.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "ui.h"
#include "nbody.h"
#include "nbody_tools.h"
#include "nbody_alloc.h"
extern node_t* root;
/* draw recursively the content of a node */
void draw_node(node_t* n) {
#ifndef DISPLAY
return;
#else
if(!n)
return;
#if DRAW_BOXES
int x1 = POS_TO_SCREEN(n->x_min);
int y1 = POS_TO_SCREEN(n->y_min);
int x2 = POS_TO_SCREEN(n->x_max);
int y2 = POS_TO_SCREEN(n->y_max);
draw_rect(x1, y1, x2, y2);
#endif
if(n->particle) {
int x = POS_TO_SCREEN(n->particle->x_pos);
int y = POS_TO_SCREEN(n->particle->y_pos);
draw_point (x,y);
}
if(n->children) {
#if 0
/* draw a red point that represents the center of the node */
int x = POS_TO_SCREEN(n->x_center);
int y = POS_TO_SCREEN(n->y_center);
draw_red_point (x,y);
#endif
int i;
for(i=0; i<4; i++) {
draw_node(&n->children[i]);
}
}
#endif
}
/* print recursively the particles of a node */
void print_particles(FILE* f, node_t*n) {
if(!n) {
return;
}
if(n->particle) {
particle_t*p = n->particle;
fprintf(f, "particle={pos=(%f,%f), vel=(%f,%f)}\n", p->x_pos, p->y_pos, p->x_vel, p->y_vel);
}
if(n->children) {
int i;
for(i=0; i<4; i++) {
print_particles(f, &n->children[i]);
}
}
}
/* Initialize a node */
void init_node(node_t* n, node_t* parent, double x_min, double x_max, double y_min, double y_max) {
n->parent = parent;
n->children = NULL;
n->n_particles = 0;
n->particle = NULL;
n->x_min = x_min;
n->x_max = x_max;
n->y_min = y_min;
n->y_max = y_max;
n->depth = 0;
int depth=1;
while(parent) {
if(parent->depth < depth) {
parent->depth = depth;
depth++;
}
parent = parent->parent;
}
n->mass= 0;
n->x_center = 0;
n->y_center = 0;
assert(x_min != x_max);
assert(y_min != y_max);
}
/* Compute the position of a particle in a node and return
* the quadrant in which it should be placed
*/
int get_quadrant(particle_t* particle, node_t*node) {
double x_min = node->x_min;
double x_max = node->x_max;
double x_center = x_min+(x_max-x_min)/2;
double y_min = node->y_min;
double y_max = node->y_max;
double y_center = y_min+(y_max-y_min)/2;
assert(particle->x_pos>=node->x_min);
assert(particle->x_pos<=node->x_max);
assert(particle->y_pos>=node->y_min);
assert(particle->y_pos<=node->y_max);
if(particle->x_pos <= x_center) {
if(particle->y_pos <= y_center) {
return 0;
} else {
return 2;
}
} else {
if(particle->y_pos <= y_center) {
return 1;
} else {
return 3;
}
}
}
/* inserts a particle in a node (or one of its children) */
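/* Quadtree (Barnes-Hut style) insertion: an empty leaf stores the particle directly; otherwise
the node is split into four children, the particle already stored there is pushed down, the new
particle is inserted recursively, and the node's total mass and center of mass are recomputed
from its children */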
void insert_particle(particle_t* particle, node_t*node) {
#if 0
assert(particle->x_pos >= node->x_min);
assert(particle->x_pos <= node->x_max);
assert(particle->y_pos >= node->y_min);
assert(particle->y_pos <= node->y_max);
assert(particle->node == NULL);
#endif
if(node->n_particles == 0 &&
node->children == NULL) {
assert(node->children == NULL);
/* there's no particle. insert directly */
node->particle = particle;
node->n_particles++;
node->x_center = particle->x_pos;
node->y_center = particle->y_pos;
node->mass = particle->mass;
particle->node = node;
assert(node->children == NULL);
return;
} else {
/* There's already a particle */
if(! node->children) {
/* there's no children yet */
/* create 4 children and move the already-inserted particle to one of them */
//assert(node->x_min != node->x_max);
node->children = alloc_node();
double x_min = node->x_min;
double x_max = node->x_max;
double x_center = x_min+(x_max-x_min)/2;
double y_min = node->y_min;
double y_max = node->y_max;
double y_center = y_min+(y_max-y_min)/2;
omp_set_lock(&lock);
init_node(&node->children[0], node, x_min, x_center, y_min, y_center);
init_node(&node->children[1], node, x_center, x_max, y_min, y_center);
init_node(&node->children[2], node, x_min, x_center, y_center, y_max);
init_node(&node->children[3], node, x_center, x_max, y_center, y_max);
omp_unset_lock(&lock);
/* move the already-inserted particle to one of the children */
particle_t*ptr = node->particle;
//assert(ptr->node == node);
int quadrant = get_quadrant(ptr, node);
node->particle = NULL;
ptr->node = NULL;
insert_particle(ptr, &node->children[quadrant]);
}
/* insert the particle to one of the children */
int quadrant = get_quadrant(particle, node);
node->n_particles++;
//assert(particle->node == NULL);
insert_particle(particle, &node->children[quadrant]);
/* update the mass and center of the node */
double total_mass = 0;
double total_x = 0;
double total_y = 0;
int i;
for(i=0; i<4; i++) {
total_mass += node->children[i].mass;
total_x += node->children[i].x_center*node->children[i].mass;
total_y += node->children[i].y_center*node->children[i].mass;
}
node->mass = total_mass;
node->x_center = total_x/total_mass;
node->y_center = total_y/total_mass;
#if 0
assert(node->particle == NULL);
assert(node->n_particles > 0);
#endif
}
}
/*
Place particles in their initial positions.
*/
void all_init_particles(int num_particles, particle_t *particles)
{
int i;
double total_particle = num_particles;
#pragma omp parallel
{
#pragma omp for schedule(dynamic)
for (i = 0; i < num_particles; i++) {
particle_t *particle = &particles[i];
#if 0
particle->x_pos = ((rand() % max_resolution)- (max_resolution/2))*2.0 / max_resolution;
particle->y_pos = ((rand() % max_resolution)- (max_resolution/2))*2.0 / max_resolution;
particle->x_vel = particle->y_pos;
particle->y_vel = particle->x_pos;
#else
particle->x_pos = i*2.0/nparticles - 1.0;
particle->y_pos = 0.0;
particle->x_vel = 0.0;
particle->y_vel = particle->x_pos;
#endif
particle->mass = 1.0 + (num_particles+i)/total_particle;
particle->node = NULL;
//insert_particle(particle, root);
}
}
}
struct memory_t mem_node;
void init_alloc(int nb_blocks) {
mem_init(&mem_node, 4*sizeof(node_t), nb_blocks);
}
/* allocate a block of 4 nodes */
node_t* alloc_node() {
node_t*ret = mem_alloc(&mem_node);
return ret;
}
void free_root(node_t*root) {
free_node(root);
mem_free(&mem_node, root);
}
void free_node(node_t* n) {
if(!n) return;
if(n->children) {
//assert(n->n_particles > 0);
int i;
for(i=0; i<4; i++) {
free_node(&n->children[i]);
}
mem_free(&mem_node, n->children);
}
}
|
pairwise_align.c | /*
This file is part of SSCA1.
Copyright (C) 2008-2015, UT-Battelle, LLC.
This product includes software produced by UT-Battelle, LLC under Contract No.
DE-AC05-00OR22725 with the Department of Energy.
This program is free software; you can redistribute it and/or modify
it under the terms of the New BSD 3-clause software license (LICENSE).
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
LICENSE for more details.
For more information please contact the SSCA1 developers at:
bakermb@ornl.gov
*/
#define _BSD_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sort.h>
#include <pairwise_align.h>
#include <string.h>
#include <util.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_max_threads() 1
#define omp_get_thread_num() 0
#endif
typedef struct
{
index_t *goodEnds[2];
score_t *goodScores;
int report;
int size;
score_t min_score;
} current_ends_t;
static index_t unsigned_abs_diff(index_t A, index_t B) {
return (A > B) ? (A - B) : (B - A);
}
void print_back_20(good_match_t *A, seq_t *in_seq, index_t end_index){
index_t start = end_index - 20;
char main_acid_chain[21];
char main_codon_chain[61];
seq_t this_seq;
this_seq.sequence = in_seq->sequence+start;
this_seq.length = 20;
memset(main_acid_chain, '\0', 21);
memset(main_codon_chain, '\0', 61);
assemble_acid_chain(A, main_acid_chain, &this_seq, 20);
assemble_codon_chain(A, main_codon_chain, &this_seq, 20);
printf("%7ld %s %s %7ld\n",
start, main_acid_chain, main_codon_chain, end_index);
}
static void considerAdding(score_t score, int minSeparation, index_t main_index, index_t match_index,
int maxReports, current_ends_t *score_ends) {
//first scan the list to see if there is a match already that is closer
for(int idx=0; idx < score_ends->report; idx++){
if(unsigned_abs_diff(score_ends->goodEnds[0][idx], main_index) < minSeparation || unsigned_abs_diff(score_ends->goodEnds[1][idx], match_index) < minSeparation) {
if(score_ends->goodScores[idx] < score) {
score_ends->goodEnds[0][idx] = main_index;
score_ends->goodEnds[1][idx] = match_index;
score_ends->goodScores[idx] = score;
return;
} else {
return;
}
}
}
//if the list is full, prune it down to the best maxReports entries
if(score_ends->report == score_ends->size) {
index_t worst_keeper, new_best_index=0;
index_t *index_array = NULL;
score_t *sorted_array = NULL;
index_t *best_index = NULL;
if(index_array == NULL) index_array = (index_t *)malloc(score_ends->size*sizeof(index_t));
if(sorted_array == NULL) sorted_array = (score_t *)malloc(score_ends->size*sizeof(score_t));
if(best_index == NULL) best_index = (index_t *)malloc(score_ends->size*sizeof(index_t));
memcpy(sorted_array, score_ends->goodScores, sizeof(score_t)*score_ends->report);
index_sort(sorted_array, index_array, score_ends->report);
worst_keeper = score_ends->size - maxReports;
score_ends->min_score = score_ends->goodScores[index_array[worst_keeper]];
for(int index_for_index=worst_keeper; index_for_index < score_ends->size; index_for_index++) {
best_index[new_best_index] = index_array[index_for_index];
new_best_index++;
}
sort(best_index, new_best_index);
for(int idx=0; idx < new_best_index; idx++) {
score_ends->goodScores[idx] =score_ends->goodScores[best_index[idx]];
score_ends->goodEnds[0][idx]=score_ends->goodEnds[0][best_index[idx]];
score_ends->goodEnds[1][idx]=score_ends->goodEnds[1][best_index[idx]];
}
score_ends->report = maxReports;
free(index_array);
free(sorted_array);
free(best_index);
}
score_ends->goodEnds[0][score_ends->report] = main_index;
score_ends->goodEnds[1][score_ends->report] = match_index;
score_ends->goodScores[score_ends->report] = score;
score_ends->report++;
}
/* release_good_match:
 * Free the good_match_t structure generated by Kernel 1 and Kernel 2
 * Note: You can still release a structure that has not been through Kernel 2,
 * there are no ill side effects.
* Input:
* good_match_t *doomed - the structure to be freed
* Output:
* None
*/
void release_good_match(good_match_t *doomed)
{
if(doomed==NULL) return;
free(doomed->goodScores);
free(doomed->goodEnds[1]);
free(doomed->goodEnds[0]);
free(doomed->bestStarts[0]);
free(doomed->bestStarts[1]);
free(doomed->bestEnds[0]);
free(doomed->bestEnds[1]);
free(doomed->bestScores);
for(int idx=0; idx<doomed->bestLength; idx++)
{
free_local_seq(doomed->bestSeqs[idx].main);
free_local_seq(doomed->bestSeqs[idx].match);
}
free(doomed->bestSeqs);
free(doomed);
}
//typedef score_t score_matrix_t;
typedef struct {
score_t *scores;
index_t length;
index_t local_length;
} score_matrix_t;
typedef score_matrix_t gap_matrix_t;
static score_matrix_t *alloc_score_matrix(index_t matrix_length){
score_matrix_t *new_alloc = (score_matrix_t *)malloc(sizeof(score_matrix_t));
assert(new_alloc != NULL);
new_alloc->length = matrix_length;
new_alloc->local_length = (matrix_length / num_nodes);
malloc_all(sizeof(score_t)*3*new_alloc->local_length, (void **)&new_alloc->scores);
assert(new_alloc->scores != NULL);
touch_memory(new_alloc->scores, sizeof(score_t)*3*new_alloc->local_length);
return new_alloc;
}
static gap_matrix_t *alloc_gap_matrix(index_t matrix_length){
gap_matrix_t *new_alloc = (gap_matrix_t *)malloc(sizeof(score_matrix_t));
assert(new_alloc != NULL);
new_alloc->length = matrix_length;
new_alloc->local_length = (matrix_length / num_nodes);
malloc_all(sizeof(score_t)*2*new_alloc->local_length, (void **)&new_alloc->scores);
assert(new_alloc->scores != NULL);
touch_memory(new_alloc->scores, sizeof(score_t)*2*new_alloc->local_length);
return new_alloc;
}
static void free_score_matrix(score_matrix_t *doomed){
FREE_ALL(doomed->scores);
free(doomed);
}
static void free_gap_matrix(gap_matrix_t *doomed){
FREE_ALL(doomed->scores);
free(doomed);
}
#define index2d(x,y,stride) ((y) + ((x) * (stride)))
static void fetch_score(score_matrix_t *A, index_t m, index_t n, score_t *in){
int target_ep = n / A->local_length;
int local_index = n % A->local_length;
SHORT_GET(in, &(A->scores[index2d(m%3,local_index,A->local_length)]), 1, target_ep);
}
static void fetch_gap(gap_matrix_t *A, index_t m, index_t n, score_t *in){
int target_ep = n / A->local_length;
int local_index = n %A->local_length;
SHORT_GET(in, &(A->scores[index2d(m%2,local_index,A->local_length)]), 1, target_ep);
}
/* maybe useful for going to further extremes. Commented out because GCC complains of unused functions */
#if 0
static void fetch_score_nb(score_matrix_t *A, index_t m, index_t n, score_t *in){
int target_ep = n / A->local_length;
int local_index = n % A->local_length;
SHORT_GET_NB((short*)in, &(A->scores[index2d(m%3,local_index,A->local_length)]), 1, target_ep);
}
static void fetch_gap_nb(gap_matrix_t *A, index_t m, index_t n, score_t *in){
int target_ep = n / A->local_length;
int local_index = n %A->local_length;
SHORT_GET_NB((short*)in, &(A->scores[index2d(m%2,local_index,A->local_length)]), 1, target_ep);
}
#endif
static void assign_score(score_matrix_t *A, index_t m, index_t n, score_t new_value){
int target_ep = n / A->local_length;
int local_index = n %A->local_length;
SHORT_PUT(&(A->scores[index2d(m%3,local_index,A->local_length)]), &new_value, 1, target_ep);
}
static void assign_gap(score_matrix_t *A, index_t m, index_t n, score_t new_value){
int target_ep = n / A->local_length;
int local_index = n % A->local_length;
SHORT_PUT(&(A->scores[index2d(m%2,local_index,A->local_length)]), &new_value, 1, target_ep);
}
#ifdef USE_SHMEM
long collect_pSync[_SHMEM_REDUCE_SYNC_SIZE];
int collect_pWrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE];
#endif
static void collect_best_results(current_ends_t **good_ends, int max_reports, int max_threads, good_match_t *answer){
index_t copied=0;
int max_values=0;
current_ends_t collected_ends, current_end;
if(rank != 0) return;
memset(answer->goodEnds[0], 0, sizeof(index_t)*max_reports);
memset(answer->goodEnds[1], 0, sizeof(index_t)*max_reports);
memset(answer->goodScores, 0, sizeof(score_t)*max_reports);
collected_ends.goodScores = malloc(sizeof(score_t)*good_ends[0]->size*num_nodes);
collected_ends.goodEnds[0] = malloc(sizeof(index_t)*good_ends[0]->size*num_nodes);
collected_ends.goodEnds[1] = malloc(sizeof(index_t)*good_ends[0]->size*num_nodes);
for(int idx=0; idx < num_nodes; idx++){
GETMEM(&current_end, good_ends[0], sizeof(current_ends_t), idx);
SHORT_GET(&(collected_ends.goodScores[copied]), good_ends[0]->goodScores, current_end.report, idx);
LONG_GET(&(collected_ends.goodEnds[0][copied]), good_ends[0]->goodEnds[0], current_end.report, idx);
LONG_GET(&(collected_ends.goodEnds[1][copied]), good_ends[0]->goodEnds[1], current_end.report, idx);
copied += current_end.report;
}
sort_ends_t *sorted_list = malloc(sizeof(sort_ends_t)*copied);
for(int idx=0; idx < copied; idx++){
sorted_list[idx].score = collected_ends.goodScores[idx];
sorted_list[idx].main_end = collected_ends.goodEnds[0][idx];
sorted_list[idx].match_end = collected_ends.goodEnds[1][idx];
}
ends_sort(sorted_list, copied);
if(copied > max_reports){
max_values = max_reports;
} else {
max_values = copied;
}
for(int idx=0; idx < max_values; idx++){
answer->goodScores[idx] = sorted_list[idx].score;
answer->goodEnds[0][idx] = sorted_list[idx].main_end;
answer->goodEnds[1][idx] = sorted_list[idx].match_end;
}
free(sorted_list);
answer->numReports = max_values;
}
/* pairwise_align
* real meat of the program, this function finds codon similarities in seq_data using the matrix sim_matrix
* Input:
* seq_data_t *seq_data - Sequence data generated by genScalData()
* sim_matrix_t *sim_matrix - Codon similarity matrix generated by genSimMatrix()
* int minScore - Minimum end point score, from the init_parameters() function
* int maxReports - Maximum number of reports to keep, from the init_parameters() function
 * int minSeparation - Minimum end point separation in codons, from the init_parameters() function
*
* Output:
 * good_match_t * - a structure describing the good matches
 * ->simMatrix - a pointer to the sim_matrix_t used
 * ->seqData - a pointer to the seq_data_t used
 * ->goodEnds - a [2][maxReports] matrix with main/match endpoints
 * ->goodScores - a [maxReports] array of good scores for up to maxReports endpoints
* ->numReports - an integer, the number of reports represented
*/
good_match_t *pairwise_align(seq_data_t *seq_data, sim_matrix_t *sim_matrix, const int minScore, const int maxReports, const int minSeparation) {
const int sortReports = maxReports * 10;
const seq_t *main_seq = seq_data->main;
const seq_t *match_seq = seq_data->match;
const score_t gapExtend = sim_matrix->gapExtend;
const score_t gapFirst = sim_matrix->gapStart + gapExtend;
const index_t main_len = seq_data->main->length;
codon_t current_main, current_match;
codon_t next_main, next_match;
const int max_threads = omp_get_max_threads();
current_ends_t **good_ends = (current_ends_t **)malloc(sizeof(current_ends_t *)*max_threads);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(int jdx=0; jdx < max_threads; jdx++) {
int idx = omp_get_thread_num();
malloc_all(sizeof(current_ends_t), (void **)&good_ends[idx]);
good_ends[idx]->size = sortReports;
good_ends[idx]->report = 0;
malloc_all(sizeof(score_t)*sortReports, (void **)&good_ends[idx]->goodScores);
malloc_all(sizeof(index_t)*sortReports, (void **)&good_ends[idx]->goodEnds[0]);
malloc_all(sizeof(index_t)*sortReports, (void **)&good_ends[idx]->goodEnds[1]);
good_ends[idx]->min_score = minScore;
}
score_matrix_t *restrict score_matrix = alloc_score_matrix(seq_data->match->length);
gap_matrix_t *restrict main_gap_matrix = alloc_gap_matrix(seq_data->match->length);
gap_matrix_t *restrict match_gap_matrix = alloc_gap_matrix(seq_data->match->length);
index_t score_start, score_end;
score_t G, W, E, F, cmp_a, cmp_b, cmp_c, new_score, next_G, next_F, next_E;
index_t m, n;
good_match_t *answer;
codon_t main_codon;
codon_t match_codon;
index_t local_main_start = seq_data->main->local_size * rank;
index_t local_main_end = seq_data->main->local_size * (rank+1) - 1;
/*
if(rank == 0){
printf("Ready to debug on PID=%i\n", getpid());
int gogogo=0;
while(gogogo==0){}
}
*/
//First iteration, done by hand. Basically idx=0 in the big loop
if(rank == 0){
fetch_from_seq(main_seq,0,&main_codon);
fetch_from_seq(match_seq,0,&match_codon);
W = sim_matrix->similarity[main_codon][match_codon];
assign_score(score_matrix,0,0,0 > W ? 0 : W);
assign_gap(main_gap_matrix,0,0,-gapFirst + W);
assign_gap(match_gap_matrix,0,0,-gapFirst + W);
//idx=1 m=0,1 n =1,0
fetch_from_seq(main_seq,0,&main_codon);
fetch_from_seq(match_seq,1, &match_codon);
W = sim_matrix->similarity[main_codon][match_codon];
G = W;
fetch_gap(main_gap_matrix,0,0,&E);
cmp_a = 0 > E ? 0 : E;
cmp_a = cmp_a > G ? cmp_a : G;
assign_score(score_matrix,1,1,cmp_a);
cmp_a = E - gapExtend;
cmp_b = G - gapFirst;
assign_gap(main_gap_matrix,1,0,cmp_a > cmp_b ? cmp_a : cmp_b);
assign_gap(match_gap_matrix,1,0,-gapFirst > cmp_b ? -gapFirst : cmp_b);
fetch_from_seq(main_seq,1,&main_codon);
fetch_from_seq(match_seq,0,&match_codon);
W = sim_matrix->similarity[main_codon][match_codon];
G = W;
fetch_gap(match_gap_matrix,0,0, &F);
cmp_a = 0 > F ? 0 : F;
cmp_a = cmp_a > G ? cmp_a : G;
assign_score(score_matrix,1,0,cmp_a);
cmp_a = F - gapExtend;
cmp_b = G - gapFirst;
assign_gap(main_gap_matrix,1,1,-gapFirst > cmp_b ? -gapFirst : cmp_b);
assign_gap(match_gap_matrix,1,1,cmp_a > cmp_b ? cmp_a : cmp_b);
}
for(index_t idx=2; idx < seq_data->match->length * 2 - 1; idx++) {
BARRIER_ALL();
score_start = idx > (seq_data->match->length - 1) ? (idx-(seq_data->match->length-1)) : 0;
score_end = idx < (seq_data->match->length-1) ? (idx) : (seq_data->match->length-1);
if(idx < seq_data->match->length) {
if(rank == 0){
m = 0;
n = idx;
fetch_from_seq(main_seq,m,&main_codon);
fetch_from_seq(match_seq,n,&match_codon);
W = sim_matrix->similarity[main_codon][match_codon];
G = W;
fetch_gap(match_gap_matrix,idx-1,n-1,&F);
cmp_a = F > 0 ? F : 0;
cmp_a = cmp_a > G ? cmp_a : G;
assign_score(score_matrix,idx,m,cmp_a);
new_score= cmp_a;
if((new_score > good_ends[omp_get_thread_num()]->min_score && W > 0 && new_score == G)){
if (m+1 == seq_data->main->length || n == 0) {
considerAdding(new_score, minSeparation, m, n, maxReports, good_ends[omp_get_thread_num()]);
}
else {
fetch_from_seq(main_seq, m+1, &next_main);
fetch_from_seq(match_seq, n-1, &next_match);
if((m == main_len - 1) || (n == 0) || sim_matrix->similarity[next_main][next_match] <= 0){
considerAdding(new_score, minSeparation, m, n, maxReports, good_ends[omp_get_thread_num()]);
}
}
}
cmp_a = F - gapExtend;
cmp_b = G - gapFirst;
assign_gap(match_gap_matrix,idx,n,cmp_a > cmp_b ? cmp_a : cmp_b);
m = idx;
n = 0;
fetch_from_seq(main_seq,m,&main_codon);
fetch_from_seq(match_seq,n,&match_codon);
W = sim_matrix->similarity[main_codon][match_codon];
G = W;
fetch_gap(main_gap_matrix, idx-1, m-1, &E);
cmp_a = E > 0 ? E : 0;
cmp_a = cmp_a > G ? cmp_a : G;
assign_score(score_matrix,idx,m,cmp_a);
new_score = cmp_a;
if((new_score > good_ends[omp_get_thread_num()]->min_score && W > 0 && new_score == G)){
if (m+1 == seq_data->main->length || n == 0) {
considerAdding(new_score, minSeparation, m, n, maxReports, good_ends[omp_get_thread_num()]);
} else {
fetch_from_seq(main_seq, m+1, &next_main);
fetch_from_seq(match_seq, n-1, &next_match);
if((m == main_len - 1) || (n == 0) || sim_matrix->similarity[next_main][next_match] <= 0){
considerAdding(new_score, minSeparation, m, n, maxReports, good_ends[omp_get_thread_num()]);
}
}
}
cmp_a = E - gapExtend;
cmp_b = G - gapFirst;
assign_gap(main_gap_matrix, idx, m, cmp_a > cmp_b ? cmp_a : cmp_b);
}
score_start++;
score_end = score_end - 1;
}
index_t local_start, local_end;
if(score_end < local_main_start && score_start > local_main_end){
local_start = 1;
local_end = 0;
} else {
if(local_main_start > score_start){
local_start = local_main_start;
} else {
local_start = score_start;
}
if(score_end > local_main_end){
local_end = local_main_end;
} else {
local_end = score_end;
}
fetch_from_seq(main_seq,local_start,&next_main);
fetch_from_seq(match_seq,idx - score_start,&next_match);
fetch_score(score_matrix, (idx-2)%3, local_start-1, &next_G);
fetch_gap(match_gap_matrix, idx-1, idx - (score_start+1), &next_F);
fetch_gap(main_gap_matrix, idx-1, local_start-1, &next_E);
}
//As a note, this loop accounts for essentially all of the program's execution time. If you're looking to optimize this benchmark, this is all that counts.
for(index_t antidiagonal = local_start; antidiagonal <= local_end; antidiagonal++) {
m = antidiagonal;
n = idx - m;
#ifdef USE_PREFETCH
current_main = next_main;
current_match = next_match;
G = next_G;
F = next_F;
E = next_E;
if (m < (seq_data->main->length-1))
fetch_from_seq_nb(main_seq, m+1, &next_main);
if (n > 0)
fetch_from_seq_nb(match_seq, n-1, &next_match);
if (n > 1)
fetch_gap(match_gap_matrix, idx-1, n-2, &next_F);
fetch_gap(main_gap_matrix, idx-1, m, &next_E);
fetch_score(score_matrix, (idx-2)%3, m, &next_G);
#else
fetch_from_seq(main_seq, m, &current_main);
fetch_from_seq(match_seq, n, &current_match);
fetch_gap(match_gap_matrix, idx-1, n-1, &F);
fetch_gap(main_gap_matrix, idx-1, m-1, &E);
fetch_score(score_matrix, (idx-2)%3, m-1, &G);
#endif
cmp_a = 0;
cmp_a = cmp_a > E ? cmp_a : E;
cmp_a = cmp_a > F ? cmp_a : F;
W = sim_matrix->similarity[current_main][current_match];
G += W;
new_score = cmp_a > G ? cmp_a : G;
if((new_score > good_ends[omp_get_thread_num()]->min_score && W > 0 && new_score == G)){
#ifdef USE_PREFETCH
if (m+1 == seq_data->main->length || n == 0) {
considerAdding(new_score, minSeparation, m, n, maxReports, good_ends[omp_get_thread_num()]);
} else {
WAIT_NB();
if((m == main_len - 1) || (n == 0) || sim_matrix->similarity[next_main][next_match] <= 0){
considerAdding(new_score, minSeparation, m, n, maxReports, good_ends[omp_get_thread_num()]);
}
}
#else
if (m+1 == seq_data->main->length || n == 0) {
considerAdding(new_score, minSeparation, m, n, maxReports, good_ends[omp_get_thread_num()]);
} else {
fetch_from_seq(main_seq, m+1, &next_main);
fetch_from_seq(match_seq, n-1, &next_match);
if((m == main_len - 1) || (n == 0) || sim_matrix->similarity[next_main][next_match] <= 0){
considerAdding(new_score, minSeparation, m, n, maxReports, good_ends[omp_get_thread_num()]);
}
}
#endif
}
cmp_a = E - gapExtend;
cmp_b = G - gapFirst;
cmp_c = F - gapExtend;
#ifdef USE_PREFETCH
WAIT_NB();
#endif
assign_score(score_matrix,idx,m,new_score);
assign_gap(main_gap_matrix, idx, m, cmp_a > cmp_b ? cmp_a : cmp_b);
assign_gap(match_gap_matrix, idx, n, cmp_c > cmp_b ? cmp_c : cmp_b);
}
}
answer = (good_match_t*)malloc(sizeof(good_match_t));
answer->simMatrix = sim_matrix;
answer->seqData = seq_data;
answer->goodEnds[0] = (index_t*)malloc(sizeof(index_t)*maxReports);
answer->goodEnds[1] = (index_t*)malloc(sizeof(index_t)*maxReports);
answer->goodScores = (score_t*)malloc(sizeof(score_t)*maxReports);
answer->bestEnds[0] = NULL;
answer->bestStarts[0] = NULL;
answer->bestEnds[1] = NULL;
answer->bestStarts[1] = NULL;
answer->bestSeqs = NULL;
answer->bestScores = NULL;
BARRIER_ALL();
collect_best_results(good_ends, maxReports, max_threads, answer);
free_score_matrix(score_matrix);
free_gap_matrix(main_gap_matrix);
free_gap_matrix(match_gap_matrix);
for(int idx=0; idx < max_threads; idx++) {
FREE_ALL(good_ends[idx]->goodScores);
FREE_ALL(good_ends[idx]->goodEnds[0]);
FREE_ALL(good_ends[idx]->goodEnds[1]);
FREE_ALL(good_ends[idx]);
}
free(good_ends);
return answer;
}
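/*
 * A minimal usage sketch (illustrative only, not part of the benchmark driver;
 * the parameter values are hypothetical and seq_data/sim_matrix are assumed to
 * come from genScalData()/genSimMatrix() as documented above):
 *
 *   seq_data_t   *seq_data   = ...;  // from genScalData()
 *   sim_matrix_t *sim_matrix = ...;  // from genSimMatrix()
 *   good_match_t *A = pairwise_align(seq_data, sim_matrix,
 *                                    20,    // minScore
 *                                    200,   // maxReports
 *                                    25);   // minSeparation
 *   // ... consume A->goodEnds / A->goodScores / A->numReports ...
 *   release_good_match(A);
 */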
|
Cone.h | #ifndef CONE_HEADER
#define CONE_HEADER
#ifdef DOPARALLEL
#include <omp.h>
#endif
#include "basic.h"
#include "PointCloud.h"
#include <GfxTL/HyperplaneCoordinateSystem.h>
#include <stdexcept>
#include <ostream>
#include <istream>
#include <stdio.h>
#include <MiscLib/NoShrinkVector.h>
#include "LevMarLSWeight.h"
#include "LevMarFitting.h"
#ifndef DLL_LINKAGE
#define DLL_LINKAGE
#endif
// This implements a one sided cone!
class DLL_LINKAGE Cone
{
public:
struct ParallelPlanesError
: public std::runtime_error
{
ParallelPlanesError()
: std::runtime_error("Parallel planes in cone construction")
{}
};
enum { RequiredSamples = 3 };
Cone();
Cone(const Vec3f ¢er, const Vec3f &axisDir, float angle);
Cone(const Vec3f &p1, const Vec3f &p2, const Vec3f &p3,
const Vec3f &n1, const Vec3f &n2, const Vec3f &n3);
bool Init(const MiscLib::Vector< Vec3f > &samples);
bool InitAverage(const MiscLib::Vector< Vec3f > &samples);
bool Init(const Vec3f ¢er, const Vec3f &axisDir, float angle);
bool Init(const Vec3f &p1, const Vec3f &p2, const Vec3f &p3,
const Vec3f &n1, const Vec3f &n2, const Vec3f &n3);
bool Init(bool binary, std::istream *i);
void Init(FILE *i);
void Init(float* array);
inline float Distance(const Vec3f &p) const;
inline void Normal(const Vec3f &p, Vec3f *n) const;
inline float DistanceAndNormal(const Vec3f &p, Vec3f *n) const;
inline float SignedDistance(const Vec3f &p) const;
inline float SignedDistanceAndNormal(const Vec3f &p, Vec3f *n) const;
void Project(const Vec3f &p, Vec3f *pp) const;
// Parameterizes into (length, angle)
void Parameters(const Vec3f &p,
std::pair< float, float > *param) const;
inline float Height(const Vec3f &p) const;
inline float Angle() const;
inline const Vec3f &Center() const;
inline const Vec3f &AxisDirection() const;
Vec3f &AxisDirection() { return m_axisDir; }
inline const Vec3f AngularDirection() const;
//void AngularDirection(const Vec3f &angular);
void RotateAngularDirection(float radians);
inline float RadiusAtLength(float length) const;
bool LeastSquaresFit(const PointCloud &pc,
MiscLib::Vector< size_t >::const_iterator begin,
MiscLib::Vector< size_t >::const_iterator end);
template< class IteratorT >
bool LeastSquaresFit(IteratorT begin, IteratorT end);
bool Fit(const PointCloud &pc,
MiscLib::Vector< size_t >::const_iterator begin,
MiscLib::Vector< size_t >::const_iterator end)
{ return LeastSquaresFit(pc, begin, end); }
static bool Interpolate(const MiscLib::Vector< Cone > &cones,
const MiscLib::Vector< float > &weights, Cone *ic);
void Serialize(bool binary, std::ostream *o) const;
static size_t SerializedSize();
void Serialize(FILE *o) const;
void Serialize(float* array) const;
static size_t SerializedFloatSize();
void Transform(float scale, const Vec3f &translate);
void Transform(const GfxTL::MatrixXX< 3, 3, float > &rot,
const GfxTL::Vector3Df &trans);
inline unsigned int Intersect(const Vec3f &p, const Vec3f &r,
float lambda[2], Vec3f interPts[2]) const;
private:
template< class WeightT >
class LevMarCone
: public WeightT
{
public:
enum { NumParams = 7 };
typedef float ScalarType;
template< class IteratorT >
ScalarType Chi(const ScalarType *params, IteratorT begin, IteratorT end,
ScalarType *values, ScalarType *temp) const
{
ScalarType chi = 0;
ScalarType cosPhi = std::cos(params[6]);
ScalarType sinPhi = std::sin(params[6]);
int size = end - begin;
#pragma omp parallel for schedule(static) reduction(+:chi)
for(int idx = 0; idx < size; ++idx)
{
Vec3f s;
for(unsigned int j = 0; j < 3; ++j)
s[j] = begin[idx][j] - params[j];
ScalarType g = abs(s[0] * params[3] + s[1] * params[4] + s[2] * params[5]);
ScalarType f = s.sqrLength() - (g * g);
if(f <= 0)
f = 0;
else
f = std::sqrt(f);
temp[idx] = f;
chi += (values[idx] = WeightT::Weigh(cosPhi * f - sinPhi * g))
* values[idx];
}
return chi;
}
template< class IteratorT >
void Derivatives(const ScalarType *params, IteratorT begin, IteratorT end,
const ScalarType *values, const ScalarType *temp, ScalarType *matrix) const
{
ScalarType sinPhi = -std::sin(params[6]);
ScalarType cosPhi = std::cos(params[6]);
int size = end - begin;
#pragma omp parallel for schedule(static)
for(int idx = 0; idx < size; ++idx)
{
Vec3f s;
for(unsigned int j = 0; j < 3; ++j)
s[j] = begin[idx][j] - params[j];
ScalarType g = abs(s[0] * params[3] + s[1] * params[4] + s[2] * params[5]);
ScalarType ggradient[6];
for(unsigned int j = 0; j < 3; ++j)
ggradient[j] = -params[j + 3];
for(unsigned int j = 0; j < 3; ++j)
ggradient[j + 3] = s[j] - params[j + 3] * g;
ScalarType fgradient[6];
if(temp[idx] < 1e-6)
{
fgradient[0] = std::sqrt(1 - params[3] * params[3]);
fgradient[1] = std::sqrt(1 - params[4] * params[4]);
fgradient[2] = std::sqrt(1 - params[5] * params[5]);
}
else
{
fgradient[0] = (params[3] * g - s[0]) / temp[idx];
fgradient[1] = (params[4] * g - s[1]) / temp[idx];
fgradient[2] = (params[5] * g - s[2]) / temp[idx];
}
fgradient[3] = g * fgradient[0];
fgradient[4] = g * fgradient[1];
fgradient[5] = g * fgradient[2];
for(unsigned int j = 0; j < 6; ++j)
matrix[idx * NumParams + j] =
cosPhi * fgradient[j] + sinPhi * ggradient[j];
matrix[idx * NumParams + 6] = temp[idx] * sinPhi - g * cosPhi;
WeightT::template DerivWeigh< NumParams >(cosPhi * temp[idx] + sinPhi * g,
matrix + idx * NumParams);
}
}
void Normalize(float *params) const
{
// normalize direction
ScalarType l = std::sqrt(params[3] * params[3] + params[4] * params[4] +
params[5] * params[5]);
for(unsigned int i = 3; i < 6; ++i)
params[i] /= l;
// normalize angle
params[6] -= std::floor(params[6] / (2 * ScalarType(M_PI))) * (2 * ScalarType(M_PI)); // params[6] %= 2*M_PI
if(params[6] > M_PI)
{
params[6] -= std::floor(params[6] / ScalarType(M_PI)) * ScalarType(M_PI); // params[6] %= M_PI
for(unsigned int i = 3; i < 6; ++i)
params[i] *= -1;
}
if(params[6] > ScalarType(M_PI) / 2)
params[6] = ScalarType(M_PI) - params[6];
}
};
private:
Vec3f m_center; // this is the apex of the cone
Vec3f m_axisDir; // the axis points into the interior of the cone
float m_angle; // the opening angle
Vec3f m_normal;
Vec3f m_normalY; // precomputed normal part
float m_n2d[2];
GfxTL::HyperplaneCoordinateSystem< float, 3 > m_hcs;
float m_angularRotatedRadians;
};
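// A minimal usage sketch (illustrative only; the apex, axis and angle values
// below are hypothetical):
//
//   Cone cone(Vec3f(0, 0, 0),   // apex
//             Vec3f(0, 0, 1),   // axis direction (points into the cone)
//             0.5f);            // opening angle in radians
//   Vec3f n;
//   float d = cone.DistanceAndNormal(Vec3f(1, 0, 2), &n);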
inline float Cone::Distance(const Vec3f &p) const
{
// this is for one sided cone!
Vec3f s = p - m_center;
float g = s.dot(m_axisDir); // distance to plane orthogonal to
// axisdir through center
// distance to axis
float sqrS = s.sqrLength();
float f = sqrS - (g * g);
if(f <= 0)
f = 0;
else
f = std::sqrt(f);
float da = m_n2d[0] * f;
float db = m_n2d[1] * g;
if(g < 0 && da - db < 0) // is inside other side of cone -> disallow
return std::sqrt(sqrS);
return abs(da + db);
}
inline void Cone::Normal(const Vec3f &p, Vec3f *n) const
{
Vec3f s = p - m_center;
Vec3f pln = s.cross(m_axisDir);
Vec3f plx = m_axisDir.cross(pln);
plx.normalize();
// we are not dealing with a two-sided cone
*n = m_normal[0] * plx + m_normalY;
}
inline float Cone::DistanceAndNormal(const Vec3f &p, Vec3f *n) const
{
// this is for one-sided cone !!!
Vec3f s = p - m_center;
float g = s.dot(m_axisDir); // distance to plane orthogonal to
// axisdir through center
// distance to axis
float sqrS = s.sqrLength();
float f = sqrS - (g * g);
if(f <= 0)
f = 0;
else
f = std::sqrt(f);
float da = m_n2d[0] * f;
float db = m_n2d[1] * g;
float dist;
if(g < 0 && da - db < 0) // is inside other side of cone -> disallow
dist = std::sqrt(sqrS);
else
dist = abs(da + db);
// need normal
Vec3f plx = s - g * m_axisDir;
plx.normalize();
*n = m_normal[0] * plx + m_normalY;
return dist;
}
inline float Cone::SignedDistance(const Vec3f &p) const
{
// this is for one sided cone!
Vec3f s = p - m_center;
float g = s.dot(m_axisDir); // distance to plane orthogonal to
// axisdir through center
// distance to axis
float sqrS = s.sqrLength();
float f = sqrS - (g * g);
if(f <= 0)
f = 0;
else
f = std::sqrt(f);
float da = m_n2d[0] * f;
float db = m_n2d[1] * g;
if(g < 0 && da - db < 0) // is inside other side of cone -> disallow
return std::sqrt(sqrS);
return da + db;
}
inline float Cone::SignedDistanceAndNormal(const Vec3f &p, Vec3f *n) const
{
// this is for one-sided cone !!!
Vec3f s = p - m_center;
float g = s.dot(m_axisDir); // distance to plane orthogonal to
// axisdir through center
// distance to axis
float sqrS = s.sqrLength();
float f = sqrS - (g * g);
if(f <= 0)
f = 0;
else
f = std::sqrt(f);
float da = m_n2d[0] * f;
float db = m_n2d[1] * g;
float dist;
if(g < 0 && da - db < 0) // is inside other side of cone -> disallow
dist = std::sqrt(sqrS);
else
dist = da + db;
// need normal
Vec3f plx = s - g * m_axisDir;
plx.normalize();
*n = m_normal[0] * plx + m_normalY;
return dist;
}
float Cone::Angle() const
{
return m_angle;
}
const Vec3f &Cone::Center() const
{
return m_center;
}
const Vec3f &Cone::AxisDirection() const
{
return m_axisDir;
}
const Vec3f Cone::AngularDirection() const
{
return Vec3f(m_hcs[0].Data());
}
float Cone::RadiusAtLength(float length) const
{
return std::sin(m_angle) * abs(length);
}
float Cone::Height(const Vec3f &p) const
{
Vec3f s = p - m_center;
return m_axisDir.dot(s);
}
template< class IteratorT >
bool Cone::LeastSquaresFit(IteratorT begin, IteratorT end)
{
float param[7];
for(unsigned int i = 0; i < 3; ++i)
param[i] = m_center[i];
for(unsigned int i = 0; i < 3; ++i)
param[i + 3] = m_axisDir[i];
param[6] = m_angle;
LevMarCone< LevMarLSWeight > levMarCone;
if(!LevMar(begin, end, levMarCone, param))
return false;
if(param[6] < 1e-6 || param[6] > float(M_PI) / 2 - 1e-6)
return false;
for(unsigned int i = 0; i < 3; ++i)
m_center[i] = param[i];
for(unsigned int i = 0; i < 3; ++i)
m_axisDir[i] = param[i + 3];
m_angle = param[6];
m_normal = Vec3f(std::cos(-m_angle), std::sin(-m_angle), 0);
m_normalY = m_normal[1] * m_axisDir;
m_n2d[0] = std::cos(m_angle);
m_n2d[1] = -std::sin(m_angle);
m_hcs.FromNormal(m_axisDir);
m_angularRotatedRadians = 0;
// it could be that the axis has flipped during fitting
// we need to detect such a case
// for this we run over all points and compute the sum
// of their respective heights. If that sum is negative
// the axis needs to be flipped.
float heightSum = 0;
intptr_t size = end - begin;
#ifndef _WIN64 // for some reason the Microsoft x64 compiler crashes at the next line
#pragma omp parallel for schedule(static) reduction(+:heightSum)
#endif
for(intptr_t i = 0; i < size; ++i)
heightSum += Height(begin[i]);
if(heightSum < 0)
{
m_axisDir *= -1;
m_hcs.FromNormal(m_axisDir);
}
return true;
}
inline unsigned int Cone::Intersect(const Vec3f &p, const Vec3f &r,
float lambda[2], Vec3f interPts[2]) const
{
// Set up the quadratic Q(t) = c2*t^2 + 2*c1*t + c0 that corresponds to
// the cone. Let the vertex be V, the unit-length direction vector be A,
// and the angle measured from the cone axis to the cone wall be Theta,
// and define g = cos(Theta). A point X is on the cone wall whenever
// Dot(A,(X-V)/|X-V|) = g. Square this equation and factor to obtain
// (X-V)^T * (A*A^T - g^2*I) * (X-V) = 0
// where the superscript T denotes the transpose operator. This defines
// a double-sided cone. The line is L(t) = P + t*D, where P is the line
// origin and D is a unit-length direction vector. Substituting
// X = L(t) into the cone equation above leads to Q(t) = 0. Since we
// want only intersection points on the single-sided cone that lives in
// the half-space pointed to by A, any point L(t) generated by a root of
// Q(t) = 0 must be tested for Dot(A,L(t)-V) >= 0.
using namespace std;
float fAdD = m_axisDir.dot(r);
float tmp, fCosSqr = (tmp = cos(m_angle)) * tmp;
Vec3f kE = p - m_center;
float fAdE = m_axisDir.dot(kE);
float fDdE = r.dot(kE);
float fEdE = kE.dot(kE);
float fC2 = fAdD*fAdD - fCosSqr;
float fC1 = fAdD*fAdE - fCosSqr*fDdE;
float fC0 = fAdE*fAdE - fCosSqr*fEdE;
float fdot;
// Solve the quadratic. Keep only those X for which Dot(A,X-V) >= 0.
unsigned int interCount = 0;
if (abs(fC2) >= 1e-7)
{
// c2 != 0
float fDiscr = fC1*fC1 - fC0*fC2;
if (fDiscr < (float)0.0)
{
// Q(t) = 0 has no real-valued roots. The line does not
// intersect the double-sided cone.
return 0;
}
else if (fDiscr > 1e-7)
{
// Q(t) = 0 has two distinct real-valued roots. However, one or
// both of them might intersect the portion of the double-sided
// cone "behind" the vertex. We are interested only in those
// intersections "in front" of the vertex.
float fRoot = sqrt(fDiscr);
float fInvC2 = ((float)1.0)/fC2;
interCount = 0;
float fT = (-fC1 - fRoot)*fInvC2;
if(fT > 0) // intersect only in positive direction of ray
{
interPts[interCount] = p + fT*r;
kE = interPts[interCount] - m_center;
fdot = kE.dot(m_axisDir);
if (fdot > (float)0.0)
{
lambda[interCount] = fT;
interCount++;
}
}
fT = (-fC1 + fRoot)*fInvC2;
if(fT > 0)
{
interPts[interCount] = p + fT*r;
kE = interPts[interCount] - m_center;
fdot = kE.dot(m_axisDir);
if (fdot > (float)0.0)
{
lambda[interCount] = fT;
interCount++;
}
}
}
else if(fC1 / fC2 < 0)
{
// one repeated real root (line is tangent to the cone)
interPts[0] = p - (fC1/fC2)*r;
lambda[0] = -(fC1 / fC2);
kE = interPts[0] - m_center;
if (kE.dot(m_axisDir) > (float)0.0)
interCount = 1;
else
interCount = 0;
}
}
else if (abs(fC1) >= 1e-7)
{
// c2 = 0, c1 != 0 (D is a direction vector on the cone boundary)
lambda[0] = -(((float)0.5)*fC0/fC1);
if(lambda[0] < 0)
return 0;
interPts[0] = p + lambda[0] *r;
kE = interPts[0] - m_center;
fdot = kE.dot(m_axisDir);
if (fdot > (float)0.0)
interCount = 1;
else
interCount = 0;
}
else if (abs(fC0) >= 1e-7)
{
// c2 = c1 = 0, c0 != 0
interCount = 0;
}
else
{
// c2 = c1 = c0 = 0, cone contains ray V+t*D where V is cone vertex
// and D is the line direction.
interCount = 1;
lambda[0] = (m_center - p).dot(r);
interPts[0] = m_center;
}
return interCount;
}
#endif
|
ConvexLS.h | ///////////////////////////////////////////////////////////////////////////////
// Dem Bones - Skinning Decomposition Library //
// Copyright (c) 2019, Electronic Arts. All rights reserved. //
///////////////////////////////////////////////////////////////////////////////
#ifndef DEM_BONES_CONVEX_LS
#define DEM_BONES_CONVEX_LS
#include "Indexing.h"
#include <Eigen/Dense>
#include <Eigen/StdVector>
namespace Dem {
/** @class ConvexLS ConvexLS.h "DemBones/ConvexLS.h"
@brief Linear least squares solver with non-negativity constraint and
optional affinity constraint
@details Solve:
@f{eqnarray*}{
min &||Ax-b||^2 \\
\mbox{Subject to: } & x(0).. x(n-1) \geq 0, \\
\mbox{(optional) } & x(0) +.. + x(n-1) = 1
@f}
The solver implements active set method to handle non-negativity
constraint and QR decomposition to handle affinity constraint.
@b _Scalar is the floating-point data type.
*/
template <class _Scalar>
class ConvexLS {
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
using MatrixX = Eigen::Matrix<_Scalar, Eigen::Dynamic, Eigen::Dynamic>;
using VectorX = Eigen::Matrix<_Scalar, Eigen::Dynamic, 1>;
/** Constructor, just call init()
@param[in] maxSize is the maximum size of the unknown @f$ x @f$ if the
affinity constraint is imposed.
*/
ConvexLS(int maxSize = 1) {
q2.resize(0);
init(maxSize);
}
/** Init matrices @f$ Q @f$ in the QR decomposition used for affinity
constraint
@param[in] maxSize is the maximum size of the unknown @f$ x @f$ if the
affinity constraint is imposed.
*/
void init(int maxSize) {
int curN = (int)q2.size() + 1;
if (curN < maxSize) {
q2.resize(maxSize - 1);
// #pragma omp parallel for
for (int n = curN - 1; n < maxSize - 1; n++)
q2[n] = MatrixX(VectorX::Constant(n + 2, _Scalar(1))
.householderQr()
.householderQ())
.rightCols(n + 1);
}
}
/** Solve the least squares problem
@param[in] aTa is the cross product matrix @f$ A^TA @f$
@param[in] aTb is the vector @f$ A^Tb @f$
@param[in, out] x is the by-reference output and it is also the init
solution (if @b warmStart == @c true)
@param[in] affine=true will impose affinity constraint
@param[in] warmStart=true will initialize the solution by @b x
*/
void solve(const MatrixX &aTa, const VectorX &aTb, VectorX &x, bool affine,
bool warmStart = false) {
int n = int(aTa.cols());
if (!warmStart)
x = VectorX::Constant(n, _Scalar(1) / n);
Eigen::ArrayXi idx(n);
int np = 0;
for (int i = 0; i < n; i++)
if (x(i) > 0)
idx[np++] = i;
else
idx[n - i + np - 1] = i;
VectorX p;
for (int rep = 0; rep < n; rep++) {
solveP(aTa, aTb, x, idx, np, affine, p);
if ((indexing_vector(x, idx.head(np)) + indexing_vector(p, idx.head(np)))
.minCoeff() >= 0) {
x += p;
if (np == n)
break;
Eigen::Index iMax;
(indexing_vector(aTb, idx.tail(n - np)) -
indexing_row(aTa, idx.tail(n - np)) * x)
.maxCoeff(&iMax);
std::swap(idx[iMax + np], idx[np]);
np++;
} else {
_Scalar alpha;
int iMin = -1;
for (int i = 0; i < np; i++)
if (p(idx[i]) < 0) {
if ((iMin == -1) || (x(idx[i]) < -alpha * p(idx[i]))) {
alpha = -x(idx[i]) / p(idx[i]);
iMin = i;
}
}
x += alpha * p;
_Scalar eps = std::abs(x(idx[iMin]));
x(idx[iMin]) = 0;
for (int i = 0; i < np; i++)
if (x(idx[i]) <= eps)
std::swap(idx[i--], idx[--np]);
}
if (affine)
x /= x.sum();
}
}
private:
//! Store @f$ Q @f$ matrices in QR decompositions, except the first column.
//! q2.size()==maxSize-1 (of x), q2[n].size()==(n+2)*(n+1)
std::vector<MatrixX, Eigen::aligned_allocator<MatrixX>> q2;
/** Solve the gradient
@param[in] aTa is the cross product matrix @f$ A^TA @f$
@param[in] aTb is the vector @f$ A^Tb @f$
@param[in] x is the current solution
@param[in] idx indicates the current active set, @p idx(0).. @p
idx(np-1) are passive (free) variables
@param[in] np is the size of the active set
@param[in] zeroSum=true will impose a zero-sum constraint on the gradient
@param[out] p is the by-reference negative gradient output
*/
void solveP(const MatrixX &aTa, const VectorX &aTb, const VectorX &x,
const Eigen::ArrayXi &idx, int np, bool zeroSum, VectorX &p) {
VectorX z;
p.setZero(aTb.size());
if (!zeroSum) {
z = indexing_row_col(aTa, idx.head(np), idx.head(np))
.colPivHouseholderQr()
.solve( // A
indexing_vector(aTb, idx.head(np)) -
indexing_row(aTa, idx.head(np)) * x); // b
for (int ip = 0; ip < np; ip++)
p(idx[ip]) = z(ip);
} else if (np > 1) {
z = q2[np - 2] *
( // Re-project
(q2[np - 2].transpose() *
indexing_row_col(aTa, idx.head(np), idx.head(np)) * q2[np - 2])
.colPivHouseholderQr()
.solve( // A
q2[np - 2].transpose() *
(indexing_vector(aTb, idx.head(np)) -
indexing_row(aTa, idx.head(np)) * x))); // b
for (int ip = 0; ip < np; ip++)
p(idx[ip]) = z(ip);
}
}
};
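// A minimal usage sketch (illustrative only; the Eigen design matrix A and
// right-hand side b are hypothetical data for the problem stated above):
//
//   Eigen::MatrixXd A = ...;   // m x n
//   Eigen::VectorXd b = ...;   // m
//   ConvexLS<double> solver(int(A.cols()));
//   Eigen::VectorXd x;
//   solver.solve(A.transpose() * A,   // aTa
//                A.transpose() * b,   // aTb
//                x,
//                /*affine=*/true);    // impose x >= 0 and sum(x) == 1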
} // namespace Dem
#endif
|
ast-dump-openmp-flush.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test() {
#pragma omp flush
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-flush.c:3:1, line:5:1> line:3:6 test 'void ()'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:5:1>
// CHECK-NEXT: `-OMPFlushDirective {{.*}} <line:4:1, col:18> openmp_standalone_directive
|
bdd_sequential_base.h | #pragma once
#include <vector>
#include <array>
#include <Eigen/SparseCore>
#include "bdd_collection/bdd_collection.h"
#include "two_dimensional_variable_array.hxx"
#include <fstream>
#include <cstdlib>
#include <filesystem>
#include <unordered_set>
#include "time_measure_util.h"
#include "atomic_ref.hpp"
namespace LPMP {
// store BDDs one after the other.
// allows for efficient computation of min marginals and parallel mma
template<typename BDD_BRANCH_NODE>
class bdd_sequential_base {
public:
using value_type = typename BDD_BRANCH_NODE::value_type;
bdd_sequential_base(BDD::bdd_collection& bdd_col) { add_bdds(bdd_col); }
void add_bdds(BDD::bdd_collection& bdd_col);
size_t nr_bdds() const;
size_t nr_bdds(const size_t var) const;
size_t nr_variables() const;
size_t nr_variables(const size_t bdd_nr) const;
size_t variable(const size_t bdd_nr, const size_t bdd_index) const;
size_t nr_bdd_variables() const;
double lower_bound();
using vector_type = Eigen::Matrix<typename BDD_BRANCH_NODE::value_type, Eigen::Dynamic, 1>;
vector_type lower_bound_per_bdd();
void forward_run();
void backward_run();
void backward_run(const size_t bdd_nr);
//two_dim_variable_array<std::array<value_type,2>> min_marginals();
two_dim_variable_array<std::array<double,2>> min_marginals();
using min_marginal_type = Eigen::Matrix<typename BDD_BRANCH_NODE::value_type, Eigen::Dynamic, 2>;
std::tuple<min_marginal_type, std::vector<char>> min_marginals_stacked();
template<typename COST_ITERATOR>
void update_costs(COST_ITERATOR cost_lo_begin, COST_ITERATOR cost_lo_end, COST_ITERATOR cost_hi_begin, COST_ITERATOR cost_hi_end);
// TODO: remove these! //
void update_costs(const two_dim_variable_array<std::array<value_type,2>>& delta);
void update_costs(const min_marginal_type& delta);
vector_type get_costs();
void update_costs(const vector_type& delta);
/////////////////////////
template<typename ITERATOR>
void fix_variables(ITERATOR zero_fixations_begin, ITERATOR zero_fixations_end, ITERATOR one_fixations_begin, ITERATOR one_fixations_end);
// make a step that is guaranteed to be non-decreasing in the lower bound.
void diffusion_step(const two_dim_variable_array<std::array<value_type,2>>& min_margs, const value_type damping_step = 1.0);
// compute incremental min marginals and perform min-marginal averaging subsequently
void parallel_mma();
void forward_mm(const size_t bdd_nr, const typename BDD_BRANCH_NODE::value_type omega, std::vector<std::array<typename BDD_BRANCH_NODE::value_type,2>>& mms_to_collect, std::vector<std::array<typename BDD_BRANCH_NODE::value_type,2>>& mms_to_distribute);
value_type backward_mm(const size_t bdd_nr, const typename BDD_BRANCH_NODE::value_type omega, std::vector<std::array<typename BDD_BRANCH_NODE::value_type,2>>& mms_to_collect, std::vector<std::array<typename BDD_BRANCH_NODE::value_type,2>>& mms_to_distribute);
void distribute_delta();
// Both operations below are inverses of each other
// Given elements in order bdd_nr/bdd_index, transpose to variable/bdd_index with same variable.
template<typename T>
two_dim_variable_array<T> transpose_to_var_order(const two_dim_variable_array<T>& m) const;
// Given elements in order var/bdd_index with same variable, transpose to bdd_nr/bdd_index.
template<typename T>
two_dim_variable_array<T> transpose_to_bdd_order(const two_dim_variable_array<T>& m) const;
Eigen::SparseMatrix<value_type> Lagrange_constraint_matrix() const;
void export_graphviz(const char* filename);
void export_graphviz(const std::string& filename);
template<typename STREAM>
void export_graphviz(STREAM& s, const size_t bdd_nr);
private:
enum class message_passing_state {
after_forward_pass,
after_backward_pass,
none
} message_passing_state_ = message_passing_state::none;
enum class lower_bound_state {
valid,
invalid
} lower_bound_state_ = lower_bound_state::invalid;
double lower_bound_ = -std::numeric_limits<double>::infinity();
double constant_ = 0.0;
double compute_lower_bound();
double compute_lower_bound_after_forward_pass();
double compute_lower_bound_after_backward_pass();
vector_type lower_bound_per_bdd_after_forward_pass();
vector_type lower_bound_per_bdd_after_backward_pass();
std::array<size_t,2> bdd_range(const size_t bdd_nr) const;
std::array<size_t,2> bdd_index_range(const size_t bdd_nr, const size_t bdd_idx) const;
std::vector<BDD_BRANCH_NODE> bdd_branch_nodes_;
// holds ranges of bdd branch instructions of specific bdd with specific variable
struct bdd_variable {
size_t offset;
size_t variable;
};
two_dim_variable_array<bdd_variable> bdd_variables_;
std::vector<size_t> nr_bdds_per_variable_;
// for parallel mma
std::vector<std::array<value_type,2>> mms_to_collect_;
std::vector<std::array<value_type,2>> mms_to_distribute_;
};
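// A minimal usage sketch (illustrative only; "my_branch_node" stands for a concrete
// BDD_BRANCH_NODE type and lo/hi are per-variable cost vectors prepared elsewhere):
//
//   bdd_sequential_base<my_branch_node> base(bdd_col);
//   base.update_costs(lo.begin(), lo.end(), hi.begin(), hi.end());
//   for(size_t iter = 0; iter < nr_iterations; ++iter)
//       base.parallel_mma();               // min-marginal averaging passes
//   const double lb = base.lower_bound();  // best lower bound obtained so far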
////////////////////
// implementation //
////////////////////
template<typename BDD_BRANCH_NODE>
size_t bdd_sequential_base<BDD_BRANCH_NODE>::nr_bdds() const
{
assert(bdd_variables_.size() > 0);
return bdd_variables_.size() - 1;
}
template<typename BDD_BRANCH_NODE>
size_t bdd_sequential_base<BDD_BRANCH_NODE>::nr_variables() const
{
return nr_bdds_per_variable_.size();
}
template<typename BDD_BRANCH_NODE>
size_t bdd_sequential_base<BDD_BRANCH_NODE>::nr_bdds(const size_t variable) const
{
assert(variable < nr_variables());
return nr_bdds_per_variable_[variable];
}
template<typename BDD_BRANCH_NODE>
size_t bdd_sequential_base<BDD_BRANCH_NODE>::nr_variables(const size_t bdd_nr) const
{
assert(bdd_nr < nr_bdds());
assert(bdd_variables_.size(bdd_nr) > 0);
return bdd_variables_.size(bdd_nr) - 1;
}
template<typename BDD_BRANCH_NODE>
size_t bdd_sequential_base<BDD_BRANCH_NODE>::variable(const size_t bdd_nr, const size_t bdd_index) const
{
assert(bdd_nr < nr_bdds());
assert(bdd_index < nr_variables(bdd_nr));
return bdd_variables_(bdd_nr, bdd_index).variable;
}
template<typename BDD_BRANCH_NODE>
size_t bdd_sequential_base<BDD_BRANCH_NODE>::nr_bdd_variables() const
{
return std::accumulate(nr_bdds_per_variable_.begin(), nr_bdds_per_variable_.end(), 0);
}
template<typename BDD_BRANCH_NODE>
void bdd_sequential_base<BDD_BRANCH_NODE>::add_bdds(BDD::bdd_collection& bdd_col)
{
message_passing_state_ = message_passing_state::none;
assert(bdd_branch_nodes_.size() == 0); // currently does not support incremental addition of BDDs
bdd_branch_nodes_.clear();
const size_t total_nr_bdd_nodes = [&]() {
size_t i=0;
for(size_t bdd_nr=0; bdd_nr<bdd_col.nr_bdds(); ++bdd_nr)
i += bdd_col.nr_bdd_nodes(bdd_nr)-2; // do not count terminal nodes
return i;
}();
bdd_branch_nodes_.reserve(total_nr_bdd_nodes);
bdd_variables_.clear();
nr_bdds_per_variable_.clear();
const size_t nr_vars = [&]() {
size_t max_v=0;
for(size_t bdd_nr=0; bdd_nr<bdd_col.nr_bdds(); ++bdd_nr)
max_v = std::max(max_v, bdd_col.min_max_variables(bdd_nr)[1]);
return max_v+1;
}();
nr_bdds_per_variable_.resize(nr_vars, 0);
for(size_t bdd_nr=0; bdd_nr<bdd_col.nr_bdds(); ++bdd_nr)
{
assert(bdd_col.is_qbdd(bdd_nr));
assert(bdd_col.is_reordered(bdd_nr));
}
for(size_t bdd_nr=0; bdd_nr<bdd_col.nr_bdds(); ++bdd_nr)
{
assert(bdd_col.is_qbdd(bdd_nr));
assert(bdd_col.is_reordered(bdd_nr));
std::vector<bdd_variable> cur_bdd_variables;
cur_bdd_variables.push_back({bdd_branch_nodes_.size(), bdd_col.min_max_variables(bdd_nr)[0]}); // TODO: use min_variable
for(auto bdd_it=bdd_col.cbegin(bdd_nr); bdd_it!=bdd_col.cend(bdd_nr); ++bdd_it)
{
const BDD::bdd_instruction& stored_bdd = *bdd_it;
assert(!stored_bdd.is_terminal());
BDD_BRANCH_NODE bdd;
if(bdd_col.get_bdd_instruction(stored_bdd.lo).is_botsink())
bdd.offset_low = BDD_BRANCH_NODE::terminal_0_offset;
else if(bdd_col.get_bdd_instruction(stored_bdd.lo).is_topsink())
bdd.offset_low = BDD_BRANCH_NODE::terminal_1_offset;
else
{
assert(bdd_col.offset(stored_bdd) < stored_bdd.lo);
bdd.offset_low = stored_bdd.lo - bdd_col.offset(stored_bdd);
}
if(bdd_col.get_bdd_instruction(stored_bdd.hi).is_botsink())
bdd.offset_high = BDD_BRANCH_NODE::terminal_0_offset;
else if(bdd_col.get_bdd_instruction(stored_bdd.hi).is_topsink())
bdd.offset_high = BDD_BRANCH_NODE::terminal_1_offset;
else
{
assert(bdd_col.offset(stored_bdd) < stored_bdd.hi);
bdd.offset_high = stored_bdd.hi - bdd_col.offset(stored_bdd);
}
if(bdd.offset_low == BDD_BRANCH_NODE::terminal_0_offset)
bdd.low_cost = std::numeric_limits<decltype(bdd.low_cost)>::infinity();
if(bdd.offset_high == BDD_BRANCH_NODE::terminal_0_offset)
bdd.high_cost = std::numeric_limits<decltype(bdd.high_cost)>::infinity();
if(stored_bdd.index != cur_bdd_variables.back().variable)
cur_bdd_variables.push_back({bdd_branch_nodes_.size(), stored_bdd.index});
assert(bdd_branch_nodes_.size() < total_nr_bdd_nodes);
bdd_branch_nodes_.push_back(bdd);
}
assert(cur_bdd_variables.back().variable == bdd_col.min_max_variables(bdd_nr)[1]);
cur_bdd_variables.push_back({bdd_branch_nodes_.size(), std::numeric_limits<size_t>::max()}); // For extra delimiter at the end
bdd_variables_.push_back(cur_bdd_variables.begin(), cur_bdd_variables.end());
assert(bdd_variables_.size(bdd_nr) == bdd_col.variables(bdd_nr).size()+1);
for(const auto [offset, v] : cur_bdd_variables)
{
assert(v < nr_bdds_per_variable_.size() || v == std::numeric_limits<size_t>::max());
if(v != std::numeric_limits<size_t>::max())
nr_bdds_per_variable_[v]++;
}
}
assert(bdd_branch_nodes_.size() == total_nr_bdd_nodes);
// add last entry for offset
std::vector<bdd_variable> tmp_bdd_variables;
tmp_bdd_variables.push_back({bdd_branch_nodes_.size(), std::numeric_limits<size_t>::max()});
bdd_variables_.push_back(tmp_bdd_variables.begin(), tmp_bdd_variables.end());
}
template<typename BDD_BRANCH_NODE>
double bdd_sequential_base<BDD_BRANCH_NODE>::lower_bound()
{
if(lower_bound_state_ == lower_bound_state::invalid)
compute_lower_bound();
assert(lower_bound_state_ == lower_bound_state::valid);
return lower_bound_;
}
template<typename BDD_BRANCH_NODE>
double bdd_sequential_base<BDD_BRANCH_NODE>::compute_lower_bound()
{
if(message_passing_state_ == message_passing_state::after_backward_pass)
{
lower_bound_ = compute_lower_bound_after_backward_pass();
}
else if(message_passing_state_ == message_passing_state::after_forward_pass)
{
lower_bound_ = compute_lower_bound_after_forward_pass();
}
else if(message_passing_state_ == message_passing_state::none)
{
backward_run();
lower_bound_ = compute_lower_bound_after_backward_pass();
}
lower_bound_state_ = lower_bound_state::valid;
return lower_bound_;
}
template<typename BDD_BRANCH_NODE>
double bdd_sequential_base<BDD_BRANCH_NODE>::compute_lower_bound_after_backward_pass()
{
MEASURE_CUMULATIVE_FUNCTION_EXECUTION_TIME;
assert(message_passing_state_ == message_passing_state::after_backward_pass);
double lb = constant_;
// TODO: works only for non-split BDDs
for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr)
{
const auto [first,last] = bdd_index_range(bdd_nr, 0);
assert(first+1 == last);
lb += bdd_branch_nodes_[first].m;
}
return lb;
}
template<typename BDD_BRANCH_NODE>
double bdd_sequential_base<BDD_BRANCH_NODE>::compute_lower_bound_after_forward_pass()
{
MEASURE_CUMULATIVE_FUNCTION_EXECUTION_TIME;
assert(message_passing_state_ == message_passing_state::after_forward_pass);
double lb = constant_;
// TODO: works only for non-split BDDs
for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr)
{
const auto [first,last] = bdd_index_range(bdd_nr, nr_variables(bdd_nr)-1);
value_type bdd_lb = std::numeric_limits<value_type>::infinity();
for(size_t idx=first; idx<last; ++idx)
{
const auto mm = bdd_branch_nodes_[idx].min_marginals();
bdd_lb = std::min({bdd_lb, mm[0], mm[1]});
}
lb += bdd_lb;
}
return lb;
}
template<typename BDD_BRANCH_NODE>
typename bdd_sequential_base<BDD_BRANCH_NODE>::vector_type bdd_sequential_base<BDD_BRANCH_NODE>::lower_bound_per_bdd()
{
if(message_passing_state_ == message_passing_state::after_backward_pass)
{
return lower_bound_per_bdd_after_backward_pass();
}
else if(message_passing_state_ == message_passing_state::after_forward_pass)
{
return lower_bound_per_bdd_after_forward_pass();
}
else
{
assert(message_passing_state_ == message_passing_state::none);
backward_run();
return lower_bound_per_bdd_after_backward_pass();
}
}
// TODO: possibly implement a template function that takes a lambda and can compute the lower bound and the lower bound per bdd
template<typename BDD_BRANCH_NODE>
typename bdd_sequential_base<BDD_BRANCH_NODE>::vector_type bdd_sequential_base<BDD_BRANCH_NODE>::lower_bound_per_bdd_after_forward_pass()
{
assert(message_passing_state_ == message_passing_state::after_forward_pass);
vector_type lbs(nr_bdds());
for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr)
{
const auto [first,last] = bdd_index_range(bdd_nr, nr_variables(bdd_nr)-1);
value_type bdd_lb = std::numeric_limits<value_type>::infinity();
for(size_t idx=first; idx<last; ++idx)
{
const auto mm = bdd_branch_nodes_[idx].min_marginals();
bdd_lb = std::min({bdd_lb, mm[0], mm[1]});
}
lbs[bdd_nr] = bdd_lb;
}
return lbs;
}
template<typename BDD_BRANCH_NODE>
typename bdd_sequential_base<BDD_BRANCH_NODE>::vector_type bdd_sequential_base<BDD_BRANCH_NODE>::lower_bound_per_bdd_after_backward_pass()
{
assert(message_passing_state_ == message_passing_state::after_backward_pass);
vector_type lbs(nr_bdds());
// TODO: works only for non-split BDDs
for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr)
{
const auto [first,last] = bdd_index_range(bdd_nr, 0);
assert(first+1 == last);
lbs[bdd_nr] = bdd_branch_nodes_[first].m;
}
return lbs;
}
template<typename BDD_BRANCH_NODE>
void bdd_sequential_base<BDD_BRANCH_NODE>::forward_run()
{
if(message_passing_state_ == message_passing_state::after_forward_pass)
return;
message_passing_state_ = message_passing_state::none;
#pragma omp parallel for schedule(static,512)
for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr)
{
// TODO: This only works for non-split BDDs with exactly one root node
{
const auto [first_bdd_node, last_bdd_node] = bdd_index_range(bdd_nr,0);
assert(first_bdd_node + 1 == last_bdd_node);
bdd_branch_nodes_[first_bdd_node].m = 0.0;
}
const auto [first_bdd_node, last_bdd_node] = bdd_range(bdd_nr);
for(size_t i=first_bdd_node; i<last_bdd_node; ++i)
bdd_branch_nodes_[i].prepare_forward_step();
for(size_t i=first_bdd_node; i<last_bdd_node; ++i)
bdd_branch_nodes_[i].forward_step();
}
message_passing_state_ = message_passing_state::after_forward_pass;
}
template<typename BDD_BRANCH_NODE>
void bdd_sequential_base<BDD_BRANCH_NODE>::backward_run(const size_t bdd_nr)
{
const auto [first_bdd_node, last_bdd_node] = bdd_range(bdd_nr);
for(std::ptrdiff_t i=last_bdd_node-1; i>=std::ptrdiff_t(first_bdd_node); --i)
bdd_branch_nodes_[i].backward_step();
}
template<typename BDD_BRANCH_NODE>
void bdd_sequential_base<BDD_BRANCH_NODE>::backward_run()
{
MEASURE_CUMULATIVE_FUNCTION_EXECUTION_TIME2("parallel mma backward_run");
if(message_passing_state_ == message_passing_state::after_backward_pass)
return;
message_passing_state_ = message_passing_state::none;
#pragma omp parallel for schedule(static,512)
for(std::ptrdiff_t bdd_nr=nr_bdds()-1; bdd_nr>=0; --bdd_nr)
backward_run(bdd_nr);
message_passing_state_ = message_passing_state::after_backward_pass;
}
template<typename BDD_BRANCH_NODE>
std::array<size_t,2> bdd_sequential_base<BDD_BRANCH_NODE>::bdd_index_range(const size_t bdd_nr, const size_t bdd_idx) const
{
assert(bdd_nr < nr_bdds());
assert(bdd_idx < nr_variables(bdd_nr));
const size_t first_bdd_node = bdd_variables_(bdd_nr, bdd_idx).offset;
const size_t last_bdd_node = bdd_variables_(bdd_nr, bdd_idx+1).offset;
assert(first_bdd_node < last_bdd_node);
return {first_bdd_node, last_bdd_node};
}
template<typename BDD_BRANCH_NODE>
std::array<size_t,2> bdd_sequential_base<BDD_BRANCH_NODE>::bdd_range(const size_t bdd_nr) const
{
assert(bdd_nr < nr_bdds());
const size_t first = bdd_variables_(bdd_nr, 0).offset;
const size_t last = bdd_variables_(bdd_nr+1, 0).offset;
assert(first < last);
return {first, last};
}
template<typename BDD_BRANCH_NODE>
//two_dim_variable_array<std::array<typename BDD_BRANCH_NODE::value_type,2>> bdd_sequential_base<BDD_BRANCH_NODE>::min_marginals()
two_dim_variable_array<std::array<double,2>> bdd_sequential_base<BDD_BRANCH_NODE>::min_marginals()
{
backward_run();
std::vector<size_t> nr_bdd_variables;
nr_bdd_variables.reserve(nr_bdds());
for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr)
nr_bdd_variables.push_back(nr_variables(bdd_nr));
two_dim_variable_array<std::array<double,2>> min_margs(nr_bdd_variables);
//#pragma omp parallel for schedule(guided,128)
for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr)
{
// initialize
const auto [first,last] = bdd_index_range(bdd_nr, 0);
assert(first + 1 == last);
bdd_branch_nodes_[first].m = 0.0;
for(size_t idx=0; idx<nr_variables(bdd_nr); ++idx)
{
std::array<value_type,2> mm = {std::numeric_limits<value_type>::infinity(), std::numeric_limits<value_type>::infinity()};
const auto [first,last] = bdd_index_range(bdd_nr, idx);
for(size_t i=first; i<last; ++i)
{
const std::array<value_type,2> cur_mm = bdd_branch_nodes_[i].min_marginals();
mm[0] = std::min(mm[0], cur_mm[0]);
mm[1] = std::min(mm[1], cur_mm[1]);
}
min_margs(bdd_nr, idx)[0] = mm[0];
min_margs(bdd_nr, idx)[1] = mm[1];
for(size_t i=first; i<last; ++i)
bdd_branch_nodes_[i].prepare_forward_step();
for(size_t i=first; i<last; ++i)
bdd_branch_nodes_[i].forward_step();
}
}
message_passing_state_ = message_passing_state::after_forward_pass;
return transpose_to_var_order(min_margs);
}
template<typename BDD_BRANCH_NODE>
std::tuple<typename bdd_sequential_base<BDD_BRANCH_NODE>::min_marginal_type, std::vector<char>> bdd_sequential_base<BDD_BRANCH_NODE>::min_marginals_stacked()
{
backward_run();
min_marginal_type min_margs(nr_bdd_variables(), 2);
std::vector<char> solutions;
solutions.reserve(nr_bdd_variables());
//#pragma omp parallel for schedule(guided,128)
size_t c = 0;
for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr)
{
// initialize
const auto [first,last] = bdd_index_range(bdd_nr, 0);
assert(first + 1 == last);
const value_type bdd_lb = bdd_branch_nodes_[first].m;
bdd_branch_nodes_[first].m = 0.0;
size_t next_node = first;
for(size_t idx=0; idx<nr_variables(bdd_nr); ++idx, ++c)
{
std::array<value_type,2> mm = {std::numeric_limits<value_type>::infinity(), std::numeric_limits<value_type>::infinity()};
const auto [first,last] = bdd_index_range(bdd_nr, idx);
for(size_t i=first; i<last; ++i)
{
const std::array<value_type,2> cur_mm = bdd_branch_nodes_[i].min_marginals();
mm[0] = std::min(mm[0], cur_mm[0]);
mm[1] = std::min(mm[1], cur_mm[1]);
// see if active path points to current node;
if(next_node == i)
{
if(cur_mm[0] < cur_mm[1])
{
assert(std::abs(bdd_lb - mm[0]) <= 1e-6);
solutions.push_back(0);
if(bdd_branch_nodes_[i].offset_low == BDD_BRANCH_NODE::terminal_0_offset)
{
assert(false); // this cannot happen
}
else if(bdd_branch_nodes_[i].offset_low == BDD_BRANCH_NODE::terminal_1_offset)
{
// we have arrived at the last variable of the bdd
assert(idx+1 == nr_variables(bdd_nr));
}
else
{
next_node = std::distance(&bdd_branch_nodes_[0], bdd_branch_nodes_[i].address(bdd_branch_nodes_[i].offset_low));
}
}
else
{
assert(std::abs(bdd_lb - mm[1]) <= 1e-6);
solutions.push_back(1);
if(bdd_branch_nodes_[i].offset_high == BDD_BRANCH_NODE::terminal_0_offset)
{
assert(false); // this cannot happen
}
else if(bdd_branch_nodes_[i].offset_high == BDD_BRANCH_NODE::terminal_1_offset)
{
// we have arrived at the last variable of the bdd
assert(idx+1 == nr_variables(bdd_nr));
}
else
{
next_node = std::distance(&bdd_branch_nodes_[0], bdd_branch_nodes_[i].address(bdd_branch_nodes_[i].offset_high));
}
}
}
}
min_margs(int(c),0) = mm[0];
min_margs(int(c),1) = mm[1];
for(size_t i=first; i<last; ++i)
bdd_branch_nodes_[i].prepare_forward_step();
for(size_t i=first; i<last; ++i)
bdd_branch_nodes_[i].forward_step();
}
}
message_passing_state_ = message_passing_state::after_forward_pass;
std::cout << "solutions size " << solutions.size() << "\n";
return {min_margs, solutions};
}
template<typename BDD_BRANCH_NODE>
typename bdd_sequential_base<BDD_BRANCH_NODE>::vector_type bdd_sequential_base<BDD_BRANCH_NODE>::get_costs()
{
vector_type costs(nr_bdd_variables());
size_t c = 0;
for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr)
{
for(size_t idx=0; idx<nr_variables(bdd_nr); ++idx)
{
const auto [first,last] = bdd_index_range(bdd_nr, idx);
for(size_t i=first; i<last; ++i)
{
const auto& bdd = bdd_branch_nodes_[i];
if(bdd.offset_low != BDD_BRANCH_NODE::terminal_0_offset)
assert(bdd.low_cost == 0.0);
if(bdd.offset_high != BDD_BRANCH_NODE::terminal_0_offset)
{
costs[c++] = bdd.high_cost;
break;
}
}
}
}
assert(costs.size() == nr_bdd_variables());
return costs;
}
template<typename BDD_BRANCH_NODE>
template<typename COST_ITERATOR>
void bdd_sequential_base<BDD_BRANCH_NODE>::update_costs(COST_ITERATOR cost_lo_begin, COST_ITERATOR cost_lo_end, COST_ITERATOR cost_hi_begin, COST_ITERATOR cost_hi_end)
{
message_passing_state_ = message_passing_state::none;
lower_bound_state_ = lower_bound_state::invalid;
auto get_lo_cost = [&](const size_t var) {
if(var < std::distance(cost_lo_begin, cost_lo_end) && var < nr_variables())
return *(cost_lo_begin+var)/double(nr_bdds(var));
else
return 0.0;
};
auto get_hi_cost = [&](const size_t var) {
if(var < std::distance(cost_hi_begin, cost_hi_end) && var < nr_variables())
return *(cost_hi_begin+var)/double(nr_bdds(var));
else
return 0.0;
};
//#pragma omp parallel for schedule(guided,128)
for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr)
{
for(size_t bdd_idx=0; bdd_idx<nr_variables(bdd_nr); ++bdd_idx)
{
const auto [first_node, last_node] = bdd_index_range(bdd_nr, bdd_idx);
const size_t var = variable(bdd_nr, bdd_idx);
const double lo_cost = get_lo_cost(var);
assert(std::isfinite(lo_cost));
const double hi_cost = get_hi_cost(var);
assert(std::isfinite(hi_cost));
for(size_t i=first_node; i<last_node; ++i)
{
if(bdd_branch_nodes_[i].offset_low == BDD_BRANCH_NODE::terminal_0_offset)
assert(bdd_branch_nodes_[i].low_cost == std::numeric_limits<decltype(bdd_branch_nodes_[i].low_cost)>::infinity());
if(bdd_branch_nodes_[i].offset_high == BDD_BRANCH_NODE::terminal_0_offset)
assert(bdd_branch_nodes_[i].high_cost == std::numeric_limits<decltype(bdd_branch_nodes_[i].high_cost)>::infinity());
if(bdd_branch_nodes_[i].offset_low != BDD_BRANCH_NODE::terminal_0_offset)
bdd_branch_nodes_[i].low_cost += lo_cost;
if(bdd_branch_nodes_[i].offset_high != BDD_BRANCH_NODE::terminal_0_offset)
bdd_branch_nodes_[i].high_cost += hi_cost;
}
}
}
// go over all cost entries and add them to the constant if they are not covered by any BDD.
for(size_t i=0; i<std::max(std::distance(cost_lo_begin, cost_lo_end), std::distance(cost_hi_begin, cost_hi_end)); ++i)
{
if(i >= nr_variables() || nr_bdds(i) == 0)
{
const double lo_cost = get_lo_cost(i);
const double hi_cost = get_hi_cost(i);
constant_ += std::min(lo_cost, hi_cost);
}
}
}
template<typename BDD_BRANCH_NODE>
void bdd_sequential_base<BDD_BRANCH_NODE>::update_costs(const two_dim_variable_array<std::array<typename BDD_BRANCH_NODE::value_type,2>>& delta)
{
message_passing_state_ = message_passing_state::none;
assert(delta.size() == nr_bdds());
const auto delta_t = transpose_to_bdd_order(delta);
#pragma omp parallel for schedule(static,512)
for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr)
{
for(size_t bdd_idx=0; bdd_idx<nr_variables(bdd_nr); ++bdd_idx)
{
const auto [first_node, last_node] = bdd_index_range(bdd_nr, bdd_idx);
for(size_t i=first_node; i<last_node; ++i)
{
bdd_branch_nodes_[i].low_cost += delta_t(bdd_nr, bdd_idx)[0];
bdd_branch_nodes_[i].high_cost += delta_t(bdd_nr, bdd_idx)[1];
}
}
}
}
template<typename BDD_BRANCH_NODE>
void bdd_sequential_base<BDD_BRANCH_NODE>::update_costs(const min_marginal_type& delta)
{
message_passing_state_ = message_passing_state::none;
assert(delta.rows() == nr_bdd_variables());
assert(delta.cols() == 2);
//#pragma omp parallel for schedule(guided,128)
size_t c = 0;
for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr)
{
for(size_t bdd_idx=0; bdd_idx<nr_variables(bdd_nr); ++bdd_idx, ++c)
{
const auto [first_node, last_node] = bdd_index_range(bdd_nr, bdd_idx);
for(size_t i=first_node; i<last_node; ++i)
{
bdd_branch_nodes_[i].low_cost += delta(c, 0);
bdd_branch_nodes_[i].high_cost += delta(c, 1);
}
}
}
}
template<typename BDD_BRANCH_NODE>
void bdd_sequential_base<BDD_BRANCH_NODE>::update_costs(const vector_type& delta)
{
message_passing_state_ = message_passing_state::none;
assert(delta.rows() == nr_bdd_variables());
assert(delta.cols() == 1);
//#pragma omp parallel for schedule(guided,128)
size_t c = 0;
for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr)
{
for(size_t bdd_idx=0; bdd_idx<nr_variables(bdd_nr); ++bdd_idx, ++c)
{
const auto [first_node, last_node] = bdd_index_range(bdd_nr, bdd_idx);
for(size_t i=first_node; i<last_node; ++i)
{
bdd_branch_nodes_[i].high_cost += delta(c, 0);
}
}
}
}
template<typename BDD_BRANCH_NODE>
template<typename ITERATOR>
void bdd_sequential_base<BDD_BRANCH_NODE>::fix_variables(ITERATOR zero_fixations_begin, ITERATOR zero_fixations_end, ITERATOR one_fixations_begin, ITERATOR one_fixations_end)
{
// TODO: check for variables that are not covered by any BDD. They might change the constant_
std::unordered_set<size_t> zero_fixations(zero_fixations_begin, zero_fixations_end);
std::unordered_set<size_t> one_fixations(one_fixations_begin, one_fixations_end);
assert(zero_fixations.size() + one_fixations.size() > 0);
for(const size_t v : zero_fixations)
assert(nr_bdds(v) > 0);
for(const size_t v : one_fixations)
assert(nr_bdds(v) > 0);
for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr)
{
for(size_t bdd_idx=0; bdd_idx<nr_variables(bdd_nr); ++bdd_idx)
{
const size_t var = variable(bdd_nr, bdd_idx);
const auto [first_bdd_node, last_bdd_node] = bdd_index_range(bdd_nr, bdd_idx);
assert(!(zero_fixations.count(var) > 0 && one_fixations.count(var) > 0));
if(zero_fixations.count(var) > 0)
for(size_t i=first_bdd_node; i<last_bdd_node; ++i)
bdd_branch_nodes_[i].high_cost = std::numeric_limits<value_type>::infinity();
if(one_fixations.count(var) > 0)
for(size_t i=first_bdd_node; i<last_bdd_node; ++i)
bdd_branch_nodes_[i].low_cost = std::numeric_limits<value_type>::infinity();
}
}
}
template<typename BDD_BRANCH_NODE>
void bdd_sequential_base<BDD_BRANCH_NODE>::diffusion_step(const two_dim_variable_array<std::array<typename BDD_BRANCH_NODE::value_type,2>>& min_margs, const value_type damping_step)
{
throw std::runtime_error("not correct yet");
message_passing_state_ = message_passing_state::none;
assert(min_margs.size() == nr_bdds());
assert(damping_step >= 0.0 && damping_step <= 1.0);
for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr)
{
assert(min_margs.size(bdd_nr) == nr_bdd_variables(bdd_nr));
for(size_t bdd_idx=0; bdd_idx<nr_bdd_variables(bdd_nr); ++bdd_idx)
{
const size_t var = variable(bdd_nr, bdd_idx);
const value_type denom = 1.0 / (nr_bdds(var)-1);
const auto [first_bdd_node, last_bdd_node] = bdd_index_range(bdd_nr, bdd_idx);
for(size_t i=first_bdd_node; i<last_bdd_node; ++i)
{
bdd_branch_nodes_[i].low_cost -= min_margs(bdd_nr, bdd_idx)[0];
bdd_branch_nodes_[i].high_cost -= min_margs(bdd_nr, bdd_idx)[1];
}
}
}
}
template<typename REAL>
void atomic_add(REAL& f, const REAL d)
{
if(d == 0)
return;
// TODO: use std::atomic_ref when available in C++20
Foo::atomic_ref<REAL> f_ref{f};
f_ref += d;
}
template<typename REAL>
void atomic_store(REAL& f, const REAL d)
{
Foo::atomic_ref<REAL> f_ref{f};
f_ref.store(d);
}
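// Note: atomic_add/atomic_store above wrap an atomic_ref-style helper so that the OpenMP-parallel
// forward_mm/backward_mm sweeps can update the shared min-marginal accumulators without data races;
// per the TODO, std::atomic_ref (C++20) would be the standard replacement.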
template<typename BDD_BRANCH_NODE>
void bdd_sequential_base<BDD_BRANCH_NODE>::forward_mm(
const size_t bdd_nr, const typename BDD_BRANCH_NODE::value_type omega,
std::vector<std::array<typename BDD_BRANCH_NODE::value_type,2>>& mms_to_collect,
std::vector<std::array<typename BDD_BRANCH_NODE::value_type,2>>& mms_to_distribute)
{
assert(mms_to_collect.size() == nr_variables());
assert(mms_to_distribute.size() == nr_variables());
assert(omega > 0.0 && omega <= 1.0);
assert(bdd_nr < nr_bdds());
{
const auto [first_bdd_node, last_bdd_node] = bdd_index_range(bdd_nr, 0);
assert(first_bdd_node + 1 == last_bdd_node);
bdd_branch_nodes_[first_bdd_node].m = 0.0;
}
for(size_t bdd_idx=0; bdd_idx<nr_variables(bdd_nr); ++bdd_idx)
{
const auto [first_bdd_node, last_bdd_node] = bdd_index_range(bdd_nr, bdd_idx);
const size_t var = variable(bdd_nr, bdd_idx);
std::array<value_type,2> cur_mm = {std::numeric_limits<value_type>::infinity(), std::numeric_limits<value_type>::infinity()};
for(size_t i=first_bdd_node; i<last_bdd_node; ++i)
{
const auto bdd_mm = bdd_branch_nodes_[i].min_marginals();
cur_mm[0] = std::min(bdd_mm[0], cur_mm[0]);
cur_mm[1] = std::min(bdd_mm[1], cur_mm[1]);
}
if(!std::isfinite(cur_mm[0]))
atomic_store(mms_to_collect[var][0], std::numeric_limits<value_type>::infinity());
if(!std::isfinite(cur_mm[1]))
atomic_store(mms_to_collect[var][1], std::numeric_limits<value_type>::infinity());
if(std::isfinite(cur_mm[0]) && std::isfinite(cur_mm[1]))
{
if(cur_mm[0] < cur_mm[1])
atomic_add(mms_to_collect[var][1], omega*(cur_mm[1] - cur_mm[0]));
else
atomic_add(mms_to_collect[var][0], omega*(cur_mm[0] - cur_mm[1]));
}
assert(mms_to_collect[var][0] >= 0.0);
assert(mms_to_collect[var][1] >= 0.0);
for(size_t i=first_bdd_node; i<last_bdd_node; ++i)
{
if(!std::isfinite(cur_mm[0]))
bdd_branch_nodes_[i].low_cost = std::numeric_limits<value_type>::infinity();
if(!std::isfinite(cur_mm[1]))
bdd_branch_nodes_[i].high_cost = std::numeric_limits<value_type>::infinity();
if(std::isfinite(cur_mm[0]) && std::isfinite(cur_mm[1]))
{
//bdd_branch_nodes_[i].low_cost += std::min(omega*(cur_mm[1] - cur_mm[0]), value_type(0.0));
//bdd_branch_nodes_[i].high_cost += std::min(omega*(cur_mm[0] - cur_mm[1]), value_type(0.0));
if(cur_mm[0] < cur_mm[1])
bdd_branch_nodes_[i].high_cost += omega*(cur_mm[0] - cur_mm[1]);
else
bdd_branch_nodes_[i].low_cost += omega*(cur_mm[1] - cur_mm[0]);
}
}
if(bdd_idx+1<nr_variables(bdd_nr))
{
const auto [next_first_bdd_node, next_last_bdd_node] = bdd_index_range(bdd_nr, bdd_idx+1);
for(size_t i=next_first_bdd_node; i<next_last_bdd_node; ++i)
bdd_branch_nodes_[i].m = std::numeric_limits<value_type>::infinity();
}
for(size_t i=first_bdd_node; i<last_bdd_node; ++i)
{
bdd_branch_nodes_[i].low_cost += mms_to_distribute[var][0];
bdd_branch_nodes_[i].high_cost += mms_to_distribute[var][1];
bdd_branch_nodes_[i].forward_step();
}
}
}
template<typename BDD_BRANCH_NODE>
typename BDD_BRANCH_NODE::value_type
bdd_sequential_base<BDD_BRANCH_NODE>::backward_mm(const size_t bdd_nr, const typename BDD_BRANCH_NODE::value_type omega, std::vector<std::array<typename BDD_BRANCH_NODE::value_type,2>>& mms_to_collect, std::vector<std::array<typename BDD_BRANCH_NODE::value_type,2>>& mms_to_distribute)
{
assert(mms_to_collect.size() == nr_variables());
assert(mms_to_distribute.size() == nr_variables());
assert(omega > 0.0 && omega <= 1.0);
assert(bdd_nr < nr_bdds());
for(std::ptrdiff_t bdd_idx=nr_variables(bdd_nr)-1; bdd_idx>=0; --bdd_idx)
{
const auto [first_bdd_node, last_bdd_node] = bdd_index_range(bdd_nr, bdd_idx);
const size_t var = variable(bdd_nr, bdd_idx);
std::array<value_type,2> cur_mm = {std::numeric_limits<value_type>::infinity(), std::numeric_limits<value_type>::infinity()};
for(std::ptrdiff_t i=std::ptrdiff_t(last_bdd_node)-1; i>=std::ptrdiff_t(first_bdd_node); --i)
{
const auto bdd_mm = bdd_branch_nodes_[i].min_marginals();
cur_mm[0] = std::min(bdd_mm[0], cur_mm[0]);
cur_mm[1] = std::min(bdd_mm[1], cur_mm[1]);
}
if(!std::isfinite(cur_mm[0]))
atomic_store(mms_to_collect[var][0], std::numeric_limits<value_type>::infinity());
if(!std::isfinite(cur_mm[1]))
atomic_store(mms_to_collect[var][1], std::numeric_limits<value_type>::infinity());
if(std::isfinite(cur_mm[0]) && std::isfinite(cur_mm[1]))
{
if(cur_mm[0] < cur_mm[1])
atomic_add(mms_to_collect[var][1], omega*(cur_mm[1] - cur_mm[0]));
else
atomic_add(mms_to_collect[var][0], omega*(cur_mm[0] - cur_mm[1]));
}
assert(mms_to_collect[var][0] >= 0.0);
assert(mms_to_collect[var][1] >= 0.0);
for(std::ptrdiff_t i=std::ptrdiff_t(last_bdd_node)-1; i>=std::ptrdiff_t(first_bdd_node); --i)
{
if(!std::isfinite(cur_mm[0]))
bdd_branch_nodes_[i].low_cost = std::numeric_limits<value_type>::infinity();
if(!std::isfinite(cur_mm[1]))
bdd_branch_nodes_[i].high_cost = std::numeric_limits<value_type>::infinity();
if(std::isfinite(cur_mm[0]) && std::isfinite(cur_mm[1]))
{
//bdd_branch_nodes_[i].low_cost += std::min(omega*(cur_mm[1] - cur_mm[0]), value_type(0.0));
//bdd_branch_nodes_[i].high_cost += std::min(omega*(cur_mm[0] - cur_mm[1]), value_type(0.0));
if(cur_mm[0] < cur_mm[1])
bdd_branch_nodes_[i].high_cost += omega*(cur_mm[0] - cur_mm[1]);
else
bdd_branch_nodes_[i].low_cost += omega*(cur_mm[1] - cur_mm[0]);
}
}
for(std::ptrdiff_t i=std::ptrdiff_t(last_bdd_node)-1; i>=std::ptrdiff_t(first_bdd_node); --i)
{
bdd_branch_nodes_[i].low_cost += mms_to_distribute[var][0];
bdd_branch_nodes_[i].high_cost += mms_to_distribute[var][1];
bdd_branch_nodes_[i].backward_step();
}
}
const auto [root_bdd_node_begin, root_bdd_node_end] = bdd_index_range(bdd_nr, 0);
assert(root_bdd_node_begin+1 == root_bdd_node_end);
return bdd_branch_nodes_[root_bdd_node_begin].m;
}
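// backward_mm() returns the lower bound contributed by the processed BDD: after the backward sweep
// the root node's m-value equals that BDD's minimum cost, which parallel_mma() sums into the global bound.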
template<typename BDD_BRANCH_NODE>
void bdd_sequential_base<BDD_BRANCH_NODE>::parallel_mma()
{
backward_run();
auto reset_mms = [&](std::vector<std::array<value_type,2>>& mms) {
assert(mms.size() == nr_variables());
std::fill(mms.begin(), mms.end(), std::array<value_type,2>{0.0,0.0});
};
auto init_mms = [&](std::vector<std::array<value_type,2>>& mms) {
if(mms.size() != nr_variables())
{
assert(mms.size() == 0);
mms = std::vector<std::array<value_type,2>>(nr_variables(), {0.0,0.0});
}
};
auto average_mms = [&](std::vector<std::array<value_type,2>>& mms) {
MEASURE_CUMULATIVE_FUNCTION_EXECUTION_TIME2("parallel mma marginal averaging");
#pragma omp parallel for
for(size_t var=0; var<nr_variables(); ++var)
{
assert(nr_bdds(var) > 0);
mms[var][0] /= value_type(nr_bdds(var));
mms[var][1] /= value_type(nr_bdds(var));
}
};
init_mms(mms_to_collect_);
init_mms(mms_to_distribute_);
double lb = constant_;
{
MEASURE_CUMULATIVE_FUNCTION_EXECUTION_TIME2("parallel mma incremental marginal computation");
#pragma omp parallel for schedule(static,256)
for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr)
forward_mm(bdd_nr, 0.5, mms_to_collect_, mms_to_distribute_);
average_mms(mms_to_collect_);
reset_mms(mms_to_distribute_);
std::swap(mms_to_collect_, mms_to_distribute_);
#pragma omp parallel for schedule(static,256) reduction(+:lb)
for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr)
lb += backward_mm(bdd_nr, 0.5, mms_to_collect_, mms_to_distribute_);
average_mms(mms_to_collect_);
reset_mms(mms_to_distribute_);
std::swap(mms_to_collect_, mms_to_distribute_);
}
lower_bound_ = lb;
message_passing_state_ = message_passing_state::after_backward_pass;
lower_bound_state_ = lower_bound_state::valid;
}
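// Summary of parallel_mma(): one iteration performs a forward and a backward sweep over all BDDs.
// Each sweep collects half (omega = 0.5) of every variable's min-marginal difference into
// mms_to_collect_, averages it over the number of BDDs covering the variable, and redistributes the
// averaged values in the opposite sweep via mms_to_distribute_; the backward sweep additionally
// accumulates the per-BDD lower bounds into lower_bound_.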
template<typename BDD_BRANCH_NODE>
void bdd_sequential_base<BDD_BRANCH_NODE>::distribute_delta()
{
message_passing_state_ = message_passing_state::none;
lower_bound_state_ = lower_bound_state::invalid;
assert(mms_to_distribute_.size() == nr_variables());
#pragma omp parallel for
for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr)
{
for(size_t bdd_idx=0; bdd_idx<nr_variables(bdd_nr); ++bdd_idx)
{
const auto [first_bdd_node, last_bdd_node] = bdd_index_range(bdd_nr, bdd_idx);
const size_t var = variable(bdd_nr, bdd_idx);
for(size_t i=first_bdd_node; i<last_bdd_node; ++i)
{
bdd_branch_nodes_[i].low_cost += mms_to_distribute_[var][0];
bdd_branch_nodes_[i].high_cost += mms_to_distribute_[var][1];
}
}
}
const std::array<typename BDD_BRANCH_NODE::value_type,2> zeros = {0.0, 0.0};
std::fill(mms_to_distribute_.begin(), mms_to_distribute_.end(), zeros);
}
template<typename BDD_BRANCH_NODE>
template<typename T>
two_dim_variable_array<T> bdd_sequential_base<BDD_BRANCH_NODE>::transpose_to_var_order(const two_dim_variable_array<T>& m) const
{
assert(m.size() == nr_bdds());
std::vector<size_t> counter(nr_variables(), 0);
two_dim_variable_array<T> transposed(nr_bdds_per_variable_);
for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr)
{
assert(m.size(bdd_nr) == nr_variables(bdd_nr));
for(size_t bdd_idx=0; bdd_idx<nr_variables(bdd_nr); ++bdd_idx)
{
const size_t var = variable(bdd_nr, bdd_idx);
transposed(var, counter[var]++) = m(bdd_nr, bdd_idx);
}
}
return transposed;
}
template<typename BDD_BRANCH_NODE>
template<typename T>
two_dim_variable_array<T> bdd_sequential_base<BDD_BRANCH_NODE>::transpose_to_bdd_order(const two_dim_variable_array<T>& m) const
{
assert(m.size() == nr_variables());
std::vector<size_t> counter;
counter.reserve(nr_bdds());
for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr)
counter.push_back(nr_variables(bdd_nr));
two_dim_variable_array<T> transposed(counter);
counter.clear();
counter.resize(nr_variables(),0);
for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr)
{
for(size_t bdd_idx=0; bdd_idx<nr_variables(bdd_nr); ++bdd_idx)
{
const size_t var = variable(bdd_nr, bdd_idx);
transposed(bdd_nr, bdd_idx) = m(var, counter[var]++);
}
}
return transposed;
}
template<typename BDD_BRANCH_NODE>
Eigen::SparseMatrix<typename BDD_BRANCH_NODE::value_type> bdd_sequential_base<BDD_BRANCH_NODE>::Lagrange_constraint_matrix() const
{
using T = Eigen::Triplet<value_type>;
std::vector<T> coefficients;
size_t c = 0;
for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr)
{
for(size_t bdd_idx=0; bdd_idx<nr_variables(bdd_nr); ++bdd_idx, ++c)
{
const size_t var = variable(bdd_nr, bdd_idx);
coefficients.push_back(T(var,c,1));
}
}
Eigen::SparseMatrix<value_type> A(nr_variables(), nr_bdd_variables());
A.setFromTriplets(coefficients.begin(), coefficients.end());
return A;
}
template<typename BDD_BRANCH_NODE>
void bdd_sequential_base<BDD_BRANCH_NODE>::export_graphviz(const char* filename)
{
const std::string f(filename);
export_graphviz(f);
}
template<typename BDD_BRANCH_NODE>
void bdd_sequential_base<BDD_BRANCH_NODE>::export_graphviz(const std::string& filename)
{
const std::string base_filename = std::filesystem::path(filename).replace_extension("").c_str();
for(size_t bdd_nr=0; bdd_nr<nr_bdds(); ++bdd_nr)
{
const std::string dot_file = base_filename + "_" + std::to_string(bdd_nr) + ".dot";
std::fstream f;
f.open(dot_file, std::fstream::out | std::ofstream::trunc);
export_graphviz(f, bdd_nr);
f.close();
const std::string png_file = base_filename + "_" + std::to_string(bdd_nr) + ".png";
const std::string convert_command = "dot -Tpng " + dot_file + " > " + png_file;
std::system(convert_command.c_str());
}
}
template<typename BDD_BRANCH_NODE>
template<typename STREAM>
void bdd_sequential_base<BDD_BRANCH_NODE>::export_graphviz(STREAM& s, const size_t bdd_nr)
{
s << "digraph BDD\n";
s << "{\n";
const auto [first,last] = bdd_range(bdd_nr);
for(size_t bdd_idx=first; bdd_idx<last; ++bdd_idx)
{
auto& bdd = bdd_branch_nodes_[bdd_idx];
if(bdd.offset_low != BDD_BRANCH_NODE::terminal_0_offset && bdd.offset_low != BDD_BRANCH_NODE::terminal_1_offset)
{
s << "\"" << &bdd << "\" -> \"" << bdd.address(bdd.offset_low) << "\" [ style=\"dashed\"];\n";;
}
else
{
s << "\"" << &bdd << "\" -> " << " bot [style=\"dashed\"];\n";
}
if(bdd.offset_high != BDD_BRANCH_NODE::terminal_0_offset && bdd.offset_high != BDD_BRANCH_NODE::terminal_1_offset)
{
s << "\"" << &bdd << "\" -> \"" << bdd.address(bdd.offset_high) << "\";\n";
}
else
{
s << "\"" << &bdd << "\" -> " << " top;\n";
}
}
s << "}\n";
}
}
|
matrix_multiply_omp_cache_optimized.c | /************************************************************
Author : Ali Snedden
Date : 8/21/18
License: MIT
Purpose:
This is a program that multiplies two matrices.
Debug :
Notes :
1. To run :
export OMP_NUM_THREADS=20
gcc -O3 -fopenmp src/matrix_multiply_omp.c ### -O3 is critical
Good Weblinks:
1. Unified Memory : https://devblogs.nvidia.com/unified-memory-cuda-beginners/
Future :
1. Try managing memory directly on Host and Device.
************************************************************/
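/************************************************************
Example build & run (illustrative paths; adjust to your setup):
export OMP_NUM_THREADS=20
gcc -O3 -fopenmp matrix_multiply_omp_cache_optimized.c -o matrix_multiply
./matrix_multiply data/A.txt data/B.txt data/
The program reads A and B as whitespace-separated text matrices and
writes AB_result.txt into the given output directory (see main() below).
************************************************************/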
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <omp.h>
/**********************************
ARGS:
RETURN:
DESCRIPTION:
Map 2D indices to 1D index
DEBUG:
1. read_numpy_matrix() uses this function extensively.
Directly compared output from read_numpy_matrix() with input
and was IDENTICAL. This could not work if map_idx() didn't
function correctly.
FUTURE:
1. Add error checking if not too expensive
***********************************/
int map_idx(int i, int j, int Ny){
return (Ny * i + j);
}
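/* Example: element (i,j) of a row-majored Nx x Ny matrix stored in a 1D
array is accessed as array[map_idx(i, j, Ny)]. */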
/********************************************************
ARGS:
DESCRIPTION:
RETURN:
DEBUG:
NOTES:
FUTURE:
*******************************************************/
void exit_with_error(char * message){
fprintf(stderr, "%s\n", message);
fflush(stderr);
exit(1);
}
/**********************************
ARGS:
path = path to file to read
dim = dimension of returned matrix, expected to be len = 2
RETURN:
DESCRIPTION:
Read a whitespace-separated matrix from a text file into a newly allocated
row-majored 1D array; dim[0]/dim[1] are set to the number of rows/columns.
DEBUG:
1. Printed out read in matrix. used 'diff' to compare
with original. Was IDENTICAL
--> This function WORKS!
FUTURE:
1. Add error checking if not too expensive
***********************************/
float * read_numpy_matrix_row_majored(char* path, int * dim){
char * line= NULL;
char * entireFile = NULL;
char * pch = NULL; // Used for parsing strings w strtok
char errStr[500];
int fileSize = -1;
int nline = 0;
int maxchar = 0; // Maximum number of characters in a line
int nchar = 0; // Number of characters in line
int ncols = -1; // Number of columns in each row; should be the same for every row
int ncolsThisRow = 0;
int i = 0;
int j = 0;
int n = 0; // Index to loop thru _all_ file chars
float * matrix = NULL;
FILE * f = fopen(path, "r");
printf("\treading : %s\n", path);
fflush(stdout);
//Error check
if(f == NULL){
sprintf(errStr, "ERROR!!! %s cannot be opened", path);
exit_with_error(errStr);
}
//Get file size
fseek(f, 0, SEEK_END);
fileSize = ftell(f); // Total num chars in file
rewind(f);
//Read entire file
entireFile = (char* )malloc(sizeof(char) * fileSize);
fread(entireFile, sizeof(char), fileSize, f);
rewind(f);
//Find number of lines and maxchar per line...
for(n=0; n<fileSize; n++){
if(entireFile[n] == ' '){
ncolsThisRow++;
}
if(entireFile[n] == '\n'){
maxchar = nchar > maxchar ? nchar : maxchar;
//Must set at first
if(nline == 0){
ncols = ncolsThisRow;
//Oops, rows aren't the same size.
}else if(ncols != ncolsThisRow){
sprintf(errStr, "ERROR!!! nchar %i != ncolsThisRow %i\n", nchar, ncolsThisRow);
exit_with_error(errStr);
}
ncolsThisRow=0;
nchar = 0;
nline++;
}
nchar++;
}
maxchar = maxchar + 1; //+1 for null terminator?
printf("\tdim = [nline, ncols] = [%i, %i], maxchar = %i \n", nline, ncols, maxchar);
fflush(stdout);
// Done with busy work - now allocate memory, read in array
matrix = (float *)malloc(nline * ncols * sizeof(float)); // one float per matrix entry
line = (char *)malloc(sizeof(char) * maxchar);
i = 0;
while(feof(f) == 0){
if(fgets(line, maxchar, f) == NULL){
printf("\tEnd of File Reached\n\n");
//sprintf(errStr, "ERROR!!! in reading 'line'\n");
//exit_with_error(errStr);
break; // nothing left to parse; avoid re-parsing the previous line
}
// Parse line in file
pch = strtok(line," ");
j = 0;
while(pch != NULL){
matrix[map_idx(i,j,ncols)] = (float)atof(pch);
pch = strtok(NULL, " ");
j++;
}
i++;
}
/* Debug
for(i=0; i<nline; i++){
for(j=0; j<ncols; j++){
printf("%.1f ", matrix[map_idx(i,j,ncols)]);
}
printf("\n");
}*/
free(line);
free(entireFile);
fclose(f);
dim[0] = nline;
dim[1] = ncols;
return matrix;
}
/*************************************************************
ARGS:
float * A : 2 x 2 matrix, stored as row majored in 1D array
int * dim : len(dim) = 2
RETURN:
-> newM, is a matrix that is column ordered matrix.
-> dim is unchanged
DESCRIPTION:
Take Transpose
DEBUG:
1. Spot checked beginning, middle and end of matrix. It appears
that I correctly switched from row-majored to column majored
matrix
FUTURE:
**************************************************************/
float * reorder_row_major_as_col_major(float * B, int * dim){
int i,j; // Indices
float * newM = (float *)malloc(sizeof(float) * dim[0] * dim[1]);
//rows
for(i=0; i<dim[0]; i++){
for(j=0; j<dim[1]; j++){
newM[map_idx(j,i,dim[0])] = B[map_idx(i,j,dim[1])]; // dim[0] or dim[1] for newM?
//newM[map_idx(i,j,dim[0])] = B[map_idx(j,i,dim[1])]; // dim[0] or dim[1] for newM?
}
}
printf("Re-ordering matrix B...\n");
free(B);
return(newM);
}
/**********************************
ARGS:
array1D : 'flattened' 2D array as 1D
Nx, Ny : dimensions of the flattened 2D array; f : output file
RETURN:
N/A
DESCRIPTION:
Writes the flattened 2D array to file f in row-major order
DEBUG:
1. spot checked, it works
FUTURE:
***********************************/
void write_1D_array(float * array1D, int Nx, int Ny, FILE * f){
int i = 0;
int j = 0;
int idx = 0;
for(i=0; i<Nx; i++){
for(j=0; j<Ny; j++){
idx = map_idx(i,j,Ny);
fprintf(f, "%*.1f ", 5, array1D[idx]);
}
fprintf(f, "\n");
}
}
/**********************************
ARGS:
array1D : 'flattened' 2D array as 1D
Nx, Ny : dimensions of the flattened 2D array
RETURN:
N/A
DESCRIPTION:
Prints the flattened 2D array in row-major order
DEBUG:
1. spot checked, it works
FUTURE:
***********************************/
void print_1D_array(float * array1D, int Nx, int Ny){
int i = 0;
int j = 0;
int idx = 0;
for(i=0; i<Nx; i++){
for(j=0; j<Ny; j++){
idx = map_idx(i,j,Ny);
printf("%*.1f ", 5, array1D[idx]);
}
printf("\n");
}
}
/********************************************************
ARGS:
DESCRIPTION:
RETURN:
DEBUG:
NOTES:
1. Use 'flattened' 2D array
FUTURE:
*******************************************************/
void initialize_matrix(float *A, int * dim, float value){
int i;
int j;
for(i=0; i<dim[0]; i++){
for(j=0; j<dim[1]; j++){
//A[i*dim[0]+j] = value;
A[map_idx(i,j,dim[1])] = value;
}
}
}
/********************************************************
ARGS:
A : 'flattened' 2d matrix. row majored
B : 'flattened' 2d matrix. column majored
dimA : gives x & y dims
dimB : gives x & y dims
dimAB: pointer modified to return size of new matrix
DESCRIPTION:
Multiply A*B : Check dims. Expect only 2 dimensions
for dimA and dimB.
RETURN:
DEBUG:
1. created code, matrix_generator.py, that multiplies two matrices and
saves the input and output to a file. I read in data/A.txt, data/B.txt
and used this function to multiply the matrices. Printed the output and
compared to data/AB.txt. It was IDENTICAL.
--> This function works!
NOTES:
FUTURE:
*******************************************************/
float * omp_matrix_multiply(float * A, float * B, int * dimA, int * dimB, int * dimAB)
{
int j = 0; // Index iterating over columns in B (i.e. columns of the result)
int ai = 0; // Index iterating over rows in A
int bj = 0; // Index iterating over rows in B, i.e. the dot-product summation index
int tid;
int nthreads;
float sum = 0;
char errStr[500];
float * result = (float *)malloc(sizeof(float) * dimA[0] * dimB[1]);
// Error Check
if(dimA[1] != dimB[0]){
sprintf(errStr, "ERROR!! dimension mismatch, %i != %i\n", dimA[1], dimB[0]);
exit_with_error(errStr);
}
#pragma omp parallel private(nthreads, tid, sum, bj, ai, j) shared(dimA, dimB, result)
{
#if defined(_OPENMP)
tid = omp_get_thread_num();
printf("%i / %i reporting for duty\n", tid, omp_get_num_threads());
#endif
#pragma omp for
for(ai=0; ai<dimA[0]; ai++){
for(j=0; j<dimB[1]; j++){
sum = 0;
for(bj=0; bj<dimB[0]; bj++){
//for(j=0; j<dimA[1]; j++){
//printf("%.0f * %0.f\n", A[map_idx(ai, j, dimA[1])],
// B[map_idx(j, bj, dimB[1])]);
//sum += A[map_idx(ai, j, dimA[1])] * B[map_idx(j, bj, dimB[1])];
sum += A[map_idx(ai, bj, dimA[1])] * B[map_idx(j, bj, dimB[0])];
//}
}
result[map_idx(ai,j,dimB[1])] = sum;
}
}
}
dimAB[0] = dimA[0];
dimAB[1] = dimB[1];
return result;
}
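/* Worked example (2 x 2): with A = {1,2,3,4} (row-majored [[1,2],[3,4]]) and B reordered to
column-major storage {5,7,6,8} (originally [[5,6],[7,8]]), this function produces
result = {19,22,43,50}, i.e. [[19,22],[43,50]] = A*B, stored row-majored. */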
/********************************************************
ARGS:
At command line :
matrix_multiply path/to/A.txt path/to/B.txt path/to/output/
DESCRIPTION:
RETURN:
DEBUG:
NOTES:
FUTURE:
*******************************************************/
int main(int argc, char * argv[])
{
// Declare variables
char path[100];
char errStr[500];
int nDev = 0; //Number of devices
int * dimA = NULL;
int * dimB = NULL;
int * dimAB = NULL;
float *A = NULL;
float *B = NULL;
float *AB = NULL;
float *answer = NULL;
FILE * fout = NULL;
dimA = (int *) malloc(2 * sizeof(int));
dimB = (int *) malloc(2 * sizeof(int));
dimAB= (int *) malloc(2 * sizeof(int));
printf("Running matrix_multiply_omp_cache_optimized.c ...\n");
//sprintf(path, "data/very_large/A.txt");
A = read_numpy_matrix_row_majored(argv[1], dimA);
//sprintf(path, "data/very_large/B.txt");
B = read_numpy_matrix_row_majored(argv[2], dimB);
B = reorder_row_major_as_col_major(B, dimB);
dimAB[0] = dimA[0];
dimAB[1] = dimB[1];
//AB = cpu_matrix_multiply(A, B, dimA, dimB, dimAB);
fflush(stdout);
time_t start = time(NULL);
AB = omp_matrix_multiply(A, B, dimA, dimB, dimAB);
printf("Run time : %.3f s\n", difftime(time(NULL), start));
// Output
sprintf(path, "%s/AB_result.txt", argv[3]);
fout = fopen(path, "w+");
if(fout == NULL){
sprintf(errStr, "ERROR!! Cannot create, %s\n", path);
exit_with_error(errStr);
}
write_1D_array(AB, dimAB[0], dimAB[1], fout);
fclose(fout);
free(dimA);
free(dimB);
free(dimAB);
free(A);
free(B);
free(AB);
return 0;
}
|
test.c | #define N 1024
#define _GNU_SOURCE
#include <link.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
// If one of the libomptarget plugins has been loaded, it means we are running
// with libomptarget. libomptarget.so is also used by LOMP, so we need to check
// for libomptarget.rtl.*.
static int isLibomptarget(struct dl_phdr_info *info, size_t size,
void *data) {
if (strstr(info->dlpi_name, "libomptarget.rtl") != NULL) {
*((int *) data) = 1;
return 1;
}
return 0;
}
#define TEST_NESTED 1
#define TEST_CONCURRENT 1
#define TEST_CONCURRENT_TF 1
#define TEST_PARALLEL1 1
int a[N], b[N];
int main() {
int i;
int error, totError = 0;
#if TEST_NESTED
for (i=0; i<N; i++) a[i] = b[i] = i;
#pragma omp target data map(to:b) map(from: a)
{
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=0; j<N/4; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=N/4; j<N/2; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=N/2; j<3*(N/4); j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=3*(N/4); j<N; j++) a[j] = b[j]+1;
}
#pragma omp taskwait
}
error=0;
for (i=0; i<N; i++) {
if (a[i] != i+1) printf("%d: error %d != %d, error %d\n", i, a[i], i+1, ++error);
}
if (! error) {
printf(" test with nested maps completed successfully\n");
} else {
printf(" test with nested maps completed with %d error(s)\n", error);
totError++;
}
#endif
#if TEST_CONCURRENT_TF
for (i=0; i<N; i++) a[i] = b[i] = i;
#pragma omp target nowait map(tofrom:a, b)
{
int j;
for(j=0; j<N/4; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(tofrom:a, b)
{
int j;
for(j=N/4; j<N/2; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(tofrom:a, b)
{
int j;
for(j=N/2; j<3*(N/4); j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(tofrom:a, b)
{
int j;
for(j=3*(N/4); j<N; j++) a[j] = b[j]+1;
}
#pragma omp taskwait
error=0;
for (i=0; i<N; i++) {
if (a[i] != i+1) printf("%d: error %d != %d, error %d\n", i, a[i], i+1, ++error);
}
if (! error) {
printf(" test with concurrent with to/from maps completed successfully\n");
} else {
printf(" test with concurrent with to/from maps completed with %d error(s)\n", error);
totError++;
}
#endif
#if TEST_CONCURRENT
// This test cannot run correctly with libomptarget because the library does
// not support proper async. Fake the output in this case.
int libomptargetInUse = 0;
dl_iterate_phdr(isLibomptarget, &libomptargetInUse);
if (libomptargetInUse) {
printf(" test with concurrent maps completed successfully\n");
} else {
// Run actual test
for (i=0; i<N; i++) a[i] = b[i] = i;
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=0; j<N/4; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=N/4; j<N/2; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=N/2; j<3*(N/4); j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=3*(N/4); j<N; j++) a[j] = b[j]+1;
}
#pragma omp taskwait
error=0;
for (i=0; i<N; i++) {
if (a[i] != i+1) printf("%d: error %d != %d, error %d\n", i, a[i], i+1, ++error);
}
if (! error) {
printf(" test with concurrent maps completed successfully\n");
} else {
printf(" test with concurrent maps completed with %d error(s)\n", error);
totError++;
}
}
#endif
#if TEST_PARALLEL1
for (i=0; i<N; i++) a[i] = b[i] = i;
#pragma omp parallel num_threads(1)
{
#pragma omp target data map(to:b) map(from: a)
{
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=0; j<N/4; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=N/4; j<N/2; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=N/2; j<3*(N/4); j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=3*(N/4); j<N; j++) a[j] = b[j]+1;
}
#pragma omp taskwait
}
}
error=0;
for (i=0; i<N; i++) {
if (a[i] != i+1) printf("%d: error %d != %d, error %d\n", i, a[i], i+1, ++error);
}
if (! error) {
printf(" test with nested maps and Parallel 1 thread completed successfully\n");
} else {
printf(" test with nested maps and Parallel 1 thread completed with %d error(s)\n", error);
totError++;
}
#endif
printf("completed with %d errors\n", totError);
return totError;
}
|
nukedclan_fmt_plug.c | /* Nuked-Klan CMS DB cracker patch for JtR. Hacked together during
* July of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
* Input Format => user:$nk$*HASHKEY*hash
*
* Where,
*
* HASHKEY => hex(HASHKEY value found in conf.inc.php)
*
* Modified by JimF, Jul 2012. About 6x speed improvements.
*/
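/* Hashing scheme (summary of crypt_all() below):
 *   1. h = hex(SHA1(password)), 40 ASCII hex characters.
 *   2. These 40 characters are interleaved with the 20 raw HASHKEY bytes, cycling
 *      through HASHKEY starting at position 'decal' (the hex digit following '#'),
 *      yielding an 80 byte buffer.
 *   3. The stored hash is MD5 of that buffer.
 */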
#if FMT_EXTERNS_H
extern struct fmt_main fmt_nk;
#elif FMT_REGISTERS_H
john_register_one(&fmt_nk);
#else
#include <string.h>
#include "arch.h"
#include "md5.h"
#include "sha.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "common.h"
#ifdef _OPENMP
#include <omp.h>
// Tuned on core i7 quad HT
// 1 5059K
// 16 8507k
// 64 8907k ** this was chosen.
// 128 8914k
// 256 8810k
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "nk"
#define FORMAT_NAME "Nuked-Klan CMS"
#define FORMAT_TAG "$nk$*"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "SHA1 MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1 /* change to 0 once there's any speedup for "many salts" */
#define PLAINTEXT_LENGTH 32
#define CIPHERTEXT_LENGTH (4+32+40+3+1)
#define BINARY_SIZE 16
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 64
static struct fmt_tests nk_tests[] = {
{"$nk$*379637b4fcde21b2c5fbc9a00af505e997443267*#17737d3661312121d5ae7d5c6156c0298", "openwall"},
{"$nk$*379637b4fcde21b2c5fbc9a00af505e997443267*#5c20384512ee36590f5f0ab38a46c6ced", "password"},
// from pass_gen.pl
{"$nk$*503476424c5362476f36463630796a6e6c656165*#2f27c20e65b88b76c913115cdec3d9a18", "test1"},
{"$nk$*7a317a71794339586c434d50506b6e4356626a67*#b62a615f605c2fd520edde76577d30f90", "thatsworking"},
{"$nk$*796b7375666d7545695032413769443977644132*#4aec90bd9a930faaa42a0d7d40056132e", "test3"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static struct custom_salt {
unsigned char HASHKEY[41];
int decal;
} *cur_salt;
static inline void hex_encode(unsigned char *str, int len, unsigned char *out)
{
int i;
for (i = 0; i < len; ++i) {
out[0] = itoa16[str[i]>>4];
out[1] = itoa16[str[i]&0xF];
out += 2;
}
}
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
static int omp_t = 1;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt);
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
static char out[CIPHERTEXT_LENGTH + 1];
memcpy(out, ciphertext, CIPHERTEXT_LENGTH);
out[CIPHERTEXT_LENGTH] = 0;
strlwr(out);
return out;
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ptr, *ctcopy, *keeptr;
int extra;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
return 0;
if (!(ctcopy = strdup(ciphertext)))
return 0;
keeptr = ctcopy;
ctcopy += FORMAT_TAG_LEN; /* skip leading "$nk$*" */
if (!(ptr = strtokm(ctcopy, "*")))
goto error;
/* HASHKEY is of fixed length 40 */
if(hexlenl(ptr, &extra) != 40 || extra)
goto error;
if (!(ptr = strtokm(NULL, "*")))
goto error;
/* skip two characters, for "nk_tests[]" this is '#'
* followed by decal value */
if (strlen(ptr) <= 2)
goto error;
ptr += 2;
/* hash is of fixed length 32 */
if(hexlenl(ptr, &extra) != 32 || extra)
goto error;
MEM_FREE(keeptr);
return 1;
error:
MEM_FREE(keeptr);
return 0;
}
static void *get_salt(char *ciphertext)
{
static struct custom_salt cs;
char _ctcopy[256], *ctcopy=_ctcopy;
char *p;
int i;
strnzcpy(ctcopy, ciphertext, 255);
ctcopy += FORMAT_TAG_LEN; /* skip over "$nk$*" */
p = strtokm(ctcopy, "*");
for (i = 0; i < 20; i++)
cs.HASHKEY[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
cs.decal = atoi16[ARCH_INDEX(p[1])];
return (void *)&cs;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE+1];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
int i;
p = strrchr(ciphertext, '*') + 1 + 2;
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++) {
unsigned char pass[40+1];
unsigned char out[80];
int i, k;
int idx = 0;
MD5_CTX c;
SHA_CTX ctx;
SHA1_Init(&ctx);
SHA1_Update(&ctx, saved_key[index], strlen(saved_key[index]));
SHA1_Final(out, &ctx);
hex_encode(out, 20, pass);
for (i = 0, k=cur_salt->decal; i < 40; ++i, ++k) {
out[idx++] = pass[i];
if(k>19) k = 0;
out[idx++] = cur_salt->HASHKEY[k];
}
MD5_Init(&c);
MD5_Update(&c, out, 80);
MD5_Final((unsigned char*)crypt_out[index], &c);
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
for (; index < count; index++)
if (*((ARCH_WORD_32*)binary) == crypt_out[index][0])
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return *((ARCH_WORD_32*)binary) == crypt_out[index][0];
}
static int cmp_exact(char *source, int index)
{
void *binary = get_binary(source);
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static void nk_set_key(char *key, int index)
{
strcpy(saved_key[index], key);
}
static char *get_key(int index)
{
return saved_key[index];
}
struct fmt_main fmt_nk = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_SPLIT_UNIFIES_CASE,
{ NULL },
{ FORMAT_TAG },
nk_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
nk_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
gbdt.h | #ifndef LIGHTGBM_BOOSTING_GBDT_H_
#define LIGHTGBM_BOOSTING_GBDT_H_
#include <LightGBM/boosting.h>
#include <LightGBM/objective_function.h>
#include <LightGBM/prediction_early_stop.h>
#include <LightGBM/json11.hpp>
#include "score_updater.hpp"
#include <cstdio>
#include <vector>
#include <string>
#include <fstream>
#include <memory>
#include <mutex>
#include <map>
using namespace json11;
namespace LightGBM {
/*!
* \brief GBDT algorithm implementation, including training, prediction and bagging.
*/
class GBDT : public GBDTBase {
public:
/*!
* \brief Constructor
*/
GBDT();
/*!
* \brief Destructor
*/
~GBDT();
/*!
* \brief Initialization logic
* \param gbdt_config Config for boosting
* \param train_data Training data
* \param objective_function Training objective function
* \param training_metrics Training metrics
*/
void Init(const Config* gbdt_config, const Dataset* train_data,
const ObjectiveFunction* objective_function,
const std::vector<const Metric*>& training_metrics) override;
/*!
* \brief Merge model from another boosting object. Will insert it at the front of the current boosting object
* \param other
*/
void MergeFrom(const Boosting* other) override {
auto other_gbdt = reinterpret_cast<const GBDT*>(other);
// tmp move to other vector
auto original_models = std::move(models_);
models_ = std::vector<std::unique_ptr<Tree>>();
// push model from other first
for (const auto& tree : other_gbdt->models_) {
auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
models_.push_back(std::move(new_tree));
}
num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
// push model in current object
for (const auto& tree : original_models) {
auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
models_.push_back(std::move(new_tree));
}
num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
}
void ShuffleModels(int start_iter, int end_iter) override {
int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_;
start_iter = std::max(0, start_iter);
if (end_iter <= 0) {
end_iter = total_iter;
}
end_iter = std::min(total_iter, end_iter);
auto original_models = std::move(models_);
std::vector<int> indices(total_iter);
for (int i = 0; i < total_iter; ++i) {
indices[i] = i;
}
Random tmp_rand(17);
for (int i = start_iter; i < end_iter - 1; ++i) {
int j = tmp_rand.NextShort(i + 1, end_iter);
std::swap(indices[i], indices[j]);
}
models_ = std::vector<std::unique_ptr<Tree>>();
for (int i = 0; i < total_iter; ++i) {
for (int j = 0; j < num_tree_per_iteration_; ++j) {
int tree_idx = indices[i] * num_tree_per_iteration_ + j;
auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get())));
models_.push_back(std::move(new_tree));
}
}
}
/*!
* \brief Reset the training data
* \param train_data New Training data
* \param objective_function Training objective function
* \param training_metrics Training metrics
*/
void ResetTrainingData(const Dataset* train_data, const ObjectiveFunction* objective_function,
const std::vector<const Metric*>& training_metrics) override;
/*!
* \brief Reset Boosting Config
* \param gbdt_config Config for boosting
*/
void ResetConfig(const Config* gbdt_config) override;
/*!
* \brief Add a validation dataset
* \param valid_data Validation dataset
* \param valid_metrics Metrics for validation dataset
*/
void AddValidDataset(const Dataset* valid_data,
const std::vector<const Metric*>& valid_metrics) override;
/*!
* \brief Perform a full training procedure
* \param snapshot_freq frequency of snapshots
* \param model_output_path path of model file
*/
void Train(int snapshot_freq, const std::string& model_output_path) override;
void RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction) override;
/*!
* \brief Training logic
* \param gradients nullptr for using default objective, otherwise use self-defined boosting
* \param hessians nullptr for using default objective, otherwise use self-defined boosting
* \return True if cannot train any more
*/
virtual bool TrainOneIter(const score_t* gradients, const score_t* hessians) override;
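/*
* Usage sketch (hypothetical 'booster' pointer; a minimal illustration under the semantics described
* above, not official API documentation):
*   std::vector<score_t> grad(num_data * num_tree_per_iteration_), hess(grad.size());
*   // ... fill grad/hess from a user-defined loss ...
*   bool finished = booster->TrainOneIter(grad.data(), hess.data());
*   // or booster->TrainOneIter(nullptr, nullptr) to boost with the configured objective.
*/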
/*!
* \brief Rollback one iteration
*/
void RollbackOneIter() override;
/*!
* \brief Get current iteration
*/
int GetCurrentIteration() const override { return static_cast<int>(models_.size()) / num_tree_per_iteration_; }
/*!
* \brief Can use early stopping for prediction or not
* \return True if cannot use early stopping for prediction
*/
bool NeedAccuratePrediction() const override {
if (objective_function_ == nullptr) {
return true;
} else {
return objective_function_->NeedAccuratePrediction();
}
}
/*!
* \brief Get evaluation result at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \return evaluation result
*/
std::vector<double> GetEvalAt(int data_idx) const override;
/*!
* \brief Get current training score
* \param out_len length of returned score
* \return training score
*/
virtual const double* GetTrainingScore(int64_t* out_len) override;
/*!
* \brief Get size of prediction at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \return The size of prediction
*/
virtual int64_t GetNumPredictAt(int data_idx) const override {
CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size()));
data_size_t num_data = train_data_->num_data();
if (data_idx > 0) {
num_data = valid_score_updater_[data_idx - 1]->num_data();
}
return num_data * num_class_;
}
/*!
* \brief Get prediction result at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \param result used to store prediction result, should allocate memory before call this function
* \param out_len length of returned score
*/
void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) override;
/*!
* \brief Get number of prediction for one data
* \param num_iteration number of used iterations
* \param is_pred_leaf True if predicting leaf index
* \param is_pred_contrib True if predicting feature contribution
* \return number of prediction
*/
inline int NumPredictOneRow(int num_iteration, bool is_pred_leaf, bool is_pred_contrib) const override {
int num_preb_in_one_row = num_class_;
if (is_pred_leaf) {
int max_iteration = GetCurrentIteration();
if (num_iteration > 0) {
num_preb_in_one_row *= static_cast<int>(std::min(max_iteration, num_iteration));
} else {
num_preb_in_one_row *= max_iteration;
}
} else if (is_pred_contrib) {
num_preb_in_one_row = num_tree_per_iteration_ * (max_feature_idx_ + 2); // +1 for 0-based indexing, +1 for baseline
}
return num_preb_in_one_row;
}
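// Example: with num_class_ = 1 and 100 trained iterations, is_pred_leaf yields 100 values per row;
// with is_pred_contrib and max_feature_idx_ = 9, it yields num_tree_per_iteration_ * 11 values
// (one per feature plus the baseline term).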
void PredictRaw(const double* features, double* output,
const PredictionEarlyStopInstance* earlyStop) const override;
void PredictRawByMap(const std::unordered_map<int, double>& features, double* output,
const PredictionEarlyStopInstance* early_stop) const override;
void Predict(const double* features, double* output,
const PredictionEarlyStopInstance* earlyStop) const override;
void PredictByMap(const std::unordered_map<int, double>& features, double* output,
const PredictionEarlyStopInstance* early_stop) const override;
void PredictLeafIndex(const double* features, double* output) const override;
void PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const override;
void PredictContrib(const double* features, double* output,
const PredictionEarlyStopInstance* earlyStop) const override;
/*!
* \brief Dump model to json format string
* \param start_iteration The model will be saved start from
* \param num_iteration Number of iterations that want to dump, -1 means dump all
* \return Json format string of model
*/
std::string DumpModel(int start_iteration, int num_iteration) const override;
/*!
* \brief Translate model to if-else statement
* \param num_iteration Number of iterations that want to translate, -1 means translate all
* \return if-else format codes of model
*/
std::string ModelToIfElse(int num_iteration) const override;
/*!
* \brief Translate model to if-else statement
* \param num_iteration Number of iterations that want to translate, -1 means translate all
* \param filename Filename that want to save to
* \return is_finish Is training finished or not
*/
bool SaveModelToIfElse(int num_iteration, const char* filename) const override;
/*!
* \brief Save model to file
* \param start_iteration The model will be saved start from
* \param num_iterations Number of model that want to save, -1 means save all
* \param filename Filename that want to save to
* \return is_finish Is training finished or not
*/
virtual bool SaveModelToFile(int start_iteration, int num_iterations, const char* filename) const override;
/*!
* \brief Save model to string
* \param start_iteration The model will be saved start from
* \param num_iterations Number of model that want to save, -1 means save all
* \return Non-empty string if succeeded
*/
virtual std::string SaveModelToString(int start_iteration, int num_iterations) const override;
/*!
* \brief Restore from a serialized buffer
*/
bool LoadModelFromString(const char* buffer, size_t len) override;
/*!
* \brief Calculate feature importances
* \param num_iteration Number of model that want to use for feature importance, -1 means use all
* \param importance_type: 0 for split, 1 for gain
* \return vector of feature_importance
*/
std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override;
/*!
* \brief Get max feature index of this model
* \return Max feature index of this model
*/
inline int MaxFeatureIdx() const override { return max_feature_idx_; }
/*!
* \brief Get feature names of this model
* \return Feature names of this model
*/
inline std::vector<std::string> FeatureNames() const override { return feature_names_; }
/*!
* \brief Get index of label column
* \return index of label column
*/
inline int LabelIdx() const override { return label_idx_; }
/*!
* \brief Get number of weak sub-models
* \return Number of weak sub-models
*/
inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); }
/*!
* \brief Get number of tree per iteration
* \return number of tree per iteration
*/
inline int NumModelPerIteration() const override { return num_tree_per_iteration_; }
/*!
* \brief Get number of classes
* \return Number of classes
*/
inline int NumberOfClasses() const override { return num_class_; }
inline void InitPredict(int num_iteration, bool is_pred_contrib) override {
num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
if (num_iteration > 0) {
num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_);
}
if (is_pred_contrib) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < static_cast<int>(models_.size()); ++i) {
models_[i]->RecomputeMaxDepth();
}
}
}
inline double GetLeafValue(int tree_idx, int leaf_idx) const override {
CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
return models_[tree_idx]->LeafOutput(leaf_idx);
}
inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override {
CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
models_[tree_idx]->SetLeafOutput(leaf_idx, val);
}
/*!
* \brief Get Type name of this boosting object
*/
virtual const char* SubModelName() const override { return "tree"; }
protected:
/*!
* \brief Print eval result and check early stopping
*/
virtual bool EvalAndCheckEarlyStopping();
/*!
* \brief reset config for bagging
*/
void ResetBaggingConfig(const Config* config, bool is_change_dataset);
/*!
* \brief Implement bagging logic
* \param iter Current iteration
*/
virtual void Bagging(int iter);
/*!
* \brief Helper function for bagging, used for multi-threading optimization
* \param start start index of bagging
* \param cnt count
* \param buffer output buffer
* \return number of data on the left (in-bag) side
*/
data_size_t BaggingHelper(Random& cur_rand, data_size_t start, data_size_t cnt, data_size_t* buffer);
/*!
* \brief calculate the objective function
*/
virtual void Boosting();
/*!
* \brief update the score after a tree was trained
* \param tree Trained tree of this iteration
* \param cur_tree_id Current tree for multiclass training
*/
virtual void UpdateScore(const Tree* tree, const int cur_tree_id);
/*!
* \brief eval results for one metric
*/
virtual std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const;
/*!
* \brief Print metric result of current iteration
* \param iter Current iteration
* \return best_msg if met early_stopping
*/
std::string OutputMetric(int iter);
double BoostFromAverage(int class_id, bool update_scorer);
/*! \brief current iteration */
int iter_;
/*! \brief Pointer to training data */
const Dataset* train_data_;
/*! \brief Config of gbdt */
std::unique_ptr<Config> config_;
/*! \brief Tree learner, will use this class to learn trees */
std::unique_ptr<TreeLearner> tree_learner_;
/*! \brief Objective function */
const ObjectiveFunction* objective_function_;
/*! \brief Store and update training data's score */
std::unique_ptr<ScoreUpdater> train_score_updater_;
/*! \brief Metrics for training data */
std::vector<const Metric*> training_metrics_;
/*! \brief Store and update validation data's scores */
std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_;
/*! \brief Metric for validation data */
std::vector<std::vector<const Metric*>> valid_metrics_;
/*! \brief Number of rounds for early stopping */
int early_stopping_round_;
/*! \brief Best iteration(s) for early stopping */
std::vector<std::vector<int>> best_iter_;
/*! \brief Best score(s) for early stopping */
std::vector<std::vector<double>> best_score_;
/*! \brief output message of best iteration */
std::vector<std::vector<std::string>> best_msg_;
/*! \brief Trained models(trees) */
std::vector<std::unique_ptr<Tree>> models_;
/*! \brief Max feature index of training data */
int max_feature_idx_;
/*! \brief First order derivative of training data */
std::vector<score_t> gradients_;
/*! \brief Second order derivative of training data */
std::vector<score_t> hessians_;
/*! \brief Store the indices of in-bag data */
std::vector<data_size_t> bag_data_indices_;
/*! \brief Number of in-bag data */
data_size_t bag_data_cnt_;
/*! \brief Store the indices of in-bag data */
std::vector<data_size_t> tmp_indices_;
/*! \brief Number of training data */
data_size_t num_data_;
/*! \brief Number of trees per iteration */
int num_tree_per_iteration_;
/*! \brief Number of classes */
int num_class_;
/*! \brief Index of label column */
data_size_t label_idx_;
/*! \brief number of models used for prediction */
int num_iteration_for_pred_;
/*! \brief Shrinkage rate for one iteration */
double shrinkage_rate_;
/*! \brief Number of loaded initial models */
int num_init_iteration_;
/*! \brief Feature names */
std::vector<std::string> feature_names_;
std::vector<std::string> feature_infos_;
/*! \brief number of threads */
int num_threads_;
/*! \brief Buffer for multi-threading bagging */
std::vector<data_size_t> offsets_buf_;
/*! \brief Buffer for multi-threading bagging */
std::vector<data_size_t> left_cnts_buf_;
/*! \brief Buffer for multi-threading bagging */
std::vector<data_size_t> right_cnts_buf_;
/*! \brief Buffer for multi-threading bagging */
std::vector<data_size_t> left_write_pos_buf_;
/*! \brief Buffer for multi-threading bagging */
std::vector<data_size_t> right_write_pos_buf_;
std::unique_ptr<Dataset> tmp_subset_;
bool is_use_subset_;
std::vector<bool> class_need_train_;
bool is_constant_hessian_;
std::unique_ptr<ObjectiveFunction> loaded_objective_;
bool average_output_;
bool need_re_bagging_;
std::string loaded_parameter_;
Json forced_splits_json_;
};
} // namespace LightGBM
#endif // LightGBM_BOOSTING_GBDT_H_
|
image-view.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% IIIII M M AAA GGGG EEEEE %
% I MM MM A A G E %
% I M M M AAAAA G GG EEE %
% I M M A A G G E %
% IIIII M M A A GGGG EEEEE %
% %
% V V IIIII EEEEE W W %
% V V I E W W %
% V V I EEE W W W %
% V V I E WW WW %
% V IIIII EEEEE W W %
% %
% %
% MagickCore Image View Methods %
% %
% Software Design %
% Cristy %
% March 2003 %
% %
% %
% Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/MagickCore.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/thread-private.h"
/*
Typedef declarations.
*/
struct _ImageView
{
char
*description;
RectangleInfo
extent;
Image
*image;
CacheView
*view;
ExceptionInfo
*exception;
MagickBooleanType
debug;
size_t
signature;
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageView() makes a copy of the specified image view.
%
% The format of the CloneImageView method is:
%
% ImageView *CloneImageView(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport ImageView *CloneImageView(const ImageView *image_view)
{
ImageView
*clone_view;
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickCoreSignature);
clone_view=(ImageView *) AcquireMagickMemory(sizeof(*clone_view));
if (clone_view == (ImageView *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(clone_view,0,sizeof(*clone_view));
clone_view->description=ConstantString(image_view->description);
clone_view->extent=image_view->extent;
clone_view->view=CloneCacheView(image_view->view);
clone_view->exception=AcquireExceptionInfo();
InheritException(clone_view->exception,image_view->exception);
clone_view->debug=image_view->debug;
clone_view->signature=MagickCoreSignature;
return(clone_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageView() deallocates memory associated with an image view.
%
% The format of the DestroyImageView method is:
%
% ImageView *DestroyImageView(ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport ImageView *DestroyImageView(ImageView *image_view)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickCoreSignature);
if (image_view->description != (char *) NULL)
image_view->description=DestroyString(image_view->description);
image_view->view=DestroyCacheView(image_view->view);
image_view->exception=DestroyExceptionInfo(image_view->exception);
image_view->signature=(~MagickCoreSignature);
image_view=(ImageView *) RelinquishMagickMemory(image_view);
return(image_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D u p l e x T r a n s f e r I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DuplexTransferImageViewIterator() iterates over three image views in
% parallel and calls your transfer method for each scanline of the view. The
% source and duplex pixel extent is not confined to the image canvas-- that is
% you can include negative offsets or widths or heights that exceed the image
% dimension. However, the destination image view is confined to the image
% canvas-- that is no negative offsets or widths or heights that exceed the
% image dimension are permitted.
%
% The callback signature is:
%
% MagickBooleanType DuplexTransferImageViewMethod(const ImageView *source,
% const ImageView *duplex,ImageView *destination,const ssize_t y,
% const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the DuplexTransferImageViewIterator method is:
%
% MagickBooleanType DuplexTransferImageViewIterator(ImageView *source,
% ImageView *duplex,ImageView *destination,
% DuplexTransferImageViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source image view.
%
% o duplex: the duplex image view.
%
% o destination: the destination image view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
MagickExport MagickBooleanType DuplexTransferImageViewIterator(
ImageView *source,ImageView *duplex,ImageView *destination,
DuplexTransferImageViewMethod transfer,void *context)
{
Image
*destination_image,
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(source != (ImageView *) NULL);
assert(source->signature == MagickCoreSignature);
if (transfer == (DuplexTransferImageViewMethod) NULL)
return(MagickFalse);
source_image=source->image;
destination_image=destination->image;
status=SetImageStorageClass(destination_image,DirectClass,
destination->exception);
if (status == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
height=source->extent.height-source->extent.y;
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(source_image,destination_image,height,1)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register const Quantum
*magick_restrict duplex_pixels,
*magick_restrict pixels;
register Quantum
*magick_restrict destination_pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
source->extent.width,1,source->exception);
if (pixels == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y,
duplex->extent.width,1,duplex->exception);
if (duplex_pixels == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
destination_pixels=GetCacheViewAuthenticPixels(destination->view,
destination->extent.x,y,destination->extent.width,1,
destination->exception);
if (destination_pixels == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
if (transfer(source,duplex,destination,y,id,context) == MagickFalse)
status=MagickFalse;
sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
if (sync == MagickFalse)
status=MagickFalse;
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_DuplexTransferImageViewIterator)
#endif
proceed=SetImageProgress(source_image,source->description,progress++,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
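/*
  A minimal usage sketch (an assumption, not part of MagickCore): a duplex
  transfer callback that averages the source and duplex scanlines into the
  destination.  The three views are assumed to have identical extents and
  channel layouts; error handling is elided.

    static MagickBooleanType AverageScanlines(const ImageView *source,
      const ImageView *duplex,ImageView *destination,const ssize_t y,
      const int thread_id,void *context)
    {
      const Image *image = GetImageViewImage(source);
      const Quantum *p = GetImageViewVirtualPixels(source);
      const Quantum *d = GetImageViewVirtualPixels(duplex);
      Quantum *q = GetImageViewAuthenticPixels(destination);
      ssize_t x;

      for (x=0; x < (ssize_t) GetImageViewExtent(source).width; x++)
      {
        ssize_t i;
        for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          q[i]=(Quantum) (((MagickRealType) p[i]+(MagickRealType) d[i])/2.0);
        p+=GetPixelChannels(image);
        d+=GetPixelChannels(image);
        q+=GetPixelChannels(image);
      }
      return(MagickTrue);
    }

  and then, with three views of equal extent:

    (void) DuplexTransferImageViewIterator(source_view,duplex_view,
      destination_view,AverageScanlines,(void *) NULL);
*/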
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w A u t h e n t i c M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewAuthenticMetacontent() returns the image view authentic
% meta-content.
%
% The format of the GetImageViewAuthenticMetacontent method is:
%
% void *GetImageViewAuthenticMetacontent(
% const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport void *GetImageViewAuthenticMetacontent(
const ImageView *image_view)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickCoreSignature);
return(GetCacheViewAuthenticMetacontent(image_view->view));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewAuthenticPixels() returns the image view authentic pixels.
%
% The format of the GetImageViewAuthenticPixels method is:
%
% Quantum *GetImageViewAuthenticPixels(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport Quantum *GetImageViewAuthenticPixels(
const ImageView *image_view)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickCoreSignature);
return(GetCacheViewAuthenticPixelQueue(image_view->view));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewException() returns the severity, reason, and description of any
% error that occurs when utilizing an image view.
%
% The format of the GetImageViewException method is:
%
% char *GetImageViewException(const ImageView *image_view,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
% o severity: the severity of the error is returned here.
%
*/
MagickExport char *GetImageViewException(const ImageView *image_view,
ExceptionType *severity)
{
char
*description;
assert(image_view != (const ImageView *) NULL);
assert(image_view->signature == MagickCoreSignature);
assert(severity != (ExceptionType *) NULL);
*severity=image_view->exception->severity;
description=(char *) AcquireQuantumMemory(2UL*MagickPathExtent,
sizeof(*description));
if (description == (char *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
*description='\0';
if (image_view->exception->reason != (char *) NULL)
(void) CopyMagickString(description,GetLocaleExceptionMessage(
image_view->exception->severity,image_view->exception->reason),
MagickPathExtent);
if (image_view->exception->description != (char *) NULL)
{
(void) ConcatenateMagickString(description," (",MagickPathExtent);
(void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
image_view->exception->severity,image_view->exception->description),
MagickPathExtent);
(void) ConcatenateMagickString(description,")",MagickPathExtent);
}
return(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewExtent() returns the image view extent.
%
% The format of the GetImageViewExtent method is:
%
% RectangleInfo GetImageViewExtent(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport RectangleInfo GetImageViewExtent(const ImageView *image_view)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickCoreSignature);
return(image_view->extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewImage() returns the image associated with the image view.
%
% The format of the GetImageViewImage method is:
%
% Image *GetImageViewImage(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport Image *GetImageViewImage(const ImageView *image_view)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickCoreSignature);
return(image_view->image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewIterator() iterates over the image view in parallel and calls
% your get method for each scanline of the view. The pixel extent is
% not confined to the image canvas-- that is you can include negative offsets
% or widths or heights that exceed the image dimension. Any updates to
% the pixels in your callback are ignored.
%
% The callback signature is:
%
% MagickBooleanType GetImageViewMethod(const ImageView *source,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback get method that must be
% executed by a single thread at a time.
%
% The format of the GetImageViewIterator method is:
%
% MagickBooleanType GetImageViewIterator(ImageView *source,
% GetImageViewMethod get,void *context)
%
% A description of each parameter follows:
%
% o source: the source image view.
%
% o get: the get callback method.
%
% o context: the user defined context.
%
*/
MagickExport MagickBooleanType GetImageViewIterator(ImageView *source,
GetImageViewMethod get,void *context)
{
Image
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(source != (ImageView *) NULL);
assert(source->signature == MagickCoreSignature);
if (get == (GetImageViewMethod) NULL)
return(MagickFalse);
source_image=source->image;
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
height=source->extent.height-source->extent.y;
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(source_image,source_image,height,1)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
register const Quantum
*pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
source->extent.width,1,source->exception);
if (pixels == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
if (get(source,y,id,context) == MagickFalse)
status=MagickFalse;
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetImageViewIterator)
#endif
proceed=SetImageProgress(source_image,source->description,progress++,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
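/*
  A minimal usage sketch (an assumption, not part of MagickCore): a read-only
  get callback that accumulates the total pixel intensity of the view into a
  user-supplied context.  The shared accumulator is guarded with the critical
  section recommended above.

    typedef struct { double sum; } IntensitySum;

    static MagickBooleanType SumIntensity(const ImageView *source,
      const ssize_t y,const int thread_id,void *context)
    {
      IntensitySum *info = (IntensitySum *) context;
      const Image *image = GetImageViewImage(source);
      const Quantum *p = GetImageViewVirtualPixels(source);
      double row_sum = 0.0;
      ssize_t x;

      for (x=0; x < (ssize_t) GetImageViewExtent(source).width; x++)
      {
        row_sum+=(double) GetPixelIntensity(image,p);
        p+=GetPixelChannels(image);
      }
      #pragma omp critical (SumIntensity)
      info->sum+=row_sum;
      return(MagickTrue);
    }

    IntensitySum total = { 0.0 };
    (void) GetImageViewIterator(image_view,SumIntensity,&total);
*/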
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w V i r t u a l M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewVirtualMetacontent() returns the image view virtual
% meta-content.
%
% The format of the GetImageViewVirtualMetacontent method is:
%
% const void *GetImageViewVirtualMetacontent(
% const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport const void *GetImageViewVirtualMetacontent(
const ImageView *image_view)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickCoreSignature);
return(GetCacheViewVirtualMetacontent(image_view->view));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewVirtualPixels() returns the image view virtual pixels.
%
% The format of the GetImageViewVirtualPixels method is:
%
% const Quantum *GetImageViewVirtualPixels(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport const Quantum *GetImageViewVirtualPixels(
const ImageView *image_view)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickCoreSignature);
return(GetCacheViewVirtualPixelQueue(image_view->view));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageView() returns MagickTrue if the parameter is verified as an image
% view object.
%
% The format of the IsImageView method is:
%
% MagickBooleanType IsImageView(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport MagickBooleanType IsImageView(const ImageView *image_view)
{
if (image_view == (const ImageView *) NULL)
return(MagickFalse);
if (image_view->signature != MagickCoreSignature)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w I m a g e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewImageView() returns an image view required for all other methods in the
% Image View API.
%
% The format of the NewImageView method is:
%
% ImageView *NewImageView(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageView *NewImageView(Image *image,ExceptionInfo *exception)
{
ImageView
*image_view;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
image_view=(ImageView *) AcquireMagickMemory(sizeof(*image_view));
if (image_view == (ImageView *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(image_view,0,sizeof(*image_view));
image_view->description=ConstantString("ImageView");
image_view->image=image;
image_view->view=AcquireVirtualCacheView(image_view->image,exception);
image_view->extent.width=image->columns;
image_view->extent.height=image->rows;
image_view->extent.x=0;
image_view->extent.y=0;
image_view->exception=AcquireExceptionInfo();
image_view->debug=IsEventLogging();
image_view->signature=MagickCoreSignature;
return(image_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w I m a g e V i e w R e g i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewImageViewRegion() returns an image view required for all other methods
% in the Image View API.
%
% The format of the NewImageViewRegion method is:
%
% ImageView *NewImageViewRegion(Image *image,const ssize_t x,
% const ssize_t y,const size_t width,const size_t height,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,width,height: these values define the perimeter of an extent of
% the image view.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageView *NewImageViewRegion(Image *image,const ssize_t x,
const ssize_t y,const size_t width,const size_t height,
ExceptionInfo *exception)
{
ImageView
*image_view;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
image_view=(ImageView *) AcquireMagickMemory(sizeof(*image_view));
if (image_view == (ImageView *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(image_view,0,sizeof(*image_view));
image_view->description=ConstantString("ImageView");
image_view->image=image;
image_view->view=AcquireVirtualCacheView(image_view->image,exception);
image_view->extent.width=width;
image_view->extent.height=height;
image_view->extent.x=x;
image_view->extent.y=y;
image_view->exception=AcquireExceptionInfo();
image_view->debug=IsEventLogging();
image_view->signature=MagickCoreSignature;
return(image_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i e w D e s c r i p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageViewDescription() associates a description with an image view.
%
% The format of the SetImageViewDescription method is:
%
% void SetImageViewDescription(ImageView *image_view,
% const char *description)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
% o description: the image view description.
%
*/
MagickExport void SetImageViewDescription(ImageView *image_view,
const char *description)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickCoreSignature);
image_view->description=ConstantString(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageViewIterator() iterates over the image view in parallel and calls
% your set method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension are permitted. The pixels are initially
% undefined and any settings you make in the callback method are automagically
% synced back to your image.
%
% The callback signature is:
%
% MagickBooleanType SetImageViewMethod(ImageView *destination,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback set method that must be
% executed by a single thread at a time.
%
% The format of the SetImageViewIterator method is:
%
% MagickBooleanType SetImageViewIterator(ImageView *destination,
% SetImageViewMethod set,void *context)
%
% A description of each parameter follows:
%
% o destination: the image view.
%
% o set: the set callback method.
%
% o context: the user defined context.
%
*/
MagickExport MagickBooleanType SetImageViewIterator(ImageView *destination,
SetImageViewMethod set,void *context)
{
Image
*destination_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(destination != (ImageView *) NULL);
assert(destination->signature == MagickCoreSignature);
if (set == (SetImageViewMethod) NULL)
return(MagickFalse);
destination_image=destination->image;
status=SetImageStorageClass(destination_image,DirectClass,
destination->exception);
if (status == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
height=destination->extent.height-destination->extent.y;
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(destination_image,destination_image,height,1)
#endif
for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register Quantum
*magick_restrict pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x,
y,destination->extent.width,1,destination->exception);
if (pixels == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
if (set(destination,y,id,context) == MagickFalse)
status=MagickFalse;
sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
if (sync == MagickFalse)
status=MagickFalse;
if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SetImageViewIterator)
#endif
proceed=SetImageProgress(destination_image,destination->description,
progress++,destination->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
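/*
  A minimal usage sketch (an assumption, not part of MagickCore): a set
  callback that initializes every channel of the destination scanline to
  mid-gray; SetImageViewIterator() syncs the pixels back to the image.

    static MagickBooleanType SetToGray(ImageView *destination,const ssize_t y,
      const int thread_id,void *context)
    {
      const Image *image = GetImageViewImage(destination);
      Quantum *q = GetImageViewAuthenticPixels(destination);
      ssize_t x;

      for (x=0; x < (ssize_t) GetImageViewExtent(destination).width; x++)
      {
        ssize_t i;
        for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          q[i]=QuantumRange/2;
        q+=GetPixelChannels(image);
      }
      return(MagickTrue);
    }

    (void) SetImageViewIterator(destination_view,SetToGray,(void *) NULL);
*/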
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f e r I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransferImageViewIterator() iterates over two image views in parallel and
% calls your transfer method for each scanline of the view. The source pixel
% extent is not confined to the image canvas-- that is you can include
% negative offsets or widths or heights that exceed the image dimension.
% However, the destination image view is confined to the image canvas-- that
% is no negative offsets or widths or heights that exceed the image dimension
% are permitted.
%
% The callback signature is:
%
% MagickBooleanType TransferImageViewMethod(const ImageView *source,
% ImageView *destination,const ssize_t y,const int thread_id,
% void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the TransferImageViewIterator method is:
%
% MagickBooleanType TransferImageViewIterator(ImageView *source,
% ImageView *destination,TransferImageViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source image view.
%
% o destination: the destination image view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
MagickExport MagickBooleanType TransferImageViewIterator(ImageView *source,
ImageView *destination,TransferImageViewMethod transfer,void *context)
{
Image
*destination_image,
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(source != (ImageView *) NULL);
assert(source->signature == MagickCoreSignature);
if (transfer == (TransferImageViewMethod) NULL)
return(MagickFalse);
source_image=source->image;
destination_image=destination->image;
status=SetImageStorageClass(destination_image,DirectClass,
destination->exception);
if (status == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
height=source->extent.height-source->extent.y;
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(source_image,destination_image,height,1)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register const Quantum
*magick_restrict pixels;
register Quantum
*magick_restrict destination_pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
source->extent.width,1,source->exception);
if (pixels == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
destination_pixels=GetCacheViewAuthenticPixels(destination->view,
destination->extent.x,y,destination->extent.width,1,
destination->exception);
if (destination_pixels == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
if (transfer(source,destination,y,id,context) == MagickFalse)
status=MagickFalse;
sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
if (sync == MagickFalse)
status=MagickFalse;
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TransferImageViewIterator)
#endif
proceed=SetImageProgress(source_image,source->description,progress++,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U p d a t e I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UpdateImageViewIterator() iterates over the image view in parallel and calls
% your update method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension are permitted. Updates to pixels
% in your callback are automagically synced back to the image.
%
% The callback signature is:
%
% MagickBooleanType UpdateImageViewMethod(ImageView *source,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback update method that must be
% executed by a single thread at a time.
%
% The format of the UpdateImageViewIterator method is:
%
% MagickBooleanType UpdateImageViewIterator(ImageView *source,
% UpdateImageViewMethod update,void *context)
%
% A description of each parameter follows:
%
% o source: the source image view.
%
% o update: the update callback method.
%
% o context: the user defined context.
%
*/
MagickExport MagickBooleanType UpdateImageViewIterator(ImageView *source,
UpdateImageViewMethod update,void *context)
{
Image
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(source != (ImageView *) NULL);
assert(source->signature == MagickCoreSignature);
if (update == (UpdateImageViewMethod) NULL)
return(MagickFalse);
source_image=source->image;
status=SetImageStorageClass(source_image,DirectClass,source->exception);
if (status == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
height=source->extent.height-source->extent.y;
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(source_image,source_image,height,1)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register Quantum
*magick_restrict pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
source->extent.width,1,source->exception);
if (pixels == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
if (update(source,y,id,context) == MagickFalse)
status=MagickFalse;
sync=SyncCacheViewAuthenticPixels(source->view,source->exception);
if (sync == MagickFalse)
status=MagickFalse;
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_UpdateImageViewIterator)
#endif
proceed=SetImageProgress(source_image,source->description,progress++,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
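/*
  A minimal usage sketch (an assumption, not part of MagickCore): an update
  callback that thresholds every channel of the scanline in place;
  UpdateImageViewIterator() syncs the modified pixels back to the image.

    static MagickBooleanType HardThreshold(ImageView *source,const ssize_t y,
      const int thread_id,void *context)
    {
      const Image *image = GetImageViewImage(source);
      Quantum *q = GetImageViewAuthenticPixels(source);
      ssize_t x;

      for (x=0; x < (ssize_t) GetImageViewExtent(source).width; x++)
      {
        ssize_t i;
        for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          q[i]=(Quantum) (q[i] <= QuantumRange/2 ? 0 : QuantumRange);
        q+=GetPixelChannels(image);
      }
      return(MagickTrue);
    }

    (void) UpdateImageViewIterator(image_view,HardThreshold,(void *) NULL);
*/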
|
openmp_2k.c | // C program for implementation of selection sort
#include <stdio.h>
#include <time.h>
#include "omp.h"
void swap(int *xp, int *yp)
{
int temp = *xp;
*xp = *yp;
*yp = temp;
}
void selectionSort(int arr[], int n)
{
int i=0, j=0, min_i=0, min_val=0;
// One by one move boundary of unsorted subarray
for (i = 0; i < n-1; ++i)
{
// Find the minimum value in the unsorted part in parallel
// (the loop variable j is private to each thread by the OpenMP rules)
min_val = arr[i];
#pragma omp parallel for reduction(min:min_val)
for (j = i+1; j < n; ++j){
if (arr[j] < min_val)
min_val = arr[j];
}
// Locate the first index that holds the minimum value (serial scan)
min_i = i;
for (j = i+1; j < n; ++j) {
if (arr[j] == min_val) {
min_i = j;
break;
}
}
// Swap the found minimum element with the first unsorted element
if (min_i != i)
swap(&arr[min_i], &arr[i]);
}
}
/* Function to print an array */
void printArray(int arr[], int size)
{
int i;
for (i=0; i < size; i++)
printf("%d ", arr[i]);
printf("\n");
}
// Driver program to test above functions
int main()
{
double begin,end;
begin=omp_get_wtime();
// 2000-element descending array: slow to sort serially, used to exercise the parallel loops
int arr[] = {2000, 1999, 1998, 1997, 1996, 1995, 1994, 1993, 1992, 1991, 1990, 1989, 1988, 1987, 1986, 1985, 1984, 1983, 1982, 1981,
1980, 1979, 1978, 1977, 1976, 1975, 1974, 1973, 1972, 1971, 1970, 1969, 1968, 1967, 1966, 1965, 1964, 1963, 1962, 1961, 1960, 1959, 1958, 1957, 1956, 1955, 1954, 1953, 1952, 1951,
1950, 1949, 1948, 1947, 1946, 1945, 1944, 1943, 1942, 1941, 1940, 1939, 1938, 1937, 1936, 1935, 1934, 1933, 1932, 1931, 1930, 1929, 1928, 1927, 1926, 1925, 1924, 1923, 1922, 1921,
1920, 1919, 1918, 1917, 1916, 1915, 1914, 1913, 1912, 1911, 1910, 1909, 1908, 1907, 1906, 1905, 1904, 1903, 1902, 1901, 1900, 1899, 1898, 1897, 1896, 1895, 1894, 1893, 1892, 1891,
1890, 1889, 1888, 1887, 1886, 1885, 1884, 1883, 1882, 1881, 1880, 1879, 1878, 1877, 1876, 1875, 1874, 1873, 1872, 1871, 1870, 1869, 1868, 1867, 1866, 1865, 1864, 1863, 1862, 1861,
1860, 1859, 1858, 1857, 1856, 1855, 1854, 1853, 1852, 1851, 1850, 1849, 1848, 1847, 1846, 1845, 1844, 1843, 1842, 1841, 1840, 1839, 1838, 1837, 1836, 1835, 1834, 1833, 1832, 1831,
1830, 1829, 1828, 1827, 1826, 1825, 1824, 1823, 1822, 1821, 1820, 1819, 1818, 1817, 1816, 1815, 1814, 1813, 1812, 1811, 1810, 1809, 1808, 1807, 1806, 1805, 1804, 1803, 1802, 1801,
1800, 1799, 1798, 1797, 1796, 1795, 1794, 1793, 1792, 1791, 1790, 1789, 1788, 1787, 1786, 1785, 1784, 1783, 1782, 1781, 1780, 1779, 1778, 1777, 1776, 1775, 1774, 1773, 1772, 1771,
1770, 1769, 1768, 1767, 1766, 1765, 1764, 1763, 1762, 1761, 1760, 1759, 1758, 1757, 1756, 1755, 1754, 1753, 1752, 1751, 1750, 1749, 1748, 1747, 1746, 1745, 1744, 1743, 1742, 1741,
1740, 1739, 1738, 1737, 1736, 1735, 1734, 1733, 1732, 1731, 1730, 1729, 1728, 1727, 1726, 1725, 1724, 1723, 1722, 1721, 1720, 1719, 1718, 1717, 1716, 1715, 1714, 1713, 1712, 1711,
1710, 1709, 1708, 1707, 1706, 1705, 1704, 1703, 1702, 1701, 1700, 1699, 1698, 1697, 1696, 1695, 1694, 1693, 1692, 1691, 1690, 1689, 1688, 1687, 1686, 1685, 1684, 1683, 1682, 1681,
1680, 1679, 1678, 1677, 1676, 1675, 1674, 1673, 1672, 1671, 1670, 1669, 1668, 1667, 1666, 1665, 1664, 1663, 1662, 1661, 1660, 1659, 1658, 1657, 1656, 1655, 1654, 1653, 1652, 1651,
1650, 1649, 1648, 1647, 1646, 1645, 1644, 1643, 1642, 1641, 1640, 1639, 1638, 1637, 1636, 1635, 1634, 1633, 1632, 1631, 1630, 1629, 1628, 1627, 1626, 1625, 1624, 1623, 1622, 1621,
1620, 1619, 1618, 1617, 1616, 1615, 1614, 1613, 1612, 1611, 1610, 1609, 1608, 1607, 1606, 1605, 1604, 1603, 1602, 1601, 1600, 1599, 1598, 1597, 1596, 1595, 1594, 1593, 1592, 1591,
1590, 1589, 1588, 1587, 1586, 1585, 1584, 1583, 1582, 1581, 1580, 1579, 1578, 1577, 1576, 1575, 1574, 1573, 1572, 1571, 1570, 1569, 1568, 1567, 1566, 1565, 1564, 1563, 1562, 1561,
1560, 1559, 1558, 1557, 1556, 1555, 1554, 1553, 1552, 1551, 1550, 1549, 1548, 1547, 1546, 1545, 1544, 1543, 1542, 1541, 1540, 1539, 1538, 1537, 1536, 1535, 1534, 1533, 1532, 1531,
1530, 1529, 1528, 1527, 1526, 1525, 1524, 1523, 1522, 1521, 1520, 1519, 1518, 1517, 1516, 1515, 1514, 1513, 1512, 1511, 1510, 1509, 1508, 1507, 1506, 1505, 1504, 1503, 1502, 1501,
1500, 1499, 1498, 1497, 1496, 1495, 1494, 1493, 1492, 1491, 1490, 1489, 1488, 1487, 1486, 1485, 1484, 1483, 1482, 1481, 1480, 1479, 1478, 1477, 1476, 1475, 1474, 1473, 1472, 1471,
1470, 1469, 1468, 1467, 1466, 1465, 1464, 1463, 1462, 1461, 1460, 1459, 1458, 1457, 1456, 1455, 1454, 1453, 1452, 1451, 1450, 1449, 1448, 1447, 1446, 1445, 1444, 1443, 1442, 1441,
1440, 1439, 1438, 1437, 1436, 1435, 1434, 1433, 1432, 1431, 1430, 1429, 1428, 1427, 1426, 1425, 1424, 1423, 1422, 1421, 1420, 1419, 1418, 1417, 1416, 1415, 1414, 1413, 1412, 1411,
1410, 1409, 1408, 1407, 1406, 1405, 1404, 1403, 1402, 1401, 1400, 1399, 1398, 1397, 1396, 1395, 1394, 1393, 1392, 1391, 1390, 1389, 1388, 1387, 1386, 1385, 1384, 1383, 1382, 1381,
1380, 1379, 1378, 1377, 1376, 1375, 1374, 1373, 1372, 1371, 1370, 1369, 1368, 1367, 1366, 1365, 1364, 1363, 1362, 1361, 1360, 1359, 1358, 1357, 1356, 1355, 1354, 1353, 1352, 1351,
1350, 1349, 1348, 1347, 1346, 1345, 1344, 1343, 1342, 1341, 1340, 1339, 1338, 1337, 1336, 1335, 1334, 1333, 1332, 1331, 1330, 1329, 1328, 1327, 1326, 1325, 1324, 1323, 1322, 1321,
1320, 1319, 1318, 1317, 1316, 1315, 1314, 1313, 1312, 1311, 1310, 1309, 1308, 1307, 1306, 1305, 1304, 1303, 1302, 1301, 1300, 1299, 1298, 1297, 1296, 1295, 1294, 1293, 1292, 1291,
1290, 1289, 1288, 1287, 1286, 1285, 1284, 1283, 1282, 1281, 1280, 1279, 1278, 1277, 1276, 1275, 1274, 1273, 1272, 1271, 1270, 1269, 1268, 1267, 1266, 1265, 1264, 1263, 1262, 1261,
1260, 1259, 1258, 1257, 1256, 1255, 1254, 1253, 1252, 1251, 1250, 1249, 1248, 1247, 1246, 1245, 1244, 1243, 1242, 1241, 1240, 1239, 1238, 1237, 1236, 1235, 1234, 1233, 1232, 1231,
1230, 1229, 1228, 1227, 1226, 1225, 1224, 1223, 1222, 1221, 1220, 1219, 1218, 1217, 1216, 1215, 1214, 1213, 1212, 1211, 1210, 1209, 1208, 1207, 1206, 1205, 1204, 1203, 1202, 1201,
1200, 1199, 1198, 1197, 1196, 1195, 1194, 1193, 1192, 1191, 1190, 1189, 1188, 1187, 1186, 1185, 1184, 1183, 1182, 1181, 1180, 1179, 1178, 1177, 1176, 1175, 1174, 1173, 1172, 1171,
1170, 1169, 1168, 1167, 1166, 1165, 1164, 1163, 1162, 1161, 1160, 1159, 1158, 1157, 1156, 1155, 1154, 1153, 1152, 1151, 1150, 1149, 1148, 1147, 1146, 1145, 1144, 1143, 1142, 1141,
1140, 1139, 1138, 1137, 1136, 1135, 1134, 1133, 1132, 1131, 1130, 1129, 1128, 1127, 1126, 1125, 1124, 1123, 1122, 1121, 1120, 1119, 1118, 1117, 1116, 1115, 1114, 1113, 1112, 1111,
1110, 1109, 1108, 1107, 1106, 1105, 1104, 1103, 1102, 1101, 1100, 1099, 1098, 1097, 1096, 1095, 1094, 1093, 1092, 1091, 1090, 1089, 1088, 1087, 1086, 1085, 1084, 1083, 1082, 1081,
1080, 1079, 1078, 1077, 1076, 1075, 1074, 1073, 1072, 1071, 1070, 1069, 1068, 1067, 1066, 1065, 1064, 1063, 1062, 1061, 1060, 1059, 1058, 1057, 1056, 1055, 1054, 1053, 1052, 1051,
1050, 1049, 1048, 1047, 1046, 1045, 1044, 1043, 1042, 1041, 1040, 1039, 1038, 1037, 1036, 1035, 1034, 1033, 1032, 1031, 1030, 1029, 1028, 1027, 1026, 1025, 1024, 1023, 1022, 1021,
1020, 1019, 1018, 1017, 1016, 1015, 1014, 1013, 1012, 1011, 1010, 1009, 1008, 1007, 1006, 1005, 1004, 1003, 1002, 1001, 1000, 999, 998, 997, 996, 995, 994, 993, 992, 991,
990, 989, 988, 987, 986, 985, 984, 983, 982, 981, 980, 979, 978, 977, 976, 975, 974, 973, 972, 971, 970, 969, 968, 967, 966, 965, 964, 963, 962, 961,
960, 959, 958, 957, 956, 955, 954, 953, 952, 951, 950, 949, 948, 947, 946, 945, 944, 943, 942, 941, 940, 939, 938, 937, 936, 935, 934, 933, 932, 931,
930, 929, 928, 927, 926, 925, 924, 923, 922, 921, 920, 919, 918, 917, 916, 915, 914, 913, 912, 911, 910, 909, 908, 907, 906, 905, 904, 903, 902, 901,
900, 899, 898, 897, 896, 895, 894, 893, 892, 891, 890, 889, 888, 887, 886, 885, 884, 883, 882, 881, 880, 879, 878, 877, 876, 875, 874, 873, 872, 871,
870, 869, 868, 867, 866, 865, 864, 863, 862, 861, 860, 859, 858, 857, 856, 855, 854, 853, 852, 851, 850, 849, 848, 847, 846, 845, 844, 843, 842, 841,
840, 839, 838, 837, 836, 835, 834, 833, 832, 831, 830, 829, 828, 827, 826, 825, 824, 823, 822, 821, 820, 819, 818, 817, 816, 815, 814, 813, 812, 811,
810, 809, 808, 807, 806, 805, 804, 803, 802, 801, 800, 799, 798, 797, 796, 795, 794, 793, 792, 791, 790, 789, 788, 787, 786, 785, 784, 783, 782, 781,
780, 779, 778, 777, 776, 775, 774, 773, 772, 771, 770, 769, 768, 767, 766, 765, 764, 763, 762, 761, 760, 759, 758, 757, 756, 755, 754, 753, 752, 751,
750, 749, 748, 747, 746, 745, 744, 743, 742, 741, 740, 739, 738, 737, 736, 735, 734, 733, 732, 731, 730, 729, 728, 727, 726, 725, 724, 723, 722, 721,
720, 719, 718, 717, 716, 715, 714, 713, 712, 711, 710, 709, 708, 707, 706, 705, 704, 703, 702, 701, 700, 699, 698, 697, 696, 695, 694, 693, 692, 691,
690, 689, 688, 687, 686, 685, 684, 683, 682, 681, 680, 679, 678, 677, 676, 675, 674, 673, 672, 671, 670, 669, 668, 667, 666, 665, 664, 663, 662, 661,
660, 659, 658, 657, 656, 655, 654, 653, 652, 651, 650, 649, 648, 647, 646, 645, 644, 643, 642, 641, 640, 639, 638, 637, 636, 635, 634, 633, 632, 631,
630, 629, 628, 627, 626, 625, 624, 623, 622, 621, 620, 619, 618, 617, 616, 615, 614, 613, 612, 611, 610, 609, 608, 607, 606, 605, 604, 603, 602, 601,
600, 599, 598, 597, 596, 595, 594, 593, 592, 591, 590, 589, 588, 587, 586, 585, 584, 583, 582, 581, 580, 579, 578, 577, 576, 575, 574, 573, 572, 571,
570, 569, 568, 567, 566, 565, 564, 563, 562, 561, 560, 559, 558, 557, 556, 555, 554, 553, 552, 551, 550, 549, 548, 547, 546, 545, 544, 543, 542, 541,
540, 539, 538, 537, 536, 535, 534, 533, 532, 531, 530, 529, 528, 527, 526, 525, 524, 523, 522, 521, 520, 519, 518, 517, 516, 515, 514, 513, 512, 511,
510, 509, 508, 507, 506, 505, 504, 503, 502, 501, 500, 499, 498, 497, 496, 495, 494, 493, 492, 491, 490, 489, 488, 487, 486, 485, 484, 483, 482, 481,
480, 479, 478, 477, 476, 475, 474, 473, 472, 471, 470, 469, 468, 467, 466, 465, 464, 463, 462, 461, 460, 459, 458, 457, 456, 455, 454, 453, 452, 451,
450, 449, 448, 447, 446, 445, 444, 443, 442, 441, 440, 439, 438, 437, 436, 435, 434, 433, 432, 431, 430, 429, 428, 427, 426, 425, 424, 423, 422, 421,
420, 419, 418, 417, 416, 415, 414, 413, 412, 411, 410, 409, 408, 407, 406, 405, 404, 403, 402, 401, 400, 399, 398, 397, 396, 395, 394, 393, 392, 391,
390, 389, 388, 387, 386, 385, 384, 383, 382, 381, 380, 379, 378, 377, 376, 375, 374, 373, 372, 371, 370, 369, 368, 367, 366, 365, 364, 363, 362, 361,
360, 359, 358, 357, 356, 355, 354, 353, 352, 351, 350, 349, 348, 347, 346, 345, 344, 343, 342, 341, 340, 339, 338, 337, 336, 335, 334, 333, 332, 331,
330, 329, 328, 327, 326, 325, 324, 323, 322, 321, 320, 319, 318, 317, 316, 315, 314, 313, 312, 311, 310, 309, 308, 307, 306, 305, 304, 303, 302, 301,
300, 299, 298, 297, 296, 295, 294, 293, 292, 291, 290, 289, 288, 287, 286, 285, 284, 283, 282, 281, 280, 279, 278, 277, 276, 275, 274, 273, 272, 271,
270, 269, 268, 267, 266, 265, 264, 263, 262, 261, 260, 259, 258, 257, 256, 255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241,
240, 239, 238, 237, 236, 235, 234, 233, 232, 231, 230, 229, 228, 227, 226, 225, 224, 223, 222, 221, 220, 219, 218, 217, 216, 215, 214, 213, 212, 211,
210, 209, 208, 207, 206, 205, 204, 203, 202, 201, 200, 199, 198, 197, 196, 195, 194, 193, 192, 191, 190, 189, 188, 187, 186, 185, 184, 183, 182, 181,
180, 179, 178, 177, 176, 175, 174, 173, 172, 171, 170, 169, 168, 167, 166, 165, 164, 163, 162, 161, 160, 159, 158, 157, 156, 155, 154, 153, 152, 151,
150, 149, 148, 147, 146, 145, 144, 143, 142, 141, 140, 139, 138, 137, 136, 135, 134, 133, 132, 131, 130, 129, 128, 127, 126, 125, 124, 123, 122, 121,
120, 119, 118, 117, 116, 115, 114, 113, 112, 111, 110, 109, 108, 107, 106, 105, 104, 103, 102, 101, 100, 99, 98, 97, 96, 95, 94, 93, 92, 91,
90, 89, 88, 87, 86, 85, 84, 83, 82, 81, 80, 79, 78, 77, 76, 75, 74, 73, 72, 71, 70, 69, 68, 67, 66, 65, 64, 63, 62, 61,
60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31,
30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1};
int n = sizeof(arr)/sizeof(arr[0]);
selectionSort(arr, n);
printf("Sorted array: \n");
printArray(arr, n);
end=omp_get_wtime();
printf("Selection sort openMP parallel code took %f seconds \n", end-begin);
return 0;
}
|
GB_binop__first_uint8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__first_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__first_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__first_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__first_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__first_uint8)
// A*D function (colscale): GB (_AxD__first_uint8)
// D*A function (rowscale): GB (_DxB__first_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__first_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__first_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_uint8)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: uint8_t
// A type: uint8_t
// A pattern? 0
// B type: uint8_t
// B pattern? 1
// BinaryOp: cij = aij
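// A hypothetical end-user sketch (an assumption, not part of this generated
// kernel): the FIRST operator on uint8 matrices is normally reached through
// the GraphBLAS C API, e.g.
//
//      GrB_Matrix C, A, B ;
//      // ... create and populate GrB_UINT8 matrices A, B, and C ...
//      GrB_eWiseAdd (C, NULL, NULL, GrB_FIRST_UINT8, A, B, NULL) ;
//
// which dispatches to GB (_AaddB__first_uint8) below when this kernel is
// enabled (see GB_DISABLE).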
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// true if values of B are not used
#define GB_B_IS_PATTERN \
1
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = x ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FIRST || GxB_NO_UINT8 || GxB_NO_FIRST_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__first_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__first_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__first_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__first_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__first_uint8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__first_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint8_t alpha_scalar ;
uint8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__first_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__first_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__first_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__first_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = x ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = aij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = x ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define MIN(a,b) ((a) < (b) ? (a) : (b))
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 4;
tile_size[3] = 128;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
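  /* The generated nest below appears to be a PLUTO/CLooG time-tiled sweep: t1
   * walks tile wavefronts in time, t2..t4 enumerate spatial tiles (extents
   * matching tile_size[] above), t5 recovers the time step, and t6..t8 visit
   * the grid points; the innermost t8 loop is vectorized via #pragma ivdep /
   * vector always, and A[t%2][z][y][x] double-buffers consecutive time steps. */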
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=2*Nt-2;t1++) {
lbp=ceild(t1+2,2);
ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(ceild(t1+2,2),ceild(4*t2-Nz+9,4));t3<=min(min(floord(4*Nt+Ny-9,4),floord(2*t1+Ny-3,4)),floord(4*t2+Ny-9,4));t3++) {
for (t4=max(max(ceild(t1-60,64),ceild(4*t2-Nz-115,128)),ceild(4*t3-Ny-115,128));t4<=min(min(min(floord(4*Nt+Nx-9,128),floord(2*t1+Nx-3,128)),floord(4*t2+Nx-9,128)),floord(4*t3+Nx-9,128));t4++) {
for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(128*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) {
for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) {
lbv=max(128*t4,4*t5+4);
ubv=min(128*t4+127,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
oneWayFunction.c | #include "oneWayFunction.h"
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#ifndef SYS_OS_MAC
#include <omp.h>
#endif
#include "my_time.h"
#include "common.h"
// OpenSSL Library
#include "sha1.h"
#include "sha256.h"
#include "sha512.h"
#include "sha3_256.h"
#include "whirlpool.h"
#include "ripemd160.h"
#include "blake2s256.h"
#include "aes128.h"
#include "des.h"
#include "crc32.h"
#include "hmac_md5.h"
#include "rc4.h"
#include "camellia128.h"
// JTR source code
#include "gost.h"
#include "haval5_256.h"
#include "skein512_256.h"
OneWayFunctionInfor funcInfor[FUNCTION_NUM] = {
{"SHA3-256", sha3_256},
{"SHA1", sha1},
{"SHA256", sha256},
{"SHA512", sha512},
{"Whirlpool", whirlpool},
{"RIPEMD-160", ripemd160},
{"BLAKE2s(256bits)", blake2s256},
{"AES(128bits)", aes128},
{"DES", des},
{"RC4", rc4},
{"Camellia(128bits)", camellia128},
{"CRC32", hello_crc32},
{"HMAC(MD5)", hmac_md5},
{"GOST R 34.11-94", gost},
{"HAVAL-256/5", haval5_256},
{"Skein-512(256bits)", skein512_256}
};
void initOneWayFunction() {
gost_init_table();
CRC32_Table_Init();
}
void testOneWayFunction(const char *mess, const int64_t iterNum) {
/*
int64_t j;
uint32_t messLen = (uint32_t)strlen(mess);
uint8_t input[INPUT_LEN], output[FUNCTION_NUM][OUTPUT_LEN];
memset(input, 0, INPUT_LEN*sizeof(uint8_t));
memcpy(input, mess, messLen*sizeof(char));
printf("**************************** Correctness test (One way function) ****************************\n");
printf("Test message: %s\n", mess);
for (int i = 0; i < FUNCTION_NUM; ++i) {
printf("%02d ", i);
funcInfor[i].func(input, messLen, output[i]);
view_data_u8(funcInfor[i].funcName, output[i], OUTPUT_LEN);
}
printf("*********************************************************************************************\n");
printf("************************************************* Performance test (One way function) *************************************************\n");
uint8_t *result = (uint8_t *)malloc(iterNum * OUTPUT_LEN * sizeof(uint8_t));
assert(NULL != result);
memset(result, 0, iterNum * OUTPUT_LEN * sizeof(uint8_t));
uint32_t threadNumArr[] = {1, 4, 8, 12, 16, 20, 24, 32, 48, 64};
uint32_t threadNumTypes = sizeof(threadNumArr) / sizeof(uint32_t);
printf(" %-18s", "Algorithm");
for (uint32_t ix = 0; ix < threadNumTypes; ++ix)
printf("%12d", threadNumArr[ix]);
printf("\n");
for (int i = 0; i < FUNCTION_NUM; ++i) {
printf("%02d %-18s\t", i, funcInfor[i].funcName);
for (uint32_t ix = 0; ix < threadNumTypes; ++ix) {
omp_set_num_threads(threadNumArr[ix]);
double startTime = get_wall_time();
if (threadNumArr[ix] == 1) {
for (j = 0; j < iterNum; ++j) {
funcInfor[i].func(input, messLen, result + j * OUTPUT_LEN);
}
} else {
#pragma omp parallel for firstprivate(input), private(j) shared(result)
for (j = 0; j < iterNum; ++j) {
funcInfor[i].func(input, messLen, result + j * OUTPUT_LEN);
}
}
double endTime = get_wall_time();
double costTime = endTime - startTime;
printf("%5.0f Kps ", iterNum / 1000 / costTime); fflush(stdout);
// Check result
for (j = 0; j < iterNum; j += 1) {
if (memcmp(output[i], result + j * OUTPUT_LEN, OUTPUT_LEN)) {
printf("Thread num: %u, j: %ld\n", threadNumArr[ix], j);
view_data_u8("output", output[i], OUTPUT_LEN);
view_data_u8("result", result + j * OUTPUT_LEN, OUTPUT_LEN);
abort();
}
}
}
printf("\n");
}
if (NULL != result) {
free(result);
result = NULL;
}
*/
printf("***************************************************************************************************************************************\n");
}
|
2Dpfold.c | /*
* minimum free energy
* RNA secondary structure with
* basepair distance d_1 to reference structure 1 and distance d_2 to reference structure 2
*
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ctype.h>
#include <string.h>
#include <float.h> /* #defines FLT_MAX ... */
#include "ViennaRNA/utils/basic.h"
#include "ViennaRNA/fold_vars.h"
#include "ViennaRNA/params/basic.h"
#include "ViennaRNA/params/default.h"
#include "ViennaRNA/loops/all.h"
#include "ViennaRNA/2Dpfold.h"
/*
#################################
# GLOBAL VARIABLES #
#################################
*/
/*
#################################
# PRIVATE VARIABLES #
#################################
*/
/*
#################################
# PRIVATE FUNCTION DECLARATIONS #
#################################
*/
PRIVATE void crosslink(TwoDpfold_vars *vars);
PRIVATE void pf2D_linear(vrna_fold_compound_t *vc);
PRIVATE void pf2D_circ(vrna_fold_compound_t *vc);
PRIVATE char *pbacktrack_circ(vrna_fold_compound_t *vc,
int d1,
int d2);
PRIVATE void backtrack(vrna_fold_compound_t *vc,
char *pstruc,
int d1,
int d2,
unsigned int i,
unsigned int j);
PRIVATE void backtrack_qm(vrna_fold_compound_t *vc,
char *pstruc,
int d1,
int d2,
unsigned int i,
unsigned int j);
PRIVATE void backtrack_qm1(vrna_fold_compound_t *vc,
char *pstruc,
int d1,
int d2,
unsigned int i,
unsigned int j);
PRIVATE void backtrack_qm2(vrna_fold_compound_t *vc,
char *pstruc,
int d1,
int d2,
unsigned int k);
PRIVATE void backtrack_qcH(vrna_fold_compound_t *vc,
char *pstruc,
int d1,
int d2);
PRIVATE void backtrack_qcI(vrna_fold_compound_t *vc,
char *pstruc,
int d1,
int d2);
PRIVATE void backtrack_qcM(vrna_fold_compound_t *vc,
char *pstruc,
int d1,
int d2);
PRIVATE void adjustArrayBoundaries(FLT_OR_DBL ***array,
int *k_min,
int *k_max,
int **l_min,
int **l_max,
int k_min_real,
int k_max_real,
int *l_min_real,
int *l_max_real);
INLINE PRIVATE void preparePosteriorBoundaries(int size,
int shift,
int *min_k,
int *max_k,
int **min_l,
int **max_l);
INLINE PRIVATE void updatePosteriorBoundaries(int d1,
int d2,
int *min_k,
int *max_k,
int **min_l,
int **max_l);
INLINE PRIVATE void prepareBoundaries(int min_k_pre,
int max_k_pre,
int min_l_pre,
int max_l_pre,
int bpdist,
int *min_k,
int *max_k,
int **min_l,
int **max_l);
INLINE PRIVATE void prepareArray(FLT_OR_DBL ***array,
int min_k,
int max_k,
int *min_l,
int *max_l);
/*
#################################
# BEGIN OF FUNCTION DEFINITIONS #
#################################
*/
PUBLIC vrna_sol_TwoD_pf_t *
vrna_pf_TwoD(vrna_fold_compound_t *vc,
int distance1,
int distance2)
{
unsigned int maxD1 = 0, maxD2 = 0, counter = 0;
int cnt1, cnt2, k_min, k_max, l_min, l_max, ndx;
FLT_OR_DBL q = 0.;
vrna_sol_TwoD_pf_t *output;
vrna_md_t *md;
vrna_mx_pf_t *matrices;
maxD1 = vc->maxD1;
maxD2 = vc->maxD2;
matrices = vc->exp_matrices;
md = &(vc->exp_params->model_details);
if (distance1 >= 0) {
if ((unsigned int)distance1 > maxD1)
vrna_message_warning("vrna_pf_TwoD@2Dpfold.c: limiting maximum basepair distance 1 to %u\n",
maxD1);
else
maxD1 = (unsigned int)distance1;
}
if (distance2 >= 0) {
if ((unsigned int)distance2 > maxD2)
vrna_message_warning("vrna_pf_TwoD@2Dpfold.c: limiting maximum basepair distance 2 to %u\n",
maxD2);
else
maxD2 = (unsigned int)distance2;
}
vc->maxD1 = maxD1;
vc->maxD2 = maxD2;
output = (vrna_sol_TwoD_pf_t *)vrna_alloc((((maxD1 + 1) * (maxD2 + 2)) / 2 + 2) * sizeof(vrna_sol_TwoD_pf_t));
pf2D_linear(vc);
if (md->circ)
pf2D_circ(vc);
ndx = vc->iindx[1] - vc->length;
k_min = (md->circ) ? matrices->k_min_Q_c : matrices->k_min_Q[ndx];
k_max = (md->circ) ? matrices->k_max_Q_c : matrices->k_max_Q[ndx];
for (cnt1 = k_min;
cnt1 <= k_max;
cnt1++) {
l_min = (md->circ) ? matrices->l_min_Q_c[cnt1] : matrices->l_min_Q[ndx][cnt1];
l_max = (md->circ) ? matrices->l_max_Q_c[cnt1] : matrices->l_max_Q[ndx][cnt1];
for (cnt2 = l_min;
cnt2 <= l_max;
cnt2 += 2) {
q = (md->circ) ? matrices->Q_c[cnt1][cnt2 / 2] : matrices->Q[ndx][cnt1][cnt2 / 2];
if (q == 0.)
continue;
output[counter].k = cnt1;
output[counter].l = cnt2;
output[counter].q = q;
counter++;
}
}
/* store entry for remaining partition if it exists */
q = (md->circ) ? matrices->Q_c_rem : matrices->Q_rem[ndx];
if (q != 0.) {
output[counter].k = -1;
output[counter].l = -1;
output[counter].q = q;
counter++;
}
/* insert end-marker entry */
output[counter].k = output[counter].l = INF;
counter++;
  /* resize to the actual number of entries */
output = (vrna_sol_TwoD_pf_t *)vrna_realloc(output, sizeof(vrna_sol_TwoD_pf_t) * counter);
return output;
}
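/*
 * A minimal usage sketch (not from the original sources) of how a caller might
 * walk the list returned by vrna_pf_TwoD().  The list is terminated by an entry
 * with k == l == INF, and an optional entry with k == l == -1 carries the
 * "remainder" partition function whose distances exceed (maxD1, maxD2), as
 * documented above.  Disabled with #if 0, matching the convention used
 * elsewhere in this file.
 */
#if 0
static void
print_TwoD_pf(vrna_fold_compound_t *vc,
              int                  d1,
              int                  d2)
{
  vrna_sol_TwoD_pf_t *sol, *s;

  sol = vrna_pf_TwoD(vc, d1, d2);
  for (s = sol; s->k != INF; s++) {
    if (s->k == -1)   /* remainder beyond the requested distance maxima */
      printf("rem      Q = %g\n", (double)s->q);
    else
      printf("k=%d l=%d Q = %g\n", s->k, s->l, (double)s->q);
  }
  free(sol);
}
#endif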
#if 0
PUBLIC FLT_OR_DBL **
TwoDpfold(TwoDpfold_vars *vars,
int distance1,
int distance2)
{
unsigned int i;
unsigned int maxD1 = 0;
unsigned int maxD2 = 0;
unsigned int mm;
int cnt1, cnt2;
FLT_OR_DBL **output;
initialize_TwoDpfold_vars(vars);
vars->S = encode_sequence(vars->sequence, 0);
vars->S1 = encode_sequence(vars->sequence, 1);
make_ptypes2(vars);
for (i = 1; i <= (unsigned int)vars->reference_pt1[0]; i++)
if (i < (unsigned int)vars->reference_pt1[i])
maxD1++;
for (i = 1; i <= (unsigned int)vars->reference_pt2[0]; i++)
if (i < (unsigned int)vars->reference_pt2[i])
maxD2++;
mm = maximumMatching(vars->sequence);
maxD1 += mm;
maxD2 += mm;
if (distance1 >= 0) {
if ((unsigned int)distance1 > maxD1)
fprintf(stderr, "limiting maximum basepair distance 1 to %u\n", maxD1);
maxD1 = (unsigned int)distance1;
}
if (distance2 >= 0) {
if ((unsigned int)distance2 > maxD2)
fprintf(stderr, "limiting maximum basepair distance 2 to %u\n", maxD2);
maxD2 = (unsigned int)distance2;
}
vars->maxD1 = maxD1;
vars->maxD2 = maxD2;
output = (FLT_OR_DBL **)vrna_alloc(sizeof(FLT_OR_DBL *) * (maxD1 + 1));
pf2D_linear(vars);
int ndx = vars->my_iindx[1] - vars->seq_length;
for (cnt1 = vars->k_min_values[ndx]; cnt1 <= MIN2(vars->k_max_values[ndx], vars->maxD1); cnt1++) {
output[cnt1] = (FLT_OR_DBL *)vrna_alloc((vars->maxD2 + 1) * sizeof(FLT_OR_DBL));
for (cnt2 = vars->l_min_values[ndx][cnt1]; cnt2 <= MIN2(vars->l_max_values[ndx][cnt1], vars->maxD2); cnt2 += 2)
output[cnt1][cnt2] = vars->Q[ndx][cnt1][cnt2 / 2];
}
return output;
}
PUBLIC FLT_OR_DBL **
TwoDpfold_circ(TwoDpfold_vars *vars,
int distance1,
int distance2)
{
unsigned int i;
unsigned int maxD1 = 0;
unsigned int maxD2 = 0;
unsigned int mm;
int cnt1, cnt2;
FLT_OR_DBL **output;
initialize_TwoDpfold_vars(vars);
vars->S = encode_sequence(vars->sequence, 0);
vars->S1 = encode_sequence(vars->sequence, 1);
make_ptypes2(vars);
for (i = 1; i <= (unsigned int)vars->reference_pt1[0]; i++)
if (i < (unsigned int)vars->reference_pt1[i])
maxD1++;
for (i = 1; i <= (unsigned int)vars->reference_pt2[0]; i++)
if (i < (unsigned int)vars->reference_pt2[i])
maxD2++;
mm = maximumMatching(vars->sequence);
maxD1 += mm;
maxD2 += mm;
if (distance1 >= 0) {
if ((unsigned int)distance1 > maxD1)
fprintf(stderr, "limiting maximum basepair distance 1 to %u\n", maxD1);
maxD1 = (unsigned int)distance1;
}
if (distance2 >= 0) {
if ((unsigned int)distance2 > maxD2)
fprintf(stderr, "limiting maximum basepair distance 2 to %u\n", maxD2);
maxD2 = (unsigned int)distance2;
}
vars->maxD1 = maxD1;
vars->maxD2 = maxD2;
output = (FLT_OR_DBL **)vrna_alloc(sizeof(FLT_OR_DBL *) * (maxD1 + 1));
pf2D_linear(vars);
pf2D_circ(vars);
for (cnt1 = vars->k_min_values_qc; cnt1 <= MIN2(vars->k_max_values_qc, vars->maxD1); cnt1++) {
output[cnt1] = (FLT_OR_DBL *)vrna_alloc((vars->maxD2 + 1) * sizeof(FLT_OR_DBL));
for (cnt2 = vars->l_min_values_qc[cnt1]; cnt2 <= MIN2(vars->l_max_values_qc[cnt1], vars->maxD2); cnt2 += 2)
output[cnt1][cnt2] = vars->Q_c[cnt1][cnt2 / 2];
}
return output;
}
#endif
PRIVATE void
pf2D_linear(vrna_fold_compound_t *vc)
{
char *sequence, *ptype;
short *S1, *reference_pt1, *reference_pt2;
unsigned int *referenceBPs1, *referenceBPs2,
d, i, j, ij, seq_length, maxD1,
maxD2, *mm1, *mm2, *bpdist;
int *my_iindx, *jindx, circ, cnt1, cnt2, cnt3, cnt4, *rtype, turn;
double max_real;
FLT_OR_DBL *scale, Qmax;
vrna_exp_param_t *pf_params;
vrna_mx_pf_t *matrices;
vrna_md_t *md;
max_real = (sizeof(FLT_OR_DBL) == sizeof(float)) ? FLT_MAX : DBL_MAX;
pf_params = vc->exp_params;
md = &(pf_params->model_details);
matrices = vc->exp_matrices;
sequence = vc->sequence;
seq_length = vc->length;
maxD1 = vc->maxD1;
maxD2 = vc->maxD2;
S1 = vc->sequence_encoding;
ptype = vc->ptype;
rtype = &(md->rtype[0]);
scale = matrices->scale;
reference_pt1 = vc->reference_pt1;
reference_pt2 = vc->reference_pt2;
my_iindx = vc->iindx;
jindx = vc->jindx;
referenceBPs1 = vc->referenceBPs1;
referenceBPs2 = vc->referenceBPs2;
dangles = md->dangles;
circ = md->circ;
turn = md->min_loop_size;
mm1 = vc->mm1;
mm2 = vc->mm2;
bpdist = vc->bpdist;
Qmax = 0.;
  /* array initialization: qb, qm, q
   * qb, qm, q (i,j) are stored as ((n+1-i)*(n-i))/2 + n+1-j */
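  /* For example, with n = 5 the cell (i,j) = (2,4) maps to
   * ((5+1-2)*(5-2))/2 + 5+1-4 = 6 + 2 = 8; in the code this index is obtained
   * as my_iindx[i] - j. */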
for (j = 1; j <= seq_length; j++)
for (i = (j > turn ? (j - turn) : 1); i <= j; i++) {
ij = my_iindx[i] - j;
matrices->k_min_Q[ij] = 0;
matrices->k_max_Q[ij] = 0;
matrices->l_min_Q[ij] = (int *)vrna_alloc(sizeof(int));
matrices->l_max_Q[ij] = (int *)vrna_alloc(sizeof(int));
matrices->l_min_Q[ij][0] = 0;
matrices->l_max_Q[ij][0] = 0;
matrices->Q[ij] = (FLT_OR_DBL **)vrna_alloc(sizeof(FLT_OR_DBL *));
matrices->Q[ij][0] = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL));
matrices->Q[ij][0][0] = 1.0 * scale[j - i + 1];
}
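  /* Each cell Q[ij] (and likewise Q_B, Q_M, Q_M1 below) is a small 2D array
   * indexed by distance classes [k][l/2], where k is the base-pair distance to
   * reference 1 and l the distance to reference 2; for a fixed k only every
   * second l is reachable, hence the halved second dimension.  Contributions
   * whose distances would exceed (maxD1, maxD2) are accumulated in the scalar
   * *_rem counterparts (Q_rem, Q_B_rem, ...) instead of per-class storage. */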
for (d = turn + 2; d <= seq_length; d++) {
/* i,j in [1..seq_length] */
#ifdef _OPENMP
#pragma omp parallel for private(i, j, ij, cnt1, cnt2, cnt3, cnt4)
#endif
for (j = d; j <= seq_length; j++) {
unsigned int k, l, kl, u, ii, dij;
int no_close, type, type_2, tt, da, db, base_da, base_db;
FLT_OR_DBL temp2, aux_en;
i = j - d + 1;
ij = my_iindx[i] - j;
dij = j - i - 1;
type = ptype[jindx[j] + i];
no_close = (((type == 3) || (type == 4)) && no_closingGU);
if (type) {
/* we have a pair */
int k_min_Q_B, k_max_Q_B, l_min_Q_B, l_max_Q_B;
int k_min_post_b, k_max_post_b, *l_min_post_b, *l_max_post_b;
int update_b = 0;
if (!matrices->Q_B[ij]) {
update_b = 1;
k_min_Q_B = l_min_Q_B = 0;
k_max_Q_B = mm1[ij] + referenceBPs1[ij];
l_max_Q_B = mm2[ij] + referenceBPs2[ij];
prepareBoundaries(k_min_Q_B,
k_max_Q_B,
l_min_Q_B,
l_max_Q_B,
bpdist[ij],
&matrices->k_min_Q_B[ij],
&matrices->k_max_Q_B[ij],
&matrices->l_min_Q_B[ij],
&matrices->l_max_Q_B[ij]
);
preparePosteriorBoundaries(matrices->k_max_Q_B[ij] - matrices->k_min_Q_B[ij] + 1,
matrices->k_min_Q_B[ij],
&k_min_post_b,
&k_max_post_b,
&l_min_post_b,
&l_max_post_b
);
prepareArray(&matrices->Q_B[ij],
matrices->k_min_Q_B[ij],
matrices->k_max_Q_B[ij],
matrices->l_min_Q_B[ij],
matrices->l_max_Q_B[ij]
);
}
/* hairpin ----------------------------------------------*/
/* get distance to reference if closing the hairpin
* d1a = dbp(T1_{i,j}, {i,j})
*/
base_da = ((unsigned int)reference_pt1[i] != j) ? 1 : -1;
base_db = ((unsigned int)reference_pt2[i] != j) ? 1 : -1;
da = base_da + referenceBPs1[ij];
db = base_db + referenceBPs2[ij];
if (!no_close) {
if ((da >= 0) && (db >= 0)) {
if (((unsigned int)da <= maxD1) && ((unsigned int)db <= maxD2)) {
matrices->Q_B[ij][da][db / 2] = exp_E_Hairpin(dij, type, S1[i + 1], S1[j - 1], sequence + i - 1, pf_params) * scale[dij + 2];
if (update_b) {
updatePosteriorBoundaries(da,
db,
&k_min_post_b,
&k_max_post_b,
&l_min_post_b,
&l_max_post_b
);
}
} else {
matrices->Q_B_rem[ij] = exp_E_Hairpin(dij, type, S1[i + 1], S1[j - 1], sequence + i - 1, pf_params) * scale[dij + 2];
}
}
}
/*--------------------------------------------------------
* check for elementary structures involving more than one
* closing pair.
* --------------------------------------------------------*/
for (k = i + 1; k <= MIN2(j - 2 - turn, i + MAXLOOP + 1); k++) {
unsigned int minl, ln_pre;
minl = k + turn + 1;
ln_pre = dij + k;
if (ln_pre > minl + MAXLOOP)
minl = ln_pre - MAXLOOP - 1;
for (l = minl; l < j; l++) {
kl = my_iindx[k] - l;
type_2 = ptype[jindx[l] + k];
if (type_2 == 0)
continue;
type_2 = rtype[type_2];
aux_en = exp_E_IntLoop(k - i - 1, j - l - 1, type, type_2, S1[i + 1], S1[j - 1], S1[k - 1], S1[l + 1], pf_params) * scale[k - i + j - l];
/* get distance to reference if closing the interior loop
* d2 = dbp(S_{i,j}, S_{k,l} + {i,j})
*/
da = base_da + referenceBPs1[ij] - referenceBPs1[kl];
db = base_db + referenceBPs2[ij] - referenceBPs2[kl];
if (matrices->Q_B_rem[kl])
matrices->Q_B_rem[ij] += matrices->Q_B_rem[kl] * aux_en;
if (!matrices->Q_B[kl])
continue;
for (cnt1 = matrices->k_min_Q_B[kl];
cnt1 <= matrices->k_max_Q_B[kl];
cnt1++)
for (cnt2 = matrices->l_min_Q_B[kl][cnt1];
cnt2 <= matrices->l_max_Q_B[kl][cnt1];
cnt2 += 2) {
if (((cnt1 + da) <= maxD1) && ((cnt2 + db) <= maxD2)) {
matrices->Q_B[ij][cnt1 + da][(cnt2 + db) / 2] += matrices->Q_B[kl][cnt1][cnt2 / 2] * aux_en;
if (update_b) {
updatePosteriorBoundaries(da + cnt1,
db + cnt2,
&k_min_post_b,
&k_max_post_b,
&l_min_post_b,
&l_max_post_b
);
}
} else {
matrices->Q_B_rem[ij] += matrices->Q_B[kl][cnt1][cnt2 / 2] * aux_en;
}
}
} /* end l-loop */
} /* end k-loop */
/* multi-loop contribution ------------------------*/
if (!no_close) {
for (u = i + turn + 2; u < j - turn - 2; u++) {
tt = rtype[type];
temp2 = pf_params->expMLclosing * exp_E_MLstem(tt, S1[j - 1], S1[i + 1], pf_params) * scale[2];
if (matrices->Q_M_rem[my_iindx[i + 1] - u]) {
if (matrices->Q_M1[jindx[j - 1] + u + 1]) {
for (cnt1 = matrices->k_min_Q_M1[jindx[j - 1] + u + 1];
cnt1 <= matrices->k_max_Q_M1[jindx[j - 1] + u + 1];
cnt1++)
for (cnt2 = matrices->l_min_Q_M1[jindx[j - 1] + u + 1][cnt1];
cnt2 <= matrices->l_max_Q_M1[jindx[j - 1] + u + 1][cnt1];
cnt2 += 2)
matrices->Q_B_rem[ij] += matrices->Q_M_rem[my_iindx[i + 1] - u] * matrices->Q_M1[jindx[j - 1] + u + 1][cnt1][cnt2 / 2] * temp2;
}
if (matrices->Q_M1_rem[jindx[j - 1] + u + 1])
matrices->Q_B_rem[ij] += matrices->Q_M_rem[my_iindx[i + 1] - u] * matrices->Q_M1_rem[jindx[j - 1] + u + 1] * temp2;
}
if (matrices->Q_M1_rem[jindx[j - 1] + u + 1]) {
if (matrices->Q_M[my_iindx[i + 1] - u]) {
for (cnt1 = matrices->k_min_Q_M[my_iindx[i + 1] - u];
cnt1 <= matrices->k_max_Q_M[my_iindx[i + 1] - u];
cnt1++)
for (cnt2 = matrices->l_min_Q_M[my_iindx[i + 1] - u][cnt1];
cnt2 <= matrices->l_max_Q_M[my_iindx[i + 1] - u][cnt1];
cnt2 += 2)
matrices->Q_B_rem[ij] += matrices->Q_M[my_iindx[i + 1] - u][cnt1][cnt2 / 2] * matrices->Q_M1_rem[jindx[j - 1] + u + 1] * temp2;
}
}
/* get distance to reference if closing the multiloop
* dist3 = dbp(S_{i,j}, {i,j} + S_{i+1,u} + S_{u+1,j-1})
*/
da = base_da + referenceBPs1[ij] - referenceBPs1[my_iindx[i + 1] - u] - referenceBPs1[my_iindx[u + 1] - j + 1];
db = base_db + referenceBPs2[ij] - referenceBPs2[my_iindx[i + 1] - u] - referenceBPs2[my_iindx[u + 1] - j + 1];
if (!matrices->Q_M[my_iindx[i + 1] - u])
continue;
if (!matrices->Q_M1[jindx[j - 1] + u + 1])
continue;
for (cnt1 = matrices->k_min_Q_M[my_iindx[i + 1] - u];
cnt1 <= matrices->k_max_Q_M[my_iindx[i + 1] - u];
cnt1++)
for (cnt2 = matrices->l_min_Q_M[my_iindx[i + 1] - u][cnt1];
cnt2 <= matrices->l_max_Q_M[my_iindx[i + 1] - u][cnt1];
cnt2 += 2) {
for (cnt3 = matrices->k_min_Q_M1[jindx[j - 1] + u + 1];
cnt3 <= matrices->k_max_Q_M1[jindx[j - 1] + u + 1];
cnt3++)
for (cnt4 = matrices->l_min_Q_M1[jindx[j - 1] + u + 1][cnt3];
cnt4 <= matrices->l_max_Q_M1[jindx[j - 1] + u + 1][cnt3];
cnt4 += 2) {
if (((cnt1 + cnt3 + da) <= maxD1) && ((cnt2 + cnt4 + db) <= maxD2)) {
matrices->Q_B[ij][cnt1 + cnt3 + da][(cnt2 + cnt4 + db) / 2] += matrices->Q_M[my_iindx[i + 1] - u][cnt1][cnt2 / 2]
* matrices->Q_M1[jindx[j - 1] + u + 1][cnt3][cnt4 / 2]
* temp2;
if (update_b) {
updatePosteriorBoundaries(cnt1 + cnt3 + da,
cnt2 + cnt4 + db,
&k_min_post_b,
&k_max_post_b,
&l_min_post_b,
&l_max_post_b
);
}
} else {
matrices->Q_B_rem[ij] += matrices->Q_M[my_iindx[i + 1] - u][cnt1][cnt2 / 2]
* matrices->Q_M1[jindx[j - 1] + u + 1][cnt3][cnt4 / 2]
* temp2;
}
}
}
}
}
if (update_b) {
adjustArrayBoundaries(&matrices->Q_B[ij],
&matrices->k_min_Q_B[ij],
&matrices->k_max_Q_B[ij],
&matrices->l_min_Q_B[ij],
&matrices->l_max_Q_B[ij],
k_min_post_b,
k_max_post_b,
l_min_post_b,
l_max_post_b
);
}
} /* end >> if (pair) << */
/* free ends ? -----------------------------------------*/
int k_min_Q_M, k_max_Q_M, l_min_Q_M, l_max_Q_M;
int k_min_post_m, k_max_post_m, *l_min_post_m, *l_max_post_m;
int update_m = 0;
int k_min_Q_M1, k_max_Q_M1, l_min_Q_M1, l_max_Q_M1;
int k_min_post_m1, k_max_post_m1, *l_min_post_m1, *l_max_post_m1;
int update_m1 = 0;
if (!matrices->Q_M[ij]) {
update_m = 1;
k_min_Q_M = l_min_Q_M = 0;
k_max_Q_M = mm1[ij] + referenceBPs1[ij];
l_max_Q_M = mm2[ij] + referenceBPs2[ij];
prepareBoundaries(k_min_Q_M,
k_max_Q_M,
l_min_Q_M,
l_max_Q_M,
bpdist[ij],
&matrices->k_min_Q_M[ij],
&matrices->k_max_Q_M[ij],
&matrices->l_min_Q_M[ij],
&matrices->l_max_Q_M[ij]
);
preparePosteriorBoundaries(matrices->k_max_Q_M[ij] - matrices->k_min_Q_M[ij] + 1,
matrices->k_min_Q_M[ij],
&k_min_post_m,
&k_max_post_m,
&l_min_post_m,
&l_max_post_m
);
prepareArray(&matrices->Q_M[ij],
matrices->k_min_Q_M[ij],
matrices->k_max_Q_M[ij],
matrices->l_min_Q_M[ij],
matrices->l_max_Q_M[ij]
);
}
if (!matrices->Q_M1[jindx[j] + i]) {
update_m1 = 1;
k_min_Q_M1 = l_min_Q_M1 = 0;
k_max_Q_M1 = mm1[ij] + referenceBPs1[ij];
l_max_Q_M1 = mm2[ij] + referenceBPs2[ij];
prepareBoundaries(k_min_Q_M1,
k_max_Q_M1,
l_min_Q_M1,
l_max_Q_M1,
bpdist[ij],
&matrices->k_min_Q_M1[jindx[j] + i],
&matrices->k_max_Q_M1[jindx[j] + i],
&matrices->l_min_Q_M1[jindx[j] + i],
&matrices->l_max_Q_M1[jindx[j] + i]
);
preparePosteriorBoundaries(matrices->k_max_Q_M1[jindx[j] + i] - matrices->k_min_Q_M1[jindx[j] + i] + 1,
matrices->k_min_Q_M1[jindx[j] + i],
&k_min_post_m1,
&k_max_post_m1,
&l_min_post_m1,
&l_max_post_m1
);
prepareArray(&matrices->Q_M1[jindx[j] + i],
matrices->k_min_Q_M1[jindx[j] + i],
matrices->k_max_Q_M1[jindx[j] + i],
matrices->l_min_Q_M1[jindx[j] + i],
matrices->l_max_Q_M1[jindx[j] + i]
);
}
/* j is unpaired */
da = referenceBPs1[ij] - referenceBPs1[ij + 1];
db = referenceBPs2[ij] - referenceBPs2[ij + 1];
if (matrices->Q_M_rem[ij + 1])
matrices->Q_M_rem[ij] += matrices->Q_M_rem[ij + 1] * pf_params->expMLbase * scale[1];
if (matrices->Q_M[ij + 1]) {
for (cnt1 = matrices->k_min_Q_M[ij + 1];
cnt1 <= matrices->k_max_Q_M[ij + 1];
cnt1++) {
for (cnt2 = matrices->l_min_Q_M[ij + 1][cnt1];
cnt2 <= matrices->l_max_Q_M[ij + 1][cnt1];
cnt2 += 2) {
if (((cnt1 + da) <= maxD1) && ((cnt2 + db) <= maxD2)) {
matrices->Q_M[ij][cnt1 + da][(cnt2 + db) / 2] += matrices->Q_M[ij + 1][cnt1][cnt2 / 2] * pf_params->expMLbase * scale[1];
if (update_m) {
updatePosteriorBoundaries(cnt1 + da,
cnt2 + db,
&k_min_post_m,
&k_max_post_m,
&l_min_post_m,
&l_max_post_m
);
}
} else {
matrices->Q_M_rem[ij] += matrices->Q_M[ij + 1][cnt1][cnt2 / 2] * pf_params->expMLbase * scale[1];
}
}
}
}
if (matrices->Q_M1_rem[jindx[j - 1] + i])
matrices->Q_M1_rem[jindx[j] + i] += matrices->Q_M1_rem[jindx[j - 1] + i] * pf_params->expMLbase * scale[1];
if (matrices->Q_M1[jindx[j - 1] + i]) {
for (cnt1 = matrices->k_min_Q_M1[jindx[j - 1] + i];
cnt1 <= matrices->k_max_Q_M1[jindx[j - 1] + i];
cnt1++)
for (cnt2 = matrices->l_min_Q_M1[jindx[j - 1] + i][cnt1];
cnt2 <= matrices->l_max_Q_M1[jindx[j - 1] + i][cnt1];
cnt2 += 2) {
if (((cnt1 + da) <= maxD1) && ((cnt2 + db) <= maxD2)) {
matrices->Q_M1[jindx[j] + i][cnt1 + da][(cnt2 + db) / 2] += matrices->Q_M1[jindx[j - 1] + i][cnt1][cnt2 / 2] * pf_params->expMLbase * scale[1];
if (update_m1) {
updatePosteriorBoundaries(cnt1 + da,
cnt2 + db,
&k_min_post_m1,
&k_max_post_m1,
&l_min_post_m1,
&l_max_post_m1
);
}
} else {
matrices->Q_M1_rem[jindx[j] + i] += matrices->Q_M1[jindx[j - 1] + i][cnt1][cnt2 / 2] * pf_params->expMLbase * scale[1];
}
}
}
/* j pairs with i */
if ((!no_close) && type) {
FLT_OR_DBL aux_en = exp_E_MLstem(type, (i > 1) || circ ? S1[i - 1] : -1, (j < seq_length) || circ ? S1[j + 1] : -1, pf_params);
if (matrices->Q_B_rem[ij]) {
matrices->Q_M_rem[ij] += matrices->Q_B_rem[ij] * aux_en;
matrices->Q_M1_rem[jindx[j] + i] += matrices->Q_B_rem[ij] * aux_en;
}
if (matrices->Q_B[ij]) {
for (cnt1 = matrices->k_min_Q_B[ij];
cnt1 <= matrices->k_max_Q_B[ij];
cnt1++)
for (cnt2 = matrices->l_min_Q_B[ij][cnt1];
cnt2 <= matrices->l_max_Q_B[ij][cnt1];
cnt2 += 2) {
matrices->Q_M[ij][cnt1][cnt2 / 2] += matrices->Q_B[ij][cnt1][cnt2 / 2] * aux_en;
if (update_m) {
updatePosteriorBoundaries(cnt1,
cnt2,
&k_min_post_m,
&k_max_post_m,
&l_min_post_m,
&l_max_post_m
);
}
matrices->Q_M1[jindx[j] + i][cnt1][cnt2 / 2] += matrices->Q_B[ij][cnt1][cnt2 / 2] * aux_en;
if (update_m1) {
updatePosteriorBoundaries(cnt1,
cnt2,
&k_min_post_m1,
&k_max_post_m1,
&l_min_post_m1,
&l_max_post_m1
);
}
}
}
}
/* j pairs with k: i<k<j */
ii = my_iindx[i];
for (k = i + 1; k <= j; k++) {
tt = ptype[jindx[j] + k];
temp2 = exp_E_MLstem(tt, S1[k - 1], (j < seq_length) || circ ? S1[j + 1] : -1, pf_params);
if (matrices->Q_B_rem[my_iindx[k] - j]) {
matrices->Q_M_rem[ij] += matrices->Q_B_rem[my_iindx[k] - j] * pow(pf_params->expMLbase, (double)(k - i)) * scale[k - i] * temp2;
if (matrices->Q_M[ii - k + 1]) {
for (cnt1 = matrices->k_min_Q_M[ii - k + 1];
cnt1 <= matrices->k_max_Q_M[ii - k + 1];
cnt1++)
for (cnt2 = matrices->l_min_Q_M[ii - k + 1][cnt1];
cnt2 <= matrices->l_max_Q_M[ii - k + 1][cnt1];
cnt2 += 2)
matrices->Q_M_rem[ij] += matrices->Q_M[ii - k + 1][cnt1][cnt2 / 2] * matrices->Q_B_rem[my_iindx[k] - j] * temp2;
}
if (matrices->Q_M_rem[ii - k + 1])
matrices->Q_M_rem[ij] += matrices->Q_M_rem[ii - k + 1] * matrices->Q_B_rem[my_iindx[k] - j] * temp2;
}
if (matrices->Q_M_rem[ii - k + 1]) {
if (matrices->Q_B[my_iindx[k] - j]) {
for (cnt1 = matrices->k_min_Q_B[my_iindx[k] - j];
cnt1 <= matrices->k_max_Q_B[my_iindx[k] - j];
cnt1++)
for (cnt2 = matrices->l_min_Q_B[my_iindx[k] - j][cnt1];
cnt2 <= matrices->l_max_Q_B[my_iindx[k] - j][cnt1];
cnt2 += 2)
matrices->Q_M_rem[ij] += matrices->Q_M_rem[my_iindx[k] - j] * matrices->Q_B[my_iindx[k] - j][cnt1][cnt2 / 2] * temp2;
}
}
      /* add contributions of QM(i,k-1)*QB(k,j)*e^b and
       * e^((k-i) * c) * QB(k,j) * e^b
       * therefore we need d1a = dbp(T1_{i,j}, T1_{i,k-1} + T1_{k,j}),
       * d1b = dbp(T2_{i,j}, T2_{i,k-1} + T2_{k,j}),
       * d1c = dbp(T1_{i,j}, T1_{k,j}),
       * d1d = dbp(T2_{i,j}, T2_{k,j})
       */
da = referenceBPs1[ij] - referenceBPs1[my_iindx[k] - j];
db = referenceBPs2[ij] - referenceBPs2[my_iindx[k] - j];
if (!matrices->Q_B[my_iindx[k] - j])
continue;
for (cnt1 = matrices->k_min_Q_B[my_iindx[k] - j];
cnt1 <= matrices->k_max_Q_B[my_iindx[k] - j];
cnt1++)
for (cnt2 = matrices->l_min_Q_B[my_iindx[k] - j][cnt1];
cnt2 <= matrices->l_max_Q_B[my_iindx[k] - j][cnt1];
cnt2 += 2) {
if (((cnt1 + da) <= maxD1) && ((cnt2 + db) <= maxD2)) {
matrices->Q_M[ij][cnt1 + da][(cnt2 + db) / 2] += matrices->Q_B[my_iindx[k] - j][cnt1][cnt2 / 2] * pow(pf_params->expMLbase, (double)(k - i)) * scale[k - i] * temp2;
if (update_m) {
updatePosteriorBoundaries(cnt1 + da,
cnt2 + db,
&k_min_post_m,
&k_max_post_m,
&l_min_post_m,
&l_max_post_m
);
}
} else {
matrices->Q_M_rem[ij] += matrices->Q_B[my_iindx[k] - j][cnt1][cnt2 / 2] * pow(pf_params->expMLbase, (double)(k - i)) * scale[k - i] * temp2;
}
}
if (!matrices->Q_M[ii - k + 1])
continue;
da -= referenceBPs1[ii - k + 1];
db -= referenceBPs2[ii - k + 1];
for (cnt1 = matrices->k_min_Q_M[ii - k + 1];
cnt1 <= matrices->k_max_Q_M[ii - k + 1];
cnt1++)
for (cnt2 = matrices->l_min_Q_M[ii - k + 1][cnt1];
cnt2 <= matrices->l_max_Q_M[ii - k + 1][cnt1];
cnt2 += 2)
for (cnt3 = matrices->k_min_Q_B[my_iindx[k] - j];
cnt3 <= matrices->k_max_Q_B[my_iindx[k] - j];
cnt3++)
for (cnt4 = matrices->l_min_Q_B[my_iindx[k] - j][cnt3];
cnt4 <= matrices->l_max_Q_B[my_iindx[k] - j][cnt3];
cnt4 += 2) {
if (((cnt1 + cnt3 + da) <= maxD1) && ((cnt2 + cnt4 + db) <= maxD2)) {
matrices->Q_M[ij][cnt1 + cnt3 + da][(cnt2 + cnt4 + db) / 2] += matrices->Q_M[ii - k + 1][cnt1][cnt2 / 2] * matrices->Q_B[my_iindx[k] - j][cnt3][cnt4 / 2] * temp2;
if (update_m) {
updatePosteriorBoundaries(cnt1 + cnt3 + da,
cnt2 + cnt4 + db,
&k_min_post_m,
&k_max_post_m,
&l_min_post_m,
&l_max_post_m
);
}
} else {
matrices->Q_M_rem[ij] += matrices->Q_M[ii - k + 1][cnt1][cnt2 / 2] * matrices->Q_B[my_iindx[k] - j][cnt3][cnt4 / 2] * temp2;
}
}
}
if (update_m) {
adjustArrayBoundaries(&matrices->Q_M[ij],
&matrices->k_min_Q_M[ij],
&matrices->k_max_Q_M[ij],
&matrices->l_min_Q_M[ij],
&matrices->l_max_Q_M[ij],
k_min_post_m,
k_max_post_m,
l_min_post_m,
l_max_post_m
);
}
if (update_m1) {
adjustArrayBoundaries(&matrices->Q_M1[jindx[j] + i],
&matrices->k_min_Q_M1[jindx[j] + i],
&matrices->k_max_Q_M1[jindx[j] + i],
&matrices->l_min_Q_M1[jindx[j] + i],
&matrices->l_max_Q_M1[jindx[j] + i],
k_min_post_m1,
k_max_post_m1,
l_min_post_m1,
l_max_post_m1
);
}
/* compute contributions for Q(i,j) */
int k_min, k_max, l_min, l_max;
int k_min_post, k_max_post, *l_min_post, *l_max_post;
int update_q = 0;
if (!matrices->Q[ij]) {
update_q = 1;
k_min = l_min = 0;
k_max = mm1[ij] + referenceBPs1[ij];
l_max = mm2[ij] + referenceBPs2[ij];
prepareBoundaries(k_min,
k_max,
l_min,
l_max,
bpdist[ij],
&matrices->k_min_Q[ij],
&matrices->k_max_Q[ij],
&matrices->l_min_Q[ij],
&matrices->l_max_Q[ij]
);
preparePosteriorBoundaries(matrices->k_max_Q[ij] - matrices->k_min_Q[ij] + 1,
matrices->k_min_Q[ij],
&k_min_post,
&k_max_post,
&l_min_post,
&l_max_post
);
prepareArray(&matrices->Q[ij],
matrices->k_min_Q[ij],
matrices->k_max_Q[ij],
matrices->l_min_Q[ij],
matrices->l_max_Q[ij]
);
}
if (type) {
aux_en = vrna_exp_E_ext_stem(type, (i > 1) || circ ? S1[i - 1] : -1, (j < seq_length) || circ ? S1[j + 1] : -1, pf_params);
if (matrices->Q_B_rem[ij])
matrices->Q_rem[ij] += matrices->Q_B_rem[ij] * aux_en;
if (matrices->Q_B[ij]) {
for (cnt1 = matrices->k_min_Q_B[ij];
cnt1 <= matrices->k_max_Q_B[ij];
cnt1++)
for (cnt2 = matrices->l_min_Q_B[ij][cnt1];
cnt2 <= matrices->l_max_Q_B[ij][cnt1];
cnt2 += 2) {
matrices->Q[ij][cnt1][cnt2 / 2] += matrices->Q_B[ij][cnt1][cnt2 / 2] * aux_en;
if (update_q) {
updatePosteriorBoundaries(cnt1,
cnt2,
&k_min_post,
&k_max_post,
&l_min_post,
&l_max_post
);
}
}
}
}
/* j is unpaired */
if (matrices->Q_rem[ij + 1])
matrices->Q_rem[ij] += matrices->Q_rem[ij + 1] * scale[1];
/* da = dbp(T1_{i,j}, T1_{i,j-1})
* db = dbp(T2_{i,j}, T2_{i,j-1})
*/
da = referenceBPs1[ij] - referenceBPs1[ij + 1];
db = referenceBPs2[ij] - referenceBPs2[ij + 1];
if (matrices->Q[ij + 1]) {
for (cnt1 = matrices->k_min_Q[ij + 1];
cnt1 <= matrices->k_max_Q[ij + 1];
cnt1++)
for (cnt2 = matrices->l_min_Q[ij + 1][cnt1];
cnt2 <= matrices->l_max_Q[ij + 1][cnt1];
cnt2 += 2) {
if (((cnt1 + da) <= maxD1) && ((cnt2 + db) <= maxD2)) {
matrices->Q[ij][cnt1 + da][(cnt2 + db) / 2] += matrices->Q[ij + 1][cnt1][cnt2 / 2] * scale[1];
if (update_q) {
updatePosteriorBoundaries(cnt1 + da,
cnt2 + db,
&k_min_post,
&k_max_post,
&l_min_post,
&l_max_post
);
}
} else {
matrices->Q_rem[ij] += matrices->Q[ij + 1][cnt1][cnt2 / 2] * scale[1];
}
}
}
for (k = j - turn - 1; k > i; k--) {
tt = ptype[jindx[j] + k];
temp2 = vrna_exp_E_ext_stem(tt, S1[k - 1], (j < seq_length) || circ ? S1[j + 1] : -1, pf_params);
if (matrices->Q_rem[my_iindx[i] - k + 1]) {
if (matrices->Q_B[my_iindx[k] - j]) {
for (cnt1 = matrices->k_min_Q_B[my_iindx[k] - j];
cnt1 <= matrices->k_max_Q_B[my_iindx[k] - j];
cnt1++)
for (cnt2 = matrices->l_min_Q_B[my_iindx[k] - j][cnt1];
cnt2 <= matrices->l_max_Q_B[my_iindx[k] - j][cnt1];
cnt2 += 2)
matrices->Q_rem[ij] += matrices->Q_rem[my_iindx[i] - k + 1] * matrices->Q_B[my_iindx[k] - j][cnt1][cnt2 / 2] * temp2;
}
if (matrices->Q_B_rem[my_iindx[k] - j])
matrices->Q_rem[ij] += matrices->Q_rem[my_iindx[i] - k + 1] * matrices->Q_B_rem[my_iindx[k] - j] * temp2;
}
if (matrices->Q_B_rem[my_iindx[k] - j]) {
if (matrices->Q[my_iindx[i] - k + 1]) {
for (cnt1 = matrices->k_min_Q[my_iindx[i] - k + 1];
cnt1 <= matrices->k_max_Q[my_iindx[i] - k + 1];
cnt1++)
for (cnt2 = matrices->l_min_Q[my_iindx[i] - k + 1][cnt1];
cnt2 <= matrices->l_max_Q[my_iindx[i] - k + 1][cnt1];
cnt2 += 2)
matrices->Q_rem[ij] += matrices->Q[my_iindx[i] - k + 1][cnt1][cnt2 / 2] * matrices->Q_B_rem[my_iindx[k] - j] * temp2;
}
}
      /* da = dbp(T1_{i,j}, T1_{k,j})
       * db = dbp(T2_{i,j}, T2_{k,j})
*/
da = referenceBPs1[ij] - referenceBPs1[my_iindx[k] - j] - referenceBPs1[my_iindx[i] - k + 1];
db = referenceBPs2[ij] - referenceBPs2[my_iindx[k] - j] - referenceBPs2[my_iindx[i] - k + 1];
if (!matrices->Q[my_iindx[i] - k + 1])
continue;
if (!matrices->Q_B[my_iindx[k] - j])
continue;
for (cnt1 = matrices->k_min_Q[my_iindx[i] - k + 1];
cnt1 <= matrices->k_max_Q[my_iindx[i] - k + 1];
cnt1++)
for (cnt2 = matrices->l_min_Q[my_iindx[i] - k + 1][cnt1];
cnt2 <= matrices->l_max_Q[my_iindx[i] - k + 1][cnt1];
cnt2 += 2)
for (cnt3 = matrices->k_min_Q_B[my_iindx[k] - j];
cnt3 <= matrices->k_max_Q_B[my_iindx[k] - j];
cnt3++)
for (cnt4 = matrices->l_min_Q_B[my_iindx[k] - j][cnt3];
cnt4 <= matrices->l_max_Q_B[my_iindx[k] - j][cnt3];
cnt4 += 2) {
if (((cnt1 + cnt3 + da) <= maxD1) && ((cnt2 + cnt4 + db) <= maxD2)) {
matrices->Q[ij][cnt1 + cnt3 + da][(cnt2 + cnt4 + db) / 2] += matrices->Q[my_iindx[i] - k + 1][cnt1][cnt2 / 2] * matrices->Q_B[my_iindx[k] - j][cnt3][cnt4 / 2] * temp2;
if (update_q) {
updatePosteriorBoundaries(cnt1 + cnt3 + da,
cnt2 + cnt4 + db,
&k_min_post,
&k_max_post,
&l_min_post,
&l_max_post
);
}
} else {
matrices->Q_rem[ij] += matrices->Q[my_iindx[i] - k + 1][cnt1][cnt2 / 2] * matrices->Q_B[my_iindx[k] - j][cnt3][cnt4 / 2] * temp2;
}
}
}
if (update_q) {
adjustArrayBoundaries(&matrices->Q[ij],
&matrices->k_min_Q[ij],
&matrices->k_max_Q[ij],
&matrices->l_min_Q[ij],
&matrices->l_max_Q[ij],
k_min_post,
k_max_post,
l_min_post,
l_max_post
);
}
#if 1
for (cnt1 = matrices->k_min_Q[ij];
cnt1 <= matrices->k_max_Q[ij];
cnt1++) {
for (cnt2 = matrices->l_min_Q[ij][cnt1];
cnt2 <= matrices->l_max_Q[ij][cnt1];
cnt2 += 2) {
if (matrices->Q[ij][cnt1][cnt2 / 2] > Qmax) {
Qmax = matrices->Q[ij][cnt1][cnt2 / 2];
if (Qmax > max_real / 10.)
vrna_message_warning("Q close to overflow: %u %u %g\n", i, j, matrices->Q[ij][cnt1][cnt2 / 2]);
}
if (matrices->Q[ij][cnt1][cnt2 / 2] >= max_real)
vrna_message_error("overflow in pf_fold while calculating q[%u,%u]\n"
"use larger pf_scale", i, j);
}
}
#endif
} /* end of j-loop */
}
}
/* calculate the partition function for the circular case */
/* NOTE: this is the post-processing step ONLY; you have to call */
/* pf2D_linear first to complete the circular case */
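/* For circular RNAs the total partition function is assembled from the three
 * exterior-loop decompositions computed below,
 *   Q_c = Q_cH (exterior hairpin) + Q_cI (exterior interior loop)
 *       + Q_cM (exterior multiloop) + open-chain term,
 * each resolved per distance class (k,l); the *_rem scalars again collect
 * contributions beyond (maxD1, maxD2).
 */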
PRIVATE void
pf2D_circ(vrna_fold_compound_t *vc)
{
unsigned int d, p, q, pq, k, l, kl, u, da, db, seq_length, maxD1, maxD2, base_d1, base_d2, *mm1, *mm2, *bpdist;
int *my_iindx, *jindx, type, cnt1, cnt2, cnt3, cnt4, *rtype, turn;
short *S1;
unsigned int *referenceBPs1, *referenceBPs2;
char *sequence, *ptype;
FLT_OR_DBL *scale;
vrna_exp_param_t *pf_params; /* holds all [unscaled] pf parameters */
vrna_md_t *md;
vrna_mx_pf_t *matrices;
pf_params = vc->exp_params;
md = &(pf_params->model_details);
matrices = vc->exp_matrices;
sequence = vc->sequence;
seq_length = vc->length;
maxD1 = vc->maxD1;
maxD2 = vc->maxD2;
S1 = vc->sequence_encoding;
ptype = vc->ptype;
rtype = &(md->rtype[0]);
scale = matrices->scale;
my_iindx = vc->iindx;
jindx = vc->jindx;
referenceBPs1 = vc->referenceBPs1;
referenceBPs2 = vc->referenceBPs2;
dangles = md->dangles;
turn = md->min_loop_size;
mm1 = vc->mm1;
mm2 = vc->mm2;
bpdist = vc->bpdist;
FLT_OR_DBL ***Q_B, ***Q_M, ***Q_M1;
FLT_OR_DBL *Q_B_rem, *Q_M_rem, *Q_M1_rem;
int **l_min_Q_B, **l_max_Q_B, **l_min_Q_M, **l_max_Q_M, **l_min_Q_M1, **l_max_Q_M1;
int *k_min_Q_B, *k_max_Q_B, *k_min_Q_M, *k_max_Q_M, *k_min_Q_M1, *k_max_Q_M1;
Q_B = matrices->Q_B;
l_min_Q_B = matrices->l_min_Q_B;
l_max_Q_B = matrices->l_max_Q_B;
k_min_Q_B = matrices->k_min_Q_B;
k_max_Q_B = matrices->k_max_Q_B;
Q_M = matrices->Q_M;
l_min_Q_M = matrices->l_min_Q_M;
l_max_Q_M = matrices->l_max_Q_M;
k_min_Q_M = matrices->k_min_Q_M;
k_max_Q_M = matrices->k_max_Q_M;
Q_M1 = matrices->Q_M1;
l_min_Q_M1 = matrices->l_min_Q_M1;
l_max_Q_M1 = matrices->l_max_Q_M1;
k_min_Q_M1 = matrices->k_min_Q_M1;
k_max_Q_M1 = matrices->k_max_Q_M1;
Q_B_rem = matrices->Q_B_rem;
Q_M_rem = matrices->Q_M_rem;
Q_M1_rem = matrices->Q_M1_rem;
matrices->Q_c_rem = 0.;
matrices->Q_cH_rem = 0.;
matrices->Q_cI_rem = 0.;
matrices->Q_cM_rem = 0.;
/* construct qm2 matrix from qm1 entries */
#ifdef _OPENMP
#pragma omp parallel for private(d, k, l, da, db, cnt1, cnt2, cnt3, cnt4)
#endif
for (k = 1; k < seq_length - turn - 1; k++) {
int k_min_Q_M2, k_max_Q_M2, l_min_Q_M2, l_max_Q_M2;
int k_min_post_m2, k_max_post_m2, *l_min_post_m2, *l_max_post_m2;
int update_m2 = 0;
l_min_post_m2 = l_max_post_m2 = NULL;
if (!matrices->Q_M2[k]) {
update_m2 = 1;
k_min_Q_M2 = l_min_Q_M2 = 0;
k_max_Q_M2 = mm1[my_iindx[k] - seq_length] + referenceBPs1[my_iindx[k] - seq_length];
l_max_Q_M2 = mm2[my_iindx[k] - seq_length] + referenceBPs2[my_iindx[k] - seq_length];
prepareBoundaries(k_min_Q_M2,
k_max_Q_M2,
l_min_Q_M2,
l_max_Q_M2,
bpdist[my_iindx[k] - seq_length],
&matrices->k_min_Q_M2[k],
&matrices->k_max_Q_M2[k],
&matrices->l_min_Q_M2[k],
&matrices->l_max_Q_M2[k]
);
preparePosteriorBoundaries(matrices->k_max_Q_M2[k] - matrices->k_min_Q_M2[k] + 1,
matrices->k_min_Q_M2[k],
&k_min_post_m2,
&k_max_post_m2,
&l_min_post_m2,
&l_max_post_m2
);
prepareArray(&matrices->Q_M2[k],
matrices->k_min_Q_M2[k],
matrices->k_max_Q_M2[k],
matrices->l_min_Q_M2[k],
matrices->l_max_Q_M2[k]
);
}
/* construct Q_M2 */
for (l = k + turn + 1; l < seq_length - turn - 1; l++) {
if (Q_M1_rem[jindx[l] + k]) {
if (Q_M1[jindx[seq_length] + l + 1]) {
for (cnt1 = k_min_Q_M1[jindx[seq_length] + l + 1];
cnt1 <= k_max_Q_M1[jindx[seq_length] + l + 1];
cnt1++)
for (cnt2 = l_min_Q_M1[jindx[seq_length] + l + 1][cnt1];
cnt2 <= l_max_Q_M1[jindx[seq_length] + l + 1][cnt1];
cnt2 += 2)
matrices->Q_M2_rem[k] += Q_M1_rem[jindx[l] + k] * Q_M1[jindx[seq_length] + l + 1][cnt1][cnt2 / 2];
}
if (Q_M1_rem[jindx[seq_length] + l + 1])
matrices->Q_M2_rem[k] += Q_M1_rem[jindx[l] + k] * Q_M1_rem[jindx[seq_length] + l + 1];
}
if (Q_M1_rem[jindx[seq_length] + l + 1]) {
if (Q_M1[jindx[l] + k]) {
for (cnt1 = k_min_Q_M1[jindx[l] + k];
cnt1 <= k_max_Q_M1[jindx[l] + k];
cnt1++)
for (cnt2 = l_min_Q_M1[jindx[l] + k][cnt1];
cnt2 <= l_max_Q_M1[jindx[l] + k][cnt1];
cnt2 += 2)
matrices->Q_M2_rem[k] += Q_M1[jindx[l] + k][cnt1][cnt2 / 2] * Q_M1_rem[jindx[seq_length] + l + 1];
}
}
if (matrices->Q_M1[jindx[l] + k] && matrices->Q_M1[jindx[seq_length] + l + 1]) {
da = referenceBPs1[my_iindx[k] - seq_length] - referenceBPs1[my_iindx[k] - l] - referenceBPs1[my_iindx[l + 1] - seq_length];
db = referenceBPs2[my_iindx[k] - seq_length] - referenceBPs2[my_iindx[k] - l] - referenceBPs2[my_iindx[l + 1] - seq_length];
for (cnt1 = k_min_Q_M1[jindx[l] + k]; cnt1 <= k_max_Q_M1[jindx[l] + k]; cnt1++)
for (cnt2 = l_min_Q_M1[jindx[l] + k][cnt1]; cnt2 <= l_max_Q_M1[jindx[l] + k][cnt1]; cnt2 += 2) {
for (cnt3 = k_min_Q_M1[jindx[seq_length] + l + 1]; cnt3 <= k_max_Q_M1[jindx[seq_length] + l + 1]; cnt3++)
for (cnt4 = l_min_Q_M1[jindx[seq_length] + l + 1][cnt3]; cnt4 <= l_max_Q_M1[jindx[seq_length] + l + 1][cnt3]; cnt4 += 2) {
if (((cnt1 + cnt3 + da) <= maxD1) && ((cnt2 + cnt4 + db) <= maxD2)) {
matrices->Q_M2[k][cnt1 + cnt3 + da][(cnt2 + cnt4 + db) / 2] += Q_M1[jindx[l] + k][cnt1][cnt2 / 2] * Q_M1[jindx[seq_length] + l + 1][cnt3][cnt4 / 2];
if (update_m2) {
updatePosteriorBoundaries(cnt1 + cnt3 + da,
cnt2 + cnt4 + db,
&k_min_post_m2,
&k_max_post_m2,
&l_min_post_m2,
&l_max_post_m2
);
}
} else {
matrices->Q_M2_rem[k] += Q_M1[jindx[l] + k][cnt1][cnt2 / 2] * Q_M1[jindx[seq_length] + l + 1][cnt3][cnt4 / 2];
}
}
}
}
}
if (update_m2) {
adjustArrayBoundaries(&matrices->Q_M2[k],
&matrices->k_min_Q_M2[k],
&matrices->k_max_Q_M2[k],
&matrices->l_min_Q_M2[k],
&matrices->l_max_Q_M2[k],
k_min_post_m2,
k_max_post_m2,
l_min_post_m2,
l_max_post_m2
);
}
}
base_d1 = referenceBPs1[my_iindx[1] - seq_length];
base_d2 = referenceBPs2[my_iindx[1] - seq_length];
int min_k, max_k, max_l, min_l;
int min_k_real, max_k_real, min_k_real_qcH, max_k_real_qcH, min_k_real_qcI, max_k_real_qcI, min_k_real_qcM, max_k_real_qcM;
int *min_l_real, *max_l_real, *min_l_real_qcH, *max_l_real_qcH, *min_l_real_qcI, *max_l_real_qcI, *min_l_real_qcM, *max_l_real_qcM;
int update_c, update_cH, update_cI, update_cM;
max_l_real_qcM = min_l_real_qcM = NULL;
max_l_real_qcI = min_l_real_qcI = NULL;
max_l_real_qcH = min_l_real_qcH = NULL;
max_l_real = min_l_real = NULL;
update_c = update_cH = update_cI = update_cM = 0;
min_k = min_l = 0;
max_k = mm1[my_iindx[1] - seq_length] + referenceBPs1[my_iindx[1] - seq_length];
max_l = mm2[my_iindx[1] - seq_length] + referenceBPs2[my_iindx[1] - seq_length];
#ifdef _OPENMP
#pragma omp sections
{
#pragma omp section
{
#endif
if (!matrices->Q_c) {
update_c = 1;
prepareBoundaries(min_k,
max_k,
min_l,
max_l,
bpdist[my_iindx[1] - seq_length],
&matrices->k_min_Q_c,
&matrices->k_max_Q_c,
&matrices->l_min_Q_c,
&matrices->l_max_Q_c
);
prepareArray(&matrices->Q_c,
matrices->k_min_Q_c,
matrices->k_max_Q_c,
matrices->l_min_Q_c,
matrices->l_max_Q_c
);
preparePosteriorBoundaries(max_k - min_k + 1,
min_k,
&min_k_real,
&max_k_real,
&min_l_real,
&max_l_real
);
}
#ifdef _OPENMP
}
#pragma omp section
{
#endif
if (!matrices->Q_cH) {
update_cH = 1;
prepareBoundaries(min_k,
max_k,
min_l,
max_l,
bpdist[my_iindx[1] - seq_length],
&matrices->k_min_Q_cH,
&matrices->k_max_Q_cH,
&matrices->l_min_Q_cH,
&matrices->l_max_Q_cH
);
prepareArray(&matrices->Q_cH,
matrices->k_min_Q_cH,
matrices->k_max_Q_cH,
matrices->l_min_Q_cH,
matrices->l_max_Q_cH
);
preparePosteriorBoundaries(max_k - min_k + 1,
min_k,
&min_k_real_qcH,
&max_k_real_qcH,
&min_l_real_qcH,
&max_l_real_qcH
);
}
#ifdef _OPENMP
}
#pragma omp section
{
#endif
if (!matrices->Q_cI) {
update_cI = 1;
prepareBoundaries(min_k,
max_k,
min_l,
max_l,
bpdist[my_iindx[1] - seq_length],
&matrices->k_min_Q_cI,
&matrices->k_max_Q_cI,
&matrices->l_min_Q_cI,
&matrices->l_max_Q_cI
);
prepareArray(&matrices->Q_cI,
matrices->k_min_Q_cI,
matrices->k_max_Q_cI,
matrices->l_min_Q_cI,
matrices->l_max_Q_cI
);
preparePosteriorBoundaries(max_k - min_k + 1,
min_k,
&min_k_real_qcI,
&max_k_real_qcI,
&min_l_real_qcI,
&max_l_real_qcI
);
}
#ifdef _OPENMP
}
#pragma omp section
{
#endif
if (!matrices->Q_cM) {
update_cM = 1;
prepareBoundaries(min_k,
max_k,
min_l,
max_l,
bpdist[my_iindx[1] - seq_length],
&matrices->k_min_Q_cM,
&matrices->k_max_Q_cM,
&matrices->l_min_Q_cM,
&matrices->l_max_Q_cM
);
prepareArray(&matrices->Q_cM,
matrices->k_min_Q_cM,
matrices->k_max_Q_cM,
matrices->l_min_Q_cM,
matrices->l_max_Q_cM
);
preparePosteriorBoundaries(max_k - min_k + 1,
min_k,
&min_k_real_qcM,
&max_k_real_qcM,
&min_l_real_qcM,
&max_l_real_qcM
);
}
#ifdef _OPENMP
}
}
#endif
for (d = turn + 2; d <= seq_length; d++) /* i,j in [1..length] */
#ifdef _OPENMP
#pragma omp parallel for private(p, q, pq, k, l, kl, u, da, db, type, cnt1, cnt2, cnt3, cnt4)
#endif
for (q = d; q <= seq_length; q++) {
FLT_OR_DBL qot;
char loopseq[10];
p = q - d + 1;
pq = my_iindx[p] - q;
/* 1. get exterior hairpin contribution */
u = seq_length - q + p - 1;
if (u < turn)
continue;
type = ptype[jindx[q] + p];
if (!type)
continue;
if (((type == 3) || (type == 4)) && no_closingGU)
continue;
      /* because we want to calculate the exterior loops, we need the reversed pair type from now on */
type = rtype[type];
if (u < 7) {
strcpy(loopseq, sequence + q - 1);
strncat(loopseq, sequence, p);
}
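      /* For the exterior hairpin the unpaired stretch wraps around the origin,
       * so loopseq is assembled from the 3' tail starting at position q followed
       * by the first p characters of the 5' end; it is only needed for short
       * loops, hence the u < 7 guard and the small loopseq[10] buffer. */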
/* get distance to reference if closing the hairpin
       * da = dbp(T1_{1,n}, T1_{p,q})
* db = dbp(T2_{1,n}, T2_{p,q})
*/
da = base_d1 - referenceBPs1[pq];
db = base_d2 - referenceBPs2[pq];
qot = exp_E_Hairpin(u, type, S1[q + 1], S1[p - 1], loopseq, pf_params) * scale[u];
if (Q_B_rem[pq])
matrices->Q_cH_rem += Q_B_rem[pq] * qot;
if (Q_B[pq]) {
for (cnt1 = k_min_Q_B[pq];
cnt1 <= k_max_Q_B[pq];
cnt1++)
for (cnt2 = l_min_Q_B[pq][cnt1];
cnt2 <= l_max_Q_B[pq][cnt1];
cnt2 += 2) {
if (((cnt1 + da) <= maxD1) && ((cnt2 + db) <= maxD2)) {
matrices->Q_cH[cnt1 + da][(cnt2 + db) / 2] += Q_B[pq][cnt1][cnt2 / 2] * qot;
if (update_cH) {
updatePosteriorBoundaries(cnt1 + da,
cnt2 + db,
&min_k_real_qcH,
&max_k_real_qcH,
&min_l_real_qcH,
&max_l_real_qcH
);
}
} else {
matrices->Q_cH_rem += Q_B[pq][cnt1][cnt2 / 2] * qot;
}
}
}
      /* 2. exterior interior loops: we "define" the (k,l) pair as the "outer pair" */
/* so "outer type" is rtype[type[k,l]] and inner type is type[p,q] */
if (Q_B_rem[pq]) {
for (k = q + 1; k < seq_length; k++) {
unsigned int ln1, lstart, ln_pre;
ln1 = k - q - 1;
if (ln1 + p - 1 > MAXLOOP)
break;
lstart = k + turn + 1;
ln_pre = ln1 + p + seq_length;
if (ln_pre > lstart + MAXLOOP)
lstart = ln_pre - MAXLOOP - 1;
for (l = lstart; l <= seq_length; l++) {
unsigned int ln2;
int type2;
kl = my_iindx[k] - l;
ln2 = (p - 1) + (seq_length - l);
if ((ln1 + ln2) > MAXLOOP)
continue;
type2 = ptype[jindx[l] + k];
if (!type2)
continue;
qot = exp_E_IntLoop(ln2, ln1, rtype[type2], type, S1[l + 1], S1[k - 1], S1[p - 1], S1[q + 1], pf_params) * scale[ln1 + ln2];
if (Q_B_rem[kl])
matrices->Q_cI_rem += Q_B_rem[pq] * Q_B_rem[kl] * qot;
if (Q_B[kl]) {
for (cnt1 = k_min_Q_B[kl];
cnt1 <= k_max_Q_B[kl];
cnt1++)
for (cnt2 = l_min_Q_B[kl][cnt1];
cnt2 <= l_max_Q_B[kl][cnt1];
cnt2 += 2)
matrices->Q_cI_rem += Q_B_rem[pq] * Q_B[kl][cnt1][cnt2 / 2] * qot;
}
}
}
}
if (Q_B[pq]) {
for (k = q + 1; k < seq_length; k++) {
unsigned int ln1, lstart, ln_pre;
ln1 = k - q - 1;
if (ln1 + p - 1 > MAXLOOP)
break;
lstart = k + turn + 1;
ln_pre = ln1 + p + seq_length;
if (ln_pre > lstart + MAXLOOP)
lstart = ln_pre - MAXLOOP - 1;
for (l = lstart; l <= seq_length; l++) {
unsigned int ln2;
int type2;
kl = my_iindx[k] - l;
ln2 = (p - 1) + (seq_length - l);
if ((ln1 + ln2) > MAXLOOP)
continue;
type2 = ptype[jindx[l] + k];
if (!type2)
continue;
qot = exp_E_IntLoop(ln2, ln1, rtype[type2], type, S1[l + 1], S1[k - 1], S1[p - 1], S1[q + 1], pf_params) * scale[ln1 + ln2];
if (Q_B_rem[kl]) {
for (cnt1 = k_min_Q_B[pq];
cnt1 <= k_max_Q_B[pq];
cnt1++)
for (cnt2 = l_min_Q_B[pq][cnt1];
cnt2 <= l_max_Q_B[pq][cnt1];
cnt2 += 2)
matrices->Q_cI_rem += Q_B[pq][cnt1][cnt2 / 2] * Q_B_rem[kl] * qot;
}
if (!Q_B[kl])
continue;
/* get distance to reference if closing the interior loop
             * d2a = dbp(T1_{1,n}, T1_{p,q} + T1_{k,l})
             * d2b = dbp(T2_{1,n}, T2_{p,q} + T2_{k,l})
*/
da = base_d1 - referenceBPs1[pq] - referenceBPs1[kl];
db = base_d2 - referenceBPs2[pq] - referenceBPs2[kl];
for (cnt1 = k_min_Q_B[pq]; cnt1 <= k_max_Q_B[pq]; cnt1++)
for (cnt2 = l_min_Q_B[pq][cnt1]; cnt2 <= l_max_Q_B[pq][cnt1]; cnt2 += 2)
for (cnt3 = k_min_Q_B[kl]; cnt3 <= k_max_Q_B[kl]; cnt3++)
for (cnt4 = l_min_Q_B[kl][cnt3]; cnt4 <= l_max_Q_B[kl][cnt3]; cnt4 += 2) {
if (((cnt1 + cnt3 + da) <= maxD1) && ((cnt2 + cnt4 + db) <= maxD2)) {
matrices->Q_cI[cnt1 + cnt3 + da][(cnt2 + cnt4 + db) / 2] += Q_B[pq][cnt1][cnt2 / 2] * Q_B[kl][cnt3][cnt4 / 2] * qot;
if (update_cI) {
updatePosteriorBoundaries(cnt1 + cnt3 + da,
cnt2 + cnt4 + db,
&min_k_real_qcI,
&max_k_real_qcI,
&min_l_real_qcI,
&max_l_real_qcI
);
}
} else {
matrices->Q_cI_rem += Q_B[pq][cnt1][cnt2 / 2] * Q_B[kl][cnt3][cnt4 / 2] * qot;
}
}
}
}
}
}
if (update_cH) {
adjustArrayBoundaries(&matrices->Q_cH,
&matrices->k_min_Q_cH,
&matrices->k_max_Q_cH,
&matrices->l_min_Q_cH,
&matrices->l_max_Q_cH,
min_k_real_qcH,
max_k_real_qcH,
min_l_real_qcH,
max_l_real_qcH
);
}
if (update_cI) {
adjustArrayBoundaries(&matrices->Q_cI,
&matrices->k_min_Q_cI,
&matrices->k_max_Q_cI,
&matrices->l_min_Q_cI,
&matrices->l_max_Q_cI,
min_k_real_qcI,
max_k_real_qcI,
min_l_real_qcI,
max_l_real_qcI
);
}
/* 3. Multiloops */
if (seq_length > 2 * turn - 3) {
#ifdef _OPENMP
#pragma omp parallel for private(k, da, db, cnt1, cnt2, cnt3, cnt4)
#endif
for (k = turn + 2; k < seq_length - 2 * turn - 3; k++) {
if (Q_M_rem[my_iindx[1] - k]) {
if (matrices->Q_M2[k + 1]) {
for (cnt1 = matrices->k_min_Q_M2[k + 1];
cnt1 <= matrices->k_max_Q_M2[k + 1];
cnt1++)
for (cnt2 = matrices->l_min_Q_M2[k + 1][cnt1];
cnt2 <= matrices->l_max_Q_M2[k + 1][cnt1];
cnt2 += 2)
matrices->Q_cM_rem += Q_M_rem[my_iindx[1] - k] * matrices->Q_M2[k + 1][cnt1][cnt2 / 2] * pf_params->expMLclosing;
}
if (matrices->Q_M2_rem[k + 1])
matrices->Q_cM_rem += Q_M_rem[my_iindx[1] - k] * matrices->Q_M2_rem[k + 1] * pf_params->expMLclosing;
}
if (matrices->Q_M2_rem[k + 1]) {
if (Q_M[my_iindx[1] - k]) {
for (cnt1 = k_min_Q_M[my_iindx[1] - k];
cnt1 <= k_max_Q_M[my_iindx[1] - k];
cnt1++)
for (cnt2 = l_min_Q_M[my_iindx[1] - k][cnt1];
cnt2 <= l_max_Q_M[my_iindx[1] - k][cnt1];
cnt2 += 2)
matrices->Q_cM_rem += Q_M[my_iindx[1] - k][cnt1][cnt2 / 2] * matrices->Q_M2_rem[k + 1] * pf_params->expMLclosing;
}
}
/* get distances to the references
 * d3a = dbp(T1_[1,n], T1_{1,k} + T1_{k+1,n})
 * d3b = dbp(T2_[1,n], T2_{1,k} + T2_{k+1,n})
 */
da = base_d1 - referenceBPs1[my_iindx[1] - k] - referenceBPs1[my_iindx[k + 1] - seq_length];
db = base_d2 - referenceBPs2[my_iindx[1] - k] - referenceBPs2[my_iindx[k + 1] - seq_length];
if (Q_M[my_iindx[1] - k] && matrices->Q_M2[k + 1]) {
for (cnt1 = k_min_Q_M[my_iindx[1] - k]; cnt1 <= k_max_Q_M[my_iindx[1] - k]; cnt1++)
for (cnt2 = l_min_Q_M[my_iindx[1] - k][cnt1]; cnt2 <= l_max_Q_M[my_iindx[1] - k][cnt1]; cnt2 += 2)
for (cnt3 = matrices->k_min_Q_M2[k + 1]; cnt3 <= matrices->k_max_Q_M2[k + 1]; cnt3++)
for (cnt4 = matrices->l_min_Q_M2[k + 1][cnt3]; cnt4 <= matrices->l_max_Q_M2[k + 1][cnt3]; cnt4 += 2) {
if (((cnt1 + cnt3 + da) <= maxD1) && ((cnt2 + cnt4 + db) <= maxD2)) {
matrices->Q_cM[cnt1 + cnt3 + da][(cnt2 + cnt4 + db) / 2] += Q_M[my_iindx[1] - k][cnt1][cnt2 / 2] * matrices->Q_M2[k + 1][cnt3][cnt4 / 2] * pf_params->expMLclosing;
if (update_cM) {
updatePosteriorBoundaries(cnt1 + cnt3 + da,
cnt2 + cnt4 + db,
&min_k_real_qcM,
&max_k_real_qcM,
&min_l_real_qcM,
&max_l_real_qcM
);
}
} else {
matrices->Q_cM_rem += Q_M[my_iindx[1] - k][cnt1][cnt2 / 2] * matrices->Q_M2[k + 1][cnt3][cnt4 / 2] * pf_params->expMLclosing;
}
}
}
}
}
if (update_cM) {
adjustArrayBoundaries(&matrices->Q_cM,
&matrices->k_min_Q_cM,
&matrices->k_max_Q_cM,
&matrices->l_min_Q_cM,
&matrices->l_max_Q_cM,
min_k_real_qcM,
max_k_real_qcM,
min_l_real_qcM,
max_l_real_qcM
);
}
for (cnt1 = matrices->k_min_Q_cH;
cnt1 <= matrices->k_max_Q_cH;
cnt1++)
for (cnt2 = matrices->l_min_Q_cH[cnt1];
cnt2 <= matrices->l_max_Q_cH[cnt1];
cnt2 += 2) {
matrices->Q_c[cnt1][cnt2 / 2] += matrices->Q_cH[cnt1][cnt2 / 2];
if (update_c) {
updatePosteriorBoundaries(cnt1,
cnt2,
&min_k_real,
&max_k_real,
&min_l_real,
&max_l_real
);
}
}
for (cnt1 = matrices->k_min_Q_cI;
cnt1 <= matrices->k_max_Q_cI;
cnt1++)
for (cnt2 = matrices->l_min_Q_cI[cnt1];
cnt2 <= matrices->l_max_Q_cI[cnt1];
cnt2 += 2) {
matrices->Q_c[cnt1][cnt2 / 2] += matrices->Q_cI[cnt1][cnt2 / 2];
if (update_c) {
updatePosteriorBoundaries(cnt1,
cnt2,
&min_k_real,
&max_k_real,
&min_l_real,
&max_l_real
);
}
}
for (cnt1 = matrices->k_min_Q_cM;
cnt1 <= matrices->k_max_Q_cM;
cnt1++)
for (cnt2 = matrices->l_min_Q_cM[cnt1];
cnt2 <= matrices->l_max_Q_cM[cnt1];
cnt2 += 2) {
matrices->Q_c[cnt1][cnt2 / 2] += matrices->Q_cM[cnt1][cnt2 / 2];
if (update_c) {
updatePosteriorBoundaries(cnt1,
cnt2,
&min_k_real,
&max_k_real,
&min_l_real,
&max_l_real
);
}
}
matrices->Q_c_rem = matrices->Q_cH_rem + matrices->Q_cI_rem + matrices->Q_cM_rem;
/* add the case where the structure is the unfolded (open) chain */
if ((referenceBPs1[my_iindx[1] - seq_length] <= maxD1) && (referenceBPs2[my_iindx[1] - seq_length] <= maxD2)) {
matrices->Q_c[referenceBPs1[my_iindx[1] - seq_length]][referenceBPs2[my_iindx[1] - seq_length] / 2] += 1.0 * scale[seq_length];
if (update_c) {
updatePosteriorBoundaries(referenceBPs1[my_iindx[1] - seq_length],
referenceBPs2[my_iindx[1] - seq_length],
&min_k_real,
&max_k_real,
&min_l_real,
&max_l_real
);
}
} else {
matrices->Q_c_rem += 1.0 * scale[seq_length];
}
adjustArrayBoundaries(&matrices->Q_c,
&matrices->k_min_Q_c,
&matrices->k_max_Q_c,
&matrices->l_min_Q_c,
&matrices->l_max_Q_c,
min_k_real,
max_k_real,
min_l_real,
max_l_real
);
}
/*
* ###################################################
* stochastic backtracking
* ###################################################
*/
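/* Sample one secondary structure from the 2D-partitioned Boltzmann ensemble of
 * the full-length sequence; a thin wrapper around vrna_pbacktrack5_TwoD().
 * Passing d1 == -1 samples from the "remainder" partition, i.e. all distance
 * classes outside the [0..maxD1] x [0..maxD2] grid.
 */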
PUBLIC char *
vrna_pbacktrack_TwoD(vrna_fold_compound_t *vc,
int d1,
int d2)
{
return vrna_pbacktrack5_TwoD(vc, d1, d2, vc->length);
}
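/* Stochastically backtrack a structure for the first 'length' nucleotides with
 * base-pair distances d1/d2 to the two reference structures. Circular RNAs are
 * delegated to pbacktrack_circ(); d1 == -1 selects the remainder partition
 * (distance classes beyond maxD1/maxD2).
 *
 * A minimal usage sketch, assuming the fold compound was created for two
 * references (e.g. with vrna_fold_compound_TwoD()) and the partition function
 * matrices were filled beforehand (e.g. via vrna_pf_TwoD()):
 *
 *   char *s = vrna_pbacktrack5_TwoD(vc, 3, 5, vc->length);
 *   if (s) { printf("%s\n", s); free(s); }
 */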
PUBLIC char *
vrna_pbacktrack5_TwoD(vrna_fold_compound_t *vc,
int d1,
int d2,
unsigned int length)
{
char *pstruc, *ptype;
short *S1;
unsigned int i, j, n, start, maxD1, maxD2, da, db,
*referenceBPs1, *referenceBPs2;
int *my_iindx, *jindx, ij, cnt1, cnt2, cnt3, cnt4, type,
**l_min_Q, **l_max_Q,
**l_min_Q_B, **l_max_Q_B,
*k_min_Q, *k_max_Q,
*k_min_Q_B, *k_max_Q_B, turn;
FLT_OR_DBL r, qt, *scale, ***Q, ***Q_B, *Q_rem, *Q_B_rem;
vrna_exp_param_t *pf_params;
vrna_md_t *md;
vrna_mx_pf_t *matrices;
n = vc->length;
pf_params = vc->exp_params;
md = &(pf_params->model_details);
matrices = vc->exp_matrices;
maxD1 = vc->maxD1;
maxD2 = vc->maxD2;
my_iindx = vc->iindx;
jindx = vc->jindx;
scale = matrices->scale;
ptype = vc->ptype;
S1 = vc->sequence_encoding;
referenceBPs1 = vc->referenceBPs1;
referenceBPs2 = vc->referenceBPs2;
turn = pf_params->model_details.min_loop_size;
Q = matrices->Q;
l_min_Q = matrices->l_min_Q;
l_max_Q = matrices->l_max_Q;
k_min_Q = matrices->k_min_Q;
k_max_Q = matrices->k_max_Q;
Q_B = matrices->Q_B;
l_min_Q_B = matrices->l_min_Q_B;
l_max_Q_B = matrices->l_max_Q_B;
k_min_Q_B = matrices->k_min_Q_B;
k_max_Q_B = matrices->k_max_Q_B;
Q_rem = matrices->Q_rem;
Q_B_rem = matrices->Q_B_rem;
cnt1 = cnt2 = cnt3 = cnt4 = -1;
if (md->circ) {
if (n != length)
vrna_message_error("vrna_pbacktrack_TwoD@2Dfold.c: cotranscriptional backtracking for circular RNAs not supported!");
return pbacktrack_circ(vc, d1, d2);
}
if (length > n)
vrna_message_error("vrna_pbacktrack_TwoD@2Dpfold.c: requested transcript length exceeds sequence length!");
#if 0
if (d1 > maxD1)
vrna_message_error("pbacktrack@2Dpfold.c: distance to 1st reference structure to high!");
if (d2 > maxD2)
vrna_message_error("pbacktrack@2Dpfold.c: distance to 2nd reference structure to high!");
#endif
/* check whether the chosen neighborhood exists at all */
int dumb = 1;
ij = my_iindx[1] - length;
if ((d1 == -1) && (Q_rem[ij] != 0.)) {
dumb = 0;
} else {
if ((k_min_Q[ij] <= d1) && (k_max_Q[ij] >= d1)) {
int l_min = l_min_Q[ij][d1];
if ((d2 % 2) == (l_min % 2))
if ((l_min <= d2) && (l_max_Q[ij][d1] >= d2))
dumb = 0;
}
}
if (dumb) {
vrna_message_error("neighborhood %d:%d is not in scope of calculated partition function!\n"
"pbacktrack@2Dpfold.c: exiting...",
d1, d2);
}
pstruc = vrna_alloc((length + 1) * sizeof(char));
for (i = 0; i < length; i++)
pstruc[i] = '.';
pstruc[i] = '\0';
start = 1;
while (start < length) {
int sn = my_iindx[start] - length;
/* find i position of first pair */
FLT_OR_DBL qln_i = 0, qln_i1 = 0;
if (d1 == -1) {
qln_i = Q_rem[sn];
/* open chain ? */
if ((maxD1 > referenceBPs1[sn])
&& (maxD2 > referenceBPs2[sn])) {
r = vrna_urn() * qln_i;
if (scale[length - start + 1] > r)
return pstruc;
}
/* let's see whether we can find a base pair involving i */
for (i = start; i < length; i++) {
r = vrna_urn() * qln_i;
qln_i1 = Q_rem[my_iindx[i + 1] - length];
da = referenceBPs1[sn] - referenceBPs1[my_iindx[i + 1] - length];
db = referenceBPs2[sn] - referenceBPs2[my_iindx[i + 1] - length];
for (cnt1 = k_min_Q[my_iindx[i + 1] - length];
cnt1 <= k_max_Q[my_iindx[i + 1] - length];
cnt1++)
for (cnt2 = l_min_Q[my_iindx[i + 1] - length][cnt1];
cnt2 <= l_max_Q[my_iindx[i + 1] - length][cnt1];
cnt2 += 2)
if (((cnt1 + da) > maxD1) || ((cnt2 + db) > maxD2))
qln_i1 += Q[my_iindx[i + 1] - length][cnt1][cnt2 / 2];
if (r > qln_i1 * scale[1])
break;
qln_i = qln_i1;
}
if (i >= length)
break; /* no more pairs */
/* i is paired, find pairing partner j */
r = vrna_urn() * (qln_i - qln_i1 * scale[1]);
for (qt = 0, j = i + turn + 1; j < length; j++) {
ij = my_iindx[i] - j;
type = ptype[jindx[j] + i];
if (type) {
cnt1 = cnt2 = cnt3 = cnt4 = -1;
double qkl = vrna_exp_E_ext_stem(type, (i > 1) ? S1[i - 1] : -1, S1[j + 1], pf_params);
if (Q_B_rem[ij] != 0.) {
if (Q_rem[my_iindx[j + 1] - length] != 0.) {
qt += qkl * Q_B_rem[ij] * Q_rem[my_iindx[j + 1] - length];
if (qt >= r)
goto pbacktrack_ext_loop_early_escape_rem;
}
if (Q[my_iindx[j + 1] - length]) {
for (cnt3 = k_min_Q[my_iindx[j + 1] - length];
cnt3 <= k_max_Q[my_iindx[j + 1] - length];
cnt3++)
for (cnt4 = l_min_Q[my_iindx[j + 1] - length][cnt3];
cnt4 <= l_max_Q[my_iindx[j + 1] - length][cnt3];
cnt4 += 2) {
qt += qkl * Q_B_rem[ij] * Q[my_iindx[j + 1] - length][cnt3][cnt4 / 2];
if (qt >= r)
goto pbacktrack_ext_loop_early_escape_rem;
}
}
}
if (Q_rem[my_iindx[j + 1] - length] != 0.) {
cnt3 = cnt4 = -1;
if (Q_B[ij]) {
for (cnt1 = k_min_Q_B[ij];
cnt1 <= k_max_Q_B[ij];
cnt1++)
for (cnt2 = l_min_Q_B[ij][cnt1];
cnt2 <= l_max_Q_B[ij][cnt1];
cnt2 += 2) {
qt += qkl * Q_B[ij][cnt1][cnt2 / 2] * Q_rem[my_iindx[j + 1] - length];
if (qt >= r)
goto pbacktrack_ext_loop_early_escape_rem;
}
}
}
/* if we are still searching for the pairing partner j, we go on here... */
if (Q_B[ij] && Q[my_iindx[j + 1] - length]) {
da = referenceBPs1[sn] - referenceBPs1[ij] - referenceBPs1[my_iindx[j + 1] - length];
db = referenceBPs2[sn] - referenceBPs2[ij] - referenceBPs2[my_iindx[j + 1] - length];
for (cnt1 = k_min_Q_B[ij];
cnt1 <= k_max_Q_B[ij];
cnt1++)
for (cnt2 = l_min_Q_B[ij][cnt1];
cnt2 <= l_max_Q_B[ij][cnt1];
cnt2 += 2)
for (cnt3 = k_min_Q[my_iindx[j + 1] - length];
cnt3 <= k_max_Q[my_iindx[j + 1] - length];
cnt3++)
for (cnt4 = l_min_Q[my_iindx[j + 1] - length][cnt3];
cnt4 <= l_max_Q[my_iindx[j + 1] - length][cnt3];
cnt4 += 2)
if (((cnt1 + cnt3 + da) > maxD1) || ((cnt2 + cnt4 + db) > maxD2)) {
qt += qkl * Q_B[ij][cnt1][cnt2 / 2] * Q[my_iindx[j + 1] - length][cnt3][cnt4 / 2];
if (qt >= r)
goto pbacktrack_ext_loop_early_escape_rem;
}
}
} /* end if(type) */
} /* end for(j) */
cnt1 = cnt2 = cnt3 = cnt4 = -1;
/* don't forget the case where i pairs with n */
j = length;
ij = my_iindx[i] - j;
type = ptype[jindx[j] + i];
if (type) {
double qkl = vrna_exp_E_ext_stem(type, (i > 1) ? S1[i - 1] : -1, (j < n) ? S1[j + 1] : -1, pf_params);
if (Q_B_rem[ij] != 0.) {
qt += qkl * Q_B_rem[ij];
if (qt >= r)
goto pbacktrack_ext_loop_early_escape_rem;
}
/* if we are still searching for the pairing partner j, we go on here... */
if (Q_B[ij]) {
da = referenceBPs1[sn] - referenceBPs1[ij];
db = referenceBPs2[sn] - referenceBPs2[ij];
for (cnt1 = k_min_Q_B[ij];
cnt1 <= k_max_Q_B[ij];
cnt1++)
for (cnt2 = l_min_Q_B[ij][cnt1];
cnt2 <= l_max_Q_B[ij][cnt1];
cnt2 += 2)
if (((cnt1 + da) > maxD1) || ((cnt2 + db) > maxD2)) {
qt += qkl * Q_B[ij][cnt1][cnt2 / 2];
if (qt >= r)
goto pbacktrack_ext_loop_early_escape_rem;
}
}
} /* end if(type) */
j++;
pbacktrack_ext_loop_early_escape_rem:
if (j == length + 1)
vrna_message_error("pbacktrack@2Dpfold.c: backtracking failed in ext loop (rem)");
/* finally start backtracking the first exterior stem */
backtrack(vc, pstruc, cnt1, cnt2, i, j);
if (j == length)
break;
start = j + 1;
d1 = cnt3;
d2 = cnt4;
} /* end if d1 ==-1 */
else {
qln_i = Q[sn][d1][d2 / 2];
/* open chain ? */
if ((d1 == referenceBPs1[sn])
&& (d2 == referenceBPs2[sn])) {
r = vrna_urn() * qln_i;
if (scale[length - start + 1] > r)
return pstruc;
}
for (i = start; i < length; i++) {
r = vrna_urn() * qln_i;
da = referenceBPs1[sn] - referenceBPs1[my_iindx[i + 1] - length];
db = referenceBPs2[sn] - referenceBPs2[my_iindx[i + 1] - length];
qln_i1 = 0;
if (d1 >= da && d2 >= db) {
if (
(d1 - da >= k_min_Q[my_iindx[i + 1] - length])
&& (d1 - da <= k_max_Q[my_iindx[i + 1] - length])) {
if (
(d2 - db >= l_min_Q[my_iindx[i + 1] - length][d1 - da])
&& (d2 - db <= l_max_Q[my_iindx[i + 1] - length][d1 - da]))
qln_i1 += Q[my_iindx[i + 1] - length][d1 - da][(d2 - db) / 2];
}
}
if (r > qln_i1 * scale[1])
break; /* i is paired */
qln_i = qln_i1;
}
if (i >= length)
break; /* no more pairs */
/* now find the pairing partner j */
r = vrna_urn() * (qln_i - qln_i1 * scale[1]);
for (qt = 0, j = i + 1; j < length; j++) {
int type;
ij = my_iindx[i] - j;
type = ptype[jindx[j] + i];
if (type) {
double qkl = 1.0;
qkl *= vrna_exp_E_ext_stem(type, (i > 1) ? S1[i - 1] : -1, S1[j + 1], pf_params);
da = referenceBPs1[sn] - referenceBPs1[ij] - referenceBPs1[my_iindx[j + 1] - length];
db = referenceBPs2[sn] - referenceBPs2[ij] - referenceBPs2[my_iindx[j + 1] - length];
if ((d1 >= da)
&& (d2 >= db)
&& Q_B[ij]
&& Q[my_iindx[j + 1] - length]) {
for (cnt1 = k_min_Q_B[ij];
cnt1 <= MIN2(k_max_Q_B[ij], d1 - da);
cnt1++)
for (cnt2 = l_min_Q_B[ij][cnt1];
cnt2 <= MIN2(l_max_Q_B[ij][cnt1], d2 - db);
cnt2 += 2)
if ((d1 - da - cnt1 >= k_min_Q[my_iindx[j + 1] - length])
&& (d1 - da - cnt1 <= k_max_Q[my_iindx[j + 1] - length])) {
if ((d2 - db - cnt2 >= l_min_Q[my_iindx[j + 1] - length][d1 - da - cnt1])
&& (d2 - db - cnt2 <= l_max_Q[my_iindx[j + 1] - length][d1 - da - cnt1])) {
qt += qkl * Q_B[ij][cnt1][cnt2 / 2] * Q[my_iindx[j + 1] - length][d1 - da - cnt1][(d2 - db - cnt2) / 2];
if (qt >= r)
goto pbacktrack_ext_loop_early_escape;
}
}
}
}
}
/* now don't forget the case j == n */
j = length;
ij = my_iindx[i] - j;
int type = ptype[jindx[j] + i];
if (type) {
double qkl = 1.0;
qkl *= vrna_exp_E_ext_stem(type, (i > 1) ? S1[i - 1] : -1, (j < n) ? S1[j + 1] : -1, pf_params);
da = referenceBPs1[sn] - referenceBPs1[ij];
db = referenceBPs2[sn] - referenceBPs2[ij];
if (d1 >= da && d2 >= db) {
cnt1 = d1 - da;
cnt2 = d2 - db;
if ((cnt1 >= k_min_Q_B[ij]) && (cnt1 <= k_max_Q_B[ij])) {
if ((cnt2 >= l_min_Q_B[ij][cnt1]) && (cnt2 <= l_max_Q_B[ij][cnt1])) {
qt += qkl * Q_B[ij][cnt1][cnt2 / 2];
if (qt >= r)
goto pbacktrack_ext_loop_early_escape; /* j is paired */
}
}
}
}
j++;
pbacktrack_ext_loop_early_escape:
if (j == length + 1)
vrna_message_error("pbacktrack@2Dpfold.c: backtracking failed in ext loop");
backtrack(vc, pstruc, cnt1, cnt2, i, j);
if (j == length)
break;
start = j + 1;
d1 -= cnt1 + da;
d2 -= cnt2 + db;
} /* end if d1!=-1 */
}
return pstruc;
}
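/* Stochastic backtracking for circular RNAs: choose among the open chain and
 * the exterior hairpin (Q_cH), exterior interior (Q_cI), and exterior
 * multibranch (Q_cM) decompositions of the exterior loop, then descend into
 * the corresponding backtrack_qc* routine.
 */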
PRIVATE char *
pbacktrack_circ(vrna_fold_compound_t *vc,
int d1,
int d2)
{
char *pstruc;
unsigned int i, n, maxD1, maxD2,
*referenceBPs1, *referenceBPs2;
int *my_iindx,
k_min_Q_c, k_max_Q_c,
k_min_Q_cH, k_max_Q_cH,
k_min_Q_cI, k_max_Q_cI,
k_min_Q_cM, k_max_Q_cM,
*l_min_Q_c, *l_max_Q_c,
*l_min_Q_cH, *l_max_Q_cH,
*l_min_Q_cI, *l_max_Q_cI,
*l_min_Q_cM, *l_max_Q_cM;
FLT_OR_DBL r, *scale, qot,
**Q_c, **Q_cH, **Q_cI, **Q_cM,
Q_c_rem, Q_cH_rem, Q_cI_rem, Q_cM_rem;
vrna_mx_pf_t *matrices;
matrices = vc->exp_matrices;
n = vc->length;
maxD1 = vc->maxD1;
maxD2 = vc->maxD2;
my_iindx = vc->iindx;
scale = matrices->scale;
referenceBPs1 = vc->referenceBPs1;
referenceBPs2 = vc->referenceBPs2;
Q_c = matrices->Q_c;
l_min_Q_c = matrices->l_min_Q_c;
l_max_Q_c = matrices->l_max_Q_c;
k_min_Q_c = matrices->k_min_Q_c;
k_max_Q_c = matrices->k_max_Q_c;
Q_cH = matrices->Q_cH;
l_min_Q_cH = matrices->l_min_Q_cH;
l_max_Q_cH = matrices->l_max_Q_cH;
k_min_Q_cH = matrices->k_min_Q_cH;
k_max_Q_cH = matrices->k_max_Q_cH;
Q_cI = matrices->Q_cI;
l_min_Q_cI = matrices->l_min_Q_cI;
l_max_Q_cI = matrices->l_max_Q_cI;
k_min_Q_cI = matrices->k_min_Q_cI;
k_max_Q_cI = matrices->k_max_Q_cI;
Q_cM = matrices->Q_cM;
l_min_Q_cM = matrices->l_min_Q_cM;
l_max_Q_cM = matrices->l_max_Q_cM;
k_min_Q_cM = matrices->k_min_Q_cM;
k_max_Q_cM = matrices->k_max_Q_cM;
Q_c_rem = matrices->Q_c_rem;
Q_cH_rem = matrices->Q_cH_rem;
Q_cI_rem = matrices->Q_cI_rem;
Q_cM_rem = matrices->Q_cM_rem;
/* check whether the chosen neighborhood exists at all */
int dumb = 1;
if ((d1 == -1) && (Q_c_rem != 0.)) {
dumb = 0;
} else {
if ((k_min_Q_c <= d1) && (k_max_Q_c >= d1)) {
int l_min = l_min_Q_c[d1];
if ((d2 % 2) == (l_min % 2))
if ((l_min <= d2) && (l_max_Q_c[d1] >= d2))
dumb = 0;
}
}
if (dumb) {
vrna_message_error("neighborhood %d:%d is not in scope of calculated partition function!\n"
"pbacktrack_circ@2Dpfold.c: exiting cheerless...",
d1, d2);
}
pstruc = vrna_alloc((n + 1) * sizeof(char));
for (i = 0; i < n; i++)
pstruc[i] = '.';
pstruc[i] = '\0';
/* now we come to the actual backtracking process */
qot = 0.;
/* backtrack in rest-partition */
if (d1 == -1) {
r = vrna_urn() * Q_c_rem;
/* open chain ? */
if ((referenceBPs1[my_iindx[1] - n] > maxD1) || (referenceBPs2[my_iindx[1] - n] > maxD2)) {
qot = 1.0 * scale[n];
if (qot >= r)
goto pbacktrack_circ_escape;
}
qot += Q_cH_rem;
if (qot >= r) {
backtrack_qcH(vc, pstruc, d1, d2);
goto pbacktrack_circ_escape;
}
qot += Q_cI_rem;
if (qot >= r) {
backtrack_qcI(vc, pstruc, d1, d2);
goto pbacktrack_circ_escape;
}
qot += Q_cM_rem;
if (qot >= r) {
backtrack_qcM(vc, pstruc, d1, d2);
goto pbacktrack_circ_escape;
}
vrna_message_error("pbacktrack_circ@2Dpfold.c: backtracking failed in exterior loop! Exiting cheerless...");
}
/* normal backtracking */
else {
r = vrna_urn() * Q_c[d1][d2 / 2];
/* open chain ? */
if ((referenceBPs1[my_iindx[1] - n] == d1) && (referenceBPs2[my_iindx[1] - n] == d2)) {
qot += 1.0 * scale[n];
if (qot >= r)
goto pbacktrack_circ_escape;
}
/* exterior hairpin loop ? */
if ((k_min_Q_cH <= d1) && (k_max_Q_cH >= d1)) {
int l_min = l_min_Q_cH[d1];
if ((d2 % 2) == (l_min % 2)) {
if ((l_min <= d2) && (l_max_Q_cH[d1] >= d2)) {
qot += Q_cH[d1][d2 / 2];
if (qot >= r) {
backtrack_qcH(vc, pstruc, d1, d2);
goto pbacktrack_circ_escape;
}
}
}
}
/* exterior interior loop ? */
if ((k_min_Q_cI <= d1) && (k_max_Q_cI >= d1)) {
int l_min = l_min_Q_cI[d1];
if ((d2 % 2) == (l_min % 2)) {
if ((l_min <= d2) && (l_max_Q_cI[d1] >= d2)) {
qot += Q_cI[d1][d2 / 2];
if (qot >= r) {
backtrack_qcI(vc, pstruc, d1, d2);
goto pbacktrack_circ_escape;
}
}
}
}
/* exterior multibranch loop ? */
if ((k_min_Q_cM <= d1) && (k_max_Q_cM >= d1)) {
int l_min = l_min_Q_cM[d1];
if ((d2 % 2) == (l_min % 2)) {
if ((l_min <= d2) && (l_max_Q_cM[d1] >= d2)) {
qot += Q_cM[d1][d2 / 2];
if (qot >= r) {
backtrack_qcM(vc, pstruc, d1, d2);
goto pbacktrack_circ_escape;
}
}
}
}
}
pbacktrack_circ_escape:
return pstruc;
}
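/* Backtrack an exterior hairpin loop of a circular RNA: enumerate all closing
 * pairs (i,j), weight them with the hairpin energy of the segment spanning the
 * sequence ends, and recurse into backtrack() for the enclosed substem.
 */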
PRIVATE void
backtrack_qcH(vrna_fold_compound_t *vc,
char *pstruc,
int d1,
int d2)
{
char *ptype, *sequence;
short *S1;
unsigned int i, j, n, maxD1, maxD2,
base_d1, base_d2, da, db,
*referenceBPs1, *referenceBPs2;
int u, *my_iindx, *jindx, ij, cnt1, cnt2, type,
**l_min_Q_B, **l_max_Q_B,
*k_min_Q_B, *k_max_Q_B, *rtype, turn;
FLT_OR_DBL r, qt, *scale, qot,
***Q_B, **Q_cH, *Q_B_rem,
Q_cH_rem;
vrna_exp_param_t *pf_params;
vrna_md_t *md;
vrna_mx_pf_t *matrices;
pf_params = vc->exp_params;
md = &(pf_params->model_details);
matrices = vc->exp_matrices;
sequence = vc->sequence;
n = vc->length;
my_iindx = vc->iindx;
jindx = vc->jindx;
scale = matrices->scale;
ptype = vc->ptype;
rtype = &(md->rtype[0]);
S1 = vc->sequence_encoding;
referenceBPs1 = vc->referenceBPs1;
referenceBPs2 = vc->referenceBPs2;
maxD1 = vc->maxD1;
maxD2 = vc->maxD2;
turn = md->min_loop_size;
Q_B_rem = matrices->Q_B_rem;
Q_B = matrices->Q_B;
l_min_Q_B = matrices->l_min_Q_B;
l_max_Q_B = matrices->l_max_Q_B;
k_min_Q_B = matrices->k_min_Q_B;
k_max_Q_B = matrices->k_max_Q_B;
Q_cH_rem = matrices->Q_cH_rem;
Q_cH = matrices->Q_cH;
qot = qt = 0.;
base_d1 = referenceBPs1[my_iindx[1] - n];
base_d2 = referenceBPs2[my_iindx[1] - n];
if (d1 == -1) {
r = vrna_urn() * Q_cH_rem;
for (i = 1; i < n; i++)
for (j = i + turn + 1; j <= n; j++) {
char loopseq[10];
ij = my_iindx[i] - j;
u = n - j + i - 1;
if (u < turn)
continue;
type = ptype[jindx[j] + i];
if (!type)
continue;
if (((type == 3) || (type == 4)) && no_closingGU)
continue;
type = rtype[type];
if (u < 7) {
strcpy(loopseq, sequence + j - 1);
strncat(loopseq, sequence, i);
}
qt = exp_E_Hairpin(u, type,
S1[j + 1], S1[i - 1],
loopseq, pf_params)
* scale[u];
if (Q_B_rem[ij]) {
qot += Q_B_rem[ij] * qt;
if (qot >= r) {
backtrack(vc, pstruc, d1, d2, i, j);
return;
}
}
da = base_d1 - referenceBPs1[ij];
db = base_d2 - referenceBPs2[ij];
if (Q_B[ij]) {
for (cnt1 = k_min_Q_B[ij];
cnt1 <= k_max_Q_B[ij];
cnt1++)
for (cnt2 = l_min_Q_B[ij][cnt1];
cnt2 <= l_max_Q_B[ij][cnt1];
cnt2 += 2) {
if (((cnt1 + da) > maxD1)
|| ((cnt2 + db) > maxD2)) {
qot += Q_B[ij][cnt1][cnt2 / 2] * qt;
if (qot >= r) {
backtrack(vc, pstruc, cnt1, cnt2, i, j);
return;
}
}
}
}
}
} else {
r = vrna_urn() * Q_cH[d1][d2 / 2];
for (i = 1; i < n; i++)
for (j = i + turn + 1; j <= n; j++) {
char loopseq[10];
ij = my_iindx[i] - j;
if (!Q_B[ij])
continue;
u = n - j + i - 1;
if (u < turn)
continue;
type = ptype[jindx[j] + i];
if (!type)
continue;
if (((type == 3) || (type == 4)) && no_closingGU)
continue;
type = rtype[type];
if (u < 7) {
strcpy(loopseq, sequence + j - 1);
strncat(loopseq, sequence, i);
}
qt = exp_E_Hairpin(u, type,
S1[j + 1], S1[i - 1],
loopseq, pf_params)
* scale[u];
da = base_d1 - referenceBPs1[ij];
db = base_d2 - referenceBPs2[ij];
for (cnt1 = k_min_Q_B[ij];
cnt1 <= k_max_Q_B[ij];
cnt1++)
for (cnt2 = l_min_Q_B[ij][cnt1];
cnt2 <= l_max_Q_B[ij][cnt1];
cnt2 += 2) {
if (((cnt1 + da) == d1)
&& ((cnt2 + db) == d2)) {
qot += Q_B[ij][cnt1][cnt2 / 2] * qt;
if (qot >= r) {
backtrack(vc, pstruc, cnt1, cnt2, i, j);
return;
}
}
}
}
}
vrna_message_error("backtrack_qcH@2Dpfold.c: failed to find closing pair!");
}
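/* Backtrack an exterior interior loop of a circular RNA: pick the two closing
 * pairs (i,j) and (p,q) whose combined interior-loop energy and distance
 * classes reproduce the sampled contribution, then backtrack both substems.
 */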
PRIVATE void
backtrack_qcI(vrna_fold_compound_t *vc,
char *pstruc,
int d1,
int d2)
{
char *ptype;
short *S1;
unsigned int i, j, ij, p, q, pq, n, maxD1, maxD2,
base_d1, base_d2, da, db,
*referenceBPs1, *referenceBPs2;
int *my_iindx, *jindx, cnt1, cnt2, cnt3, cnt4, type,
**l_min_Q_B, **l_max_Q_B,
*k_min_Q_B, *k_max_Q_B, *rtype, turn;
FLT_OR_DBL r, qt, *scale, qot,
***Q_B, *Q_B_rem,
**Q_cI, Q_cI_rem;
vrna_exp_param_t *pf_params;
vrna_md_t *md;
vrna_mx_pf_t *matrices;
pf_params = vc->exp_params;
md = &(pf_params->model_details);
matrices = vc->exp_matrices;
n = vc->length;
my_iindx = vc->iindx;
jindx = vc->jindx;
scale = matrices->scale;
ptype = vc->ptype;
rtype = &(md->rtype[0]);
S1 = vc->sequence_encoding;
referenceBPs1 = vc->referenceBPs1;
referenceBPs2 = vc->referenceBPs2;
maxD1 = vc->maxD1;
maxD2 = vc->maxD2;
turn = md->min_loop_size;
Q_B = matrices->Q_B;
l_min_Q_B = matrices->l_min_Q_B;
l_max_Q_B = matrices->l_max_Q_B;
k_min_Q_B = matrices->k_min_Q_B;
k_max_Q_B = matrices->k_max_Q_B;
Q_cI = matrices->Q_cI;
Q_B_rem = matrices->Q_B_rem;
Q_cI_rem = matrices->Q_cI_rem;
qot = qt = 0.;
base_d1 = referenceBPs1[my_iindx[1] - n];
base_d2 = referenceBPs2[my_iindx[1] - n];
if (d1 == -1) {
r = vrna_urn() * Q_cI_rem;
for (i = 1; i < n; i++)
for (j = i + turn + 1; j <= n; j++) {
ij = my_iindx[i] - j;
type = rtype[(unsigned int)ptype[jindx[j] + i]];
if (!type)
continue;
if (Q_B_rem[ij]) {
for (p = j + 1; p < n; p++) {
unsigned int ln1, qstart, ln_pre;
ln1 = p - j - 1;
if (ln1 + i - 1 > MAXLOOP)
break;
qstart = p + turn + 1;
ln_pre = ln1 + i + n;
if (ln_pre > qstart + MAXLOOP)
qstart = ln_pre - MAXLOOP - 1;
for (q = qstart; q <= n; q++) {
unsigned int ln2;
int type2;
pq = my_iindx[p] - q;
ln2 = (i - 1) + (n - q);
if ((ln1 + ln2) > MAXLOOP)
continue;
type2 = ptype[jindx[q] + p];
if (!type2)
continue;
qt = exp_E_IntLoop(ln2, ln1,
rtype[type2], type,
S1[q + 1], S1[p - 1],
S1[i - 1], S1[j + 1],
pf_params)
* scale[ln1 + ln2];
if (Q_B_rem[pq]) {
qot += Q_B_rem[ij] * Q_B_rem[pq] * qt;
if (qot > r) {
backtrack(vc, pstruc, d1, d2, i, j);
backtrack(vc, pstruc, d1, d2, p, q);
return;
}
}
if (Q_B[pq]) {
for (cnt1 = k_min_Q_B[pq];
cnt1 <= k_max_Q_B[pq];
cnt1++)
for (cnt2 = l_min_Q_B[pq][cnt1];
cnt2 <= l_max_Q_B[pq][cnt1];
cnt2 += 2) {
qot += Q_B_rem[ij] * Q_B[pq][cnt1][cnt2 / 2] * qt;
if (qot > r) {
backtrack(vc, pstruc, d1, d2, i, j);
backtrack(vc, pstruc, cnt1, cnt2, p, q);
return;
}
}
}
}
}
}
if (Q_B[ij]) {
for (p = j + 1; p < n; p++) {
unsigned int ln1, qstart, ln_pre;
ln1 = p - j - 1;
if (ln1 + i - 1 > MAXLOOP)
break;
qstart = p + turn + 1;
ln_pre = ln1 + i + n;
if (ln_pre > qstart + MAXLOOP)
qstart = ln_pre - MAXLOOP - 1;
for (q = qstart; q <= n; q++) {
unsigned int ln2;
int type2;
pq = my_iindx[p] - q;
ln2 = (i - 1) + (n - q);
if ((ln1 + ln2) > MAXLOOP)
continue;
type2 = ptype[jindx[q] + p];
if (!type2)
continue;
qt = exp_E_IntLoop(ln2, ln1,
rtype[type2], type,
S1[q + 1], S1[p - 1],
S1[i - 1], S1[j + 1],
pf_params)
* scale[ln1 + ln2];
if (Q_B_rem[pq]) {
for (cnt1 = k_min_Q_B[ij];
cnt1 <= k_max_Q_B[ij];
cnt1++)
for (cnt2 = l_min_Q_B[ij][cnt1];
cnt2 <= l_max_Q_B[ij][cnt1];
cnt2 += 2) {
qot += Q_B[ij][cnt1][cnt2 / 2] * Q_B_rem[pq] * qt;
if (qot > r) {
backtrack(vc, pstruc, cnt1, cnt2, i, j);
backtrack(vc, pstruc, d1, d2, p, q);
return;
}
}
}
if (Q_B[pq]) {
da = base_d1
- referenceBPs1[ij]
- referenceBPs1[pq];
db = base_d2
- referenceBPs2[ij]
- referenceBPs2[pq];
for (cnt1 = k_min_Q_B[ij];
cnt1 <= k_max_Q_B[ij];
cnt1++)
for (cnt2 = l_min_Q_B[ij][cnt1];
cnt2 <= l_max_Q_B[ij][cnt1];
cnt2 += 2)
for (cnt3 = k_min_Q_B[pq];
cnt3 <= k_max_Q_B[pq];
cnt3++)
for (cnt4 = l_min_Q_B[pq][cnt3];
cnt4 <= l_max_Q_B[pq][cnt3];
cnt4 += 2) {
if (((cnt1 + cnt3 + da) > maxD1)
|| ((cnt2 + cnt4 + db) > maxD2)) {
qot += Q_B[ij][cnt1][cnt2 / 2]
* Q_B[pq][cnt3][cnt4 / 2]
* qt;
if (qot > r) {
backtrack(vc, pstruc, cnt1, cnt2, i, j);
backtrack(vc, pstruc, cnt3, cnt4, p, q);
return;
}
}
}
}
}
}
}
}
} else {
r = vrna_urn() * Q_cI[d1][d2 / 2];
for (i = 1; i < n; i++)
for (j = i + turn + 1; j <= n; j++) {
ij = my_iindx[i] - j;
type = rtype[(unsigned int)ptype[jindx[j] + i]];
if (!type)
continue;
if (!Q_B[ij])
continue;
for (p = j + 1; p < n; p++) {
unsigned int ln1, qstart, ln_pre;
ln1 = p - j - 1;
if (ln1 + i - 1 > MAXLOOP)
break;
qstart = p + turn + 1;
ln_pre = ln1 + i + n;
if (ln_pre > qstart + MAXLOOP)
qstart = ln_pre - MAXLOOP - 1;
for (q = qstart; q <= n; q++) {
unsigned int ln2;
int type2;
pq = my_iindx[p] - q;
if (!Q_B[pq])
continue;
ln2 = (i - 1) + (n - q);
if ((ln1 + ln2) > MAXLOOP)
continue;
type2 = ptype[jindx[q] + p];
if (!type2)
continue;
qt = exp_E_IntLoop(ln2, ln1,
rtype[type2], type,
S1[q + 1], S1[p - 1],
S1[i - 1], S1[j + 1],
pf_params)
* scale[ln1 + ln2];
da = base_d1
- referenceBPs1[ij]
- referenceBPs1[pq];
db = base_d2
- referenceBPs2[ij]
- referenceBPs2[pq];
for (cnt1 = k_min_Q_B[ij];
cnt1 <= k_max_Q_B[ij];
cnt1++)
for (cnt2 = l_min_Q_B[ij][cnt1];
cnt2 <= l_max_Q_B[ij][cnt1];
cnt2 += 2)
for (cnt3 = k_min_Q_B[pq];
cnt3 <= k_max_Q_B[pq];
cnt3++)
for (cnt4 = l_min_Q_B[pq][cnt3];
cnt4 <= l_max_Q_B[pq][cnt3];
cnt4 += 2) {
if (((cnt1 + cnt3 + da) == d1)
&& ((cnt2 + cnt4 + db) == d2)) {
qot += Q_B[ij][cnt1][cnt2 / 2]
* Q_B[pq][cnt3][cnt4 / 2]
* qt;
if (qot > r) {
backtrack(vc, pstruc, cnt1, cnt2, i, j);
backtrack(vc, pstruc, cnt3, cnt4, p, q);
return;
}
}
}
}
}
}
}
}
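/* Backtrack an exterior multibranch loop of a circular RNA: split the circle
 * at position k into a Q_M part [1,k] and a Q_M2 part [k+1,n] and recurse via
 * backtrack_qm() and backtrack_qm2().
 */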
PRIVATE void
backtrack_qcM(vrna_fold_compound_t *vc,
char *pstruc,
int d1,
int d2)
{
unsigned int k, n, maxD1, maxD2, base_d1, base_d2,
da, db, *referenceBPs1, *referenceBPs2;
int *my_iindx, cnt1, cnt2, cnt3, cnt4,
**l_min_Q_M, **l_max_Q_M,
**l_min_Q_M2, **l_max_Q_M2,
*k_min_Q_M, *k_max_Q_M,
*k_min_Q_M2, *k_max_Q_M2, turn;
FLT_OR_DBL r, qt, qot,
***Q_M, ***Q_M2, **Q_cM,
*Q_M_rem, *Q_M2_rem, Q_cM_rem;
vrna_exp_param_t *pf_params;
vrna_mx_pf_t *matrices;
pf_params = vc->exp_params;
matrices = vc->exp_matrices;
n = vc->length;
my_iindx = vc->iindx;
referenceBPs1 = vc->referenceBPs1;
referenceBPs2 = vc->referenceBPs2;
maxD1 = vc->maxD1;
maxD2 = vc->maxD2;
turn = pf_params->model_details.min_loop_size;
Q_cM = matrices->Q_cM;
Q_M = matrices->Q_M;
l_min_Q_M = matrices->l_min_Q_M;
l_max_Q_M = matrices->l_max_Q_M;
k_min_Q_M = matrices->k_min_Q_M;
k_max_Q_M = matrices->k_max_Q_M;
Q_M2 = matrices->Q_M2;
l_min_Q_M2 = matrices->l_min_Q_M2;
l_max_Q_M2 = matrices->l_max_Q_M2;
k_min_Q_M2 = matrices->k_min_Q_M2;
k_max_Q_M2 = matrices->k_max_Q_M2;
Q_cM_rem = matrices->Q_cM_rem;
Q_M_rem = matrices->Q_M_rem;
Q_M2_rem = matrices->Q_M2_rem;
base_d1 = referenceBPs1[my_iindx[1] - n];
base_d2 = referenceBPs2[my_iindx[1] - n];
qot = qt = 0.;
if (d1 == -1) {
r = vrna_urn() * Q_cM_rem;
for (k = turn + 2;
k < n - 2 * turn - 3;
k++) {
if (Q_M_rem[my_iindx[1] - k]) {
if (Q_M2[k + 1]) {
for (cnt1 = k_min_Q_M2[k + 1];
cnt1 <= k_max_Q_M2[k + 1];
cnt1++)
for (cnt2 = l_min_Q_M2[k + 1][cnt1];
cnt2 <= l_max_Q_M2[k + 1][cnt1];
cnt2 += 2) {
qot += Q_M_rem[my_iindx[1] - k]
* Q_M2[k + 1][cnt1][cnt2 / 2]
* pf_params->expMLclosing;
if (qot > r) {
backtrack_qm(vc, pstruc, d1, d2, 1, k);
backtrack_qm2(vc, pstruc, cnt1, cnt2, k + 1);
return;
}
}
}
if (Q_M2_rem[k + 1]) {
qot += Q_M_rem[my_iindx[1] - k]
* Q_M2_rem[k + 1]
* pf_params->expMLclosing;
if (qot > r) {
backtrack_qm(vc, pstruc, d1, d2, 1, k);
backtrack_qm2(vc, pstruc, d1, d2, k + 1);
return;
}
}
}
if (Q_M2_rem[k + 1]) {
if (Q_M[my_iindx[1] - k]) {
for (cnt1 = k_min_Q_M[my_iindx[1] - k];
cnt1 <= k_max_Q_M[my_iindx[1] - k];
cnt1++)
for (cnt2 = l_min_Q_M[my_iindx[1] - k][cnt1];
cnt2 <= l_max_Q_M[my_iindx[1] - k][cnt1];
cnt2 += 2) {
qot += Q_M[my_iindx[1] - k][cnt1][cnt2 / 2]
* Q_M2_rem[k + 1]
* pf_params->expMLclosing;
if (qot > r) {
backtrack_qm(vc, pstruc, cnt1, cnt2, 1, k);
backtrack_qm2(vc, pstruc, d1, d2, k + 1);
return;
}
}
}
}
da = base_d1
- referenceBPs1[my_iindx[1] - k]
- referenceBPs1[my_iindx[k + 1] - n];
db = base_d2
- referenceBPs2[my_iindx[1] - k]
- referenceBPs2[my_iindx[k + 1] - n];
if (Q_M[my_iindx[1] - k]
&& Q_M2[k + 1]) {
for (cnt1 = k_min_Q_M[my_iindx[1] - k];
cnt1 <= k_max_Q_M[my_iindx[1] - k];
cnt1++)
for (cnt2 = l_min_Q_M[my_iindx[1] - k][cnt1];
cnt2 <= l_max_Q_M[my_iindx[1] - k][cnt1];
cnt2 += 2)
for (cnt3 = k_min_Q_M2[k + 1];
cnt3 <= k_max_Q_M2[k + 1];
cnt3++)
for (cnt4 = l_min_Q_M2[k + 1][cnt3];
cnt4 <= l_max_Q_M2[k + 1][cnt3];
cnt4 += 2) {
if (((cnt1 + cnt3 + da) > maxD1)
|| ((cnt2 + cnt4 + db) > maxD2)) {
qot += Q_M[my_iindx[1] - k][cnt1][cnt2 / 2]
* Q_M2[k + 1][cnt3][cnt4 / 2]
* pf_params->expMLclosing;
if (qot > r) {
backtrack_qm(vc, pstruc, cnt1, cnt2, 1, k);
backtrack_qm2(vc, pstruc, cnt3, cnt4, k + 1);
return;
}
}
}
}
}
} else {
r = vrna_urn() * Q_cM[d1][d2 / 2];
for (k = turn + 2;
k < n - 2 * turn - 3;
k++) {
da = base_d1
- referenceBPs1[my_iindx[1] - k]
- referenceBPs1[my_iindx[k + 1] - n];
db = base_d2
- referenceBPs2[my_iindx[1] - k]
- referenceBPs2[my_iindx[k + 1] - n];
if (Q_M[my_iindx[1] - k]
&& Q_M2[k + 1]) {
for (cnt1 = k_min_Q_M[my_iindx[1] - k];
cnt1 <= k_max_Q_M[my_iindx[1] - k];
cnt1++)
for (cnt2 = l_min_Q_M[my_iindx[1] - k][cnt1];
cnt2 <= l_max_Q_M[my_iindx[1] - k][cnt1];
cnt2 += 2)
for (cnt3 = k_min_Q_M2[k + 1];
cnt3 <= k_max_Q_M2[k + 1];
cnt3++)
for (cnt4 = l_min_Q_M2[k + 1][cnt3];
cnt4 <= l_max_Q_M2[k + 1][cnt3];
cnt4 += 2)
if (((cnt1 + cnt3 + da) == d1)
&& ((cnt2 + cnt4 + db) == d2)) {
qot += Q_M[my_iindx[1] - k][cnt1][cnt2 / 2]
* Q_M2[k + 1][cnt3][cnt4 / 2]
* pf_params->expMLclosing;
if (qot > r) {
backtrack_qm(vc, pstruc, cnt1, cnt2, 1, k);
backtrack_qm2(vc, pstruc, cnt3, cnt4, k + 1);
return;
}
}
}
}
}
vrna_message_error("backtrack_qcM@2Dpfold.c: backtracking failed");
}
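/* Backtrack Q_M2[k]: split [k,n] into two Q_M1 segments [k,l] and [l+1,n],
 * distributing the sampled distance classes between them.
 */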
PRIVATE void
backtrack_qm2(vrna_fold_compound_t *vc,
char *pstruc,
int d1,
int d2,
unsigned int k)
{
unsigned int l, n, maxD1, maxD2, da, db,
*referenceBPs1, *referenceBPs2;
int *my_iindx, *jindx, cnt1, cnt2, cnt3, cnt4,
*k_min_Q_M1, *k_max_Q_M1,
**l_min_Q_M1, **l_max_Q_M1, turn;
FLT_OR_DBL r, qt, qot,
***Q_M2, ***Q_M1,
*Q_M2_rem, *Q_M1_rem;
vrna_mx_pf_t *matrices;
matrices = vc->exp_matrices;
n = vc->length;
my_iindx = vc->iindx;
jindx = vc->jindx;
referenceBPs1 = vc->referenceBPs1;
referenceBPs2 = vc->referenceBPs2;
maxD1 = vc->maxD1;
maxD2 = vc->maxD2;
turn = vc->exp_params->model_details.min_loop_size;
Q_M1_rem = matrices->Q_M1_rem;
Q_M1 = matrices->Q_M1;
l_min_Q_M1 = matrices->l_min_Q_M1;
l_max_Q_M1 = matrices->l_max_Q_M1;
k_min_Q_M1 = matrices->k_min_Q_M1;
k_max_Q_M1 = matrices->k_max_Q_M1;
Q_M2_rem = matrices->Q_M2_rem;
Q_M2 = matrices->Q_M2;
qot = qt = 0.;
if (d1 == -1) {
r = vrna_urn() * Q_M2_rem[k];
for (l = k + turn + 1; l < n - turn - 1; l++) {
if (Q_M1_rem[jindx[l] + k]) {
if (Q_M1[jindx[n] + l + 1]) {
for (cnt1 = k_min_Q_M1[jindx[n] + l + 1];
cnt1 <= k_max_Q_M1[jindx[n] + l + 1];
cnt1++)
for (cnt2 = l_min_Q_M1[jindx[n] + l + 1][cnt1];
cnt2 <= l_max_Q_M1[jindx[n] + l + 1][cnt1];
cnt2 += 2) {
qot += Q_M1_rem[jindx[l] + k] * Q_M1[jindx[n] + l + 1][cnt1][cnt2 / 2];
if (qot > r) {
backtrack_qm1(vc, pstruc, d1, d2, k, l);
backtrack_qm1(vc, pstruc, cnt1, cnt2, l + 1, n);
return;
}
}
}
if (Q_M1_rem[jindx[n] + l + 1]) {
qot += Q_M1_rem[jindx[l] + k]
* Q_M1_rem[jindx[n] + l + 1];
if (qot > r) {
backtrack_qm1(vc, pstruc, d1, d2, k, l);
backtrack_qm1(vc, pstruc, d1, d2, l + 1, n);
return;
}
}
}
if (Q_M1_rem[jindx[n] + l + 1]) {
if (Q_M1[jindx[l] + k]) {
for (cnt1 = k_min_Q_M1[jindx[l] + k];
cnt1 <= k_max_Q_M1[jindx[l] + k];
cnt1++)
for (cnt2 = l_min_Q_M1[jindx[l] + k][cnt1];
cnt2 <= l_max_Q_M1[jindx[l] + k][cnt1];
cnt2 += 2) {
qot += Q_M1[jindx[l] + k][cnt1][cnt2 / 2]
* Q_M1_rem[jindx[n] + l + 1];
if (qot > r) {
backtrack_qm1(vc, pstruc, cnt1, cnt2, k, l);
backtrack_qm1(vc, pstruc, d1, d2, l + 1, n);
return;
}
}
}
}
if (!Q_M1[jindx[l] + k])
continue;
if (!Q_M1[jindx[n] + l + 1])
continue;
da = referenceBPs1[my_iindx[k] - n]
- referenceBPs1[my_iindx[k] - l]
- referenceBPs1[my_iindx[l + 1] - n];
db = referenceBPs2[my_iindx[k] - n]
- referenceBPs2[my_iindx[k] - l]
- referenceBPs2[my_iindx[l + 1] - n];
for (cnt1 = k_min_Q_M1[jindx[l] + k];
cnt1 <= k_max_Q_M1[jindx[l] + k];
cnt1++)
for (cnt2 = l_min_Q_M1[jindx[l] + k][cnt1];
cnt2 <= l_max_Q_M1[jindx[l] + k][cnt1];
cnt2 += 2) {
for (cnt3 = k_min_Q_M1[jindx[n] + l + 1];
cnt3 <= k_max_Q_M1[jindx[n] + l + 1];
cnt3++)
for (cnt4 = l_min_Q_M1[jindx[n] + l + 1][cnt3];
cnt4 <= l_max_Q_M1[jindx[n] + l + 1][cnt3];
cnt4 += 2) {
if (((cnt1 + cnt3 + da) > maxD1)
|| ((cnt2 + cnt4 + db) > maxD2)) {
qot += Q_M1[jindx[l] + k][cnt1][cnt2 / 2]
* Q_M1[jindx[n] + l + 1][cnt3][cnt4 / 2];
if (qot > r) {
backtrack_qm1(vc, pstruc, cnt1, cnt2, k, l);
backtrack_qm1(vc, pstruc, cnt3, cnt4, l + 1, n);
return;
}
}
}
}
}
} else {
r = vrna_urn() * Q_M2[k][d1][d2 / 2];
for (l = k + turn + 1; l < n - turn - 1; l++) {
if (!Q_M1[jindx[l] + k])
continue;
if (!Q_M1[jindx[n] + l + 1])
continue;
da = referenceBPs1[my_iindx[k] - n]
- referenceBPs1[my_iindx[k] - l]
- referenceBPs1[my_iindx[l + 1] - n];
db = referenceBPs2[my_iindx[k] - n]
- referenceBPs2[my_iindx[k] - l]
- referenceBPs2[my_iindx[l + 1] - n];
for (cnt1 = k_min_Q_M1[jindx[l] + k];
cnt1 <= k_max_Q_M1[jindx[l] + k];
cnt1++)
for (cnt2 = l_min_Q_M1[jindx[l] + k][cnt1];
cnt2 <= l_max_Q_M1[jindx[l] + k][cnt1];
cnt2 += 2) {
for (cnt3 = k_min_Q_M1[jindx[n] + l + 1];
cnt3 <= k_max_Q_M1[jindx[n] + l + 1];
cnt3++)
for (cnt4 = l_min_Q_M1[jindx[n] + l + 1][cnt3];
cnt4 <= l_max_Q_M1[jindx[n] + l + 1][cnt3];
cnt4 += 2) {
if (((cnt1 + cnt3 + da) == d1)
&& ((cnt2 + cnt4 + db) == d2)) {
qot += Q_M1[jindx[l] + k][cnt1][cnt2 / 2]
* Q_M1[jindx[n] + l + 1][cnt3][cnt4 / 2];
if (qot > r) {
backtrack_qm1(vc, pstruc, cnt1, cnt2, k, l);
backtrack_qm1(vc, pstruc, cnt3, cnt4, l + 1, n);
return;
}
}
}
}
}
}
vrna_message_error("backtrack_qm2@2Dpfold.c: backtracking failed");
}
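/* Backtrack the structure enclosed by pair (i,j) stored in Q_B: decide between
 * a hairpin, an interior loop (iterating over inner pairs (k,l)), or a
 * multiloop decomposition into Q_M and Q_M1 parts, updating d1/d2 by the
 * distance introduced at each step.
 */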
PRIVATE void
backtrack(vrna_fold_compound_t *vc,
char *pstruc,
int d1,
int d2,
unsigned int i,
unsigned int j)
{
FLT_OR_DBL *scale;
unsigned int maxD1, maxD2, base_d1, base_d2, da, db;
unsigned int *referenceBPs1, *referenceBPs2;
char *ptype, *sequence;
short *S1, *reference_pt1, *reference_pt2;
int *my_iindx, *jindx, ij, cnt1, cnt2, cnt3, cnt4, *rtype, turn;
vrna_exp_param_t *pf_params; /* holds all [unscaled] pf parameters */
vrna_md_t *md;
vrna_mx_pf_t *matrices;
pf_params = vc->exp_params;
md = &(pf_params->model_details);
matrices = vc->exp_matrices;
sequence = vc->sequence;
maxD1 = vc->maxD1;
maxD2 = vc->maxD2;
my_iindx = vc->iindx;
jindx = vc->jindx;
scale = matrices->scale;
ptype = vc->ptype;
rtype = &(md->rtype[0]);
S1 = vc->sequence_encoding;
reference_pt1 = vc->reference_pt1;
reference_pt2 = vc->reference_pt2;
referenceBPs1 = vc->referenceBPs1;
referenceBPs2 = vc->referenceBPs2;
turn = md->min_loop_size;
FLT_OR_DBL ***Q_B, ***Q_M, ***Q_M1, *Q_B_rem, *Q_M_rem, *Q_M1_rem;
int *k_min_Q_M, *k_max_Q_M, *k_min_Q_M1, *k_max_Q_M1, *k_min_Q_B, *k_max_Q_B;
int **l_min_Q_M, **l_max_Q_M, **l_min_Q_M1, **l_max_Q_M1, **l_min_Q_B, **l_max_Q_B;
Q_B = matrices->Q_B;
k_min_Q_B = matrices->k_min_Q_B;
k_max_Q_B = matrices->k_max_Q_B;
l_min_Q_B = matrices->l_min_Q_B;
l_max_Q_B = matrices->l_max_Q_B;
Q_M = matrices->Q_M;
k_min_Q_M = matrices->k_min_Q_M;
k_max_Q_M = matrices->k_max_Q_M;
l_min_Q_M = matrices->l_min_Q_M;
l_max_Q_M = matrices->l_max_Q_M;
Q_M1 = matrices->Q_M1;
k_min_Q_M1 = matrices->k_min_Q_M1;
k_max_Q_M1 = matrices->k_max_Q_M1;
l_min_Q_M1 = matrices->l_min_Q_M1;
l_max_Q_M1 = matrices->l_max_Q_M1;
Q_B_rem = matrices->Q_B_rem;
Q_M_rem = matrices->Q_M_rem;
Q_M1_rem = matrices->Q_M1_rem;
cnt1 = cnt2 = cnt3 = cnt4 = -1;
do {
double r, qbt1 = 0.;
unsigned int k, l, u, u1;
int type;
pstruc[i - 1] = '(';
pstruc[j - 1] = ')';
r = 0.;
ij = my_iindx[i] - j;
l = INF;
if (d1 == -1) {
r = vrna_urn() * Q_B_rem[ij];
if (r == 0.)
vrna_message_error("backtrack@2Dpfold.c: backtracking failed\n");
type = ptype[jindx[j] + i];
u = j - i - 1;
base_d1 = ((unsigned int)reference_pt1[i] != j) ? 1 : -1;
base_d2 = ((unsigned int)reference_pt2[i] != j) ? 1 : -1;
da = base_d1 + referenceBPs1[ij];
db = base_d2 + referenceBPs2[ij];
/* hairpin ? */
if ((da > maxD1) || (db > maxD2))
if (!(((type == 3) || (type == 4)) && no_closingGU))
qbt1 = exp_E_Hairpin(u, type, S1[i + 1], S1[j - 1], sequence + i - 1, pf_params) * scale[u + 2];
if (qbt1 >= r)
return; /* found the hairpin we're done */
/* lets see if we form an interior loop */
for (k = i + 1; k <= MIN2(i + MAXLOOP + 1, j - turn - 2); k++) {
unsigned int u_pre, lmin;
u1 = k - i - 1;
lmin = k + turn + 1;
u_pre = u1 + j;
/* lmin = MAX2(k + turn + 1, u1 + j - 1 - MAXLOOP) */
if (u_pre > lmin + MAXLOOP)
lmin = u_pre - 1 - MAXLOOP;
for (l = lmin; l < j; l++) {
int type_2;
type_2 = ptype[jindx[l] + k];
if (type_2) {
cnt1 = cnt2 = -1;
da = base_d1 + referenceBPs1[my_iindx[i] - j] - referenceBPs1[my_iindx[k] - l];
db = base_d2 + referenceBPs2[my_iindx[i] - j] - referenceBPs2[my_iindx[k] - l];
type_2 = rtype[type_2];
FLT_OR_DBL tmp_en = exp_E_IntLoop(u1, j - l - 1, type, type_2, S1[i + 1], S1[j - 1], S1[k - 1], S1[l + 1], pf_params) * scale[u1 + j - l + 1];
if (Q_B_rem[my_iindx[k] - l] != 0.) {
qbt1 += Q_B_rem[my_iindx[k] - l] * tmp_en;
if (qbt1 > r)
goto backtrack_int_early_escape_rem;
}
if (Q_B[my_iindx[k] - l]) {
for (cnt1 = k_min_Q_B[my_iindx[k] - l];
cnt1 <= k_max_Q_B[my_iindx[k] - l];
cnt1++)
for (cnt2 = l_min_Q_B[my_iindx[k] - l][cnt1];
cnt2 <= l_max_Q_B[my_iindx[k] - l][cnt1];
cnt2 += 2)
if (((cnt1 + da) > maxD1) || ((cnt2 + db) > maxD2)) {
qbt1 += Q_B[my_iindx[k] - l][cnt1][cnt2 / 2] * tmp_en;
if (qbt1 > r)
goto backtrack_int_early_escape_rem;
}
}
}
}
}
backtrack_int_early_escape_rem:
if (l < j) {
i = k;
j = l;
d1 = cnt1;
d2 = cnt2;
} else {
break;
}
} else {
if ((d1 >= k_min_Q_B[ij]) && (d1 <= k_max_Q_B[ij]))
if ((d2 >= l_min_Q_B[ij][d1]) && (d2 <= l_max_Q_B[ij][d1]))
r = vrna_urn() * Q_B[ij][d1][d2 / 2];
if (r == 0.)
vrna_message_error("backtrack@2Dpfold.c: backtracking failed\n");
type = ptype[jindx[j] + i];
u = j - i - 1;
base_d1 = ((unsigned int)reference_pt1[i] != j) ? 1 : -1;
base_d2 = ((unsigned int)reference_pt2[i] != j) ? 1 : -1;
da = base_d1 + referenceBPs1[ij];
db = base_d2 + referenceBPs2[ij];
/*hairpin contribution*/
if ((da == d1) && (db == d2))
if (!(((type == 3) || (type == 4)) && no_closingGU))
qbt1 = exp_E_Hairpin(u, type, S1[i + 1], S1[j - 1], sequence + i - 1, pf_params) * scale[u + 2];
if (qbt1 >= r)
return; /* found the hairpin we're done */
for (k = i + 1; k <= MIN2(i + MAXLOOP + 1, j - turn - 2); k++) {
unsigned int u_pre, lmin;
u1 = k - i - 1;
lmin = k + turn + 1;
u_pre = u1 + j;
/* lmin = MAX2(k + turn + 1, u1 + j - 1 - MAXLOOP) */
if (u_pre > lmin + MAXLOOP)
lmin = u_pre - 1 - MAXLOOP;
for (l = lmin; l < j; l++) {
int type_2;
type_2 = ptype[jindx[l] + k];
if (type_2) {
da = base_d1 + referenceBPs1[my_iindx[i] - j] - referenceBPs1[my_iindx[k] - l];
db = base_d2 + referenceBPs2[my_iindx[i] - j] - referenceBPs2[my_iindx[k] - l];
type_2 = rtype[type_2];
FLT_OR_DBL tmp_en = exp_E_IntLoop(u1, j - l - 1, type, type_2, S1[i + 1], S1[j - 1], S1[k - 1], S1[l + 1], pf_params) * scale[u1 + j - l + 1];
if (d1 >= da && d2 >= db) {
if ((d1 - da >= k_min_Q_B[my_iindx[k] - l]) && (d1 - da <= k_max_Q_B[my_iindx[k] - l])) {
if ((d2 - db >= l_min_Q_B[my_iindx[k] - l][d1 - da]) && (d2 - db <= l_max_Q_B[my_iindx[k] - l][d1 - da])) {
cnt1 = d1 - da;
cnt2 = d2 - db;
qbt1 += Q_B[my_iindx[k] - l][cnt1][cnt2 / 2] * tmp_en;
if (qbt1 > r)
goto backtrack_int_early_escape;
}
}
}
}
}
}
backtrack_int_early_escape:
if (l < j) {
i = k;
j = l;
d1 = cnt1;
d2 = cnt2;
} else {
break;
}
}
} while (1);
/* backtrack in multi-loop */
{
double r, qt;
unsigned int k, ii, jj;
base_d1 = ((unsigned int)reference_pt1[i] != j) ? 1 : -1;
base_d2 = ((unsigned int)reference_pt2[i] != j) ? 1 : -1;
base_d1 += referenceBPs1[my_iindx[i] - j];
base_d2 += referenceBPs2[my_iindx[i] - j];
i++;
j--;
/* find the first split index */
ii = my_iindx[i]; /* ii-j=[i,j] */
jj = jindx[j]; /* jj+i=[j,i] */
if (d1 == -1) {
/* get total contribution for current part */
for (qt = 0., k = i + 1; k < j; k++) {
if (Q_M_rem[ii - k + 1] != 0.) {
if (Q_M1[jj + k]) {
for (cnt1 = k_min_Q_M1[jj + k];
cnt1 <= k_max_Q_M1[jj + k];
cnt1++)
for (cnt2 = l_min_Q_M1[jj + k][cnt1];
cnt2 <= l_max_Q_M1[jj + k][cnt1];
cnt2 += 2)
qt += Q_M_rem[ii - k + 1] * Q_M1[jj + k][cnt1][cnt2 / 2];
}
if (Q_M1_rem[jj + k] != 0.)
qt += Q_M_rem[ii - k + 1] * Q_M1_rem[jj + k];
}
if (Q_M1_rem[jj + k] != 0.) {
if (Q_M[ii - k + 1]) {
for (cnt1 = k_min_Q_M[ii - k + 1];
cnt1 <= k_max_Q_M[ii - k + 1];
cnt1++)
for (cnt2 = l_min_Q_M[ii - k + 1][cnt1];
cnt2 <= l_max_Q_M[ii - k + 1][cnt1];
cnt2 += 2)
qt += Q_M[ii - k + 1][cnt1][cnt2 / 2] * Q_M1_rem[jj + k];
}
}
/* calculate introduced distance to reference structures */
if (!Q_M[ii - k + 1])
continue;
if (!Q_M1[jj + k])
continue;
da = base_d1 - referenceBPs1[my_iindx[i] - k + 1] - referenceBPs1[my_iindx[k] - j];
db = base_d2 - referenceBPs2[my_iindx[i] - k + 1] - referenceBPs2[my_iindx[k] - j];
/* collect all contributing energies */
for (cnt1 = k_min_Q_M[ii - k + 1];
cnt1 <= k_max_Q_M[ii - k + 1];
cnt1++)
for (cnt2 = l_min_Q_M[ii - k + 1][cnt1];
cnt2 <= l_max_Q_M[ii - k + 1][cnt1];
cnt2 += 2)
for (cnt3 = k_min_Q_M1[jj + k];
cnt3 <= k_max_Q_M1[jj + k];
cnt3++)
for (cnt4 = l_min_Q_M1[jj + k][cnt3];
cnt4 <= l_max_Q_M1[jj + k][cnt3];
cnt4 += 2)
if (((cnt1 + cnt3 + da) > maxD1) || ((cnt2 + cnt4 + db) > maxD2))
qt += Q_M[ii - k + 1][cnt1][cnt2 / 2] * Q_M1[jj + k][cnt3][cnt4 / 2];
}
/* throw the dice */
r = vrna_urn() * qt;
for (qt = 0., k = i + 1; k < j; k++) {
cnt1 = cnt2 = cnt3 = cnt4 = -1;
if (Q_M_rem[ii - k + 1] != 0.) {
if (Q_M1_rem[jj + k] != 0) {
qt += Q_M_rem[ii - k + 1] * Q_M1_rem[jj + k];
if (qt >= r)
goto backtrack_ml_early_escape;
}
if (Q_M1[jj + k]) {
for (cnt3 = k_min_Q_M1[jj + k];
cnt3 <= k_max_Q_M1[jj + k];
cnt3++)
for (cnt4 = l_min_Q_M1[jj + k][cnt3];
cnt4 <= l_max_Q_M1[jj + k][cnt3];
cnt4 += 2) {
qt += Q_M_rem[ii - k + 1] * Q_M1[jj + k][cnt3][cnt4 / 2];
if (qt >= r)
goto backtrack_ml_early_escape;
}
}
}
if (Q_M1_rem[jj + k] != 0.) {
cnt3 = cnt4 = -1;
if (Q_M[ii - k + 1]) {
for (cnt1 = k_min_Q_M[ii - k + 1];
cnt1 <= k_max_Q_M[ii - k + 1];
cnt1++)
for (cnt2 = l_min_Q_M[ii - k + 1][cnt1];
cnt2 <= l_max_Q_M[ii - k + 1][cnt1];
cnt2 += 2) {
qt += Q_M[ii - k + 1][cnt1][cnt2 / 2] * Q_M1_rem[jj + k];
if (qt >= r)
goto backtrack_ml_early_escape;
}
}
}
/* calculate introduced distance to reference structures */
da = base_d1 - referenceBPs1[my_iindx[i] - k + 1] - referenceBPs1[my_iindx[k] - j];
db = base_d2 - referenceBPs2[my_iindx[i] - k + 1] - referenceBPs2[my_iindx[k] - j];
/* collect all contributing energies */
if (!Q_M[ii - k + 1])
continue;
if (!Q_M1[jj + k])
continue;
for (cnt1 = k_min_Q_M[ii - k + 1];
cnt1 <= k_max_Q_M[ii - k + 1];
cnt1++)
for (cnt2 = l_min_Q_M[ii - k + 1][cnt1];
cnt2 <= l_max_Q_M[ii - k + 1][cnt1];
cnt2 += 2)
for (cnt3 = k_min_Q_M1[jj + k];
cnt3 <= k_max_Q_M1[jj + k];
cnt3++)
for (cnt4 = l_min_Q_M1[jj + k][cnt3];
cnt4 <= l_max_Q_M1[jj + k][cnt3];
cnt4 += 2)
if (((cnt1 + cnt3 + da) > maxD1) || ((cnt2 + cnt4 + db) > maxD2)) {
qt += Q_M[ii - k + 1][cnt1][cnt2 / 2] * Q_M1[jj + k][cnt3][cnt4 / 2];
if (qt >= r)
goto backtrack_ml_early_escape;
}
}
} else {
/* get total contribution */
for (qt = 0., k = i + 1; k < j; k++) {
/* calculate introduced distance to reference structures */
da = base_d1 - referenceBPs1[my_iindx[i] - k + 1] - referenceBPs1[my_iindx[k] - j];
db = base_d2 - referenceBPs2[my_iindx[i] - k + 1] - referenceBPs2[my_iindx[k] - j];
/* collect all contributing energies */
if (d1 >= da && d2 >= db && Q_M[ii - k + 1] && Q_M1[jj + k]) {
for (cnt1 = k_min_Q_M[ii - k + 1]; cnt1 <= MIN2(k_max_Q_M[ii - k + 1], d1 - da); cnt1++)
for (cnt2 = l_min_Q_M[ii - k + 1][cnt1]; cnt2 <= MIN2(l_max_Q_M[ii - k + 1][cnt1], d2 - db); cnt2 += 2)
if ((d1 - cnt1 - da >= k_min_Q_M1[jj + k]) && (d1 - cnt1 - da <= k_max_Q_M1[jj + k]))
if ((d2 - cnt2 - db >= l_min_Q_M1[jj + k][d1 - da - cnt1]) && (d2 - cnt2 - db <= l_max_Q_M1[jj + k][d1 - cnt1 - da]))
qt += Q_M[ii - k + 1][cnt1][cnt2 / 2] * Q_M1[jj + k][d1 - da - cnt1][(d2 - db - cnt2) / 2];
}
}
r = vrna_urn() * qt;
for (qt = 0., k = i + 1; k < j; k++) {
/* calculate introduced distance to reference structures */
da = base_d1 - referenceBPs1[my_iindx[i] - k + 1] - referenceBPs1[my_iindx[k] - j];
db = base_d2 - referenceBPs2[my_iindx[i] - k + 1] - referenceBPs2[my_iindx[k] - j];
/* collect all contributing energies */
if (d1 >= da && d2 >= db && Q_M[ii - k + 1] && Q_M1[jj + k]) {
for (cnt1 = k_min_Q_M[ii - k + 1]; cnt1 <= MIN2(k_max_Q_M[ii - k + 1], d1 - da); cnt1++)
for (cnt2 = l_min_Q_M[ii - k + 1][cnt1]; cnt2 <= MIN2(l_max_Q_M[ii - k + 1][cnt1], d2 - db); cnt2 += 2)
if ((d1 - cnt1 - da >= k_min_Q_M1[jj + k]) && (d1 - cnt1 - da <= k_max_Q_M1[jj + k])) {
if ((d2 - cnt2 - db >= l_min_Q_M1[jj + k][d1 - da - cnt1]) && (d2 - cnt2 - db <= l_max_Q_M1[jj + k][d1 - cnt1 - da])) {
cnt3 = d1 - da - cnt1;
cnt4 = d2 - db - cnt2;
qt += Q_M[ii - k + 1][cnt1][cnt2 / 2] * Q_M1[jj + k][cnt3][cnt4 / 2];
if (qt >= r)
goto backtrack_ml_early_escape;
}
}
}
}
}
if (k >= j)
vrna_message_error("backtrack failed, can't find split index ");
backtrack_ml_early_escape:
backtrack_qm1(vc, pstruc, cnt3, cnt4, k, j);
j = k - 1;
backtrack_qm(vc, pstruc, cnt1, cnt2, i, j);
}
}
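/* Backtrack a Q_M1 segment [i,j]: the tail (l,j] stays unpaired and is
 * penalized with expMLbase; d1 == -1 again routes through the Q_B_rem /
 * Q_M1_rem remainder matrices.
 */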
PRIVATE void
backtrack_qm1(vrna_fold_compound_t *vc,
char *pstruc,
int d1,
int d2,
unsigned int i,
unsigned int j)
{
/* i is paired to l, i<l<j; backtrack in qm1 to find l */
FLT_OR_DBL r, qt, *scale;
unsigned int maxD1, maxD2, da, db;
unsigned int *referenceBPs1, *referenceBPs2;
char *ptype;
short *S1;
int *my_iindx, *jindx, cnt1, cnt2, turn;
vrna_exp_param_t *pf_params; /* holds all [unscaled] pf parameters */
vrna_mx_pf_t *matrices;
pf_params = vc->exp_params;
matrices = vc->exp_matrices;
maxD1 = vc->maxD1;
maxD2 = vc->maxD2;
my_iindx = vc->iindx;
jindx = vc->jindx;
scale = matrices->scale;
ptype = vc->ptype;
S1 = vc->sequence_encoding;
referenceBPs1 = vc->referenceBPs1;
referenceBPs2 = vc->referenceBPs2;
turn = pf_params->model_details.min_loop_size;
FLT_OR_DBL ***Q_B, ***Q_M1, *Q_B_rem, *Q_M1_rem;
int *k_min_Q_M1, *k_max_Q_M1, *k_min_Q_B, *k_max_Q_B;
int **l_min_Q_M1, **l_max_Q_M1, **l_min_Q_B, **l_max_Q_B;
Q_B = matrices->Q_B;
k_min_Q_B = matrices->k_min_Q_B;
k_max_Q_B = matrices->k_max_Q_B;
l_min_Q_B = matrices->l_min_Q_B;
l_max_Q_B = matrices->l_max_Q_B;
Q_M1 = matrices->Q_M1;
k_min_Q_M1 = matrices->k_min_Q_M1;
k_max_Q_M1 = matrices->k_max_Q_M1;
l_min_Q_M1 = matrices->l_min_Q_M1;
l_max_Q_M1 = matrices->l_max_Q_M1;
Q_B_rem = matrices->Q_B_rem;
Q_M1_rem = matrices->Q_M1_rem;
unsigned int ii, l;
int type;
r = 0.;
cnt1 = cnt2 = -1;
/* find qm1 contribution */
if (d1 == -1) {
r = vrna_urn() * Q_M1_rem[jindx[j] + i];
} else {
if ((d1 >= k_min_Q_M1[jindx[j] + i]) && (d1 <= k_max_Q_M1[jindx[j] + i]))
if ((d2 >= l_min_Q_M1[jindx[j] + i][d1]) && (d2 <= l_max_Q_M1[jindx[j] + i][d1]))
r = vrna_urn() * Q_M1[jindx[j] + i][d1][d2 / 2];
}
if (r == 0.)
vrna_message_error("backtrack_qm1@2Dpfold.c: backtracking failed\n");
ii = my_iindx[i];
for (qt = 0., l = i + turn + 1; l <= j; l++) {
type = ptype[jindx[l] + i];
if (type) {
FLT_OR_DBL tmp = exp_E_MLstem(type, S1[i - 1], S1[l + 1], pf_params) * pow(pf_params->expMLbase, j - l) * scale[j - l];
/* compute the introduced distance to reference structures */
da = referenceBPs1[my_iindx[i] - j] - referenceBPs1[my_iindx[i] - l];
db = referenceBPs2[my_iindx[i] - j] - referenceBPs2[my_iindx[i] - l];
cnt1 = cnt2 = -1;
if (d1 == -1) {
if (Q_B_rem[ii - l] != 0.) {
qt += Q_B_rem[ii - l] * tmp;
if (qt >= r)
goto backtrack_qm1_early_escape;
}
if (Q_B[ii - l]) {
for (cnt1 = k_min_Q_B[ii - l];
cnt1 <= k_max_Q_B[ii - l];
cnt1++)
for (cnt2 = l_min_Q_B[ii - l][cnt1];
cnt2 <= l_max_Q_B[ii - l][cnt1];
cnt2 += 2)
if (((cnt1 + da) > maxD1) || ((cnt2 + db) > maxD2)) {
qt += Q_B[ii - l][cnt1][cnt2 / 2] * tmp;
if (qt >= r)
goto backtrack_qm1_early_escape;
}
}
} else {
/* get energy contributions */
if (d1 >= da && d2 >= db) {
if ((d1 - da >= k_min_Q_B[ii - l]) && (d1 - da <= k_max_Q_B[ii - l])) {
if ((d2 - db >= l_min_Q_B[ii - l][d1 - da]) && (d2 - db <= l_max_Q_B[ii - l][d1 - da])) {
cnt1 = d1 - da;
cnt2 = d2 - db;
qt += Q_B[ii - l][cnt1][cnt2 / 2] * tmp;
if (qt >= r)
goto backtrack_qm1_early_escape;
}
}
}
}
}
}
if (l > j)
vrna_message_error("backtrack failed in qm1");
backtrack_qm1_early_escape:
backtrack(vc, pstruc, cnt1, cnt2, i, l);
}
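/* Backtrack a Q_M segment [i,j]: repeatedly split off a rightmost Q_M1 part
 * [k,j] (possibly preceded by unpaired bases weighted with expMLbase) until
 * the remaining interval is empty or completely unpaired.
 */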
PRIVATE void
backtrack_qm(vrna_fold_compound_t *vc,
char *pstruc,
int d1,
int d2,
unsigned int i,
unsigned int j)
{
/* divide multiloop into qm and qm1 */
FLT_OR_DBL r, *scale;
unsigned int maxD1, maxD2, da, db, da2, db2;
unsigned int *referenceBPs1, *referenceBPs2;
int *my_iindx, *jindx, cnt1, cnt2, cnt3, cnt4, turn;
vrna_exp_param_t *pf_params; /* holds all [unscaled] pf parameters */
vrna_mx_pf_t *matrices;
pf_params = vc->exp_params;
matrices = vc->exp_matrices;
maxD1 = vc->maxD1;
maxD2 = vc->maxD2;
my_iindx = vc->iindx;
jindx = vc->jindx;
scale = matrices->scale;
referenceBPs1 = vc->referenceBPs1;
referenceBPs2 = vc->referenceBPs2;
turn = pf_params->model_details.min_loop_size;
FLT_OR_DBL ***Q_M, ***Q_M1, *Q_M_rem, *Q_M1_rem;
int *k_min_Q_M, *k_max_Q_M, *k_min_Q_M1, *k_max_Q_M1;
int **l_min_Q_M, **l_max_Q_M, **l_min_Q_M1, **l_max_Q_M1;
Q_M = matrices->Q_M;
k_min_Q_M = matrices->k_min_Q_M;
k_max_Q_M = matrices->k_max_Q_M;
l_min_Q_M = matrices->l_min_Q_M;
l_max_Q_M = matrices->l_max_Q_M;
Q_M1 = matrices->Q_M1;
k_min_Q_M1 = matrices->k_min_Q_M1;
k_max_Q_M1 = matrices->k_max_Q_M1;
l_min_Q_M1 = matrices->l_min_Q_M1;
l_max_Q_M1 = matrices->l_max_Q_M1;
Q_M_rem = matrices->Q_M_rem;
Q_M1_rem = matrices->Q_M1_rem;
double qmt = 0;
unsigned int k;
cnt1 = cnt2 = cnt3 = cnt4 = -1;
r = 0.;
while (j > i) {
/* now backtrack [i ... j] in qm[] */
/* find qm contribution */
if (d1 == -1) {
r = vrna_urn() * Q_M_rem[my_iindx[i] - j];
} else {
if (Q_M[my_iindx[i] - j])
if ((d1 >= k_min_Q_M[my_iindx[i] - j]) && (d1 <= k_max_Q_M[my_iindx[i] - j]))
if ((d2 >= l_min_Q_M[my_iindx[i] - j][d1]) && (d2 <= l_max_Q_M[my_iindx[i] - j][d1]))
r = vrna_urn() * Q_M[my_iindx[i] - j][d1][d2 / 2];
}
if (r == 0.)
vrna_message_error("backtrack_qm@2Dpfold.c: backtracking failed in finding qm contribution\n");
qmt = 0.;
if (d1 == -1) {
if (Q_M1_rem[jindx[j] + i] != 0.) {
qmt += Q_M1_rem[jindx[j] + i];
if (qmt >= r) {
backtrack_qm1(vc, pstruc, d1, d2, i, j);
return;
}
}
for (k = i + 1; k <= j; k++) {
FLT_OR_DBL tmp = pow(pf_params->expMLbase, k - i) * scale[k - i];
if (Q_M1_rem[jindx[j] + k] != 0.) {
qmt += Q_M1_rem[jindx[j] + k] * tmp;
if (qmt >= r) {
backtrack_qm1(vc, pstruc, d1, d2, k, j);
return;
}
}
da2 = referenceBPs1[my_iindx[i] - j] - referenceBPs1[my_iindx[k] - j];
db2 = referenceBPs2[my_iindx[i] - j] - referenceBPs2[my_iindx[k] - j];
if (Q_M1[jindx[j] + k]) {
for (cnt1 = k_min_Q_M1[jindx[j] + k];
cnt1 <= k_max_Q_M1[jindx[j] + k];
cnt1++)
for (cnt2 = l_min_Q_M1[jindx[j] + k][cnt1];
cnt2 <= l_max_Q_M1[jindx[j] + k][cnt1];
cnt2 += 2)
if (((cnt1 + da2) > maxD1) || ((cnt2 + db2) > maxD2)) {
qmt += Q_M1[jindx[j] + k][cnt1][cnt2 / 2] * tmp;
if (qmt >= r) {
backtrack_qm1(vc, pstruc, cnt1, cnt2, k, j);
return;
}
}
}
da = da2 - referenceBPs1[my_iindx[i] - k + 1];
db = db2 - referenceBPs2[my_iindx[i] - k + 1];
cnt1 = cnt2 = cnt3 = cnt4 = -1;
if (Q_M_rem[my_iindx[i] - k + 1] != 0.) {
if (Q_M1_rem[jindx[j] + k] != 0.) {
qmt += Q_M_rem[my_iindx[i] - k + 1] * Q_M1_rem[jindx[j] + k];
if (qmt >= r)
goto backtrack_qm_early_escape;
}
if (Q_M1[jindx[j] + k]) {
for (cnt3 = k_min_Q_M1[jindx[j] + k];
cnt3 <= k_max_Q_M1[jindx[j] + k];
cnt3++)
for (cnt4 = l_min_Q_M1[jindx[j] + k][cnt3];
cnt4 <= l_max_Q_M1[jindx[j] + k][cnt3];
cnt4 += 2) {
qmt += Q_M_rem[my_iindx[i] - k + 1] * Q_M1[jindx[j] + k][cnt3][cnt4 / 2];
if (qmt >= r)
goto backtrack_qm_early_escape;
}
}
}
if (Q_M1_rem[jindx[j] + k] != 0.) {
cnt3 = cnt4 = -1;
if (Q_M[my_iindx[i] - k + 1]) {
for (cnt1 = k_min_Q_M[my_iindx[i] - k + 1];
cnt1 <= k_max_Q_M[my_iindx[i] - k + 1];
cnt1++)
for (cnt2 = l_min_Q_M[my_iindx[i] - k + 1][cnt1];
cnt2 <= l_max_Q_M[my_iindx[i] - k + 1][cnt1];
cnt2 += 2) {
qmt += Q_M[my_iindx[i] - k + 1][cnt1][cnt2 / 2] * Q_M1_rem[jindx[j] + k];
if (qmt >= r)
goto backtrack_qm_early_escape;
}
}
}
if (!Q_M[my_iindx[i] - k + 1])
continue;
if (!Q_M1[jindx[j] + k])
continue;
for (cnt1 = k_min_Q_M[my_iindx[i] - k + 1];
cnt1 <= k_max_Q_M[my_iindx[i] - k + 1];
cnt1++)
for (cnt2 = l_min_Q_M[my_iindx[i] - k + 1][cnt1];
cnt2 <= l_max_Q_M[my_iindx[i] - k + 1][cnt1];
cnt2 += 2)
for (cnt3 = k_min_Q_M1[jindx[j] + k];
cnt3 <= k_max_Q_M1[jindx[j] + k];
cnt3++)
for (cnt4 = l_min_Q_M1[jindx[j] + k][cnt3];
cnt4 <= l_max_Q_M1[jindx[j] + k][cnt3];
cnt4 += 2)
if (((cnt1 + cnt3 + da) > maxD1) || ((cnt2 + cnt4 + db) > maxD2)) {
qmt += Q_M[my_iindx[i] - k + 1][cnt1][cnt2 / 2] * Q_M1[jindx[j] + k][cnt3][cnt4 / 2];
if (qmt >= r)
goto backtrack_qm_early_escape;
}
}
} else {
/* find corresponding qm1 contribution */
if (Q_M1[jindx[j] + i]) {
if ((d1 >= k_min_Q_M1[jindx[j] + i]) && (d1 <= k_max_Q_M1[jindx[j] + i]))
if ((d2 >= l_min_Q_M1[jindx[j] + i][d1]) && (d2 <= l_max_Q_M1[jindx[j] + i][d1]))
qmt = Q_M1[jindx[j] + i][d1][d2 / 2];
}
k = i;
if (qmt < r) {
for (k = i + 1; k <= j; k++) {
/* calculate the introduced distances to the reference structures */
da2 = referenceBPs1[my_iindx[i] - j] - referenceBPs1[my_iindx[k] - j];
db2 = referenceBPs2[my_iindx[i] - j] - referenceBPs2[my_iindx[k] - j];
da = da2 - referenceBPs1[my_iindx[i] - k + 1];
db = db2 - referenceBPs2[my_iindx[i] - k + 1];
FLT_OR_DBL tmp = pow(pf_params->expMLbase, k - i) * scale[k - i];
/* collect unpaired + qm1 contributions */
if (d1 >= da2 && d2 >= db2) {
if ((d1 - da2 >= k_min_Q_M1[jindx[j] + k]) && (d1 - da2 <= k_max_Q_M1[jindx[j] + k])) {
if ((d2 - db2 >= l_min_Q_M1[jindx[j] + k][d1 - da2]) && (d2 - db2 <= l_max_Q_M1[jindx[j] + k][d1 - da2])) {
cnt3 = d1 - da2;
cnt4 = d2 - db2;
qmt += Q_M1[jindx[j] + k][cnt3][cnt4 / 2] * tmp;
if (qmt >= r) {
backtrack_qm1(vc, pstruc, cnt3, cnt4, k, j);
return;
}
}
}
}
/* collect qm + qm1 contributions */
if (d1 >= da && d2 >= db && Q_M[my_iindx[i] - k + 1] && Q_M1[jindx[j] + k]) {
for (cnt1 = k_min_Q_M[my_iindx[i] - k + 1]; cnt1 <= MIN2(k_max_Q_M[my_iindx[i] - k + 1], d1 - da); cnt1++)
for (cnt2 = l_min_Q_M[my_iindx[i] - k + 1][cnt1]; cnt2 <= MIN2(l_max_Q_M[my_iindx[i] - k + 1][cnt1], d2 - db); cnt2 += 2)
if ((d1 - da - cnt1 >= k_min_Q_M1[jindx[j] + k]) && (d1 - da - cnt1 <= k_max_Q_M1[jindx[j] + k])) {
if ((d2 - db - cnt2 >= l_min_Q_M1[jindx[j] + k][d1 - da - cnt1]) && (d2 - db - cnt2 <= l_max_Q_M1[jindx[j] + k][d1 - da - cnt1])) {
cnt3 = d1 - da - cnt1;
cnt4 = d2 - db - cnt2;
qmt += Q_M[my_iindx[i] - k + 1][cnt1][cnt2 / 2] * Q_M1[jindx[j] + k][cnt3][cnt4 / 2];
if (qmt >= r)
goto backtrack_qm_early_escape;
}
}
}
}
} else {
backtrack_qm1(vc, pstruc, d1, d2, k, j);
return;
}
}
if (k > j)
vrna_message_error("backtrack_qm@2Dpfold.c: backtrack failed in qm");
backtrack_qm_early_escape:
backtrack_qm1(vc, pstruc, cnt3, cnt4, k, j);
if (k < i + turn)
break; /* no more pairs */
d1 = cnt1;
d2 = cnt2;
if (d1 == referenceBPs1[my_iindx[i] - k + 1] && d2 == referenceBPs2[my_iindx[i] - k + 1]) {
/* is interval [i,k] totally unpaired? */
FLT_OR_DBL tmp = pow(pf_params->expMLbase, k - i) * scale[k - i];
r = vrna_urn() * (Q_M[my_iindx[i] - k + 1][d1][d2 / 2] + tmp);
if (tmp >= r)
return; /* no more pairs */
}
j = k - 1;
}
}
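/* Shrink a distance-class indexed 2D matrix to the boundaries that were
 * actually populated (k_min/k_max and per-k l_min/l_max), freeing rows and
 * columns outside the posterior bounds and re-basing the offset pointers.
 */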
PRIVATE void
adjustArrayBoundaries(FLT_OR_DBL ***array,
int *k_min,
int *k_max,
int **l_min,
int **l_max,
int k_min_post,
int k_max_post,
int *l_min_post,
int *l_max_post)
{
int cnt1;
int k_diff_pre = k_min_post - *k_min;
int mem_size = k_max_post - k_min_post + 1;
if (k_min_post < INF) {
/* free all the unused memory behind actual data */
for (cnt1 = k_max_post + 1; cnt1 <= *k_max; cnt1++) {
(*array)[cnt1] += (*l_min)[cnt1] / 2;
free((*array)[cnt1]);
}
/* free unused memory before actual data */
for (cnt1 = *k_min; cnt1 < k_min_post; cnt1++) {
(*array)[cnt1] += (*l_min)[cnt1] / 2;
free((*array)[cnt1]);
}
/* move data to the front, thereby eliminating unused memory in front of the actual data */
if (k_diff_pre > 0) {
memmove((FLT_OR_DBL **)(*array), ((FLT_OR_DBL **)(*array)) + k_diff_pre, sizeof(FLT_OR_DBL *) * mem_size);
memmove((int *)(*l_min), ((int *)(*l_min)) + k_diff_pre, sizeof(int) * mem_size);
memmove((int *)(*l_max), ((int *)(*l_max)) + k_diff_pre, sizeof(int) * mem_size);
}
/* reallocate memory to the actual size used */
*array += *k_min;
*array = (FLT_OR_DBL **)realloc(*array, sizeof(FLT_OR_DBL *) * mem_size);
*array -= k_min_post;
*l_min += *k_min;
*l_min = (int *)realloc(*l_min, sizeof(int) * mem_size);
*l_min -= k_min_post;
*l_max += *k_min;
*l_max = (int *)realloc(*l_max, sizeof(int) * mem_size);
*l_max -= k_min_post;
for (cnt1 = k_min_post; cnt1 <= k_max_post; cnt1++) {
if (l_min_post[cnt1] < INF) {
/* new memsize */
mem_size = (l_max_post[cnt1] - l_min_post[cnt1] + 1) / 2 + 1;
/* reshift the pointer */
(*array)[cnt1] += (*l_min)[cnt1] / 2;
int shift = (l_min_post[cnt1] % 2 == (*l_min)[cnt1] % 2) ? 0 : 1;
/* eliminate unused memory in front of actual data */
unsigned int start = (l_min_post[cnt1] - (*l_min)[cnt1]) / 2 + shift;
if (start > 0)
memmove((FLT_OR_DBL *)((*array)[cnt1]), (FLT_OR_DBL *)((*array)[cnt1]) + start, sizeof(FLT_OR_DBL) * mem_size);
(*array)[cnt1] = (FLT_OR_DBL *)realloc((*array)[cnt1], sizeof(FLT_OR_DBL) * mem_size);
(*array)[cnt1] -= l_min_post[cnt1] / 2;
} else {
/* free the corresponding memory */
(*array)[cnt1] += (*l_min)[cnt1] / 2;
free((*array)[cnt1]);
}
(*l_min)[cnt1] = l_min_post[cnt1];
(*l_max)[cnt1] = l_max_post[cnt1];
}
} else {
/* we have to free all unused memory */
for (cnt1 = *k_min; cnt1 <= *k_max; cnt1++) {
(*array)[cnt1] += (*l_min)[cnt1] / 2;
free((*array)[cnt1]);
}
(*l_min) += *k_min;
(*l_max) += *k_min;
free(*l_min);
free(*l_max);
(*array) += *k_min;
free(*array);
*array = NULL;
}
l_min_post += *k_min;
l_max_post += *k_min;
*k_min = k_min_post;
*k_max = k_max_post;
free(l_min_post);
free(l_max_post);
}
PRIVATE INLINE void
preparePosteriorBoundaries(int size,
int shift,
int *min_k,
int *max_k,
int **min_l,
int **max_l)
{
int i;
*min_k = INF;
*max_k = 0;
*min_l = (int *)vrna_alloc(sizeof(int) * size);
*max_l = (int *)vrna_alloc(sizeof(int) * size);
for (i = 0; i < size; i++) {
(*min_l)[i] = INF;
(*max_l)[i] = 0;
}
*min_l -= shift;
*max_l -= shift;
}
PRIVATE INLINE void
updatePosteriorBoundaries(int d1,
int d2,
int *min_k,
int *max_k,
int **min_l,
int **max_l)
{
(*min_l)[d1] = MIN2((*min_l)[d1], d2);
(*max_l)[d1] = MAX2((*max_l)[d1], d2);
*min_k = MIN2(*min_k, d1);
*max_k = MAX2(*max_k, d1);
}
PRIVATE INLINE void
prepareBoundaries(int min_k_pre,
int max_k_pre,
int min_l_pre,
int max_l_pre,
int bpdist,
int *min_k,
int *max_k,
int **min_l,
int **max_l)
{
int cnt;
int mem = max_k_pre - min_k_pre + 1;
*min_k = min_k_pre;
*max_k = max_k_pre;
*min_l = (int *)vrna_alloc(sizeof(int) * mem);
*max_l = (int *)vrna_alloc(sizeof(int) * mem);
*min_l -= min_k_pre;
*max_l -= min_k_pre;
/* for each k, guess the corresponding minimum l */
for (cnt = min_k_pre; cnt <= max_k_pre; cnt++) {
(*min_l)[cnt] = min_l_pre;
(*max_l)[cnt] = max_l_pre;
while ((*min_l)[cnt] + cnt < bpdist)
(*min_l)[cnt]++;
if ((bpdist % 2) != (((*min_l)[cnt] + cnt) % 2))
(*min_l)[cnt]++;
}
}
PRIVATE INLINE void
prepareArray(FLT_OR_DBL ***array,
int min_k,
int max_k,
int *min_l,
int *max_l)
{
int i, mem;
*array = (FLT_OR_DBL **)vrna_alloc(sizeof(FLT_OR_DBL *) * (max_k - min_k + 1));
*array -= min_k;
for (i = min_k; i <= max_k; i++) {
mem = (max_l[i] - min_l[i] + 1) / 2 + 1;
(*array)[i] = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * mem);
(*array)[i] -= min_l[i] / 2;
}
}
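/*
* Illustrative sketch (hypothetical, not part of the original code):
* prepareArray() and adjustArrayBoundaries() rely on an offset-pointer idiom.
* Memory is allocated only for the index range [min_k .. max_k], and the
* pointer is then shifted by -min_k so it can be indexed directly with the
* distance value k. The l dimension is stored with stride 2 because, for a
* fixed k, only every second l value can occur (the parity of k + l is fixed),
* hence the recurring "/ 2" in the index calculations. A one-dimensional
* example of the same idiom, assuming vrna_alloc():
*
* int min_k = 3, max_k = 7;
* int *cnt = (int *)vrna_alloc(sizeof(int) * (max_k - min_k + 1));
* cnt -= min_k;   -- cnt[3] .. cnt[7] are now the valid slots
* cnt[5] = 42;    -- index with the distance value itself
* cnt += min_k;   -- shift back before freeing
* free(cnt);
*/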
/*
#################################
# DEPRECATED FUNCTIONS BELOW #
#################################
*/
PRIVATE void
crosslink(TwoDpfold_vars *vars)
{
vrna_fold_compound_t *c;
vrna_mx_pf_t *m;
c = vars->compatibility;
m = c->exp_matrices;
vars->sequence = c->sequence;
vars->seq_length = c->length;
vars->reference_pt1 = c->reference_pt1;
vars->reference_pt2 = c->reference_pt2;
vars->referenceBPs1 = c->referenceBPs1;
vars->referenceBPs2 = c->referenceBPs2;
vars->mm1 = c->mm1;
vars->mm2 = c->mm2;
vars->bpdist = c->bpdist;
vars->dangles = c->exp_params->model_details.dangles;
vars->circ = c->exp_params->model_details.circ;
vars->temperature = c->exp_params->model_details.temperature;
vars->init_temp = c->exp_params->model_details.temperature;
vars->pf_scale = c->exp_params->pf_scale;
vars->pf_params = c->exp_params;
vars->scale = m->scale;
vars->ptype = c->ptype_pf_compat;
vars->S = c->sequence_encoding2;
vars->S1 = c->sequence_encoding;
vars->jindx = c->jindx;
vars->my_iindx = c->iindx;
vars->maxD1 = c->maxD1;
vars->maxD2 = c->maxD2;
vars->Q = m->Q;
vars->l_min_values = m->l_min_Q;
vars->l_max_values = m->l_max_Q;
vars->k_min_values = m->k_min_Q;
vars->k_max_values = m->k_max_Q;
vars->Q_B = m->Q_B;
vars->l_min_values_b = m->l_min_Q_B;
vars->l_max_values_b = m->l_max_Q_B;
vars->k_min_values_b = m->k_min_Q_B;
vars->k_max_values_b = m->k_max_Q_B;
vars->Q_M = m->Q_M;
vars->l_min_values_m = m->l_min_Q_M;
vars->l_max_values_m = m->l_max_Q_M;
vars->k_min_values_m = m->k_min_Q_M;
vars->k_max_values_m = m->k_max_Q_M;
vars->Q_M1 = m->Q_M1;
vars->l_min_values_m1 = m->l_min_Q_M1;
vars->l_max_values_m1 = m->l_max_Q_M1;
vars->k_min_values_m1 = m->k_min_Q_M1;
vars->k_max_values_m1 = m->k_max_Q_M1;
vars->Q_M2_rem = m->Q_M2_rem;
vars->Q_M2 = m->Q_M2;
vars->l_min_values_m2 = m->l_min_Q_M2;
vars->l_max_values_m2 = m->l_max_Q_M2;
vars->k_min_values_m2 = m->k_min_Q_M2;
vars->k_max_values_m2 = m->k_max_Q_M2;
vars->Q_c = m->Q_c;
vars->Q_cH = m->Q_cH;
vars->Q_cI = m->Q_cI;
vars->Q_cM = m->Q_cM;
vars->Q_c_rem = m->Q_c_rem;
vars->Q_cH_rem = m->Q_cH_rem;
vars->Q_cI_rem = m->Q_cI_rem;
vars->Q_cM_rem = m->Q_cM_rem;
vars->Q_rem = m->Q_rem;
vars->Q_B_rem = m->Q_B_rem;
vars->Q_M_rem = m->Q_M_rem;
vars->Q_M1_rem = m->Q_M1_rem;
}
PUBLIC char *
TwoDpfold_pbacktrack(TwoDpfold_vars *vars,
int d1,
int d2)
{
return vrna_pbacktrack_TwoD(vars->compatibility, d1, d2);
}
PUBLIC char *
TwoDpfold_pbacktrack5(TwoDpfold_vars *vars,
int d1,
int d2,
unsigned int length)
{
return vrna_pbacktrack5_TwoD(vars->compatibility, d1, d2, length);
}
PUBLIC TwoDpfold_vars *
get_TwoDpfold_variables(const char *seq,
const char *structure1,
char *structure2,
int circ)
{
vrna_md_t md;
TwoDpfold_vars *vars;
set_model_details(&md);
md.circ = circ;
vars = (TwoDpfold_vars *)malloc(sizeof(TwoDpfold_vars));
vars->compatibility = vrna_fold_compound_TwoD(seq, structure1, structure2, &md, VRNA_OPTION_PF);
crosslink(vars);
return vars;
}
PUBLIC void
destroy_TwoDpfold_variables(TwoDpfold_vars *vars)
{
if (vars == NULL)
return;
vrna_fold_compound_free(vars->compatibility);
free(vars);
}
vrna_sol_TwoD_pf_t *
TwoDpfoldList(TwoDpfold_vars *vars,
int distance1,
int distance2)
{
vrna_sol_TwoD_pf_t *sol;
sol = vrna_pf_TwoD(vars->compatibility, distance1, distance2);
crosslink(vars);
return sol;
}
|
sapB_fmt_plug.c | /*
* this is a SAP-BCODE plugin for john the ripper.
* tested on linux/x86 only, rest is up to you.. at least, someone did the reversing :-)
*
* please note: this code is in a "works for me"-state, feel free to modify/speed up/clean/whatever it...
*
* (c) x7d8 sap loverz, public domain, btw
* cheers: see test-cases.
*
* Heavily modified by magnum 2011-2012 for performance and for SIMD, OMP and
* encodings support. Copyright (c) 2011, 2012 magnum, and it is hereby released
* to the general public under the following terms: Redistribution and use in
* source and binary forms, with or without modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_sapB;
#elif FMT_REGISTERS_H
john_register_one(&fmt_sapB);
#else
#include <string.h>
#include <ctype.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "memory.h"
#include "johnswap.h"
#include "options.h"
#include "unicode.h"
#include "md5.h"
#define FORMAT_LABEL "sapb"
#define FORMAT_NAME "SAP CODVN B (BCODE)"
#ifdef SIMD_COEF_32
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_MD5)
#endif
#include "simd-intrinsics.h"
#define ALGORITHM_NAME "MD5 " MD5_ALGORITHM_NAME
#if defined(_OPENMP)
#include <omp.h>
static unsigned int omp_t = 1;
#ifdef SIMD_COEF_32
#ifndef OMP_SCALE
#define OMP_SCALE 512 // tuned on K8-dual HT.
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#endif
#include "memdbg.h"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define SALT_FIELD_LENGTH 40 /* the max listed username length */
#define SALT_LENGTH 12 /* the max used username length */
#define PLAINTEXT_LENGTH 8 /* password length: max 8 chars */
#define CIPHERTEXT_LENGTH (SALT_FIELD_LENGTH + 1 + 16) /* SALT + $ + 2x8 bytes for BCODE-representation */
#define BINARY_SIZE 8 /* half of md5 */
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct saltstruct)
#define SALT_ALIGN 4
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
// NOTE: GETOUTPOS is only valid for uint32-aligned offsets (i.e. i is 0, 4, 8, 12, ...), which is how we use it in this format.
#define GETOUTPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32)
#if ARCH_LITTLE_ENDIAN
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32*4 )
#else
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32*4 )
#endif
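// Illustrative note (assuming SIMD_COEF_32 == 4): GETPOS maps byte i of
// candidate 'index' into the interleaved SIMD key buffer, where SIMD_COEF_32
// candidates share one 64-byte-per-lane MD5 block. For example,
// GETPOS(5, 1) = (1&3)*4 + (5&~3)*4 + (5&3) + (1/4)*16*4*4 = 4 + 16 + 1 + 0 = 21,
// so byte 5 of lane 1 lands at offset 21 of the flat buffer. GETOUTPOS does the
// same for the 16-byte digest area and is only used with i = 0, 4, 8, 12.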
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define BCODE_ARRAY_LENGTH 3*16
static const unsigned char bcodeArr[BCODE_ARRAY_LENGTH] =
{ 0x14, 0x77, 0xf3, 0xd4, 0xbb, 0x71, 0x23, 0xd0, 0x03, 0xff, 0x47, 0x93, 0x55, 0xaa, 0x66, 0x91,
0xf2, 0x88, 0x6b, 0x99, 0xbf, 0xcb, 0x32, 0x1a, 0x19, 0xd9, 0xa7, 0x82, 0x22, 0x49, 0xa2, 0x51,
0xe2, 0xb7, 0x33, 0x71, 0x8b, 0x9f, 0x5d, 0x01, 0x44, 0x70, 0xae, 0x11, 0xef, 0x28, 0xf0, 0x0d };
/* char transition table for BCODE (from disp+work) */
static const unsigned char transtable[] =
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x3f, 0x40, 0x41, 0x50, 0x43, 0x44, 0x45, 0x4b, 0x47, 0x48, 0x4d, 0x4e, 0x54, 0x51, 0x53, 0x46,
0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x56, 0x55, 0x5c, 0x49, 0x5d, 0x4a,
0x42, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x58, 0x5b, 0x59, 0xff, 0x52,
//0x4c, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29,
0x4c, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
//0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x57, 0x5e, 0x5a, 0x4f, 0xff
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x57, 0x5e, 0x5a, 0x4f, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
// For backwards compatibility, we must support salts padded with spaces to a field width of 40
static struct fmt_tests tests[] = {
{"DDIC$C94E2F7DD0178374", "DDIC"},
// While "X" and "U" are not valid SAP passwords, they might still occur
// if passwords longer than 8 characters are allowed, and if the CODVN B
// password is calculated and stored in addition to the CODVN F or
// CODVN H password.
// A user picking "X Y" as a password is, admittedly,
// not very likely.
{"F $E3A65AAA9676060F", "X"},
// the 9 character password CYBERPUNK will be truncated to CYBERPUN
{"JOHNNY $7F7207932E4DE471", "CYBERPUNK"},
{"VAN $487A2A40A7BA2258", "HAUSER"},
{"ROOT $8366A4E9E6B72CB0", "KID"},
{"MAN $9F48E7CE5B184D2E", "U"},
// "-------" is not a valid SAP password (first 3 characters are
// identical)
// ("^^^^^^^" would be allowed, since "^" also replaces arbitrary
// non-ascii characters, as far as the CODVN B hash algorithm is
// concerned)
// {"------------$2CF190AF13E858A2", "-------"},
{"------------$058DE95926E00F32", "--+----"},
{"SAP*$7016BFF7C5472F1B", "MASTER"},
// password DOLLAR$$$--- will be truncated to DOLLAR$$
{"DOLLAR$$$---$C3413C498C48EB67", "DOLLAR$$$---"},
// Trigger suspected over-run of sum20. We do behave like SAP so it's
// not a problem.
{"12850413$1470EF2F683C956D", "46813230"},
// document some known hash collisions:
// 4 different 8 character passwords for the same hash:
{"EARLYWATCH$E786D382B2C88932", "VXFNI07+"},
{"EARLYWATCH$E786D382B2C88932", "VXFNI07<"},
{"EARLYWATCH$E786D382B2C88932", "VXFNI07V"},
{"EARLYWATCH$E786D382B2C88932", "VXFNI07W"},
{"EARLYWATCH$C1490E1C2AC53FFB", "COCQP098"},
{"EARLYWATCH$C1490E1C2AC53FFB", "COCQP09E"},
{"EARLYWATCH$C1490E1C2AC53FFB", "COCQP09J"},
{"EARLYWATCH$C1490E1C2AC53FFB", "COCQP09V"},
// collision of a 7-character password with two 8-character passwords:
{"EARLYWATCH$5BCDD8FB7B827A26", "VAUBS04"},
{"EARLYWATCH$5BCDD8FB7B827A26", "VAUBS04*"},
{"EARLYWATCH$5BCDD8FB7B827A26", "VAUBS04H"},
// collision even with a 4 character user name:
{"DDIC$74DB83791A028420", "DFQEX12"},
{"DDIC$74DB83791A028420", "DFQEX12."},
{NULL}
};
#define TEMP_ARRAY_SIZE 4*16
#define DEFAULT_OFFSET 15
static char (*saved_plain)[PLAINTEXT_LENGTH + 1];
static int (*keyLen);
#ifdef SIMD_COEF_32
static unsigned char (*saved_key);
static unsigned char (*interm_key);
static unsigned char (*crypt_key);
static unsigned int (*clean_pos);
#else
static uint32_t (*crypt_key)[BINARY_SIZE/sizeof(uint32_t)];
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
#endif
static struct saltstruct {
unsigned int l;
unsigned char s[SALT_LENGTH];
} *cur_salt;
static void init(struct fmt_main *self)
{
static int warned = 0;
if (options.target_enc == UTF_8 && !options.listconf && warned++ == 0)
fprintf(stderr, "Warning: SAP-B format should never be UTF-8.\nUse --target-encoding=iso-8859-1 or whatever is applicable.\n");
#if defined (_OPENMP)
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt = (omp_t * MIN_KEYS_PER_CRYPT);
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt = (omp_t * MAX_KEYS_PER_CRYPT);
#endif
#ifdef SIMD_COEF_32
saved_key = mem_calloc_align(self->params.max_keys_per_crypt,
64, MEM_ALIGN_SIMD);
interm_key = mem_calloc_align(self->params.max_keys_per_crypt,
64, MEM_ALIGN_SIMD);
clean_pos = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*clean_pos));
crypt_key = mem_calloc_align(self->params.max_keys_per_crypt,
16, MEM_ALIGN_SIMD);
#else
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_key));
#endif
saved_plain = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_plain) );
keyLen = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*keyLen));
}
static void done(void)
{
MEM_FREE(keyLen);
MEM_FREE(saved_plain);
MEM_FREE(crypt_key);
#ifdef SIMD_COEF_32
MEM_FREE(clean_pos);
MEM_FREE(interm_key);
#endif
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
int i;
char *p;
if (!ciphertext) return 0;
p = strrchr(ciphertext, '$');
if (!p) return 0;
if (p - ciphertext > SALT_FIELD_LENGTH) return 0;
if (strlen(&p[1]) != BINARY_SIZE * 2) return 0;
for (i = 0; i < p - ciphertext; i++) {
// even those lower case non-ascii characters with a
// corresponding upper case character could be rejected
if (ciphertext[i] >= 'a' && ciphertext[i] <= 'z') return 0;
// SAP user names cannot be longer than 12 characters
if (i >= SALT_LENGTH && ciphertext[i] != ' ') return 0;
}
// SAP user name cannot start with ! or ?
if (ciphertext[0] == '!' || ciphertext[0] == '?') return 0;
// the user name must not simply be spaces, or empty
for (i = 0; i < p - ciphertext; ++i) {
if (ciphertext[i] == ' ')
continue;
break;
}
if (ciphertext[i] == '$') return 0;
p++;
// SAP and sap2john.pl always use upper case A-F for hashes,
// so don't allow a-f
for (i = 0; i < BINARY_SIZE * 2; i++)
if (!(((p[i]>='0' && p[i]<='9')) ||
((p[i]>='A' && p[i]<='F')) ))
return 0;
return 1;
}
static void set_salt(void *salt)
{
cur_salt = salt;
}
static void set_key(char *key, int index)
{
strnzcpy(saved_plain[index], key, sizeof(*saved_plain));
keyLen[index] = -1;
}
static char *get_key(int index)
{
int i;
// Work-around for new self-test.
if (keyLen[index] == -1)
keyLen[index] = strlen(saved_plain[index]);
for (i = 0; i < keyLen[index]; i++) {
if (saved_plain[index][i] >= 'a' && saved_plain[index][i] <= 'z')
saved_plain[index][i] ^= 0x20;
else if (saved_plain[index][i] & 0x80)
saved_plain[index][i] = '^';
}
saved_plain[index][i] = 0;
return saved_plain[index];
}
static int cmp_all(void *binary, int count) {
#ifdef SIMD_COEF_32
unsigned int x,y=0;
#ifdef _OPENMP
for (;y<SIMD_PARA_MD5*omp_t;y++)
#else
for (;y<SIMD_PARA_MD5;y++)
#endif
for (x = 0; x < SIMD_COEF_32; x++)
{
if ( ((uint32_t*)binary)[0] == ((uint32_t*)crypt_key)[y*SIMD_COEF_32*4+x] )
return 1;
}
return 0;
#else
int index;
for (index = 0; index < count; index++)
if (!memcmp(binary, crypt_key[index], BINARY_SIZE))
return 1;
return 0;
#endif
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static int cmp_one(void * binary, int index)
{
#ifdef SIMD_COEF_32
unsigned int i,x,y;
x = index&(SIMD_COEF_32-1);
y = (unsigned int)index/SIMD_COEF_32;
for (i=0;i<(BINARY_SIZE/4);i++)
if ( ((uint32_t*)binary)[i] != ((uint32_t*)crypt_key)[y*SIMD_COEF_32*4+i*SIMD_COEF_32+x] )
return 0;
return 1;
#else
return !memcmp(binary, crypt_key[index], BINARY_SIZE);
#endif
}
static unsigned int walld0rf_magic(const int index, const unsigned char *temp_key, unsigned char *destArray)
{
unsigned int sum20, I1, I2, I3;
const int len = keyLen[index];
#ifdef SIMD_COEF_32
#define key(i) saved_key[GETPOS(i, index)]
#else
#define key(i) saved_key[index][i]
#endif
// some magic in between....yes, byte 4 is ignored...
// sum20 will be between 0x20 and 0x2F
//sum20 = temp_key[5]%4 + temp_key[3]%4 + temp_key[2]%4 + temp_key[1]%4 + temp_key[0]%4 + 0x20;
sum20 = *(unsigned int*)temp_key & 0x03030303;
sum20 = (unsigned char)((sum20 >> 24) + (sum20 >> 16) +
(sum20 >> 8) + sum20);
sum20 += (temp_key[5] & 3) | 0x20;
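// Explanatory note: the word trick above matches the commented-out byte
// formula. ANDing with 0x03030303 keeps the low two bits of temp_key[0..3];
// adding the four byte lanes and truncating to unsigned char yields
// temp_key[0]%4 + temp_key[1]%4 + temp_key[2]%4 + temp_key[3]%4 (at most 12,
// so no carry can interfere). Since temp_key[5]%4 < 0x20, the final OR with
// 0x20 acts as an addition, leaving sum20 in the range 0x20 .. 0x2F.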
#if defined (NO_UNROLL)
// MUCH easier to understand. Kept for documentation reasons.
I1 = I2 = I3 = 0;
while(I2 < sum20) {
if (I1 < len) {
if (temp_key[DEFAULT_OFFSET - I1] & 0x01)
destArray[I2++] = bcodeArr[BCODE_ARRAY_LENGTH - I1 - 1];
destArray[I2++] = key(I1); I1++;
}
if (I3 < cur_salt->l)
destArray[I2++] = cur_salt->s[I3++];
destArray[I2] = bcodeArr[I2 - I1 - I3];
++I2;
destArray[I2++] = 0;
}
#else
// Some unrolling
if (temp_key[15] & 0x01) {
destArray[0] = bcodeArr[47];
I2 = 1;
}
else {
I2 = 0;
}
destArray[I2++] = key(0);
destArray[I2++] = cur_salt->s[0];
destArray[I2] = bcodeArr[I2-2];
destArray[++I2] = 0; I2++;
if ( len >= 6) {
I1 = 6;
if ( cur_salt->l >= 4 ) {
// key >= 6 bytes, salt >= 4 bytes
if (temp_key[14] & 0x01)
destArray[I2++] = bcodeArr[46];
destArray[I2++] = key(1);
destArray[I2++] = cur_salt->s[1];
destArray[I2] = bcodeArr[I2-4];
destArray[++I2] = 0; I2++;
if (temp_key[13] & 0x01)
destArray[I2++] = bcodeArr[45];
destArray[I2++] = key(2);
destArray[I2++] = cur_salt->s[2];
destArray[I2] = bcodeArr[I2-6];
destArray[++I2] = 0; I2++;
if (temp_key[12] & 0x01)
destArray[I2++] = bcodeArr[44];
destArray[I2++] = key(3);
destArray[I2++] = cur_salt->s[3];
destArray[I2] = bcodeArr[I2-8];
destArray[++I2] = 0; I2++;
I3 = 4;
if (temp_key[DEFAULT_OFFSET - 4] & 0x01)
destArray[I2++] = bcodeArr[43];
destArray[I2++] = key(4);
if (4 < cur_salt->l)
destArray[I2++] = cur_salt->s[I3++];
destArray[I2] = bcodeArr[I2 - 5 - I3];
destArray[++I2] = 0; I2++;
if (temp_key[DEFAULT_OFFSET - 5] & 0x01)
destArray[I2++] = bcodeArr[42];
destArray[I2++] = key(5);
if (5 < cur_salt->l)
destArray[I2++] = cur_salt->s[I3++];
destArray[I2] = bcodeArr[I2 - 6 - I3];
destArray[++I2] = 0; I2++;
if (6 < len) {
if (temp_key[DEFAULT_OFFSET - 6] & 0x01)
destArray[I2++] = bcodeArr[BCODE_ARRAY_LENGTH - 7];
destArray[I2++] = key(6); I1++;
}
if (6 < cur_salt->l)
destArray[I2++] = cur_salt->s[I3++];
} else {
// Key >= 6 bytes, salt < 4 Bytes
I3 = 1;
if (temp_key[DEFAULT_OFFSET - 1] & 0x01)
destArray[I2++] = bcodeArr[BCODE_ARRAY_LENGTH - 2];
destArray[I2++] = key(1);
if (1 < cur_salt->l)
destArray[I2++] = cur_salt->s[I3++];
destArray[I2] = bcodeArr[I2 - 2 - I3];
destArray[++I2] = 0; I2++;
if (temp_key[DEFAULT_OFFSET - 2] & 0x01)
destArray[I2++] = bcodeArr[BCODE_ARRAY_LENGTH - 3];
destArray[I2++] = key(2);
if (2 < cur_salt->l)
destArray[I2++] = cur_salt->s[I3++];
destArray[I2] = bcodeArr[I2 - 3 - I3];
destArray[++I2] = 0; I2++;
if (temp_key[DEFAULT_OFFSET - 3] & 0x01)
destArray[I2++] = bcodeArr[BCODE_ARRAY_LENGTH - 4];
destArray[I2++] = key(3);
destArray[I2] = bcodeArr[I2 - 4 - I3];
destArray[++I2] = 0; I2++;
if (temp_key[DEFAULT_OFFSET - 4] & 0x01)
destArray[I2++] = bcodeArr[BCODE_ARRAY_LENGTH - 5];
destArray[I2++] = key(4);
destArray[I2] = bcodeArr[I2 - 5 - I3];
destArray[++I2] = 0; I2++;
if (temp_key[DEFAULT_OFFSET - 5] & 0x01)
destArray[I2++] = bcodeArr[BCODE_ARRAY_LENGTH - 6];
destArray[I2++] = key(5);
destArray[I2] = bcodeArr[I2 - 6 - I3];
destArray[++I2] = 0; I2++;
if (6 < len) {
if (temp_key[DEFAULT_OFFSET - 6] & 0x01)
destArray[I2++] = bcodeArr[BCODE_ARRAY_LENGTH - 7];
destArray[I2++] = key(6); I1++;
}
}
destArray[I2] = bcodeArr[I2 - I1 - I3];
destArray[++I2] = 0; I2++;
} else {
I1 = I3 = 1;
}
// End of unrolling. Now the remaining bytes
while(I2 < sum20) {
if (I1 < len) {
if (temp_key[DEFAULT_OFFSET - I1] & 0x01)
destArray[I2++] = bcodeArr[BCODE_ARRAY_LENGTH - I1 - 1];
destArray[I2++] = key(I1); I1++;
}
if (I3 < cur_salt->l)
destArray[I2++] = cur_salt->s[I3++];
destArray[I2] = bcodeArr[I2 - I1 - I3];
destArray[++I2] = 0; I2++;
}
#endif
#if SIMD_COEF_32
// This may be unaligned here, but after the aligned vector buffer
// transfer, we will have no junk left from loop overrun
memcpy(&destArray[sum20], "\x80\0\0\0", 4); // this might be a source of BE alignment problems.
#endif
return sum20;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
#if SIMD_COEF_32
#if defined(_OPENMP)
int t;
#pragma omp parallel for
for (t = 0; t < omp_t; t++)
#define ti (t*NBKEYS+index)
#else
#define t 0
#define ti index
#endif
{
unsigned int index, i;
for (index = 0; index < NBKEYS; index++) {
int len;
if ((len = keyLen[ti]) < 0) {
unsigned char *key;
// Load key into vector buffer
len = 0;
key = (unsigned char*)saved_plain[ti];
while (*key)
{
saved_key[GETPOS(len, ti)] =
transtable[*key++];
len++;
}
// Back-out of trailing spaces
while(len && *--key == ' ')
{
len--;
saved_key[GETPOS(len, ti)] = 0;
}
keyLen[ti] = len;
}
// Prepend the salt
for (i = 0; i < cur_salt->l; i++)
saved_key[GETPOS((len + i), ti)] =
cur_salt->s[i];
saved_key[GETPOS((len + i), ti)] = 0x80;
((unsigned int *)saved_key)[14*SIMD_COEF_32 + (ti&(SIMD_COEF_32-1)) + (unsigned int)ti/SIMD_COEF_32*16*SIMD_COEF_32] = (len + i) << 3;
// Clean rest of buffer
for (i = i + len + 1; i <= clean_pos[ti]; i++)
saved_key[GETPOS(i, ti)] = 0;
clean_pos[ti] = len + cur_salt->l;
}
SIMDmd5body(&saved_key[t*NBKEYS*64],
(unsigned int*)&crypt_key[t*NBKEYS*16], NULL, SSEi_MIXED_IN);
for (i = 0; i < SIMD_PARA_MD5; i++)
memset(&interm_key[t*64*NBKEYS+i*64*SIMD_COEF_32+32*SIMD_COEF_32], 0, 32*SIMD_COEF_32);
for (index = 0; index < NBKEYS; index++) {
unsigned int sum20;
// Note: without the union (just type-casting to uint32_t*), we saw weird
// problems when compiling for ppc64 (BE). The same has been seen elsewhere
// when type-casting was used. Using a union solved the problem fully.
union {
unsigned char temp_key[BINARY_SIZE*2];
uint32_t temp_keyw[BINARY_SIZE/2];
} x;
uint32_t destArray[TEMP_ARRAY_SIZE / 4];
const unsigned int *sw;
unsigned int *dw;
// Temporary flat copy of crypt
sw = (unsigned int*)&crypt_key[GETOUTPOS(0, ti)];
for (i = 0; i < 4; i++, sw += SIMD_COEF_32)
#if ARCH_LITTLE_ENDIAN
x.temp_keyw[i] = *sw;
#else
x.temp_keyw[i] = JOHNSWAP(*sw);
#endif
//now: walld0rf-magic [tm], (c), <g>
sum20 = walld0rf_magic(ti, x.temp_key, (unsigned char*)destArray);
// Vectorize a word at a time
#if ARCH_LITTLE_ENDIAN
dw = (unsigned int*)&interm_key[GETPOS(0, ti)];
for (i = 0;i <= sum20; i += 4, dw += SIMD_COEF_32)
*dw = destArray[i >> 2];
#else
dw = (unsigned int*)&interm_key[GETPOS(3, ti)];
for (i = 0;i <= sum20; i += 4, dw += SIMD_COEF_32)
*dw = JOHNSWAP(destArray[i >> 2]);
#endif
((unsigned int *)interm_key)[14*SIMD_COEF_32 + (ti&(SIMD_COEF_32-1)) + (unsigned int)ti/SIMD_COEF_32*16*SIMD_COEF_32] = sum20 << 3;
}
SIMDmd5body(&interm_key[t*NBKEYS*64],
(unsigned int*)&crypt_key[t*NBKEYS*16], NULL, SSEi_MIXED_IN);
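// Explanatory note: fold the 16-byte MD5 digest down to the 8-byte BCODE
// result by XORing its second half onto its first half (the vectorized
// equivalent of final_key[i] ^ final_key[i + 8] in the scalar path below).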
for (index = 0; index < NBKEYS; index++) {
*(uint32_t*)&crypt_key[GETOUTPOS(0, ti)] ^= *(uint32_t*)&crypt_key[GETOUTPOS(8, ti)];
*(uint32_t*)&crypt_key[GETOUTPOS(4, ti)] ^= *(uint32_t*)&crypt_key[GETOUTPOS(12, ti)];
}
}
#else
#ifdef _OPENMP
int t;
#pragma omp parallel for
for (t = 0; t < count; t++)
#else
#define t 0
#endif
{
unsigned char temp_key[BINARY_SIZE*2];
unsigned char final_key[BINARY_SIZE*2];
unsigned int i;
unsigned int sum20;
unsigned char destArray[TEMP_ARRAY_SIZE];
MD5_CTX ctx;
if (keyLen[t] < 0) {
keyLen[t] = strlen(saved_plain[t]);
// Back-out of trailing spaces
while (keyLen[t] && saved_plain[t][keyLen[t] - 1] == ' ')
saved_plain[t][--keyLen[t]] = 0;
for (i = 0; i < keyLen[t]; i++)
saved_key[t][i] = transtable[ARCH_INDEX(saved_plain[t][i])];
}
MD5_Init(&ctx);
MD5_Update(&ctx, saved_key[t], keyLen[t]);
MD5_Update(&ctx, cur_salt->s, cur_salt->l);
MD5_Final(temp_key,&ctx);
//now: walld0rf-magic [tm], (c), <g>
sum20 = walld0rf_magic(t, temp_key, destArray);
MD5_Init(&ctx);
MD5_Update(&ctx, destArray, sum20);
MD5_Final(final_key, &ctx);
for (i = 0; i < 8; i++)
((char*)crypt_key[t])[i] = final_key[i + 8] ^ final_key[i];
}
#endif
return count;
#undef t
#undef ti
}
static void *get_binary(char *ciphertext)
{
static uint32_t binary[BINARY_SIZE / sizeof(uint32_t)];
char *realcipher = (char*)binary;
int i;
char* newCiphertextPointer;
newCiphertextPointer = strrchr(ciphertext, '$') + 1;
for (i=0;i<BINARY_SIZE;i++)
{
realcipher[i] = atoi16[ARCH_INDEX(newCiphertextPointer[i*2])]*16 + atoi16[ARCH_INDEX(newCiphertextPointer[i*2+1])];
}
#if !ARCH_LITTLE_ENDIAN && defined (SIMD_COEF_32)
alter_endianity(realcipher, BINARY_SIZE);
#endif
return (void *)realcipher;
}
// Salt is already trimmed and 8-bit converted in split()
static void *get_salt(char *ciphertext)
{
int i;
static struct saltstruct out;
/* We don't care about trailing garbage, but loader does */
memset(out.s, 0, sizeof(out.s));
out.l = (int)(strrchr(ciphertext, '$') - ciphertext);
for (i = 0; i < out.l; ++i)
out.s[i] = transtable[ARCH_INDEX(ciphertext[i])];
return &out;
}
// Here, we remove any salt padding, trim it to 12 bytes
// and finally replace any 8-bit character with '^'
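// Illustrative (hypothetical) example: a field-padded input such as
// "VAN         $487A2A40A7BA2258" would be canonicalized by split() to
// "VAN$487A2A40A7BA2258": trailing salt padding removed, the salt capped at
// SALT_LENGTH (12) bytes, and any 8-bit salt byte replaced with '^'.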
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
static char out[CIPHERTEXT_LENGTH + 1];
char *p;
int i;
p = strrchr(ciphertext, '$');
i = (int)(p - ciphertext) - 1;
while (ciphertext[i] == ' ' || i >= SALT_LENGTH)
i--;
i++;
memset(out, 0, sizeof(out));
memcpy(out, ciphertext, i);
strnzcpy(&out[i], p, CIPHERTEXT_LENGTH + 1 - i);
p = &out[i];
while(--p >= out)
if (*p & 0x80)
*p = '^';
return out;
}
#define COMMON_GET_HASH_SIMD32 4
#define COMMON_GET_HASH_VAR crypt_key
#include "common-get-hash.h"
// Public domain hash function by DJ Bernstein
static int salt_hash(void *salt)
{
struct saltstruct *s = (struct saltstruct*)salt;
unsigned int hash = 5381;
unsigned int i;
for (i = 0; i < s->l; i++)
hash = ((hash << 5) + hash) ^ s->s[i];
return hash & (SALT_HASH_SIZE - 1);
}
struct fmt_main fmt_sapB = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_TRUNC | FMT_OMP | FMT_8_BIT,
{ NULL },
{ NULL },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
test.c | #define N 1024
#define _GNU_SOURCE
#include <link.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
// If one of the libomptarget plugins has been loaded, it means we are running
// with libomptarget. libomptarget.so is also used by LOMP, so we need to check
// for libomptarget.rtl.*.
/*
static int isLibomptarget(struct dl_phdr_info *info, size_t size,
void *data) {
if (strstr(info->dlpi_name, "libomptarget.rtl") != NULL) {
*((int *) data) = 1;
return 1;
}
return 0;
}
*/
#define TEST_NESTED 1
#define TEST_CONCURRENT 1
#define TEST_CONCURRENT_TF 1
#define TEST_PARALLEL1 1
int a[N], b[N];
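// Note: every test below splits the update a[j] = b[j] + 1 across four
// deferred "target nowait" regions (one quarter of [0,N) each) and relies on
// the trailing taskwait to synchronize before the host verifies a[i] == i + 1.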
int main() {
int i;
int error, totError = 0;
#if TEST_NESTED
for (i=0; i<N; i++) a[i] = b[i] = i;
#pragma omp target data map(to:b) map(from: a)
{
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=0; j<N/4; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=N/4; j<N/2; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=N/2; j<3*(N/4); j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=3*(N/4); j<N; j++) a[j] = b[j]+1;
}
#pragma omp taskwait
}
error=0;
for (i=0; i<N; i++) {
if (a[i] != i+1) printf("%d: error %d != %d, error %d\n", i, a[i], i+1, ++error);
}
if (! error) {
printf(" test with nested maps completed successfully\n");
} else {
printf(" test with nested maps completed with %d error(s)\n", error);
totError++;
}
#endif
#if TEST_CONCURRENT_TF
for (i=0; i<N; i++) a[i] = b[i] = i;
#pragma omp target nowait map(tofrom:a, b)
{
int j;
for(j=0; j<N/4; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(tofrom:a, b)
{
int j;
for(j=N/4; j<N/2; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(tofrom:a, b)
{
int j;
for(j=N/2; j<3*(N/4); j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(tofrom:a, b)
{
int j;
for(j=3*(N/4); j<N; j++) a[j] = b[j]+1;
}
#pragma omp taskwait
error=0;
for (i=0; i<N; i++) {
if (a[i] != i+1) printf("%d: error %d != %d, error %d\n", i, a[i], i+1, ++error);
}
if (! error) {
printf(" test with concurrent with to/from maps completed successfully\n");
} else {
printf(" test with concurrent with to/from maps completed with %d error(s)\n", error);
totError++;
}
#endif
#if TEST_CONCURRENT
// This test cannot run correctly with libomptarget because the library does
// not support proper async. Fake the output in this case.
//int libomptargetInUse = 0;
//dl_iterate_phdr(isLibomptarget, &libomptargetInUse);
//if (libomptargetInUse) {
// printf(" test with concurrent maps completed successfully\n");
//} else {
// Run actual test
for (i=0; i<N; i++) a[i] = b[i] = i;
#pragma omp target enter data map(to:a)
#pragma omp target nowait map(to:b)
{
int j;
for(j=0; j<N/4; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b)
{
int j;
for(j=N/4; j<N/2; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b)
{
int j;
for(j=N/2; j<3*(N/4); j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b)
{
int j;
for(j=3*(N/4); j<N; j++) a[j] = b[j]+1;
}
#pragma omp target exit data map(from: a)
#pragma omp taskwait
error=0;
for (i=0; i<N; i++) {
if (a[i] != i+1) printf("%d: error %d != %d, error %d\n", i, a[i], i+1, ++error);
}
if (! error) {
printf(" test with concurrent maps completed successfully\n");
} else {
printf(" test with concurrent maps completed with %d error(s)\n", error);
totError++;
}
//}
#endif
#if TEST_PARALLEL1
for (i=0; i<N; i++) a[i] = b[i] = i;
#pragma omp parallel num_threads(1)
{
#pragma omp target data map(to:b) map(from: a)
{
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=0; j<N/4; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=N/4; j<N/2; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=N/2; j<3*(N/4); j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=3*(N/4); j<N; j++) a[j] = b[j]+1;
}
#pragma omp taskwait
}
}
error=0;
for (i=0; i<N; i++) {
if (a[i] != i+1) printf("%d: error %d != %d, error %d\n", i, a[i], i+1, ++error);
}
if (! error) {
printf(" test with nested maps and Parallel 1 thread completed successfully\n");
} else {
printf(" test with nested maps and Parallel 1 thread completed with %d error(s)\n", error);
totError++;
}
#endif
printf("completed with %d errors\n", totError);
return totError;
}
|
optimizer.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include "cint.h"
#include "cvhf.h"
#include "optimizer.h"
#define MAX(I,J) ((I) > (J) ? (I) : (J))
int int2e_sph();
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
int *atm, int natm, int *bas, int nbas, double *env);
void CVHFinit_optimizer(CVHFOpt **opt, int *atm, int natm,
int *bas, int nbas, double *env)
{
CVHFOpt *opt0 = (CVHFOpt *)malloc(sizeof(CVHFOpt));
opt0->nbas = nbas;
opt0->direct_scf_cutoff = 1e-14;
opt0->q_cond = NULL;
opt0->dm_cond = NULL;
opt0->fprescreen = &CVHFnoscreen;
opt0->r_vkscreen = &CVHFr_vknoscreen;
*opt = opt0;
}
void CVHFdel_optimizer(CVHFOpt **opt)
{
CVHFOpt *opt0 = *opt;
if (!opt0) {
return;
}
if (opt0->q_cond) {
free(opt0->q_cond);
}
if (opt0->dm_cond) {
free(opt0->dm_cond);
}
free(opt0);
*opt = NULL;
}
int CVHFnoscreen(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
return 1;
}
int CVHFnr_schwarz_cond(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
if (!opt) {
return 1;
}
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int n = opt->nbas;
assert(opt->q_cond);
assert(i < n);
assert(j < n);
assert(k < n);
assert(l < n);
double qijkl = opt->q_cond[i*n+j] * opt->q_cond[k*n+l];
return qijkl > opt->direct_scf_cutoff;
}
int CVHFnrs8_prescreen(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
if (!opt) {
return 1; // no screen
}
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int n = opt->nbas;
double *q_cond = opt->q_cond;
double *dm_cond = opt->dm_cond;
assert(q_cond);
assert(dm_cond);
assert(i < n);
assert(j < n);
assert(k < n);
assert(l < n);
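/* Explanatory note: q_cond[i*n+j] caches sqrt(max|(ij|ij)|) for the shell
* pair (see CVHFsetnr_direct_scf below), so by the Schwarz inequality qijkl
* bounds |(ij|kl)|. The quartet is kept only if, combined with the largest
* density-matrix element it can touch (dm_cond; the factor 4 accounts for the
* Coulomb-type contractions under 8-fold permutation symmetry), its Fock
* contribution can exceed direct_scf_cutoff. */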
double qijkl = q_cond[i*n+j] * q_cond[k*n+l];
double dmin = opt->direct_scf_cutoff / qijkl;
return qijkl > opt->direct_scf_cutoff
&&((4*dm_cond[j*n+i] > dmin)
|| (4*dm_cond[l*n+k] > dmin)
|| ( dm_cond[j*n+k] > dmin)
|| ( dm_cond[j*n+l] > dmin)
|| ( dm_cond[i*n+k] > dmin)
|| ( dm_cond[i*n+l] > dmin));
}
// return flag to decide whether transpose01324
int CVHFr_vknoscreen(int *shls, CVHFOpt *opt,
double **dms_cond, int n_dm, double *dm_atleast,
int *atm, int *bas, double *env)
{
int idm;
for (idm = 0; idm < n_dm; idm++) {
dms_cond[idm] = NULL;
}
*dm_atleast = 0;
return 1;
}
void CVHFset_direct_scf_cutoff(CVHFOpt *opt, double cutoff)
{
opt->direct_scf_cutoff = cutoff;
}
double CVHFget_direct_scf_cutoff(CVHFOpt *opt)
{
return opt->direct_scf_cutoff;
}
void CVHFsetnr_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
/* This memory is released in CVHFdel_optimizer; we don't know
* why valgrind reports a memory leak here */
if (opt->q_cond) {
free(opt->q_cond);
}
opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas);
int shls_slice[] = {0, nbas};
const int cache_size = GTOmax_cache_size(intor, shls_slice, 1,
atm, natm, bas, nbas, env);
#pragma omp parallel default(none) \
shared(opt, intor, cintopt, ao_loc, atm, natm, bas, nbas, env, cache_size)
{
double qtmp, tmp;
int ij, i, j, di, dj, ish, jsh;
int shls[4];
double *cache = malloc(sizeof(double) * cache_size);
di = 0;
for (ish = 0; ish < nbas; ish++) {
dj = ao_loc[ish+1] - ao_loc[ish];
di = MAX(di, dj);
}
double *buf = malloc(sizeof(double) * di*di*di*di);
#pragma omp for schedule(dynamic, 4)
for (ij = 0; ij < nbas*(nbas+1)/2; ij++) {
ish = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
jsh = ij - ish*(ish+1)/2;
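/* Worked example (illustrative): ij enumerates lower-triangular shell pairs
* packed as ij = ish*(ish+1)/2 + jsh; the sqrt expression above inverts that
* packing. E.g. ij = 7: sqrt(14.25) - 0.5 ~ 3.27 -> ish = 3, jsh = 7 - 6 = 1.
* The +1e-7 guards against round-off pushing exact values just below the
* integer boundary. */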
di = ao_loc[ish+1] - ao_loc[ish];
dj = ao_loc[jsh+1] - ao_loc[jsh];
shls[0] = ish;
shls[1] = jsh;
shls[2] = ish;
shls[3] = jsh;
qtmp = 1e-100;
if (0 != (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
cintopt, cache)) {
for (i = 0; i < di; i++) {
for (j = 0; j < dj; j++) {
tmp = fabs(buf[i+di*j+di*dj*i+di*dj*di*j]);
qtmp = MAX(qtmp, tmp);
} }
qtmp = sqrt(qtmp);
}
opt->q_cond[ish*nbas+jsh] = qtmp;
opt->q_cond[jsh*nbas+ish] = qtmp;
}
free(buf);
free(cache);
}
}
void CVHFsetnr_direct_scf_dm(CVHFOpt *opt, double *dm, int nset, int *ao_loc,
int *atm, int natm, int *bas, int nbas, double *env)
{
if (opt->dm_cond) { // do NOT reuse opt->dm_cond because nset may differ between calls
free(opt->dm_cond);
}
opt->dm_cond = (double *)malloc(sizeof(double) * nbas*nbas);
memset(opt->dm_cond, 0, sizeof(double)*nbas*nbas);
const int nao = ao_loc[nbas];
double dmax, tmp;
int i, j, ish, jsh;
int iset;
double *pdm;
for (ish = 0; ish < nbas; ish++) {
for (jsh = 0; jsh <= ish; jsh++) {
dmax = 0;
for (iset = 0; iset < nset; iset++) {
pdm = dm + nao*nao*iset;
for (i = ao_loc[ish]; i < ao_loc[ish+1]; i++) {
for (j = ao_loc[jsh]; j < ao_loc[jsh+1]; j++) {
// symmetrize dm_cond because nrs8_prescreen only tests the lower (or upper)
// triangular part of dm_cond. If density matrix is not hermitian, some
// integrals may be skipped incorrectly.
tmp = .5 * (fabs(pdm[i*nao+j]) + fabs(pdm[j*nao+i]));
dmax = MAX(dmax, tmp);
} }
}
opt->dm_cond[ish*nbas+jsh] = dmax;
opt->dm_cond[jsh*nbas+ish] = dmax;
} }
}
/*
*************************************************
*/
void CVHFnr_optimizer(CVHFOpt **vhfopt, int (*intor)(), CINTOpt *cintopt,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
CVHFinit_optimizer(vhfopt, atm, natm, bas, nbas, env);
(*vhfopt)->fprescreen = &CVHFnrs8_prescreen;
CVHFsetnr_direct_scf(*vhfopt, intor, cintopt, ao_loc,
atm, natm, bas, nbas, env);
}
|
omp-single-2.c | #include <omp.h>
extern void abort (void);
struct X
{
int a;
char b;
int c;
};
int main()
{
int i = 0;
struct X x;
int bad = 0;
#pragma omp parallel private (i, x) shared (bad)
{
i = 5;
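/* Note: copyprivate broadcasts the single-executing thread's values of the
listed variables (here i and x) to the private copies of every other thread
in the team when the single construct ends, which is what the check below
relies on. */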
#pragma omp single copyprivate (i, x)
{
i++;
x.a = 23;
x.b = 42;
x.c = 26;
}
if (i != 6 || x.a != 23 || x.b != 42 || x.c != 26)
bad = 1;
}
if (bad)
abort ();
return 0;
}
|
GB_binop__cmplx_fp32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__cmplx_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__cmplx_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__cmplx_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__cmplx_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__cmplx_fp32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__cmplx_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__cmplx_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__cmplx_fp32)
// C=scalar+B GB (_bind1st__cmplx_fp32)
// C=scalar+B' GB (_bind1st_tran__cmplx_fp32)
// C=A+scalar GB (_bind2nd__cmplx_fp32)
// C=A'+scalar GB (_bind2nd_tran__cmplx_fp32)
// C type: GxB_FC32_t
// A type: float
// A pattern? 0
// B type: float
// B pattern? 0
// BinaryOp: cij = GxB_CMPLXF (aij, bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
GxB_FC32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GxB_CMPLXF (GBX (Ax, pA, A_iso), 0)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GxB_CMPLXF (GBX (Bx, pB, B_iso), 0)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GxB_CMPLXF (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_CMPLX || GxB_NO_FP32 || GxB_NO_CMPLX_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__cmplx_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__cmplx_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__cmplx_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__cmplx_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
float alpha_scalar ;
float beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((float *) alpha_scalar_in)) ;
beta_scalar = (*((float *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__cmplx_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__cmplx_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__cmplx_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__cmplx_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__cmplx_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
Cx [p] = GxB_CMPLXF (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__cmplx_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
Cx [p] = GxB_CMPLXF (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = GxB_CMPLXF (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__cmplx_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = GxB_CMPLXF (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__cmplx_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
yolov2.h | #ifndef YOLOV2_H
#define YOLOV2_H
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <math.h>
#include <fcntl.h>
#include <string.h>
#include <assert.h>
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
//#include "yolo_hls.h"
#ifndef FLT_MAX
#define FLT_MAX 3.402823466e+38F
#endif
typedef enum{
LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN
} ACTIVATION;
typedef enum {
CONVOLUTIONAL,
DECONVOLUTIONAL,
CONNECTED,
MAXPOOL,
SOFTMAX,
DETECTION,
DROPOUT,
CROP,
ROUTE,
COST,
NORMALIZATION,
AVGPOOL,
LOCAL,
SHORTCUT,
ACTIVE,
RNN,
GRU,
LSTM,
CRNN,
BATCHNORM,
NETWORK,
XNOR,
REGION,
YOLO,
REORG,
UPSAMPLE,
LOGXENT,
L2NORM,
BLANK
} LAYER_TYPE;
struct network;
typedef struct network network;
struct layer;
typedef struct layer layer;
struct layer{
LAYER_TYPE type;
ACTIVATION activation;
void (*forward) (struct layer, struct network);
int batch_normalize;
int shortcut;
int batch;
int forced;
int flipped;
int inputs;
int outputs;
int nweights;
int nbiases;
int extra;
int truths;
int h,w,c;
int out_h, out_w, out_c;
int n;
int max_boxes;
int groups;
int size;
int side;
int stride;
int reverse;
int flatten;
int spatial;
int pad;
int sqrt;
int flip;
int index;
int binary;
int xnor;
int steps;
int hidden;
int truth;
float smooth;
float dot;
float angle;
float jitter;
float saturation;
float exposure;
float shift;
float ratio;
float learning_rate_scale;
float clip;
int softmax;
int classes;
int coords;
int background;
int rescore;
int objectness;
int joint;
int noadjust;
int reorg;
int log;
int tanh;
int *mask;
int total;
float alpha;
float beta;
float kappa;
float coord_scale;
float object_scale;
float noobject_scale;
float mask_scale;
float class_scale;
int bias_match;
int random;
float ignore_thresh;
float truth_thresh;
float thresh;
float focus;
int classfix;
int absolute;
int onlyforward;
int stopbackward;
// int dontload;
int dontsave;
// int dontloadscales;
float temperature;
float probability;
float scale;
char * cweights;
int * indexes;
int * input_layers;
int * input_sizes;
int * map;
float * rand;
float * cost;
float * state;
float * prev_state;
float * forgot_state;
float * forgot_delta;
float * state_delta;
float * combine_cpu;
float * combine_delta_cpu;
float * concat;
float * concat_delta;
float * binary_weights;
float * biases;
float * bias_updates;
float * scales;
float * scale_updates;
float * weights;
float * weight_updates;
float * delta;
float * output;
float * loss;
float * squared;
float * norms;
float * spatial_mean;
float * mean;
float * variance;
float * mean_delta;
float * variance_delta;
float * rolling_mean;
float * rolling_variance;
float * x;
float * x_norm;
float * m;
float * v;
float * bias_m;
float * bias_v;
float * scale_m;
float * scale_v;
float *z_cpu;
float *r_cpu;
float *h_cpu;
float * prev_state_cpu;
float *temp_cpu;
float *temp2_cpu;
float *temp3_cpu;
float *dh_cpu;
float *hh_cpu;
float *prev_cell_cpu;
float *cell_cpu;
float *f_cpu;
float *i_cpu;
float *g_cpu;
float *o_cpu;
float *c_cpu;
float *dc_cpu;
float * binary_input;
struct layer *input_layer;
struct layer *self_layer;
struct layer *output_layer;
struct layer *reset_layer;
struct layer *update_layer;
struct layer *state_layer;
struct layer *input_gate_layer;
struct layer *state_gate_layer;
struct layer *input_save_layer;
struct layer *state_save_layer;
struct layer *input_state_layer;
struct layer *state_state_layer;
struct layer *input_z_layer;
struct layer *state_z_layer;
struct layer *input_r_layer;
struct layer *state_r_layer;
struct layer *input_h_layer;
struct layer *state_h_layer;
struct layer *wz;
struct layer *uz;
struct layer *wr;
struct layer *ur;
struct layer *wh;
struct layer *uh;
struct layer *uo;
struct layer *wo;
struct layer *uf;
struct layer *wf;
struct layer *ui;
struct layer *wi;
struct layer *ug;
struct layer *wg;
//tree *softmax_tree;
size_t workspace_size;
};
void free_layer(layer l)
{
if(l.cweights) free(l.cweights);
if(l.indexes) free(l.indexes);
if(l.input_layers) free(l.input_layers);
if(l.input_sizes) free(l.input_sizes);
if(l.map) free(l.map);
if(l.rand) free(l.rand);
if(l.cost) free(l.cost);
if(l.state) free(l.state);
if(l.prev_state) free(l.prev_state);
if(l.forgot_state) free(l.forgot_state);
if(l.forgot_delta) free(l.forgot_delta);
if(l.state_delta) free(l.state_delta);
if(l.concat) free(l.concat);
if(l.concat_delta) free(l.concat_delta);
if(l.binary_weights) free(l.binary_weights);
if(l.biases) free(l.biases);
if(l.bias_updates) free(l.bias_updates);
if(l.scales) free(l.scales);
if(l.scale_updates) free(l.scale_updates);
if(l.weights) free(l.weights);
if(l.weight_updates) free(l.weight_updates);
if(l.delta) free(l.delta);
if(l.output) free(l.output);
if(l.squared) free(l.squared);
if(l.norms) free(l.norms);
if(l.spatial_mean) free(l.spatial_mean);
if(l.mean) free(l.mean);
if(l.variance) free(l.variance);
if(l.mean_delta) free(l.mean_delta);
if(l.variance_delta) free(l.variance_delta);
if(l.rolling_mean) free(l.rolling_mean);
if(l.rolling_variance) free(l.rolling_variance);
if(l.x) free(l.x);
if(l.x_norm) free(l.x_norm);
if(l.m) free(l.m);
if(l.v) free(l.v);
if(l.z_cpu) free(l.z_cpu);
if(l.r_cpu) free(l.r_cpu);
if(l.h_cpu) free(l.h_cpu);
if(l.binary_input) free(l.binary_input);
}
//void free_layer(layer);
typedef enum {
CONSTANT, STEP, EXP, POLY, STEPS, SIG, RANDOM
} learning_rate_policy;
typedef struct network{
int n;
int batch;
size_t *seen;
int *t;
float epoch;
int subdivisions;
layer *layers;
float *output;
learning_rate_policy policy;
float learning_rate;
float momentum;
float decay;
float gamma;
float scale;
float power;
int time_steps;
int step;
int max_batches;
float *scales;
int *steps;
int num_steps;
int burn_in;
int adam;
float B1;
float B2;
float eps;
int inputs;
int outputs;
int truths;
int notruth;
int h, w, c;
int max_crop;
int min_crop;
float max_ratio;
float min_ratio;
int center;
float angle;
float aspect;
float exposure;
float saturation;
float hue;
int random;
int gpu_index;
// tree *hierarchy;
float *input;
float *truth;
float *delta;
float *workspace;
int train;
int index;
float *cost;
float clip;
} network;
network *make_network(int n);
layer get_network_output_layer(network *net);
typedef struct {
int w;
int h;
float scale;
float rad;
float dx;
float dy;
float aspect;
} augment_args;
typedef struct {
int w;
int h;
int c;
float *data;
} image;
typedef struct{
float x, y, w, h;
} box;
typedef struct detection{
box bbox;
int classes;
float *prob;
float *mask;
float objectness;
int sort_class;
} detection;
typedef struct matrix{
int rows, cols;
float **vals;
} matrix;
typedef struct{
int w, h;
matrix X;
matrix y;
int shallow;
int *num_boxes;
box **boxes;
} data;
typedef enum {
CLASSIFICATION_DATA, DETECTION_DATA, CAPTCHA_DATA, REGION_DATA, IMAGE_DATA, COMPARE_DATA, WRITING_DATA, SWAG_DATA, TAG_DATA, OLD_CLASSIFICATION_DATA, STUDY_DATA, DET_DATA, SUPER_DATA, LETTERBOX_DATA, REGRESSION_DATA, SEGMENTATION_DATA, INSTANCE_DATA
} data_type;
typedef struct load_args{
int threads;
char **paths;
char *path;
int n;
int m;
char **labels;
int h;
int w;
int out_w;
int out_h;
int nh;
int nw;
int num_boxes;
int min, max, size;
int classes;
int background;
int scale;
int center;
int coords;
float jitter;
float angle;
float aspect;
float saturation;
float exposure;
float hue;
data *d;
image *im;
image *resized;
data_type type;
// tree *hierarchy;
} load_args;
typedef struct{
int id;
float x,y,w,h;
float left, right, top, bottom;
} box_label;
//network *load_network(char *cfg, char *weights, int clear);
//load_args get_base_args(network *net);
//void free_data(data d);
typedef struct{
char *key;
char *val;
int used;
} kvp;
typedef struct node{
void *val;
struct node *next;
struct node *prev;
} node;
typedef struct list{
int size;
node *front;
node *back;
} list;
void error(const char *s)
{
perror(s);
assert(0);
exit(-1);
}
void malloc_error()
{
fprintf(stderr, "Malloc error\n");
exit(-1);
}
void file_error(char *s)
{
fprintf(stderr, "Couldn't open file: %s\n", s);
exit(1);
}
/////////////////list begin
list *make_list()
{
list *l = (list *)malloc(sizeof(list));
l->size = 0;
l->front = 0;
l->back = 0;
return l;
}
void *list_pop(list *l){
if(!l->back) return 0;
node *b = l->back;
void *val = b->val;
l->back = b->prev;
if(l->back) l->back->next = 0;
free(b);
--l->size;
return val;
}
void list_insert(list *l, void *val)
{
node *new_node = (node *)malloc(sizeof(node));
new_node->val = val;
new_node->next = 0;
if(!l->back){
l->front = new_node;
new_node->prev = 0;
}else{
l->back->next = new_node;
new_node->prev = l->back;
}
l->back = new_node;
++l->size;
}
void free_node(node *n)
{
node *next;
while(n) {
next = n->next;
free(n);
n = next;
}
}
void free_list(list *l)
{
free_node(l->front);
free(l);
}
void free_list_contents(list *l)
{
node *n = l->front;
while(n){
free(n->val);
n = n->next;
}
}
void **list_to_array(list *l)
{
void **a = (void **)calloc(l->size, sizeof(void*));
int count = 0;
node *n = l->front;
while(n){
a[count++] = n->val;
n = n->next;
}
return a;
}
/////////////////list end
/////////////////////utils begin
void del_arg(int argc, char **argv, int index)
{
int i;
for(i = index; i < argc-1; ++i) argv[i] = argv[i+1];
argv[i] = 0;
}
int find_arg(int argc, char* argv[], char *arg)
{
int i;
for(i = 0; i < argc; ++i) {
if(!argv[i]) continue;
if(0==strcmp(argv[i], arg)) {
del_arg(argc, argv, i);
return 1;
}
}
return 0;
}
int find_int_arg(int argc, char **argv, char *arg, int def)
{
int i;
for(i = 0; i < argc-1; ++i){
if(!argv[i]) continue;
if(0==strcmp(argv[i], arg)){
def = atoi(argv[i+1]);
del_arg(argc, argv, i);
del_arg(argc, argv, i);
break;
}
}
return def;
}
float find_float_arg(int argc, char **argv, char *arg, float def)
{
int i;
for(i = 0; i < argc-1; ++i){
if(!argv[i]) continue;
if(0==strcmp(argv[i], arg)){
def = atof(argv[i+1]);
del_arg(argc, argv, i);
del_arg(argc, argv, i);
break;
}
}
return def;
}
char *find_char_arg(int argc, char **argv, char *arg, char *def)
{
int i;
for(i = 0; i < argc-1; ++i){
if(!argv[i]) continue;
if(0==strcmp(argv[i], arg)){
def = argv[i+1];
del_arg(argc, argv, i);
del_arg(argc, argv, i);
break;
}
}
return def;
}
unsigned char *read_file(char *filename)
{
FILE *fp = fopen(filename, "rb");
if(!fp) file_error(filename);
size_t size;
fseek(fp, 0, SEEK_END);
size = ftell(fp);
fseek(fp, 0, SEEK_SET);
unsigned char *text = (unsigned char *)calloc(size+1, sizeof(unsigned char));
fread(text, 1, size, fp);
fclose(fp);
return text;
}
list *split_str(char *s, char delim)
{
size_t i;
size_t len = strlen(s);
list *l = make_list();
list_insert(l, s);
for(i = 0; i < len; ++i){
if(s[i] == delim){
s[i] = '\0';
list_insert(l, &(s[i+1]));
}
}
return l;
}
void strip(char *s)
{
size_t i;
size_t len = strlen(s);
size_t offset = 0;
for(i = 0; i < len; ++i){
char c = s[i];
if(c==' '||c=='\t'||c=='\n') ++offset;
else s[i-offset] = c;
}
s[len-offset] = '\0';
}
void strip_char(char *s, char bad)
{
size_t i;
size_t len = strlen(s);
size_t offset = 0;
for(i = 0; i < len; ++i){
char c = s[i];
if(c==bad) ++offset;
else s[i-offset] = c;
}
s[len-offset] = '\0';
}
void free_ptrs(void **ptrs, int n)
{
int i;
for(i = 0; i < n; ++i) free(ptrs[i]);
free(ptrs);
}
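// fgetl: read one whole line of arbitrary length from fp, doubling the buffer
// as needed; the trailing newline is stripped and NULL is returned at EOF.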
char *fgetl(FILE *fp)
{
if(feof(fp)) return 0;
size_t size = 512;
char *line = (char *)malloc(size*sizeof(char));
if(!fgets(line, size, fp)){
free(line);
return 0;
}
size_t curr = strlen(line);
while((line[curr-1] != '\n') && !feof(fp)){
if(curr == size-1){
size *= 2;
line = (char *)realloc(line, size*sizeof(char));
if(!line) {
printf("%ld\n", size);
malloc_error();
}
}
size_t readsize = size-curr;
if(readsize > INT_MAX) readsize = INT_MAX-1;
fgets(&line[curr], readsize, fp);
curr = strlen(line);
}
if(line[curr-1] == '\n') line[curr-1] = '\0';
return line;
}
/////////////////////utils end
////////////////////option_list begin
void option_insert(list *l, char *key, char *val)
{
kvp *p = (kvp *)malloc(sizeof(kvp));
p->key = key;
p->val = val;
p->used = 0;
list_insert(l, p);
}
int read_option(char *s, list *options)
{
size_t i;
size_t len = strlen(s);
char *val = 0;
for(i = 0; i < len; ++i){
if(s[i] == '='){
s[i] = '\0';
val = s+i+1;
break;
}
}
if(val == 0 || i == len-1) return 0;
char *key = s;
option_insert(options, key, val);
return 1;
}
void option_unused(list *l)
{
node *n = l->front;
while(n){
kvp *p = (kvp *)n->val;
if(!p->used){
fprintf(stderr, "Unused field: '%s = %s'\n", p->key, p->val);
}
n = n->next;
}
}
char *option_find(list *l, char *key)
{
node *n = l->front;
while(n){
kvp *p = (kvp *)n->val;
if(strcmp(p->key, key) == 0){
p->used = 1;
return p->val;
}
n = n->next;
}
return 0;
}
char *option_find_str(list *l, char *key, char *def)
{
char *v = option_find(l, key);
if(v) return v;
if(def) fprintf(stderr, "%s: Using default '%s'\n", key, def);
return def;
}
int option_find_int(list *l, char *key, int def)
{
char *v = option_find(l, key);
if(v) return atoi(v);
fprintf(stderr, "%s: Using default '%d'\n", key, def);
return def;
}
int option_find_int_quiet(list *l, char *key, int def)
{
char *v = option_find(l, key);
if(v) return atoi(v);
return def;
}
float option_find_float_quiet(list *l, char *key, float def)
{
char *v = option_find(l, key);
if(v) return atof(v);
return def;
}
float option_find_float(list *l, char *key, float def)
{
char *v = option_find(l, key);
if(v) return atof(v);
fprintf(stderr, "%s: Using default '%lf'\n", key, def);
return def;
}
list *read_data_cfg(char *filename)
{
FILE *file = fopen(filename, "r");
if(file == 0) file_error(filename);
char *line;
int nu = 0;
list *options = make_list();
while((line=fgetl(file)) != 0){
++ nu;
strip(line);
switch(line[0]){
case '\0':
case '#':
case ';':
free(line);
break;
default:
if(!read_option(line, options)){
fprintf(stderr, "Config file error line %d, could parse: %s\n", nu, line);
free(line);
}
break;
}
}
fclose(file);
return options;
}
///////////////////option_list end
image make_empty_image(int w, int h, int c)
{
image out;
out.data = 0;
out.h = h;
out.w = w;
out.c = c;
return out;
}
list *get_paths(char *filename)
{
char *path;
FILE *file = fopen(filename, "r");
if(!file) file_error(filename);
list *lines = make_list();
while((path=fgetl(file))){
list_insert(lines, path);
}
fclose(file);
return lines;
}
char **get_labels(char *filename)
{
list *plist = get_paths(filename);
char **labels = (char **)list_to_array(plist);
free_list(plist);
return labels;
}
image make_image(int w, int h, int c)
{
image out = make_empty_image(w,h,c);
out.data = (float *)calloc(h*w*c, sizeof(float));
return out;
}
static float get_pixel(image m, int x, int y, int c)
{
assert(x < m.w && y < m.h && c < m.c);
return m.data[c*m.h*m.w + y*m.w + x];
}
static void set_pixel(image m, int x, int y, int c, float val)
{
if (x < 0 || y < 0 || c < 0 || x >= m.w || y >= m.h || c >= m.c) return;
assert(x < m.w && y < m.h && c < m.c);
m.data[c*m.h*m.w + y*m.w + x] = val;
}
static void add_pixel(image m, int x, int y, int c, float val)
{
assert(x < m.w && y < m.h && c < m.c);
m.data[c*m.h*m.w + y*m.w + x] += val;
}
void free_image(image m)
{
if(m.data){
free(m.data);
}
}
image resize_image(image im, int w, int h)
{
image resized = make_image(w, h, im.c);
image part = make_image(w, im.h, im.c);
int r, c, k;
float w_scale = (float)(im.w - 1) / (w - 1);
float h_scale = (float)(im.h - 1) / (h - 1);
for(k = 0; k < im.c; ++k){
for(r = 0; r < im.h; ++r){
for(c = 0; c < w; ++c){
float val = 0;
if(c == w-1 || im.w == 1){
val = get_pixel(im, im.w-1, r, k);
} else {
float sx = c*w_scale;
int ix = (int) sx;
float dx = sx - ix;
val = (1 - dx) * get_pixel(im, ix, r, k) + dx * get_pixel(im, ix+1, r, k);
}
set_pixel(part, c, r, k, val);
}
}
}
for(k = 0; k < im.c; ++k){
for(r = 0; r < h; ++r){
float sy = r*h_scale;
int iy = (int) sy;
float dy = sy - iy;
for(c = 0; c < w; ++c){
float val = (1-dy) * get_pixel(part, c, iy, k);
set_pixel(resized, c, r, k, val);
}
if(r == h-1 || im.h == 1) continue;
for(c = 0; c < w; ++c){
float val = dy * get_pixel(part, c, iy+1, k);
add_pixel(resized, c, r, k, val);
}
}
}
free_image(part);
return resized;
}
void fill_image(image m, float s)
{
int i;
for(i = 0; i < m.h*m.w*m.c; ++i) m.data[i] = s;
}
void embed_image(image source, image dest, int dx, int dy)
{
int x,y,k;
for(k = 0; k < source.c; ++k){
for(y = 0; y < source.h; ++y){
for(x = 0; x < source.w; ++x){
float val = get_pixel(source, x,y,k);
set_pixel(dest, dx+x, dy+y, k, val);
}
}
}
}
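// letterbox_image: scale the image to fit inside w x h while keeping its aspect
// ratio, then paste it centered on a w x h canvas filled with gray (0.5).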
image letterbox_image(image im, int w, int h)
{
int new_w = im.w;
int new_h = im.h;
if (((float)w/im.w) < ((float)h/im.h)) {
new_w = w;
new_h = (im.h * w)/im.w;
} else {
new_h = h;
new_w = (im.w * h)/im.h;
}
image resized = resize_image(im, new_w, new_h);
image boxed = make_image(w, h, im.c);
fill_image(boxed, .5);
//int i;
//for(i = 0; i < boxed.w*boxed.h*boxed.c; ++i) boxed.data[i] = 0;
embed_image(resized, boxed, (w-new_w)/2, (h-new_h)/2);
free_image(resized);
return boxed;
}
image load_image_stb(char *filename, int channels)
{
int w, h, c;
unsigned char *data = stbi_load(filename, &w, &h, &c, channels);
if (!data) {
fprintf(stderr, "Cannot load image \"%s\"\nSTB Reason: %s\n", filename, stbi_failure_reason());
exit(1);
}
if(channels) c = channels;
int i,j,k;
image im = make_image(w, h, c);
for(k = 0; k < c; ++k){
for(j = 0; j < h; ++j){
for(i = 0; i < w; ++i){
int dst_index = i + w*j + w*h*k;
int src_index = k + c*i + c*w*j;
im.data[dst_index] = (float)data[src_index]/255.;
}
}
}
free(data);
return im;
}
void save_image_png(image im, const char *name)
{
char buff[256];
//sprintf(buff, "%s (%d)", name, windows);
sprintf(buff, "%s.png", name);
unsigned char *data = (unsigned char *)calloc(im.w*im.h*im.c, sizeof(char));
int i,k;
for(k = 0; k < im.c; ++k){
for(i = 0; i < im.w*im.h; ++i){
data[i*im.c+k] = (unsigned char) (255*im.data[i + k*im.w*im.h]);
}
}
int success = stbi_write_png(buff, im.w, im.h, im.c, data, im.w*im.c);
free(data);
if(!success) fprintf(stderr, "Failed to write image %s\n", buff);
}
image **load_alphabet()
{
int i, j;
const int nsize = 8;
image **alphabets = (image **)calloc(nsize, sizeof(image *));
for(j = 0; j < nsize; ++j){
alphabets[j] = (image *)calloc(128, sizeof(image));
for(i = 32; i < 127; ++i){
char buff[256];
sprintf(buff, "labels/%d_%d.png", i, j);
//alphabets[j][i] = load_image_color(buff, 0, 0);
alphabets[j][i] = load_image_stb(buff, 3);
}
}
return alphabets;
}
///////////////////activation begin
static inline float stair_activate(float x)
{
int n = floor(x);
if (n%2 == 0) return floor(x/2.);
else return (x - n) + floor(x/2.);
}
static inline float hardtan_activate(float x)
{
if (x < -1) return -1;
if (x > 1) return 1;
return x;
}
static inline float linear_activate(float x){return x;}
static inline float logistic_activate(float x){return 1./(1. + exp(-x));}
static inline float loggy_activate(float x){return 2./(1. + exp(-x)) - 1;}
static inline float relu_activate(float x){return x*(x>0);}
static inline float elu_activate(float x){return (x >= 0)*x + (x < 0)*(exp(x)-1);}
static inline float relie_activate(float x){return (x>0) ? x : .01*x;}
static inline float ramp_activate(float x){return x*(x>0)+.1*x;}
static inline float leaky_activate(float x){return (x>0) ? x : .1*x;}
static inline float tanh_activate(float x){return (exp(2*x)-1)/(exp(2*x)+1);}
static inline float plse_activate(float x)
{
if(x < -4) return .01 * (x + 4);
if(x > 4) return .01 * (x - 4) + 1;
return .125*x + .5;
}
static inline float lhtan_activate(float x)
{
if(x < 0) return .001*x;
if(x > 1) return .001*(x-1) + 1;
return x;
}
static inline float lhtan_gradient(float x)
{
if(x > 0 && x < 1) return 1;
return .001;
}
static inline float hardtan_gradient(float x)
{
if (x > -1 && x < 1) return 1;
return 0;
}
static inline float linear_gradient(float x){return 1;}
static inline float logistic_gradient(float x){return (1-x)*x;}
static inline float loggy_gradient(float x)
{
float y = (x+1.)/2.;
return 2*(1-y)*y;
}
static inline float stair_gradient(float x)
{
if (floor(x) == x) return 0;
return 1;
}
static inline float relu_gradient(float x){return (x>0);}
static inline float elu_gradient(float x){return (x >= 0) + (x < 0)*(x + 1);}
static inline float relie_gradient(float x){return (x>0) ? 1 : .01;}
static inline float ramp_gradient(float x){return (x>0)+.1;}
static inline float leaky_gradient(float x){return (x>0) ? 1 : .1;}
static inline float tanh_gradient(float x){return 1-x*x;}
static inline float plse_gradient(float x){return (x < 0 || x > 1) ? .01 : .125;}
char *get_activation_string(ACTIVATION a)
{
switch(a){
case LOGISTIC:
return "logistic";
case LOGGY:
return "loggy";
case RELU:
return "relu";
case ELU:
return "elu";
case RELIE:
return "relie";
case RAMP:
return "ramp";
case LINEAR:
return "linear";
case TANH:
return "tanh";
case PLSE:
return "plse";
case LEAKY:
return "leaky";
case STAIR:
return "stair";
case HARDTAN:
return "hardtan";
case LHTAN:
return "lhtan";
default:
break;
}
return "relu";
}
ACTIVATION get_activation(char *s)
{
if (strcmp(s, "logistic")==0) return LOGISTIC;
if (strcmp(s, "loggy")==0) return LOGGY;
if (strcmp(s, "relu")==0) return RELU;
if (strcmp(s, "elu")==0) return ELU;
if (strcmp(s, "relie")==0) return RELIE;
if (strcmp(s, "plse")==0) return PLSE;
if (strcmp(s, "hardtan")==0) return HARDTAN;
if (strcmp(s, "lhtan")==0) return LHTAN;
if (strcmp(s, "linear")==0) return LINEAR;
if (strcmp(s, "ramp")==0) return RAMP;
if (strcmp(s, "leaky")==0) return LEAKY;
if (strcmp(s, "tanh")==0) return TANH;
if (strcmp(s, "stair")==0) return STAIR;
fprintf(stderr, "Couldn't find activation function %s, going with ReLU\n", s);
return RELU;
}
float activate(float x, ACTIVATION a)
{
switch(a){
case LINEAR:
return linear_activate(x);
case LOGISTIC:
return logistic_activate(x);
case LOGGY:
return loggy_activate(x);
case RELU:
return relu_activate(x);
case ELU:
return elu_activate(x);
case RELIE:
return relie_activate(x);
case RAMP:
return ramp_activate(x);
case LEAKY:
return leaky_activate(x);
case TANH:
return tanh_activate(x);
case PLSE:
return plse_activate(x);
case STAIR:
return stair_activate(x);
case HARDTAN:
return hardtan_activate(x);
case LHTAN:
return lhtan_activate(x);
}
return 0;
}
void activate_array(float *x, const int n, const ACTIVATION a)
{
int i;
for(i = 0; i < n; ++i){
x[i] = activate(x[i], a);
}
}
float gradient(float x, ACTIVATION a)
{
switch(a){
case LINEAR:
return linear_gradient(x);
case LOGISTIC:
return logistic_gradient(x);
case LOGGY:
return loggy_gradient(x);
case RELU:
return relu_gradient(x);
case ELU:
return elu_gradient(x);
case RELIE:
return relie_gradient(x);
case RAMP:
return ramp_gradient(x);
case LEAKY:
return leaky_gradient(x);
case TANH:
return tanh_gradient(x);
case PLSE:
return plse_gradient(x);
case STAIR:
return stair_gradient(x);
case HARDTAN:
return hardtan_gradient(x);
case LHTAN:
return lhtan_gradient(x);
}
return 0;
}
///////////////////activation end
void copy_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
int i;
for(i = 0; i < N; ++i) Y[i*INCY] = X[i*INCX];
}
void fill_cpu(int N, float ALPHA, float *X, int INCX)
{
int i;
for(i = 0; i < N; ++i) X[i*INCX] = ALPHA;
}
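// shortcut_cpu: residual add, out = s1*out + s2*add, where add comes from an
// earlier layer; integer stride/sample factors handle mismatched spatial sizes.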
void shortcut_cpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out)
{
int stride = w1/w2;
int sample = w2/w1;
assert(stride == h1/h2);
assert(sample == h2/h1);
//printf("shorcut_layer batch=%d,stride=%d,sample=%d\n",batch,stride,sample);
if(stride < 1) stride = 1;
if(sample < 1) sample = 1;
int minw = (w1 < w2) ? w1 : w2;
int minh = (h1 < h2) ? h1 : h2;
int minc = (c1 < c2) ? c1 : c2;
int i,j,k,b;
for(b = 0; b < batch; ++b){
for(k = 0; k < minc; ++k){
for(j = 0; j < minh; ++j){
for(i = 0; i < minw; ++i){
int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
out[out_index] = s1*out[out_index] + s2*add[add_index];
}
}
}
}
}
void forward_shortcut_layer(const layer l, network net)
{
//copy_cpu(l.outputs*l.batch, net.input, 1, l.output, 1);
//shortcut_cpu(l.batch, l.w, l.h, l.c, net.layers[l.index].output, l.out_w, l.out_h, l.out_c, l.alpha, l.beta, l.output);
//activate_array(l.output, l.outputs*l.batch, l.activation);
int w = l.w;
int h = l.h;
int c = l.c;
float *add = net.layers[l.index].output;
float *out = l.output;
float *in = net.input;
int i,j,k;
for(k = 0; k < c; ++k){
for(j = 0; j < h; ++j){
for(i = 0; i < w; ++i){
int index = i + w*(j + h*k );
out[index] = in[index] + add[index];
}
}
}
}
layer make_shortcut_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2)
{
fprintf(stderr, "res %3d %4d x%4d x%4d -> %4d x%4d x%4d\n",index, w2,h2,c2, w,h,c);
layer l;
memset(&l,0,sizeof(layer));
l.type = SHORTCUT;
l.batch = batch;
l.w = w2;
l.h = h2;
l.c = c2;
l.out_w = w;
l.out_h = h;
l.out_c = c;
l.outputs = w*h*c;
l.inputs = l.outputs;
l.index = index;
l.output = (float *)calloc(l.outputs*batch, sizeof(float));
l.forward = forward_shortcut_layer;
return l;
}
int convolutional_out_height(layer l)
{
return (l.h + 2*l.pad - l.size) / l.stride + 1;
}
int convolutional_out_width(layer l)
{
return (l.w + 2*l.pad - l.size) / l.stride + 1;
}
static size_t get_workspace_size(layer l){
return (size_t)l.out_h*l.out_w*l.size*l.size*l.c/l.groups*sizeof(float);
}
void add_bias(float *output, float *biases, int batch, int n, int size)
{
int i,j,b;
for(b = 0; b < batch; ++b){
for(i = 0; i < n; ++i){
for(j = 0; j < size; ++j){
output[(b*n + i)*size + j] += biases[i];
}
}
}
}
void scale_bias(float *output, float *scales, int batch, int n, int size)
{
int i,j,b;
for(b = 0; b < batch; ++b){
for(i = 0; i < n; ++i){
for(j = 0; j < size; ++j){
output[(b*n + i)*size + j] *= scales[i];
}
}
}
}
float im2col_get_pixel(float *im, int height, int width, int channels,
int row, int col, int channel, int pad)
{
row -= pad;
col -= pad;
if (row < 0 || col < 0 ||
row >= height || col >= width) return 0;
return im[col + width*(row + height*channel)];
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
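// im2col_cpu: unroll every ksize x ksize patch of the (zero-padded) input into a
// column of data_col, so the convolution can be computed as a single GEMM.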
void im2col_cpu(float* data_im,
int channels, int height, int width,
int ksize, int stride, int pad, float* data_col)
{
int c,h,w;
int height_col = (height + 2*pad - ksize) / stride + 1;
int width_col = (width + 2*pad - ksize) / stride + 1;
int channels_col = channels * ksize * ksize;
for (c = 0; c < channels_col; ++c) {
int w_offset = c % ksize;
int h_offset = (c / ksize) % ksize;
int c_im = c / ksize / ksize;
for (h = 0; h < height_col; ++h) {
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h * stride;
int im_col = w_offset + w * stride;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
}
}
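// gemm_nn: C += ALPHA * A * B for row-major, non-transposed A (MxK) and B (KxN),
// with leading dimensions lda/ldb/ldc.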
void gemm_nn(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
// j and k must be thread-private, otherwise the OpenMP loop races on them
#pragma omp parallel for private(j,k)
for(i = 0; i < M; ++i){
for(k = 0; k < K; ++k){
register float A_PART = ALPHA*A[i*lda+k];
for(j = 0; j < N; ++j){
C[i*ldc+j] += A_PART*B[k*ldb+j];
}
}
}
}
void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc)
{
//printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc);
int i, j;
for(i = 0; i < M; ++i){
for(j = 0; j < N; ++j){
C[i*ldc + j] *= BETA;
}
}
if(!TA && !TB)
gemm_nn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
//else if(TA && !TB)
// gemm_tn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
//else if(!TA && TB)
// gemm_nt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
//else
// gemm_tt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
}
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc)
{
gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc);
}
void normalize_cpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
int b, f, i;
for(b = 0; b < batch; ++b){
for(f = 0; f < filters; ++f){
for(i = 0; i < spatial; ++i){
int index = b*filters*spatial + f*spatial + i;
x[index] = (x[index] - mean[f])/(sqrt(variance[f]) + .000001f);
}
}
}
}
void forward_batchnorm_layer(layer l, network net)//for conv
{
normalize_cpu(l.output, l.rolling_mean, l.rolling_variance, l.batch, l.out_c, l.out_h*l.out_w);
scale_bias(l.output, l.scales, l.batch, l.out_c, l.out_h*l.out_w);
add_bias(l.output, l.biases, l.batch, l.out_c, l.out_h*l.out_w);
}
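// CONV_Padding_Relu: direct nested-loop convolution with zero padding, kept as a
// reference for the im2col+GEMM path below (see the commented-out call there).
// Despite the name, no activation is applied here; the caller activates the output.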
void CONV_Padding_Relu(float *Input,float *Output,float *Weight,const int InFM_num,const int OutFM_num,const int Kernel_size,const int Kernel_stride,const int Input_w,const int Input_h,const int Padding)
{
// (output_w - 1)*Kernel_stride + Kernel_size = Input_w
const int output_w = (Input_w - Kernel_size + 2*Padding)/Kernel_stride + 1 ;
const int output_h = (Input_h - Kernel_size + 2*Padding)/Kernel_stride + 1 ;
int x, y, of, inf;
int m,n;
for( of = 0; of < OutFM_num; of++){
for( y = 0; y < output_h; y++) {
for( x = 0; x < output_w; x++){
float tmp = 0.0;
for(inf = 0;inf < InFM_num; inf++)
{
int input_offset = inf*Input_w*Input_h + (y*Kernel_stride - Padding)*Input_w + x*Kernel_stride - Padding;
for(m = 0;m < Kernel_size; m++)
{
for(n = 0;n < Kernel_size; n++)
{
int kernel_offset = of*InFM_num*Kernel_size*Kernel_size + inf*Kernel_size*Kernel_size;
bool inFM_width = ((x*Kernel_stride + n - Padding) >= 0)&&((x*Kernel_stride + n - Padding) < Input_w);
bool inFM_height = ((y*Kernel_stride + m - Padding) >= 0)&&((y*Kernel_stride + m - Padding) < Input_h);
if(inFM_width&&inFM_height)
tmp += Weight[kernel_offset + m*Kernel_size + n]*Input[input_offset + m*Input_w + n];
}
}
}
Output[of*output_w*output_h + y*output_w + x] = tmp;
}
}
}
}
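// forward_convolutional_layer: convolution as im2col followed by one GEMM
// (weights [m x k] times columns [k x out_h*out_w]), then batch-norm or bias add,
// then the activation. This simplified path assumes batch == 1; the grouped,
// batched variant is the commented-out loop inside the function.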
void forward_convolutional_layer(layer l, network net)
{
int i, j;
fill_cpu(l.outputs*l.batch, 0, l.output, 1);
//printf("c=%d,n=%d,size=%d,stride=%d,w=%d,h=%d,pad=%d\n",l.c,l.n,l.size,l.stride,l.w,l.h,l.pad);
//int m = l.n/l.groups;
//int k = l.size*l.size*l.c/l.groups;
//int n = l.out_w*l.out_h;
//for(i = 0; i < l.batch; ++i){
// for(j = 0; j < l.groups; ++j){
// float *a = l.weights + j*l.nweights/l.groups;
// float *b = net.workspace;
// float *c = l.output + (i*l.groups + j)*n*m;
// im2col_cpu(net.input + (i*l.groups + j)*l.c/l.groups*l.h*l.w,
// l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, b);
// gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
// }
//}
int m = l.n;
int k = l.size*l.size*l.c;
int n = l.out_w*l.out_h;
float *a = l.weights;
float *b = net.workspace;
float *c = l.output;
im2col_cpu(net.input,l.c, l.h, l.w, l.size, l.stride, l.pad, b);
gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
//CONV_Padding_Relu(net.input,l.output,l.weights,l.c,l.n,l.size,l.stride,l.w,l.h,l.pad);
if(l.batch_normalize){
forward_batchnorm_layer(l, net);
} else {
add_bias(l.output, l.biases, l.batch, l.n, l.out_h*l.out_w);
}
activate_array(l.output, l.outputs*l.batch, l.activation);
}
layer make_convolutional_layer(int batch, int h, int w, int c, int n, int groups, int size, int stride, int padding, ACTIVATION activation, int batch_normalize, int binary, int xnor, int adam)
{
int i;
layer l;
memset(&l,0,sizeof(layer));
l.type = CONVOLUTIONAL;
l.groups = groups;
l.h = h;
l.w = w;
l.c = c;
l.n = n;
l.binary = binary;
l.xnor = xnor;
l.batch = batch;
l.stride = stride;
l.size = size;
l.pad = padding;
l.batch_normalize = batch_normalize;
// l.weights = (float *)calloc(c/groups*n*size*size, sizeof(float));
// l.biases = (float *)calloc(n, sizeof(float));
l.nweights = c/groups*n*size*size;
l.nbiases = n;
int out_w = convolutional_out_width(l);
int out_h = convolutional_out_height(l);
l.out_h = out_h;
l.out_w = out_w;
l.out_c = n;
l.outputs = l.out_h * l.out_w * l.out_c;
l.inputs = l.w * l.h * l.c;
// l.output = (float *)calloc(l.batch*l.outputs, sizeof(float));
l.forward = forward_convolutional_layer;
if(batch_normalize){
// l.scales = (float *)calloc(n, sizeof(float));
// l.rolling_mean = (float *)calloc(n, sizeof(float));
//l.rolling_variance = (float *)calloc(n, sizeof(float));
}
l.workspace_size = get_workspace_size(l);
l.activation = activation;
fprintf(stderr, "conv %5d %2d x%2d /%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BFLOPs\n", n, size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c, (2.0 * l.n * l.size*l.size*l.c/l.groups * l.out_h*l.out_w)/1000000000.);
return l;
}
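// upsample_cpu: nearest-neighbour upsampling by an integer stride (forward == 1);
// with forward == 0 it instead accumulates values from out back into in.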
void upsample_cpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
int i, j, k, b;
for(b = 0; b < batch; ++b){
for(k = 0; k < c; ++k){
for(j = 0; j < h*stride; ++j){
for(i = 0; i < w*stride; ++i){
int in_index = b*w*h*c + k*w*h + (j/stride)*w + i/stride;
int out_index = b*w*h*c*stride*stride + k*w*h*stride*stride + j*w*stride + i;
if(forward) out[out_index] = scale*in[in_index];
else in[in_index] += scale*out[out_index];
}
}
}
}
}
void forward_upsample_layer(const layer l, network net)
{
//fill_cpu(l.outputs*l.batch, 0, l.output, 1);
//upsample_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.scale, l.output);
int c = l.c;
int h = l.h;
int w = l.w;
int stride = l.stride;
float *in = net.input;
float *out = l.output;
int i, j, k;
for(k = 0; k < c; ++k){
for(j = 0; j < h*stride; ++j){
for(i = 0; i < w*stride; ++i){
int in_index = k*w*h + (j/stride)*w + i/stride;
int out_index = k*w*h*stride*stride + j*w*stride + i;
out[out_index] = in[in_index];
}
}
}
}
layer make_upsample_layer(int batch, int w, int h, int c, int stride)
{
layer l;
memset(&l,0,sizeof(layer));
l.type = UPSAMPLE;
l.batch = batch;
l.w = w;
l.h = h;
l.c = c;
l.out_w = w*stride;
l.out_h = h*stride;
l.out_c = c;
if(stride < 0){
stride = -stride;
l.reverse=1;
l.out_w = w/stride;
l.out_h = h/stride;
}
l.stride = stride;
l.outputs = l.out_w*l.out_h*l.out_c;
l.inputs = l.w*l.h*l.c;
l.output = (float *)calloc(l.outputs*batch, sizeof(float));
l.forward = forward_upsample_layer;
if(l.reverse) fprintf(stderr, "downsample %2dx %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
else fprintf(stderr, "upsample %2dx %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
return l;
}
void forward_route_layer(const layer l, network net)
{
int i, j;
int offset = 0;
for(i = 0; i < l.n; ++i){
int index = l.input_layers[i];
float *input = net.layers[index].output;
int input_size = l.input_sizes[i];
copy_cpu(input_size, input, 1, l.output + offset, 1);
offset += input_size;
}
}
layer make_route_layer(int batch, int n, int *input_layers, int *input_sizes)
{
fprintf(stderr,"route ");
layer l;
memset(&l,0,sizeof(layer));
l.type = ROUTE;
l.batch = batch;
l.n = n;
l.input_layers = input_layers;
l.input_sizes = input_sizes;
int i;
int outputs = 0;
for(i = 0; i < n; ++i){
fprintf(stderr," %d", input_layers[i]);
outputs += input_sizes[i];
}
fprintf(stderr, "\n");
l.outputs = outputs;
l.inputs = outputs;
// l.output = (float *)calloc(outputs*batch, sizeof(float));;
l.forward = forward_route_layer;
return l;
}
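// YOLO output layout: for each of the l.n anchors there is a contiguous block of
// (4 box coords + 1 objectness + l.classes) channels, each channel holding w*h
// values. entry_index returns the offset of channel `entry` for a given anchor
// and spatial location within a given batch item.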
static int entry_index(layer l, int batch, int location, int entry)
{
int n = location / (l.w*l.h);
int loc = location % (l.w*l.h);
return batch*l.outputs + n*l.w*l.h*(4+l.classes+1) + entry*l.w*l.h + loc;
}
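// forward_yolo_layer: copy the input through and apply the logistic activation to
// the x,y offsets and to the objectness + class channels of every anchor; the raw
// width/height channels are left untouched here.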
void forward_yolo_layer(const layer l, network net)
{
int i,j,b,t,n;
//char line[256];
//FILE *fp3;
//char filename[256];
//sprintf(filename, "yolo_layer_%d.txt", l.outputs);
//printf("YOLO_layer:outputs=%d,%s\n",l.outputs,filename);
// if( (fp3 = fopen(filename, "w")) == NULL)fprintf(stderr,"CANNOT OPEN\n");
//int x;
// for( x = 0; x < l.outputs; x++)
//{
// sprintf(line, "%f\n", net.input[x]);
// if(fputs(line,fp3)<0)fprintf(stderr,"write FILE failed\n");
// }
// fclose(fp3);
memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float));
for (b = 0; b < l.batch; ++b){
for(n = 0; n < l.n; ++n){
int index = entry_index(l, b, n*l.w*l.h, 0);
activate_array(l.output + index, 2*l.w*l.h, LOGISTIC);
index = entry_index(l, b, n*l.w*l.h, 4);
activate_array(l.output + index, (1+l.classes)*l.w*l.h, LOGISTIC);
}
}
return ;
}
layer make_yolo_layer(int batch, int w, int h, int n, int total, int *mask, int classes)
{
int i;
layer l;
memset(&l,0,sizeof(layer));
l.type = YOLO;
l.n = n;
l.total = total;
l.batch = batch;
l.h = h;
l.w = w;
l.c = n*(classes + 4 + 1);
l.out_w = l.w;
l.out_h = l.h;
l.out_c = l.c;
l.classes = classes;
//l.cost = (float *)calloc(1, sizeof(float));
l.biases = (float *)calloc(total*2, sizeof(float));
if(mask) l.mask = mask;
else{
l.mask = (int *)calloc(n, sizeof(int));
for(i = 0; i < n; ++i){
l.mask[i] = i;
}
}
//l.bias_updates = (float *)calloc(n*2, sizeof(float));
l.outputs = h*w*n*(classes + 4 + 1);
l.inputs = l.outputs;
//l.truths = 90*(4 + 1);
//l.delta = (float *)calloc(batch*l.outputs, sizeof(float));
l.output = (float *)calloc(batch*l.outputs, sizeof(float));
for(i = 0; i < total*2; ++i){
l.biases[i] = .5;
}
l.forward = forward_yolo_layer;
fprintf(stderr, "detection\n");
srand(0);
return l;
}
/////////////////parser begin
typedef struct{
char *type;
list *options;
}section;
list *read_cfg(char *filename);
LAYER_TYPE string_to_layer_type(char * type)
{
if (strcmp(type, "[shortcut]")==0) return SHORTCUT;
if (strcmp(type, "[crop]")==0) return CROP;
if (strcmp(type, "[cost]")==0) return COST;
if (strcmp(type, "[detection]")==0) return DETECTION;
if (strcmp(type, "[region]")==0) return REGION;
if (strcmp(type, "[yolo]")==0) return YOLO;
if (strcmp(type, "[local]")==0) return LOCAL;
if (strcmp(type, "[conv]")==0
|| strcmp(type, "[convolutional]")==0) return CONVOLUTIONAL;
if (strcmp(type, "[deconv]")==0
|| strcmp(type, "[deconvolutional]")==0) return DECONVOLUTIONAL;
if (strcmp(type, "[activation]")==0) return ACTIVE;
if (strcmp(type, "[logistic]")==0) return LOGXENT;
if (strcmp(type, "[l2norm]")==0) return L2NORM;
if (strcmp(type, "[net]")==0
|| strcmp(type, "[network]")==0) return NETWORK;
if (strcmp(type, "[crnn]")==0) return CRNN;
if (strcmp(type, "[gru]")==0) return GRU;
if (strcmp(type, "[lstm]") == 0) return LSTM;
if (strcmp(type, "[rnn]")==0) return RNN;
if (strcmp(type, "[conn]")==0
|| strcmp(type, "[connected]")==0) return CONNECTED;
if (strcmp(type, "[max]")==0
|| strcmp(type, "[maxpool]")==0) return MAXPOOL;
if (strcmp(type, "[reorg]")==0) return REORG;
if (strcmp(type, "[avg]")==0
|| strcmp(type, "[avgpool]")==0) return AVGPOOL;
if (strcmp(type, "[dropout]")==0) return DROPOUT;
if (strcmp(type, "[lrn]")==0
|| strcmp(type, "[normalization]")==0) return NORMALIZATION;
if (strcmp(type, "[batchnorm]")==0) return BATCHNORM;
if (strcmp(type, "[soft]")==0
|| strcmp(type, "[softmax]")==0) return SOFTMAX;
if (strcmp(type, "[route]")==0) return ROUTE;
if (strcmp(type, "[upsample]")==0) return UPSAMPLE;
return BLANK;
}
void free_section(section *s)
{
free(s->type);
node *n = s->options->front;
while(n){
kvp *pair = (kvp *)n->val;
free(pair->key);
free(pair);
node *next = n->next;
free(n);
n = next;
}
free(s->options);
free(s);
}
void parse_data(char *data, float *a, int n)
{
int i;
if(!data) return;
char *curr = data;
char *next = data;
int done = 0;
for(i = 0; i < n && !done; ++i){
while(*++next !='\0' && *next != ',');
if(*next == '\0') done = 1;
*next = '\0';
sscanf(curr, "%g", &a[i]);
curr = next+1;
}
}
typedef struct size_params{
int batch;
int inputs;
int h;
int w;
int c;
int index;
int time_steps;
network *net;
} size_params;
layer parse_convolutional(list *options, size_params params)
{
int n = option_find_int(options, "filters",1);
int size = option_find_int(options, "size",1);
int stride = option_find_int(options, "stride",1);
int pad = option_find_int_quiet(options, "pad",0);
int padding = option_find_int_quiet(options, "padding",0);
int groups = option_find_int_quiet(options, "groups", 1);
if(pad) padding = size/2;
char *activation_s = option_find_str(options, "activation", "logistic");
ACTIVATION activation = get_activation(activation_s);
int batch,h,w,c;
h = params.h;
w = params.w;
c = params.c;
batch=params.batch;
if(!(h && w && c)) error("Layer before convolutional layer must output image.");
int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
int binary = option_find_int_quiet(options, "binary", 0);
int xnor = option_find_int_quiet(options, "xnor", 0);
layer l = make_convolutional_layer(batch,h,w,c,n,groups,size,stride,padding,activation, batch_normalize, binary, xnor, params.net->adam);
l.flipped = option_find_int_quiet(options, "flipped", 0);
l.dot = option_find_float_quiet(options, "dot", 0);
return l;
}
int *parse_yolo_mask(char *a, int *num)
{
int *mask = 0;
if(a){
int len = strlen(a);
int n = 1;
int i;
for(i = 0; i < len; ++i){
if (a[i] == ',') ++n;
}
mask = (int *)calloc(n, sizeof(int));
for(i = 0; i < n; ++i){
int val = atoi(a);
mask[i] = val;
a = strchr(a, ',')+1;
}
*num = n;
}
return mask;
}
layer parse_yolo(list *options, size_params params)
{
int classes = option_find_int(options, "classes", 20);
int total = option_find_int(options, "num", 1);
int num = total;
char *a = option_find_str(options, "mask", 0);
int *mask = parse_yolo_mask(a, &num);
layer l = make_yolo_layer(params.batch, params.w, params.h, num, total, mask, classes);
assert(l.outputs == params.inputs);
l.max_boxes = option_find_int_quiet(options, "max",90);
l.jitter = option_find_float(options, "jitter", .2);
l.ignore_thresh = option_find_float(options, "ignore_thresh", .5);
l.truth_thresh = option_find_float(options, "truth_thresh", 1);
l.random = option_find_int_quiet(options, "random", 0);
a = option_find_str(options, "anchors", 0);
if(a){
int len = strlen(a);
int n = 1;
int i;
for(i = 0; i < len; ++i){
if (a[i] == ',') ++n;
}
for(i = 0; i < n; ++i){
float bias = atof(a);
l.biases[i] = bias;
a = strchr(a, ',')+1;
}
}
return l;
}
layer parse_shortcut(list *options, size_params params, network *net)
{
char *l = option_find(options, "from");
int index = atoi(l);
if(index < 0) index = params.index + index;
int batch = params.batch;
layer from = net->layers[index];
layer s = make_shortcut_layer(batch, index, params.w, params.h, params.c, from.out_w, from.out_h, from.out_c);
char *activation_s = option_find_str(options, "activation", "linear");
ACTIVATION activation = get_activation(activation_s);
s.activation = activation;
s.alpha = option_find_float_quiet(options, "alpha", 1);
s.beta = option_find_float_quiet(options, "beta", 1);
return s;
}
layer parse_upsample(list *options, size_params params, network *net)
{
int stride = option_find_int(options, "stride",2);
layer l = make_upsample_layer(params.batch, params.w, params.h, params.c, stride);
l.scale = option_find_float_quiet(options, "scale", 1);
return l;
}
layer parse_route(list *options, size_params params, network *net)
{
char *l = option_find(options, "layers");
int len = strlen(l);
if(!l) error("Route Layer must specify input layers");
int n = 1;
int i;
for(i = 0; i < len; ++i){
if (l[i] == ',') ++n;
}
int *layers = (int *)calloc(n, sizeof(int));
int *sizes = (int *)calloc(n, sizeof(int));
for(i = 0; i < n; ++i){
int index = atoi(l);
l = strchr(l, ',')+1;
if(index < 0) index = params.index + index;
layers[i] = index;
sizes[i] = net->layers[index].outputs;
}
int batch = params.batch;
layer route_layer = make_route_layer(batch, n, layers, sizes);
layer first = net->layers[layers[0]];
route_layer.out_w = first.out_w;
route_layer.out_h = first.out_h;
route_layer.out_c = first.out_c;
for(i = 1; i < n; ++i){
int index = layers[i];
layer next = net->layers[index];
if(next.out_w == first.out_w && next.out_h == first.out_h){
route_layer.out_c += next.out_c;
}else{
route_layer.out_h = route_layer.out_w = route_layer.out_c = 0;
}
}
return route_layer;
}
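// softmax: numerically stable softmax with temperature over n strided elements
// (the largest input is subtracted before exponentiation).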
void softmax(float *input, int n, float temp, int stride, float *output)
{
int i;
float sum = 0;
float largest = -FLT_MAX;
for(i = 0; i < n; ++i){
if(input[i*stride] > largest) largest = input[i*stride];
}
for(i = 0; i < n; ++i){
float e = exp(input[i*stride]/temp - largest/temp);
sum += e;
output[i*stride] = e;
}
for(i = 0; i < n; ++i){
output[i*stride] /= sum;
}
}
void softmax_cpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
int g, b;
for(b = 0; b < batch; ++b){
for(g = 0; g < groups; ++g){
softmax(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset);
}
}
}
void forward_region_layer(const layer l, network net)
{
int i,j,b,t,n;
memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float));
#ifndef GPU
for (b = 0; b < l.batch; ++b){
for(n = 0; n < l.n; ++n){
int index = entry_index(l, b, n*l.w*l.h, 0);
activate_array(l.output + index, 2*l.w*l.h, LOGISTIC);
index = entry_index(l, b, n*l.w*l.h, l.coords);
if(!l.background) activate_array(l.output + index, l.w*l.h, LOGISTIC);
index = entry_index(l, b, n*l.w*l.h, l.coords + 1);
//if(!l.softmax) activate_array(l.output + index, l.classes*l.w*l.h, LOGISTIC);
}
}
if (l.softmax){
int index = entry_index(l, 0, 0, l.coords + !l.background);
softmax_cpu(net.input + index, l.classes + l.background, l.batch*l.n, l.inputs/l.n, l.w*l.h, 1, l.w*l.h, 1, l.output + index);
}
char line[256];
FILE *fp3;
char filename[256];
sprintf(filename, "yolo_layer_%d.txt", 123123);
printf("YOLO_layer:outputs=%d,%s\n",l.outputs,filename);
if( (fp3 = fopen(filename, "w")) == NULL)fprintf(stderr,"CANNOT OPEN\n");
int x;
for( x = 0; x < l.outputs; x++)
{
sprintf(line, "%f\n", net.input[x]);
if(fputs(line,fp3)<0)fprintf(stderr,"write FILE failed\n");
}
fclose(fp3);
#endif
if(!net.train) return;
}
layer make_region_layer(int batch, int w, int h, int n, int classes, int coords)
{
layer l;
memset(&l,0,sizeof(layer));
l.type = REGION;
l.n = n;
l.batch = batch;
l.h = h;
l.w = w;
l.c = n*(classes + coords + 1);
l.out_w = l.w;
l.out_h = l.h;
l.out_c = l.c;
l.classes = classes;
l.coords = coords;
l.biases = (float *)calloc(n*2, sizeof(float));
l.outputs = h*w*n*(classes + coords + 1);
l.inputs = l.outputs;
l.truths = 30*(l.coords + 1);
l.output = (float *)calloc(batch*l.outputs, sizeof(float));
int i;
for(i = 0; i < n*2; ++i){
l.biases[i] = .5;
}
l.forward = forward_region_layer;
fprintf(stderr, "detection\n");
srand(0);
return l;
}
layer parse_region(list *options, size_params params)
{
int coords = option_find_int(options, "coords", 4);
int classes = option_find_int(options, "classes", 20);
int num = option_find_int(options, "num", 1);
layer l = make_region_layer(params.batch, params.w, params.h, num, classes, coords);
assert(l.outputs == params.inputs);
l.log = option_find_int_quiet(options, "log", 0);
l.sqrt = option_find_int_quiet(options, "sqrt", 0);
l.softmax = option_find_int(options, "softmax", 0);
l.background = option_find_int_quiet(options, "background", 0);
l.max_boxes = option_find_int_quiet(options, "max",30);
l.jitter = option_find_float(options, "jitter", .2);
l.rescore = option_find_int_quiet(options, "rescore",0);
l.thresh = option_find_float(options, "thresh", .5);
l.classfix = option_find_int_quiet(options, "classfix", 0);
l.absolute = option_find_int_quiet(options, "absolute", 0);
l.random = option_find_int_quiet(options, "random", 0);
l.coord_scale = option_find_float(options, "coord_scale", 1);
l.object_scale = option_find_float(options, "object_scale", 1);
l.noobject_scale = option_find_float(options, "noobject_scale", 1);
l.mask_scale = option_find_float(options, "mask_scale", 1);
l.class_scale = option_find_float(options, "class_scale", 1);
l.bias_match = option_find_int_quiet(options, "bias_match",0);
char *tree_file = option_find_str(options, "tree", 0);
// if (tree_file) l.softmax_tree = read_tree(tree_file);
char *map_file = option_find_str(options, "map", 0);
// if (map_file) l.map = read_map(map_file);
char *a = option_find_str(options, "anchors", 0);
if(a){
int len = strlen(a);
int n = 1;
int i;
for(i = 0; i < len; ++i){
if (a[i] == ',') ++n;
}
for(i = 0; i < n; ++i){
float bias = atof(a);
l.biases[i] = bias;
a = strchr(a, ',')+1;
}
}
return l;
}
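// reorg_cpu: reshuffles a buffer between a (w, h, c) indexing and a
// (w*stride, h*stride, c/(stride*stride)) indexing of the same element count;
// `forward` selects the direction. forward_reorg_layer calls it with forward == 0
// to implement the YOLOv2 reorg (passthrough) layer.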
void reorg_cpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
int b,i,j,k;
int out_c = c/(stride*stride);
for(b = 0; b < batch; ++b){
for(k = 0; k < c; ++k){
for(j = 0; j < h; ++j){
for(i = 0; i < w; ++i){
int in_index = i + w*(j + h*(k + c*b));
int c2 = k % out_c;
int offset = k / out_c;
int w2 = i*stride + offset % stride;
int h2 = j*stride + offset / stride;
int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
if(forward) out[out_index] = x[in_index];
else out[in_index] = x[out_index];
}
}
}
}
}
void forward_reorg_layer(const layer l, network net)
{
int i;
//if(l.flatten){
// memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float));
// if(l.reverse){
// flatten(l.output, l.w*l.h, l.c, l.batch, 0);
// }else{
// flatten(l.output, l.w*l.h, l.c, l.batch, 1);
// }
//} else if (l.extra) {
// for(i = 0; i < l.batch; ++i){
// copy_cpu(l.inputs, net.input + i*l.inputs, 1, l.output + i*l.outputs, 1);
// }
//} else if (l.reverse){
// reorg_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.output);
//} else {
reorg_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 0, l.output);
//}
}
layer make_reorg_layer(int batch, int w, int h, int c, int stride, int reverse, int flatten, int extra)
{
layer l;
memset(&l,0,sizeof(layer));
l.type = REORG;
l.batch = batch;
l.stride = stride;
l.extra = extra;
l.h = h;
l.w = w;
l.c = c;
l.flatten = flatten;
if(reverse){
l.out_w = w*stride;
l.out_h = h*stride;
l.out_c = c/(stride*stride);
}else{
l.out_w = w/stride;
l.out_h = h/stride;
l.out_c = c*(stride*stride);
}
l.reverse = reverse;
l.outputs = l.out_h * l.out_w * l.out_c;
l.inputs = h*w*c;
if(l.extra){
l.out_w = l.out_h = l.out_c = 0;
l.outputs = l.inputs + l.extra;
}
if(extra){
fprintf(stderr, "reorg %4d -> %4d\n", l.inputs, l.outputs);
} else {
fprintf(stderr, "reorg /%2d %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
}
int output_size = l.outputs * batch;
//l.output = (float *)calloc(output_size, sizeof(float));
l.forward = forward_reorg_layer;
return l;
}
layer parse_reorg(list *options, size_params params)
{
int stride = option_find_int(options, "stride",1);
int reverse = option_find_int_quiet(options, "reverse",0);
int flatten = option_find_int_quiet(options, "flatten",0);
int extra = option_find_int_quiet(options, "extra",0);
int batch,h,w,c;
h = params.h;
w = params.w;
c = params.c;
batch=params.batch;
if(!(h && w && c)) error("Layer before reorg layer must output image.");
layer l = make_reorg_layer(batch,w,h,c,stride,reverse, flatten, extra);
return l;
}
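// forward_maxpool_layer: slide a size x size window with the given stride over the
// (implicitly padded) input, writing the maximum of each window and, when the
// indexes buffer exists, the argmax index used by a backward pass.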
void forward_maxpool_layer(layer l, network net)
{
int b,i,j,k,m,n;
int w_offset = -l.pad;
int h_offset = -l.pad;
int h = l.out_h;
int w = l.out_w;
int c = l.c;
for(b = 0; b < l.batch; ++b){
for(k = 0; k < c; ++k){
for(i = 0; i < h; ++i){
for(j = 0; j < w; ++j){
int out_index = j + w*(i + h*(k + c*b));
float max = -FLT_MAX;
int max_i = -1;
for(n = 0; n < l.size; ++n){
for(m = 0; m < l.size; ++m){
int cur_h = h_offset + i*l.stride + n;
int cur_w = w_offset + j*l.stride + m;
int index = cur_w + l.w*(cur_h + l.h*(k + b*l.c));
int valid = (cur_h >= 0 && cur_h < l.h &&
cur_w >= 0 && cur_w < l.w);
float val = (valid != 0) ? net.input[index] : -FLT_MAX;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
l.output[out_index] = max;
if(l.indexes) l.indexes[out_index] = max_i;  // indexes may be left unallocated in this inference-only build
}
}
}
}
}
//layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride, int padding)
//{
// layer l;
// memset(&l,0,sizeof(layer));
// l.type = MAXPOOL;
// l.batch = batch;
// l.h = h;
// l.w = w;
// l.c = c;
// l.pad = padding;
// l.out_w = (w + 2*padding)/stride;
// l.out_h = (h + 2*padding)/stride;
// l.out_c = c;
// l.outputs = l.out_h * l.out_w * l.out_c;
// l.inputs = h*w*c;
// l.size = size;
// l.stride = stride;
// int output_size = l.out_h * l.out_w * l.out_c * batch;
// //l.indexes = (int *)calloc(output_size, sizeof(int));
// //l.output = (float *)calloc(output_size, sizeof(float));
// l.forward = forward_maxpool_layer;
//
// fprintf(stderr, "max %d x %d / %d %4d x%4d x%4d -> %4d x%4d x%4d\n", size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c);
// return l;
//}
layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride, int padding)
{
layer l;
memset(&l,0,sizeof(layer));
l.type = MAXPOOL;
l.batch = batch;
l.h = h;
l.w = w;
l.c = c;
l.pad = padding;
l.out_w = (w + padding - size)/stride + 1;
l.out_h = (h + padding - size)/stride + 1;
l.out_c = c;
l.outputs = l.out_h * l.out_w * l.out_c;
l.inputs = h*w*c;
l.size = size;
l.stride = stride;
int output_size = l.out_h * l.out_w * l.out_c * batch;
//l.indexes = calloc(output_size, sizeof(int));
//l.output = calloc(output_size, sizeof(float));
//l.delta = calloc(output_size, sizeof(float));
l.forward = forward_maxpool_layer;
//l.backward = backward_maxpool_layer;
fprintf(stderr, "max %d x %d / %d %4d x%4d x%4d -> %4d x%4d x%4d\n", size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c);
return l;
}
layer parse_maxpool(list *options, size_params params)
{
int stride = option_find_int(options, "stride",1);
int size = option_find_int(options, "size",stride);
int padding = option_find_int_quiet(options, "padding", size-1);
int batch,h,w,c;
h = params.h;
w = params.w;
c = params.c;
batch=params.batch;
if(!(h && w && c)) error("Layer before maxpool layer must output image.");
layer maxpool_layer = make_maxpool_layer(batch,h,w,c,size,stride,padding);
return maxpool_layer;
}
learning_rate_policy get_policy(char *s)
{
if (strcmp(s, "random")==0) return RANDOM;
if (strcmp(s, "poly")==0) return POLY;
if (strcmp(s, "constant")==0) return CONSTANT;
if (strcmp(s, "step")==0) return STEP;
if (strcmp(s, "exp")==0) return EXP;
if (strcmp(s, "sigmoid")==0) return SIG;
if (strcmp(s, "steps")==0) return STEPS;
fprintf(stderr, "Couldn't find policy %s, going with constant\n", s);
return CONSTANT;
}
void parse_net_options(list *options, network *net)
{
net->batch = option_find_int(options, "batch",1);
net->learning_rate = option_find_float(options, "learning_rate", .001);
net->momentum = option_find_float(options, "momentum", .9);
net->decay = option_find_float(options, "decay", .0001);
int subdivs = option_find_int(options, "subdivisions",1);
net->time_steps = option_find_int_quiet(options, "time_steps",1);
net->notruth = option_find_int_quiet(options, "notruth",0);
net->batch /= subdivs;
net->batch *= net->time_steps;
net->subdivisions = subdivs;
net->random = option_find_int_quiet(options, "random", 0);
net->adam = option_find_int_quiet(options, "adam", 0);
if(net->adam){
net->B1 = option_find_float(options, "B1", .9);
net->B2 = option_find_float(options, "B2", .999);
net->eps = option_find_float(options, "eps", .0000001);
}
net->h = option_find_int_quiet(options, "height",0);
net->w = option_find_int_quiet(options, "width",0);
net->c = option_find_int_quiet(options, "channels",0);
net->inputs = option_find_int_quiet(options, "inputs", net->h * net->w * net->c);
net->max_crop = option_find_int_quiet(options, "max_crop",net->w*2);
net->min_crop = option_find_int_quiet(options, "min_crop",net->w);
net->max_ratio = option_find_float_quiet(options, "max_ratio", (float) net->max_crop / net->w);
net->min_ratio = option_find_float_quiet(options, "min_ratio", (float) net->min_crop / net->w);
net->center = option_find_int_quiet(options, "center",0);
net->clip = option_find_float_quiet(options, "clip", 0);
net->angle = option_find_float_quiet(options, "angle", 0);
net->aspect = option_find_float_quiet(options, "aspect", 1);
net->saturation = option_find_float_quiet(options, "saturation", 1);
net->exposure = option_find_float_quiet(options, "exposure", 1);
net->hue = option_find_float_quiet(options, "hue", 0);
if(!net->inputs && !(net->h && net->w && net->c)) error("No input parameters supplied");
char *policy_s = option_find_str(options, "policy", "constant");
net->policy = get_policy(policy_s);
net->burn_in = option_find_int_quiet(options, "burn_in", 0);
net->power = option_find_float_quiet(options, "power", 4);
if(net->policy == STEP){
net->step = option_find_int(options, "step", 1);
net->scale = option_find_float(options, "scale", 1);
} else if (net->policy == STEPS){
char *l = option_find(options, "steps");
char *p = option_find(options, "scales");
if(!l || !p) error("STEPS policy must have steps and scales in cfg file");
int len = strlen(l);
int n = 1;
int i;
for(i = 0; i < len; ++i){
if (l[i] == ',') ++n;
}
int *steps = (int *)calloc(n, sizeof(int));
float *scales = (float *)calloc(n, sizeof(float));
for(i = 0; i < n; ++i){
int step = atoi(l);
float scale = atof(p);
l = strchr(l, ',')+1;
p = strchr(p, ',')+1;
steps[i] = step;
scales[i] = scale;
}
net->scales = scales;
net->steps = steps;
net->num_steps = n;
} else if (net->policy == EXP){
net->gamma = option_find_float(options, "gamma", 1);
} else if (net->policy == SIG){
net->gamma = option_find_float(options, "gamma", 1);
net->step = option_find_int(options, "step", 1);
} else if (net->policy == POLY || net->policy == RANDOM){
}
net->max_batches = option_find_int(options, "max_batches", 0);
}
int is_network(section *s)
{
return (strcmp(s->type, "[net]")==0
|| strcmp(s->type, "[network]")==0);
}
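// parse_network_cfg: read the cfg file into sections; the first section must be
// [net]/[network] and fills the global network options, and every later section is
// turned into a layer. `params` carries the previous layer's output shape so the
// next layer can size itself.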
network *parse_network_cfg(char *filename)
{
list *sections = read_cfg(filename);
node *n = sections->front;
if(!n) error("Config file has no sections");
network *net = make_network(sections->size - 1);
net->gpu_index = -1;
size_params params;
section *s = (section *)n->val;
list *options = s->options;
if(!is_network(s)) error("First section must be [net] or [network]");
parse_net_options(options, net);
params.h = net->h;
params.w = net->w;
params.c = net->c;
params.inputs = net->inputs;
params.batch = net->batch;
params.time_steps = net->time_steps;
params.net = net;
size_t workspace_size = 0;
n = n->next;
int count = 0;
free_section(s);
fprintf(stderr, "layer filters size input output\n");
while(n){
params.index = count;
fprintf(stderr, "%5d ", count);
s = (section *)n->val;
options = s->options;
//layer l = {0};
layer l;
memset(&l,0,sizeof(layer));
LAYER_TYPE lt = string_to_layer_type(s->type);
if(lt == CONVOLUTIONAL){
l = parse_convolutional(options, params);
}else if(lt == YOLO){
l = parse_yolo(options, params);
}else if(lt == ROUTE){
l = parse_route(options, params, net);
}else if(lt == UPSAMPLE){
l = parse_upsample(options, params, net);
}else if(lt == SHORTCUT){
l = parse_shortcut(options, params, net);
}else if(lt == REGION){
l = parse_region(options, params);
}else if(lt == MAXPOOL){
l = parse_maxpool(options, params);
}else if(lt == REORG){
l = parse_reorg(options, params);
}else{
fprintf(stderr, "Type not recognized: %s\n", s->type);
}
l.clip = net->clip;
l.truth = option_find_int_quiet(options, "truth", 0);
l.onlyforward = option_find_int_quiet(options, "onlyforward", 0);
l.stopbackward = option_find_int_quiet(options, "stopbackward", 0);
l.dontsave = option_find_int_quiet(options, "dontsave", 0);
// l.dontload = option_find_int_quiet(options, "dontload", 0);
// l.dontloadscales = option_find_int_quiet(options, "dontloadscales", 0);
//l.learning_rate_scale = option_find_float_quiet(options, "learning_rate", 1);
l.smooth = option_find_float_quiet(options, "smooth", 0);
option_unused(options);
net->layers[count] = l;
if (l.workspace_size > workspace_size) workspace_size = l.workspace_size;
free_section(s);
n = n->next;
++count;
if(n){
params.h = l.out_h;
params.w = l.out_w;
params.c = l.out_c;
params.inputs = l.outputs;
}
}
free_list(sections);
layer out = get_network_output_layer(net);
net->outputs = out.outputs;
net->output = out.output;
//net->input = (float *)calloc(net->inputs*net->batch, sizeof(float));
workspace_size = 0;  // do not calloc the shared workspace here
//if(workspace_size){
// //printf("%ld\n", workspace_size);
// net->workspace = (float *)calloc(1, workspace_size);
//}
return net;
}
list *read_cfg(char *filename)
{
FILE *file = fopen(filename, "r");
if(file == 0) file_error(filename);
char *line;
int nu = 0;
list *options = make_list();
section *current = 0;
while((line=fgetl(file)) != 0){
++ nu;
strip(line);
switch(line[0]){
case '[':
current = (section *)malloc(sizeof(section));
list_insert(options, current);
current->options = make_list();
current->type = line;
break;
case '\0':
case '#':
case ';':
free(line);
break;
default:
if(!read_option(line, current->options)){
fprintf(stderr, "Config file error line %d, could parse: %s\n", nu, line);
free(line);
}
break;
}
}
fclose(file);
return options;
}
void load_convolutional_weights(layer l, FILE *fp)
{
int num = l.nweights;
fread(l.biases, sizeof(float), l.n, fp);
if (l.batch_normalize){
fread(l.scales, sizeof(float), l.n, fp);
fread(l.rolling_mean, sizeof(float), l.n, fp);
fread(l.rolling_variance, sizeof(float), l.n, fp);
}
fread(l.weights, sizeof(float), num, fp);
}
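// load_weights_upto: darknet .weights layout is three ints (major, minor, revision),
// then the "seen" image counter (8 bytes in newer files), then per convolutional
// layer: biases, optional batch-norm scales / rolling mean / rolling variance, and
// finally the weights. The two consecutive freads of net->seen below appear to
// consume the 8-byte counter on a platform where size_t is 4 bytes.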
void load_weights_upto(network *net, char *filename, int start, int cutoff)
{
fprintf(stderr, "Loading weights from %s...", filename);
fflush(stderr);
FILE *fp = fopen(filename, "rb");
if(!fp) file_error(filename);
int major;
int minor;
int revision;
fread(&major, sizeof(int), 1, fp);
fread(&minor, sizeof(int), 1, fp);
fread(&revision, sizeof(int), 1, fp);
printf("major=%d;minor=%d;revision=%d\n",major,minor,revision);// 0 2 0
printf("if true ro false:%d\n",(major*10 + minor) >= 2 && major < 1000 && minor < 1000);
if ((major*10 + minor) >= 2 && major < 1000 && minor < 1000){
//fread(net->seen, sizeof(size_t), 1, fp);
fread(net->seen, sizeof(size_t), 1, fp);
fread(net->seen, sizeof(size_t), 1, fp);
}else {
int iseen = 0;
fread(&iseen, sizeof(int), 1, fp);
*net->seen = iseen;
}
//printf("sizeof(size_t)=%u\n",sizeof(size_t));// in my PC is 4
int i;
for(i = start; i < net->n && i < cutoff; ++i){
layer l = net->layers[i];
if(l.type == CONVOLUTIONAL){
load_convolutional_weights(l, fp);
}
}
fprintf(stderr, "Done!\n");
fclose(fp);
}
void load_weights(network *net, char *filename)
{
load_weights_upto(net, filename, 0, net->n);
}
/////////////////parser end
/////////////////network begin
load_args get_base_args(network *net)
{
load_args args = {0};
args.w = net->w;
args.h = net->h;
args.size = net->w;
args.min = net->min_crop;
args.max = net->max_crop;
args.angle = net->angle;
args.aspect = net->aspect;
args.exposure = net->exposure;
args.center = net->center;
args.saturation = net->saturation;
args.hue = net->hue;
return args;
}
network *load_network(char *cfg, char *weights, int clear)
{
network *net = parse_network_cfg(cfg);
//if(weights && weights[0] != 0){
// load_weights(net, weights);
//}
if(clear) (*net->seen) = 0;
return net;
}
char *get_layer_string(LAYER_TYPE a)
{
switch(a){
case CONVOLUTIONAL:
return "convolutional";
case ACTIVE:
return "activation";
case LOCAL:
return "local";
case DECONVOLUTIONAL:
return "deconvolutional";
case CONNECTED:
return "connected";
case RNN:
return "rnn";
case GRU:
return "gru";
case LSTM:
return "lstm";
case CRNN:
return "crnn";
case MAXPOOL:
return "maxpool";
case REORG:
return "reorg";
case AVGPOOL:
return "avgpool";
case SOFTMAX:
return "softmax";
case DETECTION:
return "detection";
case REGION:
return "region";
case YOLO:
return "yolo";
case DROPOUT:
return "dropout";
case CROP:
return "crop";
case COST:
return "cost";
case ROUTE:
return "route";
case SHORTCUT:
return "shortcut";
case NORMALIZATION:
return "normalization";
case BATCHNORM:
return "batchnorm";
default:
break;
}
return "none";
}
network *make_network(int n)
{
network *net = (network *)calloc(1, sizeof(network));
net->n = n;
net->layers = (layer *)calloc(net->n, sizeof(layer));
net->seen = (size_t *)calloc(1, sizeof(size_t));
net->t = (int *)calloc(1, sizeof(int));
net->cost = (float *)calloc(1, sizeof(float));
return net;
}
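/*
 * Forward pass: run each layer in order, feeding the output of layer i as
 * the input of layer i+1 via net.input.
 */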
void forward_network(network *netp)
{
network net = *netp;
int i;
for(i = 0; i < net.n; ++i){
net.index = i;
layer l = net.layers[i];
l.forward(l, net);
net.input = l.output;
// printf("layer [%d]\n",i);
}
}
void set_temp_network(network *net, float t)
{
int i;
for(i = 0; i < net->n; ++i){
net->layers[i].temperature = t;
}
}
void set_batch_network(network *net, int b)
{
net->batch = b;
int i;
for(i = 0; i < net->n; ++i){
net->layers[i].batch = b;
}
}
float *network_predict(network *net, float *input)
{
network orig = *net;
net->input = input;
net->truth = 0;
net->train = 0;
net->delta = 0;
forward_network(net);
float *out = net->output;
*net = orig;
return out;
}
int yolo_num_detections(layer l, float thresh)
{
int i, n;
int count = 0;
for (i = 0; i < l.w*l.h; ++i){
for(n = 0; n < l.n; ++n){
int obj_index = entry_index(l, 0, n*l.w*l.h + i, 4);
if(l.output[obj_index] > thresh){
++count;
}
}
}
return count;
}
int num_detections(network *net, float thresh)
{
int i;
int s = 0;
for(i = 0; i < net->n; ++i){
layer l = net->layers[i];
if(l.type == YOLO){
s += yolo_num_detections(l, thresh);
}
if(l.type == DETECTION || l.type == REGION){
s += l.w*l.h*l.n;
}
}
return s;
}
detection *make_network_boxes(network *net, float thresh, int *num)
{
layer l = net->layers[net->n - 1];
int i;
int nboxes = num_detections(net, thresh);
//printf("num_detections nboxes = %d\n",nboxes);
if(num) *num = nboxes;
detection *dets = (detection *)calloc(nboxes, sizeof(detection));
for(i = 0; i < nboxes; ++i){
dets[i].prob = (float *)calloc(l.classes, sizeof(float));
}
return dets;
}
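/*
 * Decode one YOLO box from raw network output:
 *   bx = (col + tx) / layer_w,  by = (row + ty) / layer_h,
 *   bw = exp(tw) * anchor_w / net_w,  bh = exp(th) * anchor_h / net_h,
 * where tx..th are read from x[index] with the given stride.
 */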
box get_yolo_box(float *x, float *biases, int n, int index, int i, int j, int lw, int lh, int w, int h, int stride)
{
box b;
b.x = (i + x[index + 0*stride]) / lw;
b.y = (j + x[index + 1*stride]) / lh;
b.w = exp(x[index + 2*stride]) * biases[2*n] / w;
b.h = exp(x[index + 3*stride]) * biases[2*n+1] / h;
return b;
}
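/*
 * Undo the letterbox scaling: the network sees the image resized to
 * new_w x new_h inside a netw x neth canvas, so shift and rescale each box
 * back to the original w x h image (absolute pixels if !relative).
 */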
void correct_yolo_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative)
{
int i;
int new_w=0;
int new_h=0;
if (((float)netw/w) < ((float)neth/h)) {
new_w = netw;
new_h = (h * netw)/w;
} else {
new_h = neth;
new_w = (w * neth)/h;
}
for (i = 0; i < n; ++i){
box b = dets[i].bbox;
b.x = (b.x - (netw - new_w)/2./netw) / ((float)new_w/netw);
b.y = (b.y - (neth - new_h)/2./neth) / ((float)new_h/neth);
b.w *= (float)netw/new_w;
b.h *= (float)neth/new_h;
if(!relative){
b.x *= w;
b.w *= w;
b.y *= h;
b.h *= h;
}
dets[i].bbox = b;
}
}
int get_yolo_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, int relative, detection *dets)
{
int i,j,n;
float *predictions = l.output;
// if (l.batch == 2) avg_flipped_yolo(l);
int count = 0;
for (i = 0; i < l.w*l.h; ++i){
int row = i / l.w;
int col = i % l.w;
for(n = 0; n < l.n; ++n){
int obj_index = entry_index(l, 0, n*l.w*l.h + i, 4);
float objectness = predictions[obj_index];
if(objectness <= thresh) continue;
int box_index = entry_index(l, 0, n*l.w*l.h + i, 0);
dets[count].bbox = get_yolo_box(predictions, l.biases, l.mask[n], box_index, col, row, l.w, l.h, netw, neth, l.w*l.h);
dets[count].objectness = objectness;
dets[count].classes = l.classes;
for(j = 0; j < l.classes; ++j){
int class_index = entry_index(l, 0, n*l.w*l.h + i, 4 + 1 + j);
float prob = objectness*predictions[class_index];
dets[count].prob[j] = (prob > thresh) ? prob : 0;
}
++count;
}
}
correct_yolo_boxes(dets, count, w, h, netw, neth, relative);
return count;
}
box get_region_box(float *x, float *biases, int n, int index, int i, int j, int w, int h, int stride)
{
box b;
b.x = (i + x[index + 0*stride]) / w;
b.y = (j + x[index + 1*stride]) / h;
b.w = exp(x[index + 2*stride]) * biases[2*n] / w;
b.h = exp(x[index + 3*stride]) * biases[2*n+1] / h;
return b;
}
void correct_region_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative)
{
int i;
int new_w=0;
int new_h=0;
if (((float)netw/w) < ((float)neth/h)) {
new_w = netw;
new_h = (h * netw)/w;
} else {
new_h = neth;
new_w = (w * neth)/h;
}
for (i = 0; i < n; ++i){
box b = dets[i].bbox;
b.x = (b.x - (netw - new_w)/2./netw) / ((float)new_w/netw);
b.y = (b.y - (neth - new_h)/2./neth) / ((float)new_h/neth);
b.w *= (float)netw/new_w;
b.h *= (float)neth/new_h;
if(!relative){
b.x *= w;
b.w *= w;
b.y *= h;
b.h *= h;
}
dets[i].bbox = b;
}
}
void get_region_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, float tree_thresh, int relative, detection *dets)
{
int i,j,n,z;
float *predictions = l.output;
if (l.batch == 2) {
float *flip = l.output + l.outputs;
for (j = 0; j < l.h; ++j) {
for (i = 0; i < l.w/2; ++i) {
for (n = 0; n < l.n; ++n) {
for(z = 0; z < l.classes + l.coords + 1; ++z){
int i1 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + i;
int i2 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + (l.w - i - 1);
float swap = flip[i1];
flip[i1] = flip[i2];
flip[i2] = swap;
if(z == 0){
flip[i1] = -flip[i1];
flip[i2] = -flip[i2];
}
}
}
}
}
for(i = 0; i < l.outputs; ++i){
l.output[i] = (l.output[i] + flip[i])/2.;
}
}
for (i = 0; i < l.w*l.h; ++i){
int row = i / l.w;
int col = i % l.w;
for(n = 0; n < l.n; ++n){
int index = n*l.w*l.h + i;
for(j = 0; j < l.classes; ++j){
dets[index].prob[j] = 0;
}
int obj_index = entry_index(l, 0, n*l.w*l.h + i, l.coords);
int box_index = entry_index(l, 0, n*l.w*l.h + i, 0);
int mask_index = entry_index(l, 0, n*l.w*l.h + i, 4);
float scale = l.background ? 1 : predictions[obj_index];
dets[index].bbox = get_region_box(predictions, l.biases, n, box_index, col, row, l.w, l.h, l.w*l.h);
dets[index].objectness = scale > thresh ? scale : 0;
if(dets[index].mask){
for(j = 0; j < l.coords - 4; ++j){
dets[index].mask[j] = l.output[mask_index + j*l.w*l.h];
}
}
if(dets[index].objectness){
for(j = 0; j < l.classes; ++j){
int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + 1 + j);
float prob = scale*predictions[class_index];
dets[index].prob[j] = (prob > thresh) ? prob : 0;
}
}
}
}
correct_region_boxes(dets, l.w*l.h*l.n, w, h, netw, neth, relative);
}
void fill_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, detection *dets)
{
int j;
for(j = 0; j < net->n; ++j){
layer l = net->layers[j];
if(l.type == YOLO){
int count = get_yolo_detections(l, w, h, net->w, net->h, thresh, map, relative, dets);
dets += count;
}
if(l.type == REGION){
get_region_detections(l, w, h, net->w, net->h, thresh, map, hier, relative, dets);
dets += l.w*l.h*l.n;
}
}
}
detection *get_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, int *num)
{
detection *dets = make_network_boxes(net, thresh, num);
fill_network_boxes(net, w, h, thresh, hier, map, relative, dets);
return dets;
}
void free_detections(detection *dets, int n)
{
int i;
for(i = 0; i < n; ++i){
free(dets[i].prob);
if(dets[i].mask) free(dets[i].mask);
}
free(dets);
}
int network_width(network *net){return net->w;}
int network_height(network *net){return net->h;}
layer get_network_output_layer(network *net)
{
int i;
for(i = net->n - 1; i >= 0; --i){
if(net->layers[i].type != COST) break;
}
return net->layers[i];
}
void free_network(network *net)
{
int i;
for(i = 0; i < net->n; ++i){
free_layer(net->layers[i]);
}
free(net->layers);
if(net->input) free(net->input);
if(net->truth) free(net->truth);
free(net);
}
layer network_output_layer(network *net)
{
int i;
for(i = net->n - 1; i >= 0; --i){
if(net->layers[i].type != COST) break;
}
return net->layers[i];
}
int network_inputs(network *net)
{
return net->layers[0].inputs;
}
int network_outputs(network *net)
{
return network_output_layer(net).outputs;
}
float *network_output(network *net)
{
return network_output_layer(net).output;
}
//////////////////network end
//////////////////////box begin
int nms_comparator(const void *pa, const void *pb)
{
detection a = *(detection *)pa;
detection b = *(detection *)pb;
float diff = 0;
if(b.sort_class >= 0){
diff = a.prob[b.sort_class] - b.prob[b.sort_class];
} else {
diff = a.objectness - b.objectness;
}
if(diff < 0) return 1;
else if(diff > 0) return -1;
return 0;
}
float overlap(float x1, float w1, float x2, float w2)
{
float l1 = x1 - w1/2;
float l2 = x2 - w2/2;
float left = l1 > l2 ? l1 : l2;
float r1 = x1 + w1/2;
float r2 = x2 + w2/2;
float right = r1 < r2 ? r1 : r2;
return right - left;
}
float box_intersection(box a, box b)
{
float w = overlap(a.x, a.w, b.x, b.w);
float h = overlap(a.y, a.h, b.y, b.h);
if(w < 0 || h < 0) return 0;
float area = w*h;
return area;
}
float box_union(box a, box b)
{
float i = box_intersection(a, b);
float u = a.w*a.h + b.w*b.h - i;
return u;
}
float box_iou(box a, box b)
{
return box_intersection(a, b)/box_union(a, b);
}
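/*
 * Class-wise non-maximum suppression: detections with zero objectness are
 * moved to the tail, then for every class the remaining boxes are sorted by
 * that class probability and any box with IoU > thresh against a
 * higher-scoring box has its probability for that class zeroed.
 */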
void do_nms_sort(detection *dets, int total, int classes, float thresh)
{
int i, j, k;
k = total-1;
for(i = 0; i <= k; ++i){
if(dets[i].objectness == 0){
detection swap = dets[i];
dets[i] = dets[k];
dets[k] = swap;
--k;
--i;
}
}
total = k+1;
for(k = 0; k < classes; ++k){
for(i = 0; i < total; ++i){
dets[i].sort_class = k;
}
qsort(dets, total, sizeof(detection), nms_comparator);
for(i = 0; i < total; ++i){
if(dets[i].prob[k] == 0) continue;
box a = dets[i].bbox;
for(j = i+1; j < total; ++j){
box b = dets[j].bbox;
if (box_iou(a, b) > thresh){
dets[j].prob[k] = 0;
}
}
}
}
}
//////////////////////box end
//////////////////////image begin
float colors[6][3] = { {1,0,1}, {0,0,1},{0,1,1},{0,1,0},{1,1,0},{1,0,0} };
float get_color(int c, int x, int max)
{
float ratio = ((float)x/max)*5;
int i = floor(ratio);
int j = ceil(ratio);
ratio -= i;
float r = (1-ratio) * colors[i][c] + ratio*colors[j][c];
//printf("%f\n", r);
return r;
}
static float get_pixel_extend(image m, int x, int y, int c)
{
if(x < 0 || x >= m.w || y < 0 || y >= m.h) return 0;
/*
if(x < 0) x = 0;
if(x >= m.w) x = m.w-1;
if(y < 0) y = 0;
if(y >= m.h) y = m.h-1;
*/
if(c < 0 || c >= m.c) return 0;
return get_pixel(m, x, y, c);
}
void composite_image(image source, image dest, int dx, int dy)
{
int x,y,k;
for(k = 0; k < source.c; ++k){
for(y = 0; y < source.h; ++y){
for(x = 0; x < source.w; ++x){
float val = get_pixel(source, x, y, k);
float val2 = get_pixel_extend(dest, dx+x, dy+y, k);
set_pixel(dest, dx+x, dy+y, k, val * val2);
}
}
}
}
image border_image(image a, int border)
{
image b = make_image(a.w + 2*border, a.h + 2*border, a.c);
int x,y,k;
for(k = 0; k < b.c; ++k){
for(y = 0; y < b.h; ++y){
for(x = 0; x < b.w; ++x){
float val = get_pixel_extend(a, x - border, y - border, k);
if(x - border < 0 || x - border >= a.w || y - border < 0 || y - border >= a.h) val = 1;
set_pixel(b, x, y, k, val);
}
}
}
return b;
}
image copy_image(image p)
{
image copy = p;
copy.data = (float *)calloc(p.h*p.w*p.c, sizeof(float));
memcpy(copy.data, p.data, p.h*p.w*p.c*sizeof(float));
return copy;
}
image tile_images(image a, image b, int dx)
{
if(a.w == 0) return copy_image(b);
image c = make_image(a.w + b.w + dx, (a.h > b.h) ? a.h : b.h, (a.c > b.c) ? a.c : b.c);
fill_cpu(c.w*c.h*c.c, 1, c.data, 1);
embed_image(a, c, 0, 0);
composite_image(b, c, a.w + dx, 0);
return c;
}
image get_label(image **characters, char *string, int size)
{
size = size/10;
if(size > 7) size = 7;
image label = make_empty_image(0,0,0);
while(*string){
image l = characters[size][(int)*string];
image n = tile_images(label, l, -size - 1 + (size+1)/2);
free_image(label);
label = n;
++string;
}
image b = border_image(label, label.h*.25);
free_image(label);
return b;
}
void draw_label(image a, int r, int c, image label, const float *rgb)
{
int w = label.w;
int h = label.h;
if (r - h >= 0) r = r - h;
int i, j, k;
for(j = 0; j < h && j + r < a.h; ++j){
for(i = 0; i < w && i + c < a.w; ++i){
for(k = 0; k < label.c; ++k){
float val = get_pixel(label, i, j, k);
set_pixel(a, i+c, j+r, k, rgb[k] * val);
}
}
}
}
void draw_box(image a, int x1, int y1, int x2, int y2, float r, float g, float b)
{
//normalize_image(a);
int i;
if(x1 < 0) x1 = 0;
if(x1 >= a.w) x1 = a.w-1;
if(x2 < 0) x2 = 0;
if(x2 >= a.w) x2 = a.w-1;
if(y1 < 0) y1 = 0;
if(y1 >= a.h) y1 = a.h-1;
if(y2 < 0) y2 = 0;
if(y2 >= a.h) y2 = a.h-1;
for(i = x1; i <= x2; ++i){
a.data[i + y1*a.w + 0*a.w*a.h] = r;
a.data[i + y2*a.w + 0*a.w*a.h] = r;
a.data[i + y1*a.w + 1*a.w*a.h] = g;
a.data[i + y2*a.w + 1*a.w*a.h] = g;
a.data[i + y1*a.w + 2*a.w*a.h] = b;
a.data[i + y2*a.w + 2*a.w*a.h] = b;
}
for(i = y1; i <= y2; ++i){
a.data[x1 + i*a.w + 0*a.w*a.h] = r;
a.data[x2 + i*a.w + 0*a.w*a.h] = r;
a.data[x1 + i*a.w + 1*a.w*a.h] = g;
a.data[x2 + i*a.w + 1*a.w*a.h] = g;
a.data[x1 + i*a.w + 2*a.w*a.h] = b;
a.data[x2 + i*a.w + 2*a.w*a.h] = b;
}
}
void draw_box_width(image a, int x1, int y1, int x2, int y2, int w, float r, float g, float b)
{
int i;
for(i = 0; i < w; ++i){
draw_box(a, x1+i, y1+i, x2-i, y2-i, r, g, b);
}
}
image float_to_image(int w, int h, int c, float *data)
{
image out = make_empty_image(w,h,c);
out.data = data;
return out;
}
image threshold_image(image im, float thresh)
{
int i;
image t = make_image(im.w, im.h, im.c);
for(i = 0; i < im.w*im.h*im.c; ++i){
t.data[i] = im.data[i]>thresh ? 1 : 0;
}
return t;
}
void draw_detections(image im, detection *dets, int num, float thresh, char **names, image **alphabet, int classes)
{
int i,j;
for(i = 0; i < num; ++i){
char labelstr[4096] = {0};
int class_t = -1;
for(j = 0; j < classes; ++j){
if (dets[i].prob[j] > thresh){
if (class_t < 0) {
strcat(labelstr, names[j]);
class_t = j;
} else {
strcat(labelstr, ", ");
strcat(labelstr, names[j]);
}
printf("%s: %.0f%%\n", names[j], dets[i].prob[j]*100);
}
}
if(class_t >= 0){
int width = im.h * .006;
//printf("%d %s: %.0f%%\n", i, names[class], prob*100);
int offset = class_t*123457 % classes;
float red = get_color(2,offset,classes);
float green = get_color(1,offset,classes);
float blue = get_color(0,offset,classes);
float rgb[3];
//width = prob*20+2;
rgb[0] = red;
rgb[1] = green;
rgb[2] = blue;
box b = dets[i].bbox;
//printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
int left = (b.x-b.w/2.)*im.w;
int right = (b.x+b.w/2.)*im.w;
int top = (b.y-b.h/2.)*im.h;
int bot = (b.y+b.h/2.)*im.h;
if(left < 0) left = 0;
if(right > im.w-1) right = im.w-1;
if(top < 0) top = 0;
if(bot > im.h-1) bot = im.h-1;
draw_box_width(im, left, top, right, bot, width, red, green, blue);
if (alphabet) {
image label = get_label(alphabet, labelstr, (im.h*.03));
draw_label(im, top + width, left, label, rgb);
free_image(label);
}
if (dets[i].mask){
image mask = float_to_image(14, 14, 1, dets[i].mask);
image resized_mask = resize_image(mask, b.w*im.w, b.h*im.h);
image tmask = threshold_image(resized_mask, .5);
embed_image(tmask, im, left, top);
free_image(mask);
free_image(resized_mask);
free_image(tmask);
}
}
}
}
//////////////////////image end
///////////////////////////////////////////////////////////////////////20181108 reorg WeightQ BetaQ ok InputQ ok start
#define MAX(x,y) ((x)>(y)?(x):(y))
#define MIN(x,y) ((x)<(y)?(x):(y))
#define S 2
#define K 3
#define Tn 4
#define Tm 32
#define Tr 26
#define Tc 26
#define OnChipIB_Width ((Tc-1)*S+K)
#define OnChipIB_Height ((Tr-1)*S+K)
#define MAX_BETA_LENGTH (1024)
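/*
 * HLS tiling parameters: Tn/Tm are the input/output channel tile sizes,
 * Tr/Tc the output row/column tile sizes, and K/S the worst-case kernel
 * size and stride used to dimension the on-chip input buffer, so one
 * OnChipIB_Height x OnChipIB_Width input tile covers a Tr x Tc output tile.
 */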
//////////////////////////////////////////////////T3 start
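/*
 * Stage one Tn x TRow x TCol input tile: rows are first memcpy'd into a
 * linear staging buffer, then scattered into input_buffer with out-of-range
 * pixels replaced by pad_value (0 for convolution, a large negative value
 * for maxpool so padding never wins the max).
 */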
void input_load(float *input,float input_buffer[Tn][OnChipIB_Height][OnChipIB_Width],int r,int c,int n,int Kernel_stride,int Padding,int TRow,int TCol,int Input_w,int Input_h,int TN_MIN,int IHxIW,int LayerType)
{
int t1,t2,t3;
int xoffset;
int yoffset;
static float input_memcpy_buffer[Tn*OnChipIB_Height*OnChipIB_Width];
const int Coffset = c*Kernel_stride - Padding;
const int Roffset = r*Kernel_stride - Padding;
const int CurrentOffset = n*IHxIW + Roffset*Input_w + Coffset;
float pad_value = 0;
if(LayerType==1)
pad_value = -1024*1024;
int input_mmcpy_offset = 0;
for(t1 = 0;t1 < TN_MIN; t1++)
for(t2 = 0;t2 < TRow; t2++)
{
memcpy((float *)(input_memcpy_buffer + input_mmcpy_offset),(float *)(input + CurrentOffset + t1*IHxIW + t2*Input_w),TCol*sizeof(float));
input_mmcpy_offset += TCol;
}
input_mmcpy_offset = 0;
for(t1 = 0;t1 < Tn; t1++)
for(t2 = 0;t2 < TRow; t2++)
for(t3 = 0;t3 < TCol; t3++)
{
xoffset = Coffset + t3;
yoffset = Roffset + t2;
bool XEnable = (xoffset >= 0)&&(xoffset < Input_w);
bool YEnable = (yoffset >= 0)&&(yoffset < Input_h);
bool PaddingEnable = XEnable&&YEnable;
if(PaddingEnable&&(t1 < TN_MIN))
input_buffer[t1][t2][t3] = input_memcpy_buffer[input_mmcpy_offset];
else
input_buffer[t1][t2][t3] = pad_value;
input_mmcpy_offset++;
}
}
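/*
 * Load a Tm x Tn weight tile from DRAM. Weights are stored pre-reorganized
 * for this tiling and packed two 16-bit fixed-point values per 32-bit word;
 * each short is converted to float by scaling with 2^-WeightQ.
 */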
void weight_load_reorg(int *Weight,float weight_buffer[Tm][Tn][K][K],bool weight_load_enable,int m,int n,int IFM_numxKxK,int KxK,int Kernel_size,int TM_MIN,int TN_MIN,const int WeightQ)
{
int t1,t2,t3,t4;
static int weight_memcpy_buffer[Tm*Tn*K*K/2];
static int Woffset;
if(!weight_load_enable)
return;
if(m==0&&n==0)
Woffset = 0;
if((TM_MIN*TN_MIN*KxK)%2)
printf("weight % error\n");
memcpy(weight_memcpy_buffer,(int *)(Weight + Woffset),TM_MIN*TN_MIN*KxK/2*sizeof(int));
Woffset += TM_MIN*TN_MIN*KxK/2;
int weight_memcpy_offset = 0;
int cnt = 0;
short input_array[2];
float input_value;
for(t3 = 0;t3 <Kernel_size; t3++)
for(t4 = 0;t4 <Kernel_size; t4++)
for(t1 = 0;t1 < Tm; t1++)
for(t2 = 0;t2 < Tn; t2++)
{
bool Enable = (t1 < TM_MIN)&&(t2 < TN_MIN);
if(Enable)
{
if(cnt==0)
{
input_array[0] = weight_memcpy_buffer[weight_memcpy_offset];
input_array[1] = weight_memcpy_buffer[weight_memcpy_offset] >> 16;
weight_memcpy_offset++;
}
input_value = input_array[cnt]*pow(2.0,-WeightQ);
weight_buffer[t1][t2][t3][t4] = input_value;
cnt++;
if(cnt==2)
cnt = 0;
}
else
weight_buffer[t1][t2][t3][t4] = 0;
}
}
void copy_input_weight(float *input,int *Weight,int InFM_num,int Input_w,int Input_h,int OutFM_num,int Kernel_size,int Kernel_stride,int r,int c,int m,int n,
int TM_MIN,int TN,int TRow,int TCol,int Padding,float input_buffer[Tn][OnChipIB_Height][OnChipIB_Width],float weight_buffer[Tm][Tn][K][K],int TMP_N_next[1],
bool enable,bool weight_load_enable,bool initialize,const int IHxIW,const int KxK,const int IFM_numxKxK,const int LayerType,const int WeightQ)
{
if(!enable)
return ;
const int TN_MIN = MIN(TN,InFM_num - n);
TMP_N_next[0] = n;
input_load(input, input_buffer, r, c, n, Kernel_stride, Padding, TRow, TCol, Input_w, Input_h, TN_MIN, IHxIW, LayerType);
weight_load_reorg(Weight,weight_buffer,weight_load_enable,m,n,IFM_numxKxK,KxK,Kernel_size,TM_MIN,TN_MIN,WeightQ);
}
void copy_local_beta(float beta_buffer[MAX_BETA_LENGTH],float local_beta_buffer[MAX_BETA_LENGTH],const int TM_MIN,int m)
{
int offset;
int tm;
for(tm = 0,offset = m;tm < TM_MIN;tm++)
{
local_beta_buffer[tm] = beta_buffer[offset];
offset++;
}
}
void nonlinear_leaky(float Input[Tm][Tr][Tc],const int TM_MIN,const int TR_MIN,const int TC_MIN,const bool IsNL)
{
int tr,tc,tm;
if(!IsNL)
return ;
for(tm = 0;tm < TM_MIN;tm++)
#pragma HLS LOOP_TRIPCOUNT min=1 max=1
for(tr = 0;tr < TR_MIN;tr++)
#pragma HLS LOOP_TRIPCOUNT min=1 max=14
for(tc = 0;tc < TC_MIN;tc++)
{
#pragma HLS LOOP_TRIPCOUNT min=14 max=14
#pragma HLS PIPELINE
float tmp = Input[tm][tr][tc];
if(tmp < 0)
Input[tm][tr][tc] = tmp*0.1;
}
}
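/*
 * Core tiled convolution: for every kernel position (i,j) and output pixel
 * (tr,tc), accumulate Tn partial products into Tm output channels. The bias
 * (beta) is added on the first input tile (n==0) and leaky ReLU is applied
 * after the last one (reluenable).
 */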
void compute(float input_buffer[Tn][OnChipIB_Height][OnChipIB_Width],float output_buffer[Tm][Tr][Tc],
float weight_buffer[Tm][Tn][K][K],float beta_buffer[MAX_BETA_LENGTH],int TMP_N_next[1],
const int Kernel_size,const int Kernel_stride,int TMP_M,
const int TM_MIN,const int TR_MIN,const int TC_MIN,bool enable,const bool IsNL,const bool reluenable)
{
static float local_beta_buffer[Tm];
#pragma HLS ARRAY_PARTITION variable=local_beta_buffer complete dim=1
if(!enable)
{
copy_local_beta(beta_buffer,local_beta_buffer,TM_MIN,TMP_M);
return;
}
int i,j,tr,tc,tm,tn;
int n = TMP_N_next[0];
float partial_mul[Tm][Tn];
float partial_add[Tm];
for(i =0;i < Kernel_size; i++)
#pragma HLS LOOP_TRIPCOUNT min=1 max=5
for(j = 0;j < Kernel_size; j++)
#pragma HLS LOOP_TRIPCOUNT min=1 max=5
for(tr = 0;tr < TR_MIN;tr++)
#pragma HLS LOOP_TRIPCOUNT min=14 max=14
for(tc = 0;tc < TC_MIN;tc++)
{
#pragma HLS LOOP_TRIPCOUNT min=14 max=14
#pragma HLS PIPELINE
for(tm = 0;tm < Tm;tm++)
{
if(i==0&&j==0&&n==0)
partial_add[tm] = local_beta_buffer[tm];
else
partial_add[tm] = output_buffer[tm][tr][tc];
}
for(tm = 0;tm < Tm;tm++)
for(tn = 0;tn <Tn;tn++)
{
partial_mul[tm][tn] = weight_buffer[tm][tn][i][j]*input_buffer[tn][Kernel_stride*tr+i][Kernel_stride*tc+j];
}
for(tm = 0;tm < Tm;tm++)
{
float partial_sum = 0;
for(tn = 0;tn <Tn;tn++)
{
partial_sum += partial_mul[tm][tn];
}
output_buffer[tm][tr][tc] = partial_add[tm] + partial_sum;
}
}
if(reluenable)
nonlinear_leaky(output_buffer,TM_MIN,TR_MIN,TC_MIN,IsNL);
}
void write_back_output_reorg(float output_buffer[Tm][Tr][Tc],float *Output,int r,int c,int m,const int Output_w,const int Output_h,
const int TM_MIN,const int TR_MIN,const int TC_MIN,const int OHxOW,bool write_flag)
{
if(!write_flag)
return;
const int offset = m*OHxOW + r*Output_w + c;
int tr,tm;
//for(tm = 0;tm < TM_MIN;tm++)
// for(tr = 0;tr < TR_MIN;tr++)
// for(tc = 0;tc < TC_MIN;tc++)
// {
// Output[tm*OHxOW + tr*Output_w + tc + offset] = output_buffer[tm][tr][tc];
// }
for(tm = 0;tm < TM_MIN;tm++)
for(tr = 0;tr < TR_MIN;tr++)
{
memcpy((float *)(Output + tm*OHxOW + tr*Output_w + offset),output_buffer[tm][tr],TC_MIN*sizeof(float));
}
}
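/*
 * Tn-channel max pooling over a Kernel_size x Kernel_size window with the
 * given stride; the running maximum is written out at the last window
 * position (the i==1 && j==1 check assumes a 2x2 window).
 */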
void pool_yolo2(float Input[Tn][OnChipIB_Height][OnChipIB_Width],float Output[Tm][Tr][Tc],
const int Kernel_size,const int Kernel_stride,
const int TM_MIN,const int TR_MIN,const int TC_MIN,bool enable)
{
if(!enable)
return;
int i,j,tr,tc,of;
float tmp[Tn];
for(tr = 0;tr < TR_MIN;tr++)
for(tc = 0;tc < TC_MIN;tc++)
for(i =0;i < Kernel_size; i++)
for(j = 0;j < Kernel_size; j++)
{
#pragma HLS PIPELINE
for( of = 0; of < Tn; of++)
{
if(i==0&&j==0)
tmp[of] = -1024*1024;
if(Input[of][tr*Kernel_stride+i][tc*Kernel_stride+j] > tmp[of])
tmp[of] = Input[of][tr*Kernel_stride+i][tc*Kernel_stride+j];
if(i==1&&j==1)
Output[of][tr][tc] = tmp[of];
}
}
}
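/*
 * Reorg (space-to-depth) for stride 2: every 2x2 block of the single input
 * channel is scattered across four output channels at half resolution.
 */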
void reorg_yolo2(float Input[Tn][OnChipIB_Height][OnChipIB_Width],float Output[Tm][Tr][Tc],
const int Kernel_size,const int Kernel_stride,
const int TM_MIN,const int TR_MIN,const int TC_MIN,bool enable)
{
int x, y,kx,ky;
unsigned char Yoffset;
unsigned char Xoffset;
if(!enable)
return;
for( y = 0; y < TR_MIN; y++)
for( x = 0; x < TC_MIN; x++)
for(ky= 0;ky < 2; ky++)
for(kx = 0;kx < 2; kx++)
{
#pragma HLS PIPELINE
Yoffset = (y << 1) + ky;
Xoffset = (x << 1) + kx;
int in_index = (ky << 1) + kx;
Output[in_index][y][x] = Input[0][Yoffset][Xoffset];
}
}
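/*
 * Inner ping-pong stage. For convolution (LayerType==0) it alternates two
 * input/weight buffers so the next tile is loaded while the current one is
 * computed; for maxpool (1) and reorg (2) it alternates the two input
 * buffers across calls and remembers the previous tile indices (tmp_x,
 * tmp_tx_min) so the matching write-back can lag one iteration behind.
 */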
void intra_pingpong_wrapper(float *Input,int *Weight, float output_buffer[Tm][Tr][Tc],float beta_buffer[MAX_BETA_LENGTH],
float input_buffer0[Tn][OnChipIB_Height][OnChipIB_Width],float input_buffer1[Tn][OnChipIB_Height][OnChipIB_Width],
int InFM_num,int Input_w,int Input_h,int OutFM_num,int Kernel_size,int Kernel_stride,
int TMP_R,int TMP_C,int TMP_M,int m,int TM_MIN,int TR_MIN,int TC_MIN,int TN,int TRow,int TCol,int Padding,
int IHxIW,int KxK,int IFM_numxKxK,int nLoops,bool IsNL,int LayerType,int TM,int TMP_X_next[1],int TX_MIN_next[1],bool pingpongx,bool input_flag,bool process_flag,
int WeightQ)
{
static float weight_buffer0[Tm][Tn][K][K];
#pragma HLS ARRAY_PARTITION variable=weight_buffer0 complete dim=1
#pragma HLS ARRAY_PARTITION variable=weight_buffer0 complete dim=2
static float weight_buffer1[Tm][Tn][K][K];
#pragma HLS ARRAY_PARTITION variable=weight_buffer1 complete dim=1
#pragma HLS ARRAY_PARTITION variable=weight_buffer1 complete dim=2
static int NOP[1];
static int tmp_x;
static int tmp_tx_min;
if(LayerType==0)
{
if(!input_flag)
return;
TMP_X_next[0] = TMP_M;//consider by the inner-out loop
TX_MIN_next[0] = TM_MIN;// like above
bool pingpong = 0;
int TMP_N_next0[1];
int TMP_N_next1[1];
int n;
int TMP_N;
for(TMP_N = 0,n = 0;n < nLoops+1; n++,TMP_N += TN)
{
if(pingpong == 1)
{
copy_input_weight(Input,Weight,InFM_num,Input_w,Input_h,OutFM_num,Kernel_size,Kernel_stride,TMP_R,TMP_C,TMP_M,TMP_N,
TM_MIN,TN,TRow,TCol,Padding,input_buffer1,weight_buffer1,TMP_N_next1,n!=nLoops,1,(m==0)&&(n==0),IHxIW,KxK,IFM_numxKxK,LayerType,WeightQ);
compute(input_buffer0,output_buffer,weight_buffer0,beta_buffer,TMP_N_next0,Kernel_size,Kernel_stride,TMP_M,TM_MIN,TR_MIN,TC_MIN,n!=0,IsNL,n==nLoops);
pingpong = 0;
}else
{
copy_input_weight(Input,Weight,InFM_num,Input_w,Input_h,OutFM_num,Kernel_size,Kernel_stride,TMP_R,TMP_C,TMP_M,TMP_N,
TM_MIN,TN,TRow,TCol,Padding,input_buffer0,weight_buffer0,TMP_N_next0,n!=nLoops,1,(m==0)&&(n==0),IHxIW,KxK,IFM_numxKxK,LayerType,WeightQ);
compute(input_buffer1,output_buffer,weight_buffer1,beta_buffer,TMP_N_next1,Kernel_size,Kernel_stride,TMP_M,TM_MIN,TR_MIN,TC_MIN,n!=0,IsNL,n==nLoops);
pingpong = 1;
}
}
}
else if(LayerType==1)
{
if(pingpongx==0)
{
TMP_X_next[0] = tmp_x;
TX_MIN_next[0] = tmp_tx_min;
tmp_x = TMP_M;
tmp_tx_min = TM_MIN;
copy_input_weight(Input,Weight,InFM_num,Input_w,Input_h,OutFM_num,Kernel_size,Kernel_stride,TMP_R,TMP_C,TMP_M,TMP_M,
TM_MIN,TM,TRow,TCol,0,input_buffer0,NULL,NOP,input_flag,0,0,IHxIW,KxK,IFM_numxKxK,LayerType,WeightQ);
pool_yolo2(input_buffer1,output_buffer,Kernel_size,Kernel_stride,TX_MIN_next[0],TR_MIN,TC_MIN,process_flag);
}else
{
TMP_X_next[0] = tmp_x;
TX_MIN_next[0] = tmp_tx_min;
tmp_x = TMP_M;
tmp_tx_min = TM_MIN;
copy_input_weight(Input,Weight,InFM_num,Input_w,Input_h,OutFM_num,Kernel_size,Kernel_stride,TMP_R,TMP_C,TMP_M,TMP_M,
TM_MIN,TM,TRow,TCol,0,input_buffer1,NULL,NOP,input_flag,0,0,IHxIW,KxK,IFM_numxKxK,LayerType,WeightQ);
pool_yolo2(input_buffer0,output_buffer,Kernel_size,Kernel_stride,TX_MIN_next[0],TR_MIN,TC_MIN,process_flag);
}
}
else if(LayerType==2)
{
if(pingpongx==0)
{
TMP_X_next[0] = tmp_x;
TX_MIN_next[0] = tmp_tx_min;
tmp_x = TMP_M;
tmp_tx_min = TM_MIN;
copy_input_weight(Input,Weight,InFM_num,Input_w,Input_h,OutFM_num,Kernel_size,Kernel_stride,TMP_R,TMP_C,TMP_M,TMP_M,
TM_MIN,TM,TRow,TCol,0,input_buffer0,NULL,NOP,input_flag,0,0,IHxIW,KxK,IFM_numxKxK,LayerType,WeightQ);
reorg_yolo2(input_buffer1,output_buffer,Kernel_size,Kernel_stride,TX_MIN_next[0],TR_MIN,TC_MIN,process_flag);
}else
{
TMP_X_next[0] = tmp_x;
TX_MIN_next[0] = tmp_tx_min;
tmp_x = TMP_M;
tmp_tx_min = TM_MIN;
copy_input_weight(Input,Weight,InFM_num,Input_w,Input_h,OutFM_num,Kernel_size,Kernel_stride,TMP_R,TMP_C,TMP_M,TMP_M,
TM_MIN,TM,TRow,TCol,0,input_buffer1,NULL,NOP,input_flag,0,0,IHxIW,KxK,IFM_numxKxK,LayerType,WeightQ);
reorg_yolo2(input_buffer0,output_buffer,Kernel_size,Kernel_stride,TX_MIN_next[0],TR_MIN,TC_MIN,process_flag);
}
}
}
void copy_beta(float beta_buffer[MAX_BETA_LENGTH],int *Beta,const int OFM_NUM,const int BetaQ)
{
static int beta_tmp[MAX_BETA_LENGTH/2];
int NUM = (OFM_NUM+1)/2;
memcpy(beta_tmp,(int *)Beta,NUM*sizeof(int));
int x;
for(x = 0;x < NUM;x++)
{
beta_buffer[2*x] = ((short)(beta_tmp[x]))*pow(2.0,-BetaQ);
beta_buffer[2*x+1] = ((short)(beta_tmp[x]>>16))*pow(2.0,-BetaQ);
}
}
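/*
 * Top-level accelerator call for one layer. Output tiles are iterated over
 * rows, columns and output channels; output_buffer/output_buffer1 are
 * ping-ponged so the write-back of a tile lags the compute by one iteration
 * for convolution and two for maxpool/reorg, hence the extra mLoops
 * iterations (mLoops_bound).
 */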
void YOLO2_FPGA(float *Input,float *Output,int *Weight,int *Beta,const int InFM_num,const int OutFM_num,
const int Kernel_size,const int Kernel_stride,
const int Input_w,const int Input_h,const int Padding,const bool IsNL,const bool IsBN,
const int TM,const int TN,const int TR,const int TC,
const int mLoops,const int nLoops,const int rLoops,const int cLoops,const int LayerType,
const int WeightQ,const int BetaQ)
{
//const int output_w = (Input_w - Kernel_size + 2*Padding)/Kernel_stride + 1 ;
//const int output_h = (Input_h - Kernel_size + 2*Padding)/Kernel_stride + 1 ;
int output_w = (Input_w - Kernel_size + (Padding << 1))/Kernel_stride + 1 ;
int output_h = (Input_h - Kernel_size + (Padding << 1))/Kernel_stride + 1 ;
if(LayerType==1)
{
output_w = (Input_w - 1)/Kernel_stride + 1 ;
output_h = (Input_h - 1)/Kernel_stride + 1 ;
}
const int OHxOW = output_h*output_w;
const int TRow = (TR-1)*Kernel_stride+Kernel_size;
const int TCol = (TC-1)*Kernel_stride+Kernel_size;
const int IHxIW = Input_h*Input_w;
const int KxK = Kernel_size*Kernel_size;
const int IFM_numxKxK = InFM_num*KxK;
const int mLoops_bound = LayerType ? (mLoops + 2): (mLoops + 1);
static float input_buffer0[Tn][OnChipIB_Height][OnChipIB_Width];
#pragma HLS ARRAY_PARTITION variable=input_buffer0 complete dim=1
static float input_buffer1[Tn][OnChipIB_Height][OnChipIB_Width];
#pragma HLS ARRAY_PARTITION variable=input_buffer1 complete dim=1
static float output_buffer[Tm][Tr][Tc];
#pragma HLS ARRAY_PARTITION variable=output_buffer complete dim=1
static float output_buffer1[Tm][Tr][Tc];
#pragma HLS ARRAY_PARTITION variable=output_buffer1 complete dim=1
static float beta_buffer[MAX_BETA_LENGTH];
int r,c,m;
/////////////////////////////////param
int TMP_R,TMP_C,TMP_M;
int TM_MIN,TR_MIN,TC_MIN;
///////////////////////////////////////
int TMP_M_next0[1];
int TMP_M_next1[1];
int TM_MIN_next0[1];
int TM_MIN_next1[1];
bool pingpongm;
if(LayerType==0)
copy_beta(beta_buffer,Beta,OutFM_num,BetaQ);
for(TMP_R = 0,r = 0; r < rLoops; r++, TMP_R += TR)
{
TR_MIN = MIN(TR,output_h -TMP_R);
for(TMP_C = 0,c = 0; c < cLoops; c++,TMP_C += TC)
{
TC_MIN = MIN(TC,output_w -TMP_C);
pingpongm = 0;
for(TMP_M = 0, m = 0; m < mLoops_bound; m++,TMP_M += TM)
{
TM_MIN = MIN(TM,OutFM_num-TMP_M);
bool MneZero = (m!=0);
bool MneOne = (m!=1);
bool MnemLoops = (m!=mLoops);
bool MneMLoopsaddOne = (m!=(mLoops+1));
bool input_flag = LayerType ? MnemLoops&&MneMLoopsaddOne: MnemLoops;
bool process_flag = LayerType ? MneZero&&MneMLoopsaddOne : MnemLoops;
bool write_flag = LayerType ? MneZero&&MneOne : MneZero;
if(pingpongm==0)
{
intra_pingpong_wrapper(Input,Weight,output_buffer1,beta_buffer,input_buffer0,input_buffer1,
InFM_num, Input_w, Input_h, OutFM_num, Kernel_size, Kernel_stride,
TMP_R, TMP_C, TMP_M, m, TM_MIN, TR_MIN, TC_MIN, TN, TRow, TCol, Padding,IHxIW,KxK,IFM_numxKxK,nLoops,IsNL,LayerType,TM, TMP_M_next1,TM_MIN_next1, pingpongm, input_flag, process_flag,
WeightQ);
write_back_output_reorg(output_buffer,Output,TMP_R,TMP_C,TMP_M_next0[0],output_w,output_h,TM_MIN_next0[0],TR_MIN,TC_MIN,OHxOW,write_flag);
pingpongm = 1;
}else
{
intra_pingpong_wrapper(Input,Weight,output_buffer,beta_buffer,input_buffer0,input_buffer1,
InFM_num, Input_w, Input_h, OutFM_num, Kernel_size, Kernel_stride,
TMP_R, TMP_C, TMP_M, m, TM_MIN, TR_MIN, TC_MIN, TN, TRow, TCol, Padding,IHxIW,KxK,IFM_numxKxK,nLoops,IsNL,LayerType,TM, TMP_M_next0,TM_MIN_next0, pingpongm, input_flag, process_flag,
WeightQ);
write_back_output_reorg(output_buffer1,Output,TMP_R,TMP_C,TMP_M_next1[0],output_w,output_h,TM_MIN_next1[0],TR_MIN,TC_MIN,OHxOW,write_flag);
pingpongm = 0;
}
}
}
}
}
#define MIN_VALUE (-1024*1024*1024)
#define MAX_VALUE (1024*1024*1024)
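/*
 * Per-layer weight quantization: find the largest Q such that all values fit
 * the 16-bit range [ap16_min*2^-Q, ap16_max*2^-Q], round each value to a
 * short scaled by 2^Q, and report the accumulated rounding error. Returns
 * the index of the first layer with a zero weight count, or 0 when all
 * layers were processed.
 */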
int quantize(float *in,float *out,int *offset,int layer_num,float *ap16_range,int *maxQ_array)
{
int i;
int offset_index = 0;
int woffset = 0;
for(i=0;i<layer_num;i++)
{
if(offset[offset_index]==0)
return i;
printf("Layer %2d;weight num=%12d ",i,offset[offset_index]);
int j;
float min,max;
min = MAX_VALUE;
max = MIN_VALUE;
for(j=0;j<offset[offset_index];j++)
{
float tmp_in_float = in[woffset+j];
if(tmp_in_float<min)
min = tmp_in_float;
if(tmp_in_float>max)
max = tmp_in_float;
}
printf("float min=%.7lf,max=%.7lf ",min,max);//find float min max
int k;
int maxQ = -1;
for(k=0;k<16;k++)//find maxQ
{
if(min>ap16_range[2*k]&&max<ap16_range[2*k+1])
{
maxQ = k;
}
else if(k==0)
{
printf("beyond Q0 min=%.7lf,max=%.7lf ",min,max);
break;
}
}
printf("maxQ=%d ",maxQ);
maxQ_array[i] = maxQ;
double max_error,min_error,sum_error;
sum_error = 0;
max_error = MIN_VALUE;
min_error = MAX_VALUE;
for(j=0;j<offset[offset_index];j++)
{
float tmp_in_float = in[woffset+j];
short tmp_fixed = (short)(tmp_in_float*pow(2.0,maxQ));
float tmp_out_float = (float)tmp_fixed*pow(2.0,-maxQ);
double error = (tmp_out_float - tmp_in_float)*(tmp_out_float - tmp_in_float);
error = sqrt(error);
sum_error += error;
if(error<min_error)
min_error = error;
if(error>max_error)
max_error = error;
out[woffset+j] = tmp_out_float;
}
printf("sum2_error = %.7lf,min_error=%.7lf,max_error=%.7lf",sum_error,min_error,max_error);
printf("\n");
woffset += offset[offset_index];
offset_index++;
}
return 0;
}
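/*
 * Host-side driver for the FPGA path: loads the packed 16-bit weights and
 * biases, builds the Q-format range table, quantizes the 416x416x3 input to
 * Q14, sets up ping-pong in/out pointers inside one large buffer, and then
 * walks the network calling YOLO2_FPGA for conv/maxpool/reorg layers while
 * the region layer runs on the CPU.
 */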
void yolov2_hls_ps(network *net, float *input)
{
int x;
network orig = *net;
net->input = input;
int weight_offset[32] = {864, 18432, 73728, 8192, 73728,
294912, 32768, 294912, 1179648, 131072, 1179648, 131072,
1179648, 4718592, 524288, 4718592, 524288, 4718592, 9437184,
9437184, 32768, 11796480, 435200, 0, 0, 0, 0, 0, 0, 0, 0, 0};
int beta_offset[32] = {32, 64, 128, 64, 128, 256, 128, 256, 512, 256, 512, 256, 512, 1024,
512, 1024, 512, 1024, 1024, 1024, 64, 1024, 425, 0, 0, 0, 0, 0, 0, 0, 0, 0};
int offset_index = 0;
int *Weight_buf = (int *)calloc(203767168/4/2,sizeof(int));
int *Beta_buf = (int *)calloc((43044+4)/4/2,sizeof(int));
FILE *fp_w = fopen("weightsv2_comb_reorg_ap16.bin", "rb");
if(!fp_w) file_error("weightsv2_comb_reorg_ap16.bin");
FILE *fp_b = fopen("biasv2_comb_ap16.bin", "rb");
if(!fp_b) file_error("biasv2_comb_ap16.bin");
fread(Weight_buf, sizeof(int), 203767168/4/2, fp_w);
fread(Beta_buf, sizeof(int), (43044+4)/4/2, fp_b);
fclose(fp_w);
fclose(fp_b);
#define QNUM 23
short ap16_min = 0x8000;
short ap16_max = 0x7fff;
printf("ap16_min = %d \nap16_max = %d\n",ap16_min,ap16_max);
float ap16_range[16*2];
for(x=0;x<16;x++)
{
printf("Q%2d:",x);
ap16_range[2*x] = (float)ap16_min*pow((float)2,-x);//min
ap16_range[2*x+1] = (float)ap16_max*pow((float)2,-x);//max
printf("min=%.7lf,max=%.7lf\n",ap16_range[2*x],ap16_range[2*x+1]);
}
int maxQarray[QNUM+1];
int weightQ[QNUM];
int betaQ[QNUM];
FILE *Qin;
Qin = fopen("weightsv2_comb_reorg_ap16_maxQ_23.bin","rb");
if(!Qin) file_error("Qin error 2\n");
fread(weightQ,sizeof(int),QNUM,Qin);
fclose(Qin);
for(x=0;x<QNUM;x++)
printf("[%2d weightQ]=%2d\n",x,weightQ[x]);
Qin = fopen("biasv2_comb_ap16_maxQ_23.bin","rb");
if(!Qin) file_error("Qin error 4\n");
fread(betaQ,sizeof(int),QNUM,Qin);
fclose(Qin);
for(x=0;x<QNUM;x++)
printf("[%2d betaQ]=%2d\n",x,betaQ[x]);
#define MEM_LEN (416*416*32+208*208*32)
float *Memory_buf = (float*)calloc(MEM_LEN+1024+1024,sizeof(float));
float *Memory_top = Memory_buf+1024;
float *Memory_bottom = Memory_top + MEM_LEN;
//memcpy(Memory_top,input,416*416*3*sizeof(float));//416x416x3 input_pic
for(x=0;x<416*416*3;x++)//1st Layer input Q14
{
Memory_top[x] = ((short)(input[x]*pow(2.0,14)))*pow(2.0,-14);
}
float *inout_fixed_buf = (float *)calloc(416*416*32,sizeof(float));
maxQarray[0] = 14;//1st layer input Q14
float* in_ptr[32];
float* out_ptr[32];
#define ROUTE16_LEN (26*26*512)
#define CONV27_LEN (13*13*256)
#define CONV24_LEN (13*13*1024)
for(x=0;x<18;x++)
{
if(x%2==0)
{
in_ptr[x] = Memory_top;
out_ptr[x] = Memory_bottom - net->layers[x].outputs ;
}
else
{
in_ptr[x] = out_ptr[x-1];
out_ptr[x] = Memory_top;
}
}
for(x=18;x<25;x++)
{
if(x%2==0)
{
in_ptr[x] = Memory_top;
out_ptr[x] = Memory_bottom - ROUTE16_LEN - net->layers[x].outputs;
}else
{
in_ptr[x] = out_ptr[x-1];
out_ptr[x] = Memory_top;
}
}
in_ptr[26] = Memory_bottom - ROUTE16_LEN;
out_ptr[26] = Memory_top;
in_ptr[27] = Memory_top;
out_ptr[27] = Memory_bottom - ROUTE16_LEN - CONV24_LEN - CONV27_LEN;
in_ptr[29] = out_ptr[27];
out_ptr[29] = Memory_top;
in_ptr[30] = Memory_top;
out_ptr[30] = Memory_bottom - net->layers[30].outputs;
in_ptr[31] = out_ptr[30];
network netp = *net;
int i;
int woffset = 0;
int aoffset = 0;
int boffset = 0;
int TR,TC,TM,TN;
int output_w,output_h;
int rLoops,cLoops,mLoops,nLoops;
double sum_gop = 0.0;
for(i = 0; i < netp.n; ++i)
{
netp.index = i;
layer l = netp.layers[i];
printf("Layer[%2d]: ",i);
switch(l.type)
{
case CONVOLUTIONAL:
printf("outputMemory:%8d;BN=%d;Activation=%d;conv %5d %2d x%2d /%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BFLOPs\n",l.outputs,l.batch_normalize,l.activation, l.n, l.size, l.size, l.stride, l.w, l.h, l.c, l.out_w, l.out_h, l.out_c, (2.0 * l.n * l.size*l.size*l.c/l.groups * l.out_h*l.out_w)/1000000000.);
sum_gop += (2.0 * l.n * l.size*l.size*l.c/l.groups * l.out_h*l.out_w)/1000000000.;
output_w = (l.w - l.size + 2*l.pad)/l.stride + 1 ;
output_h = (l.h - l.size + 2*l.pad)/l.stride + 1 ;
TR = MIN(((OnChipIB_Height-l.size)/l.stride+1),Tr);//keep Kernel_stride>=1
TR = MIN(output_h,TR);
TC = MIN(((OnChipIB_Width-l.size)/l.stride+1),Tc);
TC = MIN(output_w,TC);
TM = MIN(l.n,Tm);
TN = MIN(l.c,Tn);
rLoops = (int)ceil(((float)output_h)/TR);
cLoops = (int)ceil(((float)output_w)/TC);
mLoops = (int)ceil(((float)l.n)/TM);
nLoops = (int)ceil(((float)l.c)/TN);
YOLO2_FPGA(in_ptr[i],out_ptr[i],Weight_buf+woffset/2,Beta_buf+boffset/2,
l.c,l.n,l.size,
l.stride,l.w,l.h,l.pad,l.activation==LEAKY?1:0,l.batch_normalize?1:0,
TM,TN,TR,TC,
mLoops,nLoops,rLoops,cLoops,0,
weightQ[offset_index],betaQ[offset_index]);
quantize(out_ptr[i],inout_fixed_buf,&l.outputs, 1,ap16_range,&maxQarray[offset_index+1]);
memcpy(out_ptr[i],inout_fixed_buf,l.outputs*sizeof(float));
printf("TR=%d,TC=%d,TM=%d,TN=%d,rLoops=%d,cLoops=%d,mLoops=%d,nLoops=%d\n",TR,TC,TM,TN,rLoops,cLoops,mLoops,nLoops);
woffset += weight_offset[offset_index];
boffset += beta_offset[offset_index];
offset_index++;
break;
case MAXPOOL:
printf("outputMemory:%8d;max %d x %d / %d %4d x%4d x%4d -> %4d x%4d x%4d\n",l.outputs, l.size, l.size, l.stride, l.w, l.h, l.c, l.out_w, l.out_h, l.out_c);
//output_w = (l.w - l.size)/l.stride + 1 ;
//output_h = (l.h - l.size)/l.stride + 1 ;
output_w = l.out_w;
output_h = l.out_h;
TR = MIN(((OnChipIB_Height-l.size)/l.stride+1),Tr);//keep Kernel_stride>=1
TC = MIN(((OnChipIB_Width-l.size)/l.stride+1),Tc);
TR = MIN(output_h,TR);
TC = MIN(output_w,TC);
TM = MIN(Tm,Tn);
TM = MIN(l.c,TM);
rLoops = (int)ceil(((float)output_h)/TR);
cLoops = (int)ceil(((float)output_w)/TC);
mLoops = (int)ceil(((float)l.c)/TM);
YOLO2_FPGA(in_ptr[i],out_ptr[i],NULL,NULL,l.c,l.c,
l.size,l.stride,l.w,l.h,l.pad,0,0,TM,0,TR,TC,mLoops,0,rLoops,cLoops,1,
0,0);
break;
case REORG:
printf("outputMemory:%8d;reorg /%2d %4d x%4d x%4d -> %4d x%4d x%4d\n",l.outputs, l.stride, l.w, l.h, l.c, l.out_w, l.out_h, l.out_c);
output_w = 26;
output_h = 32*13;
TR = MIN(((OnChipIB_Height-l.stride)/l.stride+1),Tr);//keep Kernel_stride>=1
TR = MIN(output_h,TR);
TC = MIN(((OnChipIB_Width-l.stride)/l.stride+1),Tc);
TC = MIN(output_w,TC);
TM = 4;
rLoops = (int)ceil(((float)output_h)/TR);
cLoops = (int)ceil(((float)output_w)/TC);
mLoops = 1;
YOLO2_FPGA(in_ptr[i],out_ptr[i],NULL,NULL,1,4,
l.stride,l.stride,52,32*26,0,0,0,TM,0,TR,TC,mLoops,0,rLoops,cLoops,2,
0,0);
break;
case ROUTE:
printf("outputMemory:%8d;route ",l.outputs);
int j;
for(j = 0; j < l.n; ++j){
printf(" %d", l.input_layers[j]);
}
printf("\n");
break;
case REGION:
printf("outputMemory:%8d;Detection\n",l.outputs);
netp.input = in_ptr[i];
forward_region_layer(l,netp);
break;
}
netp.input = l.output;
}
printf("SUM_GOP=%g\n",sum_gop);
*net = orig;
for(i=0;i<QNUM+1;i++)
{
printf("[%2d layer input maxQ]=%2d\n",i,maxQarray[i]);
}
FILE* fout;
char s[256];
sprintf(s,"yolov2_ap16_inout_maxQ_%d.bin", QNUM+1);
printf("%s\n",s);
fout = fopen(s,"wb");
if(!fout){
printf("fopen %s error\n",s);
}else{
fwrite(maxQarray,sizeof(int), QNUM+1,fout);
fclose(fout);
}
free(Memory_buf);
free(Weight_buf);
free(Beta_buf);
}
///////////////////////////////////////////////////////////////////////20181108 reorg WeightQ BetaQ ok InputQ ok end
#endif
|
ocp_nlp_sqp.c | /*
* Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
* Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
* Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
* Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
*
* This file is part of acados.
*
* The 2-Clause BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.;
*/
#include "acados/ocp_nlp/ocp_nlp_sqp.h"
// external
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#if defined(ACADOS_WITH_OPENMP)
#include <omp.h>
#endif
// blasfeo
#include "blasfeo/include/blasfeo_d_aux.h"
#include "blasfeo/include/blasfeo_d_aux_ext_dep.h"
#include "blasfeo/include/blasfeo_d_blas.h"
// acados
#include "acados/ocp_nlp/ocp_nlp_common.h"
#include "acados/ocp_nlp/ocp_nlp_dynamics_cont.h"
#include "acados/ocp_nlp/ocp_nlp_reg_common.h"
#include "acados/ocp_qp/ocp_qp_common.h"
#include "acados/utils/mem.h"
#include "acados/utils/print.h"
#include "acados/utils/timing.h"
#include "acados/utils/types.h"
#include "acados_c/ocp_qp_interface.h"
/************************************************
* options
************************************************/
acados_size_t ocp_nlp_sqp_opts_calculate_size(void *config_, void *dims_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
acados_size_t size = 0;
size += sizeof(ocp_nlp_sqp_opts);
size += ocp_nlp_opts_calculate_size(config, dims);
return size;
}
void *ocp_nlp_sqp_opts_assign(void *config_, void *dims_, void *raw_memory)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
char *c_ptr = (char *) raw_memory;
ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) c_ptr;
c_ptr += sizeof(ocp_nlp_sqp_opts);
opts->nlp_opts = ocp_nlp_opts_assign(config, dims, c_ptr);
c_ptr += ocp_nlp_opts_calculate_size(config, dims);
assert((char *) raw_memory + ocp_nlp_sqp_opts_calculate_size(config, dims) >= c_ptr);
return opts;
}
void ocp_nlp_sqp_opts_initialize_default(void *config_, void *dims_, void *opts_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
// int ii;
// this first !!!
ocp_nlp_opts_initialize_default(config, dims, nlp_opts);
// SQP opts
opts->max_iter = 20;
opts->tol_stat = 1e-8;
opts->tol_eq = 1e-8;
opts->tol_ineq = 1e-8;
opts->tol_comp = 1e-8;
opts->ext_qp_res = 0;
opts->qp_warm_start = 0;
opts->warm_start_first_qp = false;
opts->rti_phase = 0;
opts->print_level = 0;
opts->initialize_t_slacks = 0;
// overwrite default submodules opts
// qp tolerance
qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_stat", &opts->tol_stat);
qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_eq", &opts->tol_eq);
qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_ineq", &opts->tol_ineq);
qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_comp", &opts->tol_comp);
return;
}
void ocp_nlp_sqp_opts_update(void *config_, void *dims_, void *opts_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
ocp_nlp_opts_update(config, dims, nlp_opts);
return;
}
void ocp_nlp_sqp_opts_set(void *config_, void *opts_, const char *field, void* value)
{
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) opts_;
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
int ii;
char module[MAX_STR_LEN];
char *ptr_module = NULL;
int module_length = 0;
// extract module name
char *char_ = strchr(field, '_');
if (char_!=NULL)
{
module_length = char_-field;
for (ii=0; ii<module_length; ii++)
module[ii] = field[ii];
module[module_length] = '\0'; // add end of string
ptr_module = module;
}
// pass options to QP module
if ( ptr_module!=NULL && (!strcmp(ptr_module, "qp")) )
{
ocp_nlp_opts_set(config, nlp_opts, field, value);
if (!strcmp(field, "qp_warm_start"))
{
int* i_ptr = (int *) value;
opts->qp_warm_start = *i_ptr;
}
}
else // nlp opts
{
if (!strcmp(field, "max_iter"))
{
int* max_iter = (int *) value;
opts->max_iter = *max_iter;
}
else if (!strcmp(field, "tol_stat"))
{
double* tol_stat = (double *) value;
opts->tol_stat = *tol_stat;
// TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_stat", value);
}
else if (!strcmp(field, "tol_eq"))
{
double* tol_eq = (double *) value;
opts->tol_eq = *tol_eq;
// TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_eq", value);
}
else if (!strcmp(field, "tol_ineq"))
{
double* tol_ineq = (double *) value;
opts->tol_ineq = *tol_ineq;
// TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_ineq", value);
}
else if (!strcmp(field, "tol_comp"))
{
double* tol_comp = (double *) value;
opts->tol_comp = *tol_comp;
// TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_comp", value);
}
else if (!strcmp(field, "ext_qp_res"))
{
int* ext_qp_res = (int *) value;
opts->ext_qp_res = *ext_qp_res;
}
else if (!strcmp(field, "warm_start_first_qp"))
{
bool* warm_start_first_qp = (bool *) value;
opts->warm_start_first_qp = *warm_start_first_qp;
}
else if (!strcmp(field, "rti_phase"))
{
int* rti_phase = (int *) value;
if (*rti_phase < 0 || *rti_phase > 0) {
printf("\nerror: ocp_nlp_sqp_opts_set: invalid value for rti_phase field.");
printf("possible values are: 0\n");
exit(1);
} else opts->rti_phase = *rti_phase;
}
else if (!strcmp(field, "print_level"))
{
int* print_level = (int *) value;
if (*print_level < 0)
{
printf("\nerror: ocp_nlp_sqp_opts_set: invalid value for print_level field, need int >=0, got %d.", *print_level);
exit(1);
}
opts->print_level = *print_level;
}
else if (!strcmp(field, "initialize_t_slacks"))
{
int* initialize_t_slacks = (int *) value;
if (*initialize_t_slacks != 0 && *initialize_t_slacks != 1)
{
printf("\nerror: ocp_nlp_sqp_opts_set: invalid value for initialize_t_slacks field, need int 0 or 1, got %d.", *initialize_t_slacks);
exit(1);
}
opts->initialize_t_slacks = *initialize_t_slacks;
}
else
{
ocp_nlp_opts_set(config, nlp_opts, field, value);
}
}
return;
}
void ocp_nlp_sqp_opts_set_at_stage(void *config_, void *opts_, size_t stage, const char *field, void* value)
{
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) opts_;
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
ocp_nlp_opts_set_at_stage(config, nlp_opts, stage, field, value);
return;
}
/************************************************
* memory
************************************************/
acados_size_t ocp_nlp_sqp_memory_calculate_size(void *config_, void *dims_, void *opts_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
// int N = dims->N;
// int *nx = dims->nx;
// int *nu = dims->nu;
// int *nz = dims->nz;
acados_size_t size = 0;
size += sizeof(ocp_nlp_sqp_memory);
// nlp mem
size += ocp_nlp_memory_calculate_size(config, dims, nlp_opts);
// stat
int stat_m = opts->max_iter+1;
int stat_n = 6;
if (opts->ext_qp_res)
stat_n += 4;
size += stat_n*stat_m*sizeof(double);
size += 3*8; // align
make_int_multiple_of(8, &size);
return size;
}
void *ocp_nlp_sqp_memory_assign(void *config_, void *dims_, void *opts_, void *raw_memory)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
// ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
// ocp_nlp_dynamics_config **dynamics = config->dynamics;
// ocp_nlp_cost_config **cost = config->cost;
// ocp_nlp_constraints_config **constraints = config->constraints;
char *c_ptr = (char *) raw_memory;
// int N = dims->N;
// int *nx = dims->nx;
// int *nu = dims->nu;
// int *nz = dims->nz;
// initial align
align_char_to(8, &c_ptr);
ocp_nlp_sqp_memory *mem = (ocp_nlp_sqp_memory *) c_ptr;
c_ptr += sizeof(ocp_nlp_sqp_memory);
align_char_to(8, &c_ptr);
// nlp mem
mem->nlp_mem = ocp_nlp_memory_assign(config, dims, nlp_opts, c_ptr);
c_ptr += ocp_nlp_memory_calculate_size(config, dims, nlp_opts);
// stat
mem->stat = (double *) c_ptr;
mem->stat_m = opts->max_iter+1;
mem->stat_n = 6;
if (opts->ext_qp_res)
mem->stat_n += 4;
c_ptr += mem->stat_m*mem->stat_n*sizeof(double);
mem->status = ACADOS_READY;
align_char_to(8, &c_ptr);
assert((char *) raw_memory + ocp_nlp_sqp_memory_calculate_size(config, dims, opts) >= c_ptr);
return mem;
}
/************************************************
* workspace
************************************************/
acados_size_t ocp_nlp_sqp_workspace_calculate_size(void *config_, void *dims_, void *opts_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
acados_size_t size = 0;
// sqp
size += sizeof(ocp_nlp_sqp_workspace);
// nlp
size += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts);
// tmp qp in
size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
// tmp qp out
size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);
if (opts->ext_qp_res)
{
// qp res
size += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);
// qp res ws
size += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
}
return size;
}
static void ocp_nlp_sqp_cast_workspace(ocp_nlp_config *config, ocp_nlp_dims *dims,
ocp_nlp_sqp_opts *opts, ocp_nlp_sqp_memory *mem, ocp_nlp_sqp_workspace *work)
{
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
ocp_nlp_memory *nlp_mem = mem->nlp_mem;
// sqp
char *c_ptr = (char *) work;
c_ptr += sizeof(ocp_nlp_sqp_workspace);
// nlp
work->nlp_work = ocp_nlp_workspace_assign(config, dims, nlp_opts, nlp_mem, c_ptr);
c_ptr += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts);
// tmp qp in
work->tmp_qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
// tmp qp out
work->tmp_qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);
if (opts->ext_qp_res)
{
// qp res
work->qp_res = ocp_qp_res_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);
// qp res ws
work->qp_res_ws = ocp_qp_res_workspace_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
}
assert((char *) work + ocp_nlp_sqp_workspace_calculate_size(config, dims, opts) >= c_ptr);
return;
}
/************************************************
* functions
************************************************/
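/*
 * Main SQP entry point: cast the workspace, alias the dynamics, cost,
 * constraint and regularization memories to the shared QP data (optionally
 * in parallel with OpenMP), initialize the QP, then run the SQP loop of
 * linearization, QP vector updates, residual computation and statistics
 * logging for at most opts->max_iter iterations.
 */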
int ocp_nlp_sqp(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
void *opts_, void *mem_, void *work_)
{
acados_timer timer0, timer1;
acados_tic(&timer0);
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
ocp_nlp_sqp_memory *mem = mem_;
ocp_nlp_in *nlp_in = nlp_in_;
ocp_nlp_out *nlp_out = nlp_out_;
ocp_nlp_memory *nlp_mem = mem->nlp_mem;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_sqp_workspace *work = work_;
ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work);
ocp_nlp_workspace *nlp_work = work->nlp_work;
// zero timers
double total_time = 0.0;
double tmp_time;
mem->time_qp_sol = 0.0;
mem->time_qp_solver_call = 0.0;
mem->time_qp_xcond = 0.0;
mem->time_lin = 0.0;
mem->time_reg = 0.0;
mem->time_tot = 0.0;
mem->time_glob = 0.0;
int N = dims->N;
int ii;
int qp_iter = 0;
int qp_status = 0;
#if defined(ACADOS_WITH_OPENMP)
// backup number of threads
int num_threads_bkp = omp_get_num_threads();
// set number of threads
omp_set_num_threads(opts->nlp_opts->num_threads);
#pragma omp parallel
{ // beginning of parallel region
#endif
// alias to dynamics_memory
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for
#endif
for (ii = 0; ii < N; ii++)
{
config->dynamics[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_ux1_ptr(nlp_out->ux+ii+1, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_tmp_ux1_ptr(nlp_work->tmp_nlp_out->ux+ii+1, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_pi_ptr(nlp_out->pi+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_tmp_pi_ptr(nlp_work->tmp_nlp_out->pi+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_BAbt_ptr(nlp_mem->qp_in->BAbt+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_dzduxt_ptr(nlp_mem->dzduxt+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_sim_guess_ptr(nlp_mem->sim_guess+ii, nlp_mem->set_sim_guess+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->dynamics[ii]);
}
// alias to cost_memory
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for
#endif
for (ii = 0; ii <= N; ii++)
{
config->cost[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->cost[ii]);
config->cost[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->cost[ii]);
config->cost[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->cost[ii]);
config->cost[ii]->memory_set_dzdux_tran_ptr(nlp_mem->dzduxt+ii, nlp_mem->cost[ii]);
config->cost[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->cost[ii]);
config->cost[ii]->memory_set_Z_ptr(nlp_mem->qp_in->Z+ii, nlp_mem->cost[ii]);
}
// alias to constraints_memory
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for
#endif
for (ii = 0; ii <= N; ii++)
{
config->constraints[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_lam_ptr(nlp_out->lam+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_tmp_lam_ptr(nlp_work->tmp_nlp_out->lam+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_dzdux_tran_ptr(nlp_mem->dzduxt+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_DCt_ptr(nlp_mem->qp_in->DCt+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_idxb_ptr(nlp_mem->qp_in->idxb[ii], nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_idxs_rev_ptr(nlp_mem->qp_in->idxs_rev[ii], nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_idxe_ptr(nlp_mem->qp_in->idxe[ii], nlp_mem->constraints[ii]);
}
// alias to regularize memory
config->regularize->memory_set_RSQrq_ptr(dims->regularize, nlp_mem->qp_in->RSQrq, nlp_mem->regularize_mem);
config->regularize->memory_set_rq_ptr(dims->regularize, nlp_mem->qp_in->rqz, nlp_mem->regularize_mem);
config->regularize->memory_set_BAbt_ptr(dims->regularize, nlp_mem->qp_in->BAbt, nlp_mem->regularize_mem);
config->regularize->memory_set_b_ptr(dims->regularize, nlp_mem->qp_in->b, nlp_mem->regularize_mem);
config->regularize->memory_set_idxb_ptr(dims->regularize, nlp_mem->qp_in->idxb, nlp_mem->regularize_mem);
config->regularize->memory_set_DCt_ptr(dims->regularize, nlp_mem->qp_in->DCt, nlp_mem->regularize_mem);
config->regularize->memory_set_ux_ptr(dims->regularize, nlp_mem->qp_out->ux, nlp_mem->regularize_mem);
config->regularize->memory_set_pi_ptr(dims->regularize, nlp_mem->qp_out->pi, nlp_mem->regularize_mem);
config->regularize->memory_set_lam_ptr(dims->regularize, nlp_mem->qp_out->lam, nlp_mem->regularize_mem);
// copy sampling times into dynamics model
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for
#endif
// NOTE(oj): this will lead to an error for irk_gnsf, T must be set in precompute;
// -> remove here and make sure precompute is called everywhere.
for (ii = 0; ii < N; ii++)
{
config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii],
nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
}
#if defined(ACADOS_WITH_OPENMP)
} // end of parallel region
#endif
//
if (opts->initialize_t_slacks > 0)
ocp_nlp_initialize_t_slacks(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
// initialize QP
ocp_nlp_initialize_qp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
// main sqp loop
int sqp_iter = 0;
nlp_mem->sqp_iter = &sqp_iter;
for (; sqp_iter < opts->max_iter; sqp_iter++)
{
// linearize NLP and update QP matrices
acados_tic(&timer1);
ocp_nlp_approximate_qp_matrices(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
mem->time_lin += acados_toc(&timer1);
// update QP rhs for SQP (step in primal variables, absolute dual variables)
ocp_nlp_approximate_qp_vectors_sqp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
// compute nlp residuals
ocp_nlp_res_compute(dims, nlp_in, nlp_out, nlp_mem->nlp_res, nlp_mem);
nlp_out->inf_norm_res = nlp_mem->nlp_res->inf_norm_res_stat;
nlp_out->inf_norm_res = (nlp_mem->nlp_res->inf_norm_res_eq > nlp_out->inf_norm_res) ?
nlp_mem->nlp_res->inf_norm_res_eq :
nlp_out->inf_norm_res;
nlp_out->inf_norm_res = (nlp_mem->nlp_res->inf_norm_res_ineq > nlp_out->inf_norm_res) ?
nlp_mem->nlp_res->inf_norm_res_ineq :
nlp_out->inf_norm_res;
nlp_out->inf_norm_res = (nlp_mem->nlp_res->inf_norm_res_comp > nlp_out->inf_norm_res) ?
nlp_mem->nlp_res->inf_norm_res_comp :
nlp_out->inf_norm_res;
if (opts->print_level > sqp_iter + 1)
print_ocp_qp_in(nlp_mem->qp_in);
// save statistics
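// (each stat row holds, in this order: res_stat, res_eq, res_ineq, res_comp,
//  followed by qp_status and qp_iter, which are written after the QP solve below)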
if (sqp_iter < mem->stat_m)
{
mem->stat[mem->stat_n*sqp_iter+0] = nlp_mem->nlp_res->inf_norm_res_stat;
mem->stat[mem->stat_n*sqp_iter+1] = nlp_mem->nlp_res->inf_norm_res_eq;
mem->stat[mem->stat_n*sqp_iter+2] = nlp_mem->nlp_res->inf_norm_res_ineq;
mem->stat[mem->stat_n*sqp_iter+3] = nlp_mem->nlp_res->inf_norm_res_comp;
}
// exit conditions on residuals
if ((nlp_mem->nlp_res->inf_norm_res_stat < opts->tol_stat) &
(nlp_mem->nlp_res->inf_norm_res_eq < opts->tol_eq) &
(nlp_mem->nlp_res->inf_norm_res_ineq < opts->tol_ineq) &
(nlp_mem->nlp_res->inf_norm_res_comp < opts->tol_comp))
{
// save sqp iterations number
mem->sqp_iter = sqp_iter;
nlp_out->sqp_iter = sqp_iter;
// stop timer
total_time += acados_toc(&timer0);
// save time
nlp_out->total_time = total_time;
mem->time_tot = total_time;
#if defined(ACADOS_WITH_OPENMP)
// restore number of threads
omp_set_num_threads(num_threads_bkp);
#endif
mem->status = ACADOS_SUCCESS;
if (opts->print_level > 0)
{
printf("%i\t%e\t%e\t%e\t%e.\n", sqp_iter, nlp_mem->nlp_res->inf_norm_res_stat,
nlp_mem->nlp_res->inf_norm_res_eq, nlp_mem->nlp_res->inf_norm_res_ineq,
nlp_mem->nlp_res->inf_norm_res_comp );
printf("\n\n");
}
return mem->status;
}
// regularize Hessian
acados_tic(&timer1);
config->regularize->regularize_hessian(config->regularize, dims->regularize,
opts->nlp_opts->regularize, nlp_mem->regularize_mem);
mem->time_reg += acados_toc(&timer1);
// (typically) no warm start at first iteration
if (sqp_iter == 0 && !opts->warm_start_first_qp)
{
int tmp_int = 0;
config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts,
"warm_start", &tmp_int);
}
// solve qp
acados_tic(&timer1);
qp_status = qp_solver->evaluate(qp_solver, dims->qp_solver, nlp_mem->qp_in, nlp_mem->qp_out,
opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem, nlp_work->qp_work);
mem->time_qp_sol += acados_toc(&timer1);
qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_solver_call", &tmp_time);
mem->time_qp_solver_call += tmp_time;
qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_xcond", &tmp_time);
mem->time_qp_xcond += tmp_time;
// compute correct dual solution in case of Hessian regularization
acados_tic(&timer1);
config->regularize->correct_dual_sol(config->regularize, dims->regularize,
opts->nlp_opts->regularize, nlp_mem->regularize_mem);
mem->time_reg += acados_toc(&timer1);
// restore default warm start
if (sqp_iter==0)
{
config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts,
"warm_start", &opts->qp_warm_start);
}
// TODO move into QP solver memory ???
qp_info *qp_info_;
ocp_qp_out_get(nlp_mem->qp_out, "qp_info", &qp_info_);
nlp_out->qp_iter = qp_info_->num_iter;
// printf("\nqp_iter = %d, sqp_iter = %d, max_sqp_iter = %d\n", nlp_out->qp_iter, sqp_iter, opts->max_iter);
qp_iter = qp_info_->num_iter;
// save statistics of last qp solver call
if (sqp_iter+1 < mem->stat_m)
{
mem->stat[mem->stat_n*(sqp_iter+1)+4] = qp_status;
mem->stat[mem->stat_n*(sqp_iter+1)+5] = qp_iter;
}
// compute external QP residuals (for debugging)
if (opts->ext_qp_res)
{
ocp_qp_res_compute(nlp_mem->qp_in, nlp_mem->qp_out, work->qp_res, work->qp_res_ws);
if (sqp_iter+1 < mem->stat_m)
ocp_qp_res_compute_nrm_inf(work->qp_res, mem->stat+(mem->stat_n*(sqp_iter+1)+6));
}
if ((qp_status!=ACADOS_SUCCESS) & (qp_status!=ACADOS_MAXITER))
{
// print_ocp_qp_in(nlp_mem->qp_in);
if (opts->print_level > 0)
{
printf("%i\t%e\t%e\t%e\t%e.\n", sqp_iter, nlp_mem->nlp_res->inf_norm_res_stat,
nlp_mem->nlp_res->inf_norm_res_eq, nlp_mem->nlp_res->inf_norm_res_ineq,
nlp_mem->nlp_res->inf_norm_res_comp );
printf("\n\n");
}
// save sqp iterations number
mem->sqp_iter = sqp_iter;
nlp_out->sqp_iter = sqp_iter;
// stop timer
total_time += acados_toc(&timer0);
// save time
mem->time_tot = total_time;
nlp_out->total_time = total_time;
#ifndef ACADOS_SILENT
printf("QP solver returned error status %d in iteration %d\n", qp_status, sqp_iter);
#endif
#if defined(ACADOS_WITH_OPENMP)
// restore number of threads
omp_set_num_threads(num_threads_bkp);
#endif
if (opts->print_level > 1)
{
printf("\n Failed to solve the following QP:\n");
if (opts->print_level > sqp_iter + 1)
print_ocp_qp_in(nlp_mem->qp_in);
}
mem->status = ACADOS_QP_FAILURE;
return mem->status;
}
// globalization
acados_tic(&timer1);
double alpha = ocp_nlp_line_search(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
mem->time_glob += acados_toc(&timer1);
// update variables
ocp_nlp_update_variables_sqp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work, alpha);
// ocp_nlp_dims_print(nlp_out->dims);
// ocp_nlp_out_print(nlp_out);
// exit(1);
// ??? @rien
// for (int_t i = 0; i < N; i++)
// {
// ocp_nlp_dynamics_opts *dynamics_opts = opts->dynamics[i];
// sim_opts *opts = dynamics_opts->sim_solver;
// if (opts->scheme == NULL)
// continue;
// opts->sens_adj = (opts->scheme->type != exact);
// if (nlp_in->freezeSens) {
// // freeze inexact sensitivities after first SQP iteration !!
// opts->scheme->freeze = true;
// }
// }
if (opts->print_level > 0)
{
if (sqp_iter%10 == 0)
{
printf("# it\tstat\t\teq\t\tineq\t\tcomp\n");
}
printf("%i\t%e\t%e\t%e\t%e.\n", sqp_iter, nlp_mem->nlp_res->inf_norm_res_stat,
nlp_mem->nlp_res->inf_norm_res_eq, nlp_mem->nlp_res->inf_norm_res_ineq, nlp_mem->nlp_res->inf_norm_res_comp );
}
}
// stop timer
total_time += acados_toc(&timer0);
if (opts->print_level > 0)
printf("\n\n");
// ocp_nlp_out_print(nlp_out);
// save sqp iterations number
mem->sqp_iter = sqp_iter;
nlp_out->sqp_iter = sqp_iter;
// save time
mem->time_tot = total_time;
nlp_out->total_time = total_time;
// maximum number of iterations reached
#if defined(ACADOS_WITH_OPENMP)
// restore number of threads
omp_set_num_threads(num_threads_bkp);
#endif
mem->status = ACADOS_MAXITER;
#ifndef ACADOS_SILENT
printf("\n ocp_nlp_sqp: maximum iterations reached\n");
#endif
return mem->status;
}
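/*
 * Usage sketch (illustrative only; config, dims, nlp_in, nlp_out, opts, mem and
 * work are assumed to have been created with the corresponding acados
 * create/assign routines elsewhere):
 *
 *   int status = ocp_nlp_sqp(config, dims, nlp_in, nlp_out, opts, mem, work);
 *   int iters;  ocp_nlp_sqp_get(config, dims, mem, "sqp_iter", &iters);
 *   double t;   ocp_nlp_sqp_get(config, dims, mem, "time_tot", &t);
 */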
int ocp_nlp_sqp_precompute(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
void *opts_, void *mem_, void *work_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_sqp_memory *mem = mem_;
ocp_nlp_in *nlp_in = nlp_in_;
// ocp_nlp_out *nlp_out = nlp_out_;
ocp_nlp_memory *nlp_mem = mem->nlp_mem;
ocp_nlp_sqp_workspace *work = work_;
ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work);
ocp_nlp_workspace *nlp_work = work->nlp_work;
int N = dims->N;
int status = ACADOS_SUCCESS;
int ii;
// TODO(all) add flag to enable/disable checks
for (ii = 0; ii <= N; ii++)
{
int module_val;
config->constraints[ii]->dims_get(config->constraints[ii], dims->constraints[ii], "ns", &module_val);
if (dims->ns[ii] != module_val)
{
printf("ocp_nlp_sqp_precompute: inconsistent dimension ns for stage %d with constraint module, got %d, module: %d.",
ii, dims->ns[ii], module_val);
exit(1);
}
}
// precompute
for (ii = 0; ii < N; ii++)
{
// set T
config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii],
nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
// dynamics precompute
status = config->dynamics[ii]->precompute(config->dynamics[ii], dims->dynamics[ii],
nlp_in->dynamics[ii], opts->nlp_opts->dynamics[ii],
nlp_mem->dynamics[ii], nlp_work->dynamics[ii]);
if (status != ACADOS_SUCCESS)
return status;
}
return status;
}
void ocp_nlp_sqp_eval_param_sens(void *config_, void *dims_, void *opts_, void *mem_, void *work_,
char *field, int stage, int index, void *sens_nlp_out_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_sqp_memory *mem = mem_;
ocp_nlp_memory *nlp_mem = mem->nlp_mem;
ocp_nlp_out *sens_nlp_out = sens_nlp_out_;
ocp_nlp_sqp_workspace *work = work_;
ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work);
ocp_nlp_workspace *nlp_work = work->nlp_work;
d_ocp_qp_copy_all(nlp_mem->qp_in, work->tmp_qp_in);
d_ocp_qp_set_rhs_zero(work->tmp_qp_in);
double one = 1.0;
if ((!strcmp("ex", field)) & (stage==0))
{
d_ocp_qp_set_el("lbx", stage, index, &one, work->tmp_qp_in);
d_ocp_qp_set_el("ubx", stage, index, &one, work->tmp_qp_in);
// d_ocp_qp_print(work->tmp_qp_in->dim, work->tmp_qp_in);
config->qp_solver->eval_sens(config->qp_solver, dims->qp_solver, work->tmp_qp_in, work->tmp_qp_out,
opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem, nlp_work->qp_work);
// d_ocp_qp_sol_print(work->tmp_qp_out->dim, work->tmp_qp_out);
// exit(1);
/* copy tmp_qp_out into sens_nlp_out */
int i;
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
// int *nu = dims->nu;
int *ni = dims->ni;
// int *nz = dims->nz;
for (i = 0; i <= N; i++)
{
blasfeo_dveccp(nv[i], work->tmp_qp_out->ux + i, 0, sens_nlp_out->ux + i, 0);
if (i < N)
blasfeo_dveccp(nx[i + 1], work->tmp_qp_out->pi + i, 0, sens_nlp_out->pi + i, 0);
blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->lam + i, 0, sens_nlp_out->lam + i, 0);
blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->t + i, 0, sens_nlp_out->t + i, 0);
}
}
else
{
printf("\nerror: field %s at stage %d not available in ocp_nlp_sqp_eval_param_sens\n", field, stage);
exit(1);
}
return;
}
// TODO rename memory_get ???
void ocp_nlp_sqp_get(void *config_, void *dims_, void *mem_, const char *field, void *return_value_)
{
ocp_nlp_config *config = config_;
ocp_nlp_dims *dims = dims_;
ocp_nlp_sqp_memory *mem = mem_;
if (!strcmp("sqp_iter", field))
{
int *value = return_value_;
*value = mem->sqp_iter;
}
else if (!strcmp("status", field))
{
int *value = return_value_;
*value = mem->status;
}
else if (!strcmp("time_tot", field) || !strcmp("tot_time", field))
{
double *value = return_value_;
*value = mem->time_tot;
}
else if (!strcmp("time_qp_sol", field) || !strcmp("time_qp", field))
{
double *value = return_value_;
*value = mem->time_qp_sol;
}
else if (!strcmp("time_qp_solver", field) || !strcmp("time_qp_solver_call", field))
{
double *value = return_value_;
*value = mem->time_qp_solver_call;
}
else if (!strcmp("time_qp_xcond", field))
{
double *value = return_value_;
*value = mem->time_qp_xcond;
}
else if (!strcmp("time_lin", field))
{
double *value = return_value_;
*value = mem->time_lin;
}
else if (!strcmp("time_reg", field))
{
double *value = return_value_;
*value = mem->time_reg;
}
else if (!strcmp("time_glob", field))
{
double *value = return_value_;
*value = mem->time_glob;
}
else if (!strcmp("time_sim", field) || !strcmp("time_sim_ad", field) || !strcmp("time_sim_la", field))
{
double tmp = 0.0;
double *ptr = return_value_;
int N = dims->N;
int ii;
for (ii=0; ii<N; ii++)
{
config->dynamics[ii]->memory_get(config->dynamics[ii], dims->dynamics[ii], mem->nlp_mem->dynamics[ii], field, &tmp);
*ptr += tmp;
}
}
else if (!strcmp("stat", field))
{
double **value = return_value_;
*value = mem->stat;
}
else if (!strcmp("statistics", field))
{
int n_row = mem->stat_m<mem->sqp_iter+1 ? mem->stat_m : mem->sqp_iter+1;
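// copy the statistics column-major into an n_row x (stat_n+1) table,
// with the iteration index in the first column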
double *value = return_value_;
for (int ii=0; ii<n_row; ii++)
{
value[ii+0] = ii;
for (int jj=0; jj<mem->stat_n; jj++)
value[ii+(jj+1)*n_row] = mem->stat[jj+ii*mem->stat_n];
}
}
else if (!strcmp("stat_m", field))
{
int *value = return_value_;
*value = mem->stat_m;
}
else if (!strcmp("stat_n", field))
{
int *value = return_value_;
*value = mem->stat_n;
}
else if (!strcmp("nlp_mem", field))
{
void **value = return_value_;
*value = mem->nlp_mem;
}
else if (!strcmp("qp_xcond_dims", field))
{
void **value = return_value_;
*value = dims->qp_solver->xcond_dims;
}
else if (!strcmp("nlp_res", field))
{
ocp_nlp_res **value = return_value_;
*value = mem->nlp_mem->nlp_res;
}
else if (!strcmp("qp_xcond_in", field))
{
void **value = return_value_;
*value = mem->nlp_mem->qp_solver_mem->xcond_qp_in;
}
else if (!strcmp("qp_xcond_out", field))
{
void **value = return_value_;
*value = mem->nlp_mem->qp_solver_mem->xcond_qp_out;
}
else if (!strcmp("qp_in", field))
{
void **value = return_value_;
*value = mem->nlp_mem->qp_in;
}
else if (!strcmp("qp_out", field))
{
void **value = return_value_;
*value = mem->nlp_mem->qp_out;
}
else if (!strcmp("qp_iter", field))
{
config->qp_solver->memory_get(config->qp_solver,
mem->nlp_mem->qp_solver_mem, "iter", return_value_);
}
else if (!strcmp("res_stat", field))
{
double *value = return_value_;
*value = mem->nlp_mem->nlp_res->inf_norm_res_stat;
}
else if (!strcmp("res_eq", field))
{
double *value = return_value_;
*value = mem->nlp_mem->nlp_res->inf_norm_res_eq;
}
else if (!strcmp("res_ineq", field))
{
double *value = return_value_;
*value = mem->nlp_mem->nlp_res->inf_norm_res_ineq;
}
else if (!strcmp("res_comp", field))
{
double *value = return_value_;
*value = mem->nlp_mem->nlp_res->inf_norm_res_comp;
}
else if (!strcmp("cost_value", field))
{
double *value = return_value_;
*value = mem->nlp_mem->cost_value;
}
else
{
printf("\nerror: field %s not available in ocp_nlp_sqp_get\n", field);
exit(1);
}
}
void ocp_nlp_sqp_opts_get(void *config_, void *dims_, void *opts_,
const char *field, void *return_value_)
{
// ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
if (!strcmp("nlp_opts", field))
{
void **value = return_value_;
*value = opts->nlp_opts;
}
else
{
printf("\nerror: field %s not available in ocp_nlp_sqp_opts_get\n", field);
exit(1);
}
}
void ocp_nlp_sqp_work_get(void *config_, void *dims_, void *work_,
const char *field, void *return_value_)
{
// ocp_nlp_config *config = config_;
ocp_nlp_sqp_workspace *work = work_;
if (!strcmp("nlp_work", field))
{
void **value = return_value_;
*value = work->nlp_work;
}
else
{
printf("\nerror: field %s not available in ocp_nlp_sqp_work_get\n", field);
exit(1);
}
}
void ocp_nlp_sqp_config_initialize_default(void *config_)
{
ocp_nlp_config *config = (ocp_nlp_config *) config_;
config->opts_calculate_size = &ocp_nlp_sqp_opts_calculate_size;
config->opts_assign = &ocp_nlp_sqp_opts_assign;
config->opts_initialize_default = &ocp_nlp_sqp_opts_initialize_default;
config->opts_update = &ocp_nlp_sqp_opts_update;
config->opts_set = &ocp_nlp_sqp_opts_set;
config->opts_set_at_stage = &ocp_nlp_sqp_opts_set_at_stage;
config->memory_calculate_size = &ocp_nlp_sqp_memory_calculate_size;
config->memory_assign = &ocp_nlp_sqp_memory_assign;
config->workspace_calculate_size = &ocp_nlp_sqp_workspace_calculate_size;
config->evaluate = &ocp_nlp_sqp;
config->eval_param_sens = &ocp_nlp_sqp_eval_param_sens;
config->config_initialize_default = &ocp_nlp_sqp_config_initialize_default;
config->precompute = &ocp_nlp_sqp_precompute;
config->get = &ocp_nlp_sqp_get;
config->opts_get = &ocp_nlp_sqp_opts_get;
config->work_get = &ocp_nlp_sqp_work_get;
return;
}
|
diagmm_x_sky_u_row.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_SKY *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
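// For the (presumably unit-diagonal) "u" skyline variant the diagonal entries are
// implicitly 1, so diag(A) * X reduces to the element-wise update
// Y = alpha * X + beta * Y; the matrix values themselves are never read.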
ALPHA_INT rowC = mat->rows;
ALPHA_INT colC = columns;
ALPHA_INT num_threads = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
for (ALPHA_INT r = 0; r < rowC; ++r)
{
for (ALPHA_INT c = 0; c < colC; ++c)
{
ALPHA_Number t;
alpha_mul(t, alpha, x[index2(r, c, ldx)]);
alpha_mul(y[index2(r, c, ldy)], beta, y[index2(r, c, ldy)]);
alpha_add(y[index2(r, c, ldy)], y[index2(r, c, ldy)], t);
}
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
read_omp.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#include <omp.h>
// module load gnu/9.3.0
// export OMP_NUM_THREADS=10
// # compiling: point the include path at the stb folder
// gcc -fopenmp -std=gnu99 read_omp.c -o read_omp.x -Istb/ -lm
// # inside a compute node:
// /usr/bin/time ./read_omp.x photo5798471721416766496.jpg
int main(int argc,char* argv[]) {
int width,height,channels;
unsigned char *img = stbi_load(argv[1], &width, &height, &channels, 0);
if (img == NULL) {
fprintf(stderr, "failed to load image %s\n", argv[1]);
return 1;
}
#pragma OPTIMIZE OFF
FILE* f;
FILE* f_list;
f=fopen("color.txt","r");
int* r=malloc(sizeof(int)*50);
int* g=malloc(sizeof(int)*50);
int* b=malloc(sizeof(int)*50);
int* freq=malloc(50* sizeof(int));
for(int i=0;i<50;i++) {
int r_t,g_t,b_t;
fscanf(f,"(%d,%d,%d)\n",&r_t,&g_t,&b_t);
r[i]=r_t;
g[i]=g_t;
b[i]=b_t;
// printf("%d %d %d\n",r[i],g[i],b[i]);
freq[i]=0;
}
fclose(f);
#pragma OPTIMIZE ON
int index;
double dist_t;
int v1,v2,v3;
double dist;
#pragma omp parallel shared(img, freq) firstprivate(dist_t, dist, v1, v2, v3, index, r, g, b) proc_bind(close)
{
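// freq is reduced as a 50-element array section (OpenMP 4.5+): each thread
// accumulates into a private copy that is combined after the worksharing loop,
// avoiding an atomic update per pixel.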
#pragma omp for reduction(+: freq[:50]) // freq reduction as a 50 dim array
for(int i=0;i<width*height*3;i=i+3) {
// printf("%d %d %d \n",img[i],img[i+1],img[i+2]);
dist=sqrt(3*255*255);
index=0;
for(int j=0;j<50;j++) {
v1=img[i]-r[j];
v2=img[i+1]-g[j];
v3=img[i+2]-b[j];
dist_t=sqrt(v1*v1+v2*v2+v3*v3);
// printf("%f\n",dist_t);
if(dist_t<dist) {
dist=dist_t;
index=j;
}
}
// printf("index= %d\n", index);
//#pragma omp atomic
freq[index] += 1;
}
} //end of parallel region
printf("%s,",argv[1]); for(int i=0;i<50;i++) { printf("%9.9f",freq[i]/(double)(width*height)); if(i<49) printf(","); } printf("\n");
return 0;
}
|
false_sharing_correct.c | #include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
static void init_array(double a[], int start, int stop, int incr);
static void do_work(double a[], int nr_runs, int start, int stop, int incr);
#define N 1000
int main(void) {
const int nr_threads = 2;
const int n = N;
const int nr_runs = 20000000;
double a[N], sum = 0.0;
int i;
omp_set_dynamic(0);
omp_set_num_threads(nr_threads);
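/*
 * First parallel region: each section works on a contiguous half of the array
 * (stride 1), so the two threads touch disjoint cache lines and there is no
 * false sharing. Second parallel region below: the sections interleave their
 * accesses with stride 2, so both threads repeatedly write to the same cache
 * lines and false sharing degrades performance.
 */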
#pragma omp parallel default(none) shared(a)
{
#pragma omp sections
{
#pragma omp section
{
struct timeval tv1, tv2;
int thread_nr = omp_get_thread_num();
init_array(a, n/2, n, 1);
gettimeofday(&tv1, NULL);
do_work(a, nr_runs, n/2, n, 1);
gettimeofday(&tv2, NULL);
printf("thread %d: %.6f\n", thread_nr,
1.0e-6*(tv2.tv_usec - tv1.tv_usec) +
(tv2.tv_sec - tv1.tv_sec));
}
#pragma omp section
{
struct timeval tv1, tv2;
int thread_nr = omp_get_thread_num();
init_array(a, 0, n/2, 1);
gettimeofday(&tv1, NULL);
do_work(a, nr_runs, 0, n/2, 1);
gettimeofday(&tv2, NULL);
printf("thread %d: %.6f\n", thread_nr,
1.0e-6*(tv2.tv_usec - tv1.tv_usec) +
(tv2.tv_sec - tv1.tv_sec));
}
}
}
sum = 0.0;
for (i = 0; i < n; i++)
sum += a[i];
printf("no false sharing: %.1lf\n", sum);
#pragma omp parallel default(none) shared(a)
{
#pragma omp sections
{
#pragma omp section
{
struct timeval tv1, tv2;
int thread_nr = omp_get_thread_num();
init_array(a, 0, n, 2);
gettimeofday(&tv1, NULL);
do_work(a, nr_runs, 0, n, 2);
gettimeofday(&tv2, NULL);
printf("thread %d: %.6f\n", thread_nr,
1.0e-6*(tv2.tv_usec - tv1.tv_usec) +
(tv2.tv_sec - tv1.tv_sec));
}
#pragma omp section
{
struct timeval tv1, tv2;
int thread_nr = omp_get_thread_num();
init_array(a, 1, n, 2);
gettimeofday(&tv1, NULL);
do_work(a, nr_runs, 1, n, 2);
gettimeofday(&tv2, NULL);
printf("thread %d: %.6f\n", thread_nr,
1.0e-6*(tv2.tv_usec - tv1.tv_usec) +
(tv2.tv_sec - tv1.tv_sec));
}
}
}
sum = 0.0;
for (i = 0; i < n; i++)
sum += a[i];
printf("false sharing: %.1lf\n", sum);
return EXIT_SUCCESS;
}
void init_array(double a[], int start, int stop, int incr) {
int i;
for (i = start; i < stop; i += incr)
a[i] = 0.0;
}
void do_work(double a[], int nr_runs, int start, int stop, int incr) {
int run_nr, i;
for (run_nr = 0; run_nr < nr_runs; run_nr++)
for (i = start; i < stop ;i += incr)
a[i] += i;
}
|
par_map.h | #pragma once
#include "Defs.h"
#include <functional>
template <typename T>
void par_fill(T* dest, int w, int h, const T value) {
#pragma omp parallel for
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
const int idx = y * w + x;
dest[idx] = value;
}
}
}
template <typename T>
void par_map(T* src, T* dest, int w, int h, std::function<T(T)> f) {
#pragma omp parallel for
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
const int idx = y * w + x;
dest[idx] = f(src[idx]);
}
}
}
|
omp_masked.c | <ompts:test>
<ompts:testdescription>Test which checks the omp masked directive by counting up a variable in an omp masked section.</ompts:testdescription>
<ompts:ompversion>5.1</ompts:ompversion>
<ompts:directive>omp masked</ompts:directive>
<ompts:dependences>omp critical</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include "omp_testsuite.h"
/*
* TODO not checked up to now: no implied barrier, check threads of team
*/
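/*
 * Note: "omp masked" (OpenMP 5.1) generalizes the former "omp master" construct;
 * without a filter clause the block is executed by the primary thread (thread 0)
 * only, which is what this test checks.
 */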
int <ompts:testcode:functionname>omp_masked</ompts:testcode:functionname>(FILE * logFile)
{
<ompts:orphan:vars>
int nthreads;
int executing_thread;
</ompts:orphan:vars>
nthreads = 0;
executing_thread = -1;
#pragma omp parallel
{
<ompts:orphan>
<ompts:check>#pragma omp masked</ompts:check>
{
#pragma omp critical
{
nthreads++;
}
executing_thread = omp_get_thread_num();
} /* end of masked */
</ompts:orphan>
} /* end of parallel*/
printf("Number of threads in block: %d\n", nthreads);
printf("Executing thread: %d\n", executing_thread);
return ((nthreads == 1) && (executing_thread == 0));
}
</ompts:testcode>
</ompts:test>
|
GB_binop__max_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__max_uint16)
// A.*B function (eWiseMult): GB (_AemultB_01__max_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__max_uint16)
// A.*B function (eWiseMult): GB (_AemultB_03__max_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__max_uint16)
// A*D function (colscale): GB (_AxD__max_uint16)
// D*A function (rowscale): GB (_DxB__max_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__max_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__max_uint16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_uint16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_uint16)
// C=scalar+B GB (_bind1st__max_uint16)
// C=scalar+B' GB (_bind1st_tran__max_uint16)
// C=A+scalar GB (_bind2nd__max_uint16)
// C=A'+scalar GB (_bind2nd_tran__max_uint16)
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = GB_IMAX (aij, bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IMAX (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_UINT16 || GxB_NO_MAX_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__max_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__max_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__max_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__max_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__max_uint16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__max_uint16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__max_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__max_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__max_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__max_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__max_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__max_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint16_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IMAX (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__max_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint16_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IMAX (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMAX (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__max_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMAX (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__max_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
test.c |
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "../utilities/check.h"
#include "../utilities/utilities.h"
#define HOST_MAX_TEAMS 128
#define TRIALS (1)
#define N (992)
#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;})
#define ZERO(X) ZERO_ARRAY(N, X)
int main(void) {
check_offloading();
double A[N], B[N], C[N], D[N], E[N];
double * pA = malloc(N*sizeof(double));
int fail = 0;
INIT();
//
// Test: if clause
//
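// With if(0) the target construct is executed on the host, so at most
// HOST_MAX_TEAMS teams can be started; the actual team count is implementation
// defined and is read back through actual_teams.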
ZERO(A);
int num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 512;
// the number of teams started is implementation dependent
int actual_teams = -1;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams if(0) map(tofrom:actual_teams)
{
if(omp_get_team_num() == 0)
actual_teams = omp_get_num_teams();
A[omp_get_team_num()] += omp_get_team_num();
}
}
for (int i = 0 ; i < actual_teams ; i++)
if (A[i] != i*TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: device clause
//
ZERO(A);
num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 512;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams device(0) map(tofrom:actual_teams)
{
if(omp_get_team_num() == 0)
actual_teams = omp_get_num_teams();
A[omp_get_team_num()] += omp_get_team_num();
}
}
for (int i = 0 ; i < actual_teams ; i++)
if (A[i] != i*TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: map clause
//
ZERO(pA);
num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 512;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams map(pA[:N]) map(tofrom:actual_teams)
{
if(omp_get_team_num() == 0)
actual_teams = omp_get_num_teams();
pA[omp_get_team_num()] += omp_get_team_num();
}
}
for (int i = 0 ; i < actual_teams ; i++)
if (pA[i] != i*TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, pA[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: num_teams and omp_get_team_num()
//
ZERO(A);
num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 512;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams num_teams(num_teams)
{
A[omp_get_team_num()] += omp_get_team_num();
}
}
for (int i = 0 ; i < num_teams ; i++)
if (A[i] != i*TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: thread_limit and omp_get_thread_num()
//
ZERO(A);
fail = 0;
int num_threads = omp_is_initial_device() ? HOST_MAX_TEAMS : 256;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams num_teams(1) thread_limit(num_threads)
#pragma omp parallel
{
int tid = omp_get_thread_num();
A[tid] += (double) tid;
}
}
for (int i = 0 ; i < num_threads ; i++)
if (A[i] != i*TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: if statement in teams region
//
ZERO(A);
fail = 0;
num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 512;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams num_teams(num_teams)
{
if (omp_get_team_num() % 2 == 0) {
int teid = omp_get_team_num();
A[teid] += (double) 1;
}
else {
int teid = omp_get_team_num();
A[teid] += (double) 2;
}
}
}
for (int i = 0 ; i < num_teams ; i++) {
if (i % 2 == 0) {
if (A[i] != TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
fail = 1;
}
} else
if (A[i] != 2*TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) 2*TRIALS, A[i]);
fail = 1;
}
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
/* // */
/* // Test: num_teams and thread_limit by simulating a distribute pragma */
/* // */
/* ZERO(A); */
/* fail = 0; */
/* for (int t = 0 ; t < TRIALS ; t++) { */
/* #pragma omp target teams num_teams(2) thread_limit(496) */
/* { */
/* if (omp_get_team_num() == 0) { */
/* #pragma omp parallel */
/* { */
/* A[omp_get_team_num()*496+omp_get_thread_num()] += omp_get_thread_num(); */
/* if(omp_get_thread_num() == 498) printf("teid = %d, tid = %d, accessing %d\n", omp_get_team_num(), omp_get_thread_num(), omp_get_team_num()*496+omp_get_thread_num()); */
/* } */
/* } else { */
/* #pragma omp parallel */
/* { */
/* if(omp_get_thread_num() == 0) */
/* printf("teid = %d, tid = %d: A= %lf\n", omp_get_team_num(), omp_get_thread_num(), A[omp_get_team_num()*496+omp_get_thread_num()]); */
/* A[omp_get_team_num()*496+omp_get_thread_num()] -= omp_get_thread_num(); */
/* if(omp_get_thread_num() == 0) */
/* printf("teid = %d, tid = %d: A= %lf\n", omp_get_team_num(), omp_get_thread_num(), A[omp_get_team_num()*496+omp_get_thread_num()]); */
/* } */
/* } */
/* } */
/* } */
/* for (int i = 0 ; i < 992 ; i++) { */
/* if (i < 496) { */
/* if (A[i] != i*TRIALS) { */
/* printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]); */
/* fail = 1; */
/* } */
/* } else if(i >= 496) */
/* if (A[i] != -((i-496)*TRIALS)) { */
/* printf("Error at %d, h = %lf, d = %lf\n", i, (double) -((i-496)*TRIALS), A[i]); */
/* fail = 1; */
/* } */
/* } */
/* if(fail) printf("Failed\n"); */
/* else printf("Succeeded\n"); */
//
// Test: private
//
ZERO(A);
fail = 0;
int a = 10;
num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 256;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams num_teams(num_teams) private(a)
{
a = omp_get_team_num();
A[omp_get_team_num()] += a;
}
}
for (int i = 0 ; i < num_teams ; i++)
if (A[i] != i*TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: firstprivate
//
ZERO(A);
fail = 0;
a = 10;
num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 256;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams num_teams(num_teams) firstprivate(a)
{
a += omp_get_team_num();
A[omp_get_team_num()] += a;
}
}
for (int i = 0 ; i < num_teams ; i++)
if (A[i] != 10+i*TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) (10+i*TRIALS), A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
free(pA);
return 0;
}
|
selu_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: hhchen@openailab.com
*/
#include "selu_param.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
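/*
 * SELU reference:
 *   out = lambda * x                       for x >= 0
 *   out = lambda * alpha * (exp(x) - 1)    for x <  0
 * alpha and lambda are taken from selu_param (commonly about 1.67326 and 1.05070).
 */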
int ref_selu_fp32(struct tensor* output_tensor, struct tensor* input_tensor, struct selu_param* selu_param,
int num_thread)
{
float* data = ( float* )input_tensor->data;
float* out_data = ( float* )output_tensor->data;
float alpha = selu_param->alpha;
float lambda = selu_param->lambda;
float alpha_lambda = alpha * lambda;
int chan_num = input_tensor->dims[0] * input_tensor->dims[1];
int chan_size = input_tensor->dims[2] * input_tensor->dims[3];
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < chan_num; i++)
{
int offset = i * chan_size;
float* input_data = data + offset;
float* output_data = out_data + offset;
for (int j = 0; j < chan_size; j++)
{
if (input_data[j] < 0.f)
output_data[j] = (exp(input_data[j]) - 1.f) * alpha_lambda;
else
output_data[j] = input_data[j] * lambda;
}
}
return 0;
}
int ref_selu_uint8(struct tensor* output_tensor, struct tensor* input_tensor, struct selu_param* selu_param,
int num_thread)
{
/* dequant */
uint8_t* input_uint8 = input_tensor->data;
uint8_t* output_uint8 = output_tensor->data;
float input_scale = input_tensor->scale;
float output_scale = output_tensor->scale;
int32_t input_zero = input_tensor->zero_point;
int32_t output_zero = output_tensor->zero_point;
int input_size = input_tensor->elem_num;
int output_size = output_tensor->elem_num;
float* input_data = ( float* )sys_malloc(input_size * sizeof(float));
float* output_data = ( float* )sys_malloc(output_size * sizeof(float));
for (int i = 0; i < input_size; i++)
{
input_data[i] = (( float )input_uint8[i] - ( float )input_zero) * input_scale;
}
float alpha = selu_param->alpha;
float lambda = selu_param->lambda;
float alpha_lambda = alpha * lambda;
int chan_num = input_tensor->dims[0] * input_tensor->dims[1];
int chan_size = input_tensor->dims[2] * input_tensor->dims[3];
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < chan_num; i++)
{
int offset = i * chan_size;
/* operate on the dequantized buffers, not on the raw uint8 tensor data */
for (int j = 0; j < chan_size; j++)
{
if (input_data[offset + j] < 0.f)
output_data[offset + j] = (exp(input_data[offset + j]) - 1.f) * alpha_lambda;
else
output_data[offset + j] = input_data[offset + j] * lambda;
}
}
/* quant */
for (int i = 0; i < output_size; i++)
{
int udata = round(output_data[i] / output_scale + output_zero);
if (udata > 255)
udata = 255;
else if (udata < 0)
udata = 0;
output_uint8[i] = udata;
}
sys_free(input_data);
sys_free(output_data);
return 0;
}
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct node* ir_node = exec_node->ir_node;
struct graph* ir_graph = ir_node->graph;
struct tensor* input_tensor;
struct tensor* output_tensor;
input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
struct selu_param* selu_param = ( struct selu_param* )ir_node->op.param_mem;
int num_thread = exec_graph->num_thread;
int ret = -1;
if (input_tensor->data_type == TENGINE_DT_FP32)
ret = ref_selu_fp32(output_tensor, input_tensor, selu_param, num_thread);
else if(input_tensor->data_type == TENGINE_DT_UINT8)
ret = ref_selu_uint8(output_tensor, input_tensor, selu_param, num_thread);
return ret;
}
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
struct node* ir_node = exec_node;
struct graph* ir_graph = ir_node->graph;
struct tensor* input_tensor;
input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
if (input_tensor->data_type != TENGINE_DT_FP32 || input_tensor->layout != TENGINE_LAYOUT_NCHW)
return 0;
return OPS_SCORE_CANDO;
}
static struct node_ops hcl_node_ops = {.prerun = prerun,
.run = run,
.reshape = NULL,
.postrun = NULL,
.init_node = init_node,
.release_node = release_node,
.score = score};
int register_selu_ref_op()
{
return register_builtin_node_ops(OP_SELU, &hcl_node_ops);
}
int unregister_selu_ref_op()
{
return unregister_builtin_node_ops(OP_SELU, &hcl_node_ops);
}
|
testis.c |
/*[]*/
struct __sFILEX ;
/*[]*/
int printf(const char *restrict , ...);
/*[]*/
extern void timer_clear(int );
/*[]*/
extern void timer_start(int );
/*[]*/
extern void timer_stop(int );
/*[]*/
extern double timer_read(int );
/*[]*/
extern void c_print_results(char *name, char class , int n1 , int n2 , int n3 , int niter , int nthreads , double t , double mops , char *optype , int passed_verification , char *npbversion , char *compiletime , char *cc , char *clink , char *c_lib , char *c_inc , char *cflags , char *clinkflags , char *rand);
/*[]*/
typedef int INT_TYPE;
/*[]*/
INT_TYPE *key_buff_ptr_global;
/*[]*/
int passed_verification;
/*[]*/
INT_TYPE key_array[(1 << 16)];
/*[]*/
INT_TYPE key_buff1[(1 << 16)];
/*[]*/
INT_TYPE key_buff2[(1 << 16)];
/*[]*/
INT_TYPE partial_verify_vals[5];
/*[]*/
INT_TYPE test_index_array[5];
/*[]*/
INT_TYPE test_rank_array[5];
/*[]*/
INT_TYPE S_test_index_array[5] = {48427, 17148 , 23627 , 62548 , 4431};
/*[]*/
INT_TYPE S_test_rank_array[5] = {0, 18 , 346 , 64917 , 65463};
/*[]*/
INT_TYPE W_test_index_array[5] = {357773, 934767 , 875723 , 898999 , 404505};
/*[]*/
INT_TYPE W_test_rank_array[5] = {1249, 11698 , 1039987 , 1043896 , 1048018};
/*[]*/
INT_TYPE A_test_index_array[5] = {2112377, 662041 , 5336171 , 3642833 , 4250760};
/*[]*/
INT_TYPE A_test_rank_array[5] = {104, 17523 , 123928 , 8288932 , 8388264};
/*[]*/
INT_TYPE B_test_index_array[5] = {41869, 812306 , 5102857 , 18232239 , 26860214};
/*[]*/
INT_TYPE B_test_rank_array[5] = {33422937, 10244 , 59149 , 33135281 , 99};
/*[]*/
INT_TYPE C_test_index_array[5] = {44172927, 72999161 , 74326391 , 129606274 , 21736814};
/*[]*/
INT_TYPE C_test_rank_array[5] = {61147, 882988 , 266290 , 133997595 , 133525895};
/*[]*/
double randlc(double *X, double *A);
/*[]*/
void full_verify(void );
/*[1; ]*/
/*[1; ]*/
/*[1; ]*/
double randlc(double *X, double *A) {
/*[1; ]*/
/*[1; ]*/
static int KS = 0;
/*[1; ]*/
static double R23;
/*[1; ]*/
static double R46;
/*[1; ]*/
static double T23;
/*[1; ]*/
static double T46;
/*[1; ]*/
double T1;
/*[1; ]*/
double T2;
/*[1; ]*/
double T3;
/*[1; ]*/
double T4;
/*[1; ]*/
double A1;
/*[1; ]*/
double A2;
/*[1; ]*/
double X1;
/*[1; ]*/
double X2;
/*[1; ]*/
double Z;
/*[1; ]*/
int i;
/*[1; ]*/
int j;
/*[1; ]*/
/*[1; ]*/
if (KS == 0) {
/*[1; ]*/
/*[1; ]*/
R23 = 1.0;
/*[1; ]*/
R46 = 1.0;
/*[1; ]*/
T23 = 1.0;
/*[1; ]*/
T46 = 1.0;
/*[1; ]*/
/*[1; ]*/
/*[1; ]*/
/*[1; ]*/
for (i = 1; i <= 23; i++) {
/*[1; ]*/
/*[1; ]*/
R23 = 0.50 * R23;
/*[1; ]*/
T23 = 2.0 * T23;
}
/*[1; ]*/
/*[1; ]*/
/*[1; ]*/
/*[1; ]*/
for (i = 1; i <= 46; i++) {
/*[1; ]*/
/*[1; ]*/
R46 = 0.50 * R46;
/*[1; ]*/
T46 = 2.0 * T46;
}
/*[1; ]*/
KS = 1;
}
/*[1; ]*/
T1 = R23 * *A;
/*[1; ]*/
j = T1;
/*[1; ]*/
A1 = j;
/*[1; ]*/
A2 = *A - T23 * A1;
/*[1; ]*/
T1 = R23 * *X;
/*[1; ]*/
j = T1;
/*[1; ]*/
X1 = j;
/*[1; ]*/
X2 = *X - T23 * X1;
/*[1; ]*/
T1 = A1 * X2 + A2 * X1;
/*[1; ]*/
j = R23 * T1;
/*[1; ]*/
T2 = j;
/*[1; ]*/
Z = T1 - T23 * T2;
/*[1; ]*/
T3 = T23 * Z + A2 * X2;
/*[1; ]*/
j = R46 * T3;
/*[1; ]*/
T4 = j;
/*[1; ]*/
*X = T3 - T46 * T4;
/*[1; ]*/
return (R46 * *X);
}
/*[1; ]*/
/*[1; ]*/
/*[1; ]*/
void create_seq(double seed, double a) {
/*[1; ]*/
/*[1; ]*/
double x;
/*[1; ]*/
int i;
/*[1; ]*/
int k;
/*[1; ]*/
k = (1 << 11) / 4;
/*[1; ]*/
/*[1; ]*/
/*[1; ]*/
/*[1; ]*/
for (i = 0; i < (1 << 16); i++) {
/*[1; ]*/
/*[1; ]*/
double *_imopVarPre16;
/*[1; ]*/
double *_imopVarPre17;
/*[1; ]*/
double _imopVarPre18;
/*[1; ]*/
_imopVarPre16 = &a;
/*[1; ]*/
_imopVarPre17 = &seed;
/*[1; ]*/
_imopVarPre18 = randlc(_imopVarPre17, _imopVarPre16);
/*[1; ]*/
/*[1; ]*/
x = _imopVarPre18;
/*[1; ]*/
double *_imopVarPre21;
/*[1; ]*/
double *_imopVarPre22;
/*[1; ]*/
double _imopVarPre23;
/*[1; ]*/
_imopVarPre21 = &a;
/*[1; ]*/
_imopVarPre22 = &seed;
/*[1; ]*/
_imopVarPre23 = randlc(_imopVarPre22, _imopVarPre21);
/*[1; ]*/
/*[1; ]*/
x += _imopVarPre23;
/*[1; ]*/
double *_imopVarPre26;
/*[1; ]*/
double *_imopVarPre27;
/*[1; ]*/
double _imopVarPre28;
/*[1; ]*/
_imopVarPre26 = &a;
/*[1; ]*/
_imopVarPre27 = &seed;
/*[1; ]*/
_imopVarPre28 = randlc(_imopVarPre27, _imopVarPre26);
/*[1; ]*/
/*[1; ]*/
x += _imopVarPre28;
/*[1; ]*/
double *_imopVarPre31;
/*[1; ]*/
double *_imopVarPre32;
/*[1; ]*/
double _imopVarPre33;
/*[1; ]*/
_imopVarPre31 = &a;
/*[1; ]*/
_imopVarPre32 = &seed;
/*[1; ]*/
_imopVarPre33 = randlc(_imopVarPre32, _imopVarPre31);
/*[1; ]*/
/*[1; ]*/
x += _imopVarPre33;
/*[1; ]*/
key_array[i] = k * x;
}
}
/*[8; 16; ]*/
void full_verify() {
/*[8; 16; ]*/
/*[8; 16; ]*/
INT_TYPE i;
/*[8; 16; ]*/
INT_TYPE j;
/*[8; 16; ]*/
/*[8; 16; ]*/
/*[8; 16; ]*/
/*[8; 16; ]*/
for (i = 0; i < (1 << 16); i++) {
/*[8; 16; ]*/
/*[8; 16; ]*/
key_array[--key_buff_ptr_global[key_buff2[i]]] = key_buff2[i];
}
/*[8; 16; ]*/
j = 0;
/*[8; 16; ]*/
/*[8; 16; ]*/
/*[8; 16; ]*/
/*[8; 16; ]*/
for (i = 1; i < (1 << 16); i++) {
/*[8; 16; ]*/
/*[8; 16; ]*/
/*[8; 16; ]*/
if (key_array[i - 1] > key_array[i]) {
/*[8; 16; ]*/
/*[8; 16; ]*/
j++;
}
}
/*[8; 16; ]*/
/*[8; 16; ]*/
if (j != 0) {
/*[8; 16; ]*/
/*[8; 16; ]*/
printf("Full_verify: number of keys out of sort: %d\n", j);
/*[8; 16; ]*/
} else {
/*[8; 16; ]*/
/*[8; 16; ]*/
passed_verification++;
}
}
/*[4; ]*/
/*[4; ]*/
void rank(int iteration) {
/*[4; ]*/
/*[4; ]*/
INT_TYPE i;
/*[4; ]*/
INT_TYPE k;
/*[4; ]*/
11 - 9;
/*[4; ]*/
INT_TYPE prv_buff1[(1 << 11)];
/*[4; ]*/
#pragma omp master
{
/*[4; ]*/
/*[4; ]*/
key_array[iteration] = iteration;
/*[4; ]*/
key_array[iteration + 10] = (1 << 11) - iteration;
/*[4; ]*/
/*[4; ]*/
/*[4; ]*/
/*[4; ]*/
for (i = 0; i < 5; i++) {
/*[4; ]*/
/*[4; ]*/
partial_verify_vals[i] = key_array[test_index_array[i]];
}
/*[4; ]*/
/*[4; ]*/
/*[4; ]*/
/*[4; ]*/
for (i = 0; i < (1 << 11); i++) {
/*[4; ]*/
/*[4; ]*/
key_buff1[i] = 0;
}
}
/*[4; ]*/
// #pragma omp dummyFlush BARRIER_START written([key_buff1.f, key_array.f, partial_verify_vals.f]) read([key_buff2, key_array, key_array.f, key_buff2.f, i])
/*[4; ]*/
#pragma omp barrier
/*[5; 8; ]*/
/*[5; 8; ]*/
/*[5; 8; ]*/
/*[5; 8; ]*/
for (i = 0; i < (1 << 11); i++) {
/*[5; 8; ]*/
/*[5; 8; ]*/
prv_buff1[i] = 0;
}
/*[5; 8; ]*/
#pragma omp for nowait
/*[5; 8; ]*/
/*[5; 8; ]*/
/*[5; 8; ]*/
for (i = 0; i < (1 << 16); i++) {
/*[5; 8; ]*/
/*[5; 8; ]*/
key_buff2[i] = key_array[i];
/*[5; 8; ]*/
prv_buff1[key_buff2[i]]++;
}
/*[5; 8; ]*/
/*[5; 8; ]*/
/*[5; 8; ]*/
/*[5; 8; ]*/
for (i = 0; i < (1 << 11) - 1; i++) {
/*[5; 8; ]*/
/*[5; 8; ]*/
prv_buff1[i + 1] += prv_buff1[i];
}
/*[5; 8; ]*/
// #pragma omp dummyFlush CRITICAL_START written([key_buff2.f]) read([key_buff1.f, key_buff1])
/*[5; 8; ]*/
#pragma omp critical
{
/*[5; 8; ]*/
/*[5; 8; ]*/
/*[5; 8; ]*/
/*[5; 8; ]*/
/*[5; 8; ]*/
for (i = 0; i < (1 << 11); i++) {
/*[5; 8; ]*/
/*[5; 8; ]*/
key_buff1[i] += prv_buff1[i];
}
}
/*[5; 8; ]*/
// #pragma omp dummyFlush CRITICAL_END written([key_buff1.f]) read([])
/*[5; 8; ]*/
// #pragma omp dummyFlush BARRIER_START written([]) read([key_buff1.f, test_rank_array.f, key_buff1, test_rank_array, passed_verification, partial_verify_vals, partial_verify_vals.f, printf, _imopVarPre35])
/*[5; 8; ]*/
#pragma omp barrier
/*[6; ]*/
#pragma omp master
{
/*[6; ]*/
/*[6; ]*/
/*[6; ]*/
/*[6; ]*/
/*[6; ]*/
for (i = 0; i < 5; i++) {
/*[6; ]*/
/*[6; ]*/
k = partial_verify_vals[i];
/*[6; ]*/
int _imopVarPre35;
/*[6; ]*/
_imopVarPre35 = 0 <= k;
/*[6; ]*/
/*[6; ]*/
if (_imopVarPre35) {
/*[6; ]*/
/*[6; ]*/
_imopVarPre35 = k <= (1 << 16) - 1;
}
/*[6; ]*/
/*[6; ]*/
if (_imopVarPre35) {
/*[6; ]*/
/*[6; ]*/
/*[6; ]*/
switch ('S') {
/*[]*/
/*[6; ]*/
/*[6; ]*/
case 'S': if (i <= 2) {
/*[6; ]*/
/*[6; ]*/
/*[6; ]*/
if (key_buff1[k - 1] != test_rank_array[i] + iteration) {
/*[6; ]*/
/*[6; ]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i);
/*[6; ]*/
} else {
/*[6; ]*/
/*[6; ]*/
passed_verification++;
}
} else {
/*[6; ]*/
/*[6; ]*/
/*[6; ]*/
if (key_buff1[k - 1] != test_rank_array[i] - iteration) {
/*[6; ]*/
/*[6; ]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i);
/*[6; ]*/
} else {
/*[6; ]*/
/*[6; ]*/
passed_verification++;
}
}
/*[6; ]*/
break;
/*[]*/
/*[]*/
case 'W': if (i < 2) {
/*[]*/
/*[]*/
/*[]*/
if (key_buff1[k - 1] != test_rank_array[i] + (iteration - 2)) {
/*[]*/
/*[]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i);
/*[]*/
} else {
/*[]*/
/*[]*/
passed_verification++;
}
} else {
/*[]*/
/*[]*/
/*[]*/
if (key_buff1[k - 1] != test_rank_array[i] - iteration) {
/*[]*/
/*[]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i);
/*[]*/
} else {
/*[]*/
/*[]*/
passed_verification++;
}
}
/*[]*/
break;
/*[]*/
/*[]*/
case 'A': if (i <= 2) {
/*[]*/
/*[]*/
/*[]*/
if (key_buff1[k - 1] != test_rank_array[i] + (iteration - 1)) {
/*[]*/
/*[]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i);
/*[]*/
} else {
/*[]*/
/*[]*/
passed_verification++;
}
} else {
/*[]*/
/*[]*/
/*[]*/
if (key_buff1[k - 1] != test_rank_array[i] - (iteration - 1)) {
/*[]*/
/*[]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i);
/*[]*/
} else {
/*[]*/
/*[]*/
passed_verification++;
}
}
/*[]*/
break;
/*[]*/
case 'B': ;
/*[]*/
int _imopVarPre36;
/*[]*/
int _imopVarPre37;
/*[]*/
_imopVarPre36 = i == 1;
/*[]*/
/*[]*/
if (!_imopVarPre36) {
/*[]*/
/*[]*/
_imopVarPre37 = i == 2;
/*[]*/
/*[]*/
if (!_imopVarPre37) {
/*[]*/
/*[]*/
_imopVarPre37 = i == 4;
}
/*[]*/
_imopVarPre36 = _imopVarPre37;
}
/*[]*/
/*[]*/
if (_imopVarPre36) {
/*[]*/
/*[]*/
/*[]*/
if (key_buff1[k - 1] != test_rank_array[i] + iteration) {
/*[]*/
/*[]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i);
/*[]*/
} else {
/*[]*/
/*[]*/
passed_verification++;
}
} else {
/*[]*/
/*[]*/
/*[]*/
if (key_buff1[k - 1] != test_rank_array[i] - iteration) {
/*[]*/
/*[]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i);
/*[]*/
} else {
/*[]*/
/*[]*/
passed_verification++;
}
}
/*[]*/
break;
/*[]*/
/*[]*/
case 'C': if (i <= 2) {
/*[]*/
/*[]*/
/*[]*/
if (key_buff1[k - 1] != test_rank_array[i] + iteration) {
/*[]*/
/*[]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i);
/*[]*/
} else {
/*[]*/
/*[]*/
passed_verification++;
}
} else {
/*[]*/
/*[]*/
/*[]*/
if (key_buff1[k - 1] != test_rank_array[i] - iteration) {
/*[]*/
/*[]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i);
/*[]*/
} else {
/*[]*/
/*[]*/
passed_verification++;
}
}
/*[]*/
break;
}
}
}
/*[6; ]*/
/*[6; ]*/
if (iteration == 10) {
/*[6; ]*/
/*[6; ]*/
key_buff_ptr_global = key_buff1;
}
}
}
/*[]*/
/*[]*/
/*[]*/
int main(int argc, char **argv) {
/*[]*/
/*[]*/
int _imopVarPre39;
/*[]*/
int i;
/*[]*/
int iteration;
/*[]*/
int nthreads = 1;
/*[]*/
double timecounter;
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (i = 0; i < 5; i++) {
/*[]*/
/*[]*/
/*[]*/
switch ('S') {
/*[]*/
/*[]*/
case 'S': test_index_array[i] = S_test_index_array[i];
/*[]*/
test_rank_array[i] = S_test_rank_array[i];
/*[]*/
break;
/*[]*/
case 'A': test_index_array[i] = A_test_index_array[i];
/*[]*/
test_rank_array[i] = A_test_rank_array[i];
/*[]*/
break;
/*[]*/
case 'W': test_index_array[i] = W_test_index_array[i];
/*[]*/
test_rank_array[i] = W_test_rank_array[i];
/*[]*/
break;
/*[]*/
case 'B': test_index_array[i] = B_test_index_array[i];
/*[]*/
test_rank_array[i] = B_test_rank_array[i];
/*[]*/
break;
/*[]*/
case 'C': test_index_array[i] = C_test_index_array[i];
/*[]*/
test_rank_array[i] = C_test_rank_array[i];
/*[]*/
break;
}
}
/*[]*/
#pragma omp parallel private(iteration)
{
/*[1; ]*/
/*[1; ]*/
double _imopVarPre42;
/*[1; ]*/
int _imopVarPre43;
/*[1; ]*/
#pragma omp master
{
/*[1; ]*/
/*[1; ]*/
;
/*[1; ]*/
printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version" " - IS Benchmark\n\n");
/*[1; ]*/
/*[1; ]*/
_imopVarPre39 = (1 << 16);
/*[1; ]*/
printf(" Size: %d (class %c)\n", _imopVarPre39, 'S');
/*[1; ]*/
/*[1; ]*/
printf(" Iterations: %d\n", 10);
/*[1; ]*/
/*[1; ]*/
timer_clear(0);
/*[1; ]*/
/*[1; ]*/
create_seq(314159265.00, 1220703125.00);
/*[1; ]*/
}
/*[1; ]*/
int iteration_imopVarPre75;
/*[1; ]*/
iteration_imopVarPre75 = 1;
/*[1; ]*/
INT_TYPE i_imopVarPre76;
/*[1; ]*/
INT_TYPE k;
/*[1; ]*/
11 - 9;
/*[1; ]*/
INT_TYPE prv_buff1[(1 << 11)];
/*[1; ]*/
#pragma omp master
{
/*[1; ]*/
/*[1; ]*/
key_array[iteration_imopVarPre75] = iteration_imopVarPre75;
/*[1; ]*/
key_array[iteration_imopVarPre75 + 10] = (1 << 11) - iteration_imopVarPre75;
/*[1; ]*/
/*[1; ]*/
/*[1; ]*/
/*[1; ]*/
for (i_imopVarPre76 = 0; i_imopVarPre76 < 5; i_imopVarPre76++) {
/*[1; ]*/
/*[1; ]*/
partial_verify_vals[i_imopVarPre76] = key_array[test_index_array[i_imopVarPre76]];
}
/*[1; ]*/
/*[1; ]*/
/*[1; ]*/
/*[1; ]*/
for (i_imopVarPre76 = 0; i_imopVarPre76 < (1 << 11); i_imopVarPre76++) {
/*[1; ]*/
/*[1; ]*/
key_buff1[i_imopVarPre76] = 0;
}
}
/*[1; ]*/
// #pragma omp dummyFlush BARRIER_START written([key_buff1.f, key_array.f, test_rank_array.f, seed, T46, R46, KS, test_index_array.f, partial_verify_vals.f, T23, R23, _imopVarPre39]) read([key_buff2, key_array, key_array.f, key_buff2.f, i_imopVarPre76])
/*[1; ]*/
#pragma omp barrier
/*[13; ]*/
/*[13; ]*/
/*[13; ]*/
/*[13; ]*/
for (i_imopVarPre76 = 0; i_imopVarPre76 < (1 << 11); i_imopVarPre76++) {
/*[13; ]*/
/*[13; ]*/
prv_buff1[i_imopVarPre76] = 0;
}
/*[13; ]*/
#pragma omp for nowait
/*[13; ]*/
/*[13; ]*/
/*[13; ]*/
for (i_imopVarPre76 = 0; i_imopVarPre76 < (1 << 16); i_imopVarPre76++) {
/*[13; ]*/
/*[13; ]*/
key_buff2[i_imopVarPre76] = key_array[i_imopVarPre76];
/*[13; ]*/
prv_buff1[key_buff2[i_imopVarPre76]]++;
}
/*[13; ]*/
/*[13; ]*/
/*[13; ]*/
/*[13; ]*/
for (i_imopVarPre76 = 0; i_imopVarPre76 < (1 << 11) - 1; i_imopVarPre76++) {
/*[13; ]*/
/*[13; ]*/
prv_buff1[i_imopVarPre76 + 1] += prv_buff1[i_imopVarPre76];
}
/*[13; ]*/
// #pragma omp dummyFlush CRITICAL_START written([key_buff2.f]) read([key_buff1.f, key_buff1])
/*[13; ]*/
#pragma omp critical
{
/*[13; ]*/
/*[13; ]*/
/*[13; ]*/
/*[13; ]*/
/*[13; ]*/
for (i_imopVarPre76 = 0; i_imopVarPre76 < (1 << 11); i_imopVarPre76++) {
/*[13; ]*/
/*[13; ]*/
key_buff1[i_imopVarPre76] += prv_buff1[i_imopVarPre76];
}
}
/*[13; ]*/
// #pragma omp dummyFlush CRITICAL_END written([key_buff1.f]) read([key_buff1.f, test_rank_array.f, key_buff1, test_rank_array, passed_verification, partial_verify_vals, partial_verify_vals.f, printf, _imopVarPre35])
/*[13; ]*/
#pragma omp master
{
/*[13; ]*/
/*[13; ]*/
/*[13; ]*/
/*[13; ]*/
/*[13; ]*/
for (i_imopVarPre76 = 0; i_imopVarPre76 < 5; i_imopVarPre76++) {
/*[13; ]*/
/*[13; ]*/
k = partial_verify_vals[i_imopVarPre76];
/*[13; ]*/
int _imopVarPre35;
/*[13; ]*/
_imopVarPre35 = 0 <= k;
/*[13; ]*/
/*[13; ]*/
if (_imopVarPre35) {
/*[13; ]*/
/*[13; ]*/
_imopVarPre35 = k <= (1 << 16) - 1;
}
/*[13; ]*/
/*[13; ]*/
if (_imopVarPre35) {
/*[13; ]*/
/*[13; ]*/
/*[13; ]*/
switch ('S') {
/*[]*/
/*[13; ]*/
/*[13; ]*/
case 'S': if (i_imopVarPre76 <= 2) {
/*[13; ]*/
/*[13; ]*/
/*[13; ]*/
if (key_buff1[k - 1] != test_rank_array[i_imopVarPre76] + iteration_imopVarPre75) {
/*[13; ]*/
/*[13; ]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration_imopVarPre75, i_imopVarPre76);
/*[13; ]*/
} else {
/*[13; ]*/
/*[13; ]*/
passed_verification++;
}
} else {
/*[13; ]*/
/*[13; ]*/
/*[13; ]*/
if (key_buff1[k - 1] != test_rank_array[i_imopVarPre76] - iteration_imopVarPre75) {
/*[13; ]*/
/*[13; ]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration_imopVarPre75, i_imopVarPre76);
/*[13; ]*/
} else {
/*[13; ]*/
/*[13; ]*/
passed_verification++;
}
}
/*[13; ]*/
break;
/*[]*/
/*[]*/
case 'W': if (i_imopVarPre76 < 2) {
/*[]*/
/*[]*/
/*[]*/
if (key_buff1[k - 1] != test_rank_array[i_imopVarPre76] + (iteration_imopVarPre75 - 2)) {
/*[]*/
/*[]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration_imopVarPre75, i_imopVarPre76);
/*[]*/
} else {
/*[]*/
/*[]*/
passed_verification++;
}
} else {
/*[]*/
/*[]*/
/*[]*/
if (key_buff1[k - 1] != test_rank_array[i_imopVarPre76] - iteration_imopVarPre75) {
/*[]*/
/*[]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration_imopVarPre75, i_imopVarPre76);
/*[]*/
} else {
/*[]*/
/*[]*/
passed_verification++;
}
}
/*[]*/
break;
/*[]*/
/*[]*/
case 'A': if (i_imopVarPre76 <= 2) {
/*[]*/
/*[]*/
/*[]*/
if (key_buff1[k - 1] != test_rank_array[i_imopVarPre76] + (iteration_imopVarPre75 - 1)) {
/*[]*/
/*[]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration_imopVarPre75, i_imopVarPre76);
/*[]*/
} else {
/*[]*/
/*[]*/
passed_verification++;
}
} else {
/*[]*/
/*[]*/
/*[]*/
if (key_buff1[k - 1] != test_rank_array[i_imopVarPre76] - (iteration_imopVarPre75 - 1)) {
/*[]*/
/*[]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration_imopVarPre75, i_imopVarPre76);
/*[]*/
} else {
/*[]*/
/*[]*/
passed_verification++;
}
}
/*[]*/
break;
/*[]*/
case 'B': ;
/*[]*/
int _imopVarPre36;
/*[]*/
int _imopVarPre37;
/*[]*/
_imopVarPre36 = i_imopVarPre76 == 1;
/*[]*/
/*[]*/
if (!_imopVarPre36) {
/*[]*/
/*[]*/
_imopVarPre37 = i_imopVarPre76 == 2;
/*[]*/
/*[]*/
if (!_imopVarPre37) {
/*[]*/
/*[]*/
_imopVarPre37 = i_imopVarPre76 == 4;
}
/*[]*/
_imopVarPre36 = _imopVarPre37;
}
/*[]*/
/*[]*/
if (_imopVarPre36) {
/*[]*/
/*[]*/
/*[]*/
if (key_buff1[k - 1] != test_rank_array[i_imopVarPre76] + iteration_imopVarPre75) {
/*[]*/
/*[]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration_imopVarPre75, i_imopVarPre76);
/*[]*/
} else {
/*[]*/
/*[]*/
passed_verification++;
}
} else {
/*[]*/
/*[]*/
/*[]*/
if (key_buff1[k - 1] != test_rank_array[i_imopVarPre76] - iteration_imopVarPre75) {
/*[]*/
/*[]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration_imopVarPre75, i_imopVarPre76);
/*[]*/
} else {
/*[]*/
/*[]*/
passed_verification++;
}
}
/*[]*/
break;
/*[]*/
/*[]*/
case 'C': if (i_imopVarPre76 <= 2) {
/*[]*/
/*[]*/
/*[]*/
if (key_buff1[k - 1] != test_rank_array[i_imopVarPre76] + iteration_imopVarPre75) {
/*[]*/
/*[]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration_imopVarPre75, i_imopVarPre76);
/*[]*/
} else {
/*[]*/
/*[]*/
passed_verification++;
}
} else {
/*[]*/
/*[]*/
/*[]*/
if (key_buff1[k - 1] != test_rank_array[i_imopVarPre76] - iteration_imopVarPre75) {
/*[]*/
/*[]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration_imopVarPre75, i_imopVarPre76);
/*[]*/
} else {
/*[]*/
/*[]*/
passed_verification++;
}
}
/*[]*/
break;
}
}
}
/*[13; ]*/
/*[13; ]*/
if (iteration_imopVarPre75 == 10) {
/*[13; ]*/
/*[13; ]*/
key_buff_ptr_global = key_buff1;
}
}
/*[13; ]*/
// #pragma omp dummyFlush BARRIER_START written([key_buff_ptr_global, passed_verification]) read([key_array, key_array.f, timer_start, key_buff1, partial_verify_vals, test_index_array.f, test_index_array, printf])
/*[13; ]*/
#pragma omp barrier
/*[7; ]*/
#pragma omp master
{
/*[7; ]*/
/*[7; ]*/
passed_verification = 0;
/*[7; ]*/
/*[7; ]*/
if ('S' != 'S') {
/*[7; ]*/
/*[7; ]*/
printf("\n iteration\n");
/*[7; ]*/
}
/*[7; ]*/
timer_start(0);
/*[7; ]*/
}
/*[4; 7; ]*/
/*[17; 4; 7; ]*/
/*[17; ]*/
/*[4; 7; ]*/
for (iteration = 1; iteration <= 10; iteration++) {
/*[17; 4; 7; ]*/
/*[17; 4; 7; ]*/
#pragma omp master
{
/*[17; 4; 7; ]*/
/*[17; 4; 7; ]*/
/*[17; 4; 7; ]*/
if ('S' != 'S') {
/*[17; 4; 7; ]*/
/*[17; 4; 7; ]*/
printf(" %d\n", iteration);
/*[17; 4; 7; ]*/
}
}
/*[17; 7; ]*/
INT_TYPE i_imopVarPre77;
/*[17; 7; ]*/
INT_TYPE k_imopVarPre78;
/*[17; 7; ]*/
11 - 9;
/*[17; 7; ]*/
INT_TYPE prv_buff1_imopVarPre79[(1 << 11)];
/*[17; 7; ]*/
#pragma omp master
{
/*[17; 7; ]*/
/*[17; 7; ]*/
key_array[iteration] = iteration;
/*[17; 7; ]*/
key_array[iteration + 10] = (1 << 11) - iteration;
/*[17; 7; ]*/
/*[17; 7; ]*/
/*[17; 7; ]*/
/*[17; 7; ]*/
for (i_imopVarPre77 = 0; i_imopVarPre77 < 5; i_imopVarPre77++) {
/*[17; 7; ]*/
/*[17; 7; ]*/
partial_verify_vals[i_imopVarPre77] = key_array[test_index_array[i_imopVarPre77]];
}
/*[17; 7; ]*/
/*[17; 7; ]*/
/*[17; 7; ]*/
/*[17; 7; ]*/
for (i_imopVarPre77 = 0; i_imopVarPre77 < (1 << 11); i_imopVarPre77++) {
/*[17; 7; ]*/
/*[17; 7; ]*/
key_buff1[i_imopVarPre77] = 0;
}
}
/*[17; 7; ]*/
// #pragma omp dummyFlush BARRIER_START written([key_buff1.f, key_array.f, passed_verification, partial_verify_vals.f]) read([key_buff2, key_array, key_array.f, key_buff2.f, i_imopVarPre77])
/*[17; 7; ]*/
#pragma omp barrier
/*[16; ]*/
/*[16; ]*/
/*[16; ]*/
/*[16; ]*/
for (i_imopVarPre77 = 0; i_imopVarPre77 < (1 << 11); i_imopVarPre77++) {
/*[16; ]*/
/*[16; ]*/
prv_buff1_imopVarPre79[i_imopVarPre77] = 0;
}
/*[16; ]*/
#pragma omp for nowait
/*[16; ]*/
/*[16; ]*/
/*[16; ]*/
for (i_imopVarPre77 = 0; i_imopVarPre77 < (1 << 16); i_imopVarPre77++) {
/*[16; ]*/
/*[16; ]*/
key_buff2[i_imopVarPre77] = key_array[i_imopVarPre77];
/*[16; ]*/
prv_buff1_imopVarPre79[key_buff2[i_imopVarPre77]]++;
}
/*[16; ]*/
/*[16; ]*/
/*[16; ]*/
/*[16; ]*/
for (i_imopVarPre77 = 0; i_imopVarPre77 < (1 << 11) - 1; i_imopVarPre77++) {
/*[16; ]*/
/*[16; ]*/
prv_buff1_imopVarPre79[i_imopVarPre77 + 1] += prv_buff1_imopVarPre79[i_imopVarPre77];
}
/*[16; ]*/
// #pragma omp dummyFlush CRITICAL_START written([key_buff2.f]) read([key_buff1.f, key_buff1])
/*[16; ]*/
#pragma omp critical
{
/*[16; ]*/
/*[16; ]*/
/*[16; ]*/
/*[16; ]*/
/*[16; ]*/
for (i_imopVarPre77 = 0; i_imopVarPre77 < (1 << 11); i_imopVarPre77++) {
/*[16; ]*/
/*[16; ]*/
key_buff1[i_imopVarPre77] += prv_buff1_imopVarPre79[i_imopVarPre77];
}
}
/*[16; ]*/
// #pragma omp dummyFlush CRITICAL_END written([key_buff1.f]) read([key_buff1.f, test_rank_array.f, key_buff1, test_rank_array, _imopVarPre35, passed_verification, partial_verify_vals, partial_verify_vals.f, printf])
/*[16; ]*/
#pragma omp master
{
/*[16; ]*/
/*[16; ]*/
/*[16; ]*/
/*[16; ]*/
/*[16; ]*/
for (i_imopVarPre77 = 0; i_imopVarPre77 < 5; i_imopVarPre77++) {
/*[16; ]*/
/*[16; ]*/
k_imopVarPre78 = partial_verify_vals[i_imopVarPre77];
/*[16; ]*/
int _imopVarPre35;
/*[16; ]*/
_imopVarPre35 = 0 <= k_imopVarPre78;
/*[16; ]*/
/*[16; ]*/
if (_imopVarPre35) {
/*[16; ]*/
/*[16; ]*/
_imopVarPre35 = k_imopVarPre78 <= (1 << 16) - 1;
}
/*[16; ]*/
/*[16; ]*/
if (_imopVarPre35) {
/*[16; ]*/
/*[16; ]*/
/*[16; ]*/
switch ('S') {
/*[]*/
/*[16; ]*/
/*[16; ]*/
case 'S': if (i_imopVarPre77 <= 2) {
/*[16; ]*/
/*[16; ]*/
/*[16; ]*/
if (key_buff1[k_imopVarPre78 - 1] != test_rank_array[i_imopVarPre77] + iteration) {
/*[16; ]*/
/*[16; ]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i_imopVarPre77);
/*[16; ]*/
} else {
/*[16; ]*/
/*[16; ]*/
passed_verification++;
}
} else {
/*[16; ]*/
/*[16; ]*/
/*[16; ]*/
if (key_buff1[k_imopVarPre78 - 1] != test_rank_array[i_imopVarPre77] - iteration) {
/*[16; ]*/
/*[16; ]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i_imopVarPre77);
/*[16; ]*/
} else {
/*[16; ]*/
/*[16; ]*/
passed_verification++;
}
}
/*[16; ]*/
break;
/*[]*/
/*[]*/
case 'W': if (i_imopVarPre77 < 2) {
/*[]*/
/*[]*/
/*[]*/
if (key_buff1[k_imopVarPre78 - 1] != test_rank_array[i_imopVarPre77] + (iteration - 2)) {
/*[]*/
/*[]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i_imopVarPre77);
/*[]*/
} else {
/*[]*/
/*[]*/
passed_verification++;
}
} else {
/*[]*/
/*[]*/
/*[]*/
if (key_buff1[k_imopVarPre78 - 1] != test_rank_array[i_imopVarPre77] - iteration) {
/*[]*/
/*[]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i_imopVarPre77);
/*[]*/
} else {
/*[]*/
/*[]*/
passed_verification++;
}
}
/*[]*/
break;
/*[]*/
/*[]*/
case 'A': if (i_imopVarPre77 <= 2) {
/*[]*/
/*[]*/
/*[]*/
if (key_buff1[k_imopVarPre78 - 1] != test_rank_array[i_imopVarPre77] + (iteration - 1)) {
/*[]*/
/*[]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i_imopVarPre77);
/*[]*/
} else {
/*[]*/
/*[]*/
passed_verification++;
}
} else {
/*[]*/
/*[]*/
/*[]*/
if (key_buff1[k_imopVarPre78 - 1] != test_rank_array[i_imopVarPre77] - (iteration - 1)) {
/*[]*/
/*[]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i_imopVarPre77);
/*[]*/
} else {
/*[]*/
/*[]*/
passed_verification++;
}
}
/*[]*/
break;
/*[]*/
case 'B': ;
/*[]*/
int _imopVarPre36;
/*[]*/
int _imopVarPre37;
/*[]*/
_imopVarPre36 = i_imopVarPre77 == 1;
/*[]*/
/*[]*/
if (!_imopVarPre36) {
/*[]*/
/*[]*/
_imopVarPre37 = i_imopVarPre77 == 2;
/*[]*/
/*[]*/
if (!_imopVarPre37) {
/*[]*/
/*[]*/
_imopVarPre37 = i_imopVarPre77 == 4;
}
/*[]*/
_imopVarPre36 = _imopVarPre37;
}
/*[]*/
/*[]*/
if (_imopVarPre36) {
/*[]*/
/*[]*/
/*[]*/
if (key_buff1[k_imopVarPre78 - 1] != test_rank_array[i_imopVarPre77] + iteration) {
/*[]*/
/*[]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i_imopVarPre77);
/*[]*/
} else {
/*[]*/
/*[]*/
passed_verification++;
}
} else {
/*[]*/
/*[]*/
/*[]*/
if (key_buff1[k_imopVarPre78 - 1] != test_rank_array[i_imopVarPre77] - iteration) {
/*[]*/
/*[]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i_imopVarPre77);
/*[]*/
} else {
/*[]*/
/*[]*/
passed_verification++;
}
}
/*[]*/
break;
/*[]*/
/*[]*/
case 'C': if (i_imopVarPre77 <= 2) {
/*[]*/
/*[]*/
/*[]*/
if (key_buff1[k_imopVarPre78 - 1] != test_rank_array[i_imopVarPre77] + iteration) {
/*[]*/
/*[]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i_imopVarPre77);
/*[]*/
} else {
/*[]*/
/*[]*/
passed_verification++;
}
} else {
/*[]*/
/*[]*/
/*[]*/
if (key_buff1[k_imopVarPre78 - 1] != test_rank_array[i_imopVarPre77] - iteration) {
/*[]*/
/*[]*/
printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i_imopVarPre77);
/*[]*/
} else {
/*[]*/
/*[]*/
passed_verification++;
}
}
/*[]*/
break;
}
}
}
/*[16; ]*/
/*[16; ]*/
if (iteration == 10) {
/*[16; ]*/
/*[16; ]*/
key_buff_ptr_global = key_buff1;
}
}
/*[16; ]*/
// #pragma omp dummyFlush BARRIER_START written([key_buff_ptr_global, passed_verification]) read([key_array, key_array.f, key_buff1, partial_verify_vals, test_index_array.f, test_index_array, printf])
/*[16; ]*/
#pragma omp barrier
}
/*[17; 7; ]*/
// #pragma omp dummyFlush BARRIER_START written([passed_verification]) read([key_array, key_buff2, key_buff1.f, key_array.f, key_buff2.f, key_buff_ptr_global, c_print_results, _imopVarPre42, _imopVarPre43, timer_read, full_verify, printf, nthreads, nullCell, timecounter, passed_verification, timer_stop])
/*[17; 7; ]*/
#pragma omp barrier
/*[8; 16; ]*/
#pragma omp master
{
/*[8; 16; ]*/
/*[8; 16; ]*/
timer_stop(0);
/*[8; 16; ]*/
/*[8; 16; ]*/
timecounter = timer_read(0);
/*[8; 16; ]*/
/*[8; 16; ]*/
full_verify();
/*[8; 16; ]*/
/*[8; 16; ]*/
/*[8; 16; ]*/
if (passed_verification != 5 * 10 + 1) {
/*[8; 16; ]*/
/*[8; 16; ]*/
passed_verification = 0;
}
/*[8; 16; ]*/
_imopVarPre42 = ((double) (10 * (1 << 16))) / timecounter / 1000000.;
/*[8; 16; ]*/
_imopVarPre43 = (1 << 16);
/*[8; 16; ]*/
c_print_results("IS", 'S', _imopVarPre43, 0, 0, 10, nthreads, timecounter, _imopVarPre42, "keys ranked", passed_verification, "3.0 structured", "21 Jul 2017", "gcc", "gcc", "(none)", "-I../common", "-O3 -fopenmp", "-O3 -fopenmp", "randlc");
/*[8; 16; ]*/
}
}
}
|
mclib.c | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "hdf5.h"
#include <math.h>
#include <time.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#include <gsl/gsl_blas.h>
#include <gsl/gsl_vector.h>
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_sf_bessel.h>
#include "mclib.h"
#include <omp.h>
#define PROP_DIM1 1
#define PROP_DIM2 8
#define PROP_DIM3 8
#define COORD_DIM1 2
//define constants
const double A_RAD=7.56e-15, C_LIGHT=2.99792458e10, PL_CONST=6.6260755e-27;
const double K_B=1.380658e-16, M_P=1.6726231e-24, THOMP_X_SECT=6.65246e-25, M_EL=9.1093879e-28 ;
void printPhotons(struct photon *ph, int num_ph, int frame,char dir[200] )
{
//function to save the photons' positions and 4 momentum
int i=0;
char mc_file_p0[200], mc_file_p1[200],mc_file_p2[200], mc_file_p3[200];
char mc_file_r0[200], mc_file_r1[200], mc_file_r2[200], mc_file_ns[200];
FILE *fPtr=NULL, *fPtr1=NULL,*fPtr2=NULL,*fPtr3=NULL,*fPtr4=NULL,*fPtr5=NULL,*fPtr6=NULL,*fPtr7=NULL;
//make strings for proper files
snprintf(mc_file_p0,sizeof(mc_file_p0),"%s%s%d%s",dir,"mcdata_", frame,"_P0.dat" );
snprintf(mc_file_p1,sizeof(mc_file_p1),"%s%s%d%s",dir,"mcdata_", frame,"_P1.dat" );
snprintf(mc_file_p2,sizeof(mc_file_p2),"%s%s%d%s",dir,"mcdata_", frame,"_P2.dat" );
snprintf(mc_file_p3,sizeof(mc_file_p3),"%s%s%d%s",dir,"mcdata_", frame,"_P3.dat" );
snprintf(mc_file_r0,sizeof(mc_file_r0),"%s%s%d%s",dir,"mcdata_", frame,"_R0.dat" );
snprintf(mc_file_r1,sizeof(mc_file_r1),"%s%s%d%s",dir,"mcdata_", frame,"_R1.dat" );
snprintf(mc_file_r2,sizeof(mc_file_r2),"%s%s%d%s",dir,"mcdata_", frame,"_R2.dat" );
snprintf(mc_file_ns,sizeof(mc_file_ns),"%s%s%d%s",dir,"mcdata_", frame,"_NS.dat" ); //for number of scatterings each photon went through
//save the energy
fPtr=fopen(mc_file_p0, "a");
fPtr1=fopen(mc_file_p1, "a");
fPtr2=fopen(mc_file_p2, "a");
fPtr3=fopen(mc_file_p3, "a");
fPtr4=fopen(mc_file_r0, "a");
fPtr5=fopen(mc_file_r1, "a");
fPtr6=fopen(mc_file_r2, "a");
fPtr7=fopen(mc_file_ns, "a");
//printf("Writing P0\n");
for (i=0;i<num_ph;i++)
{
fprintf(fPtr,"%0.13e\t", (ph+i)->p0);
//printf("%d: %0.13e \n", i, (ph+i)->p0);
fprintf(fPtr1,"%0.13e\t", (ph+i)->p1);
//printf("%d: %0.13e \n", i, (ph+i)->p1);
fprintf(fPtr2,"%0.13e\t", (ph+i)->p2);
//printf("%d: %0.13e \n", i, (ph+i)->p2);
fprintf(fPtr3,"%0.13e\t", (ph+i)->p3);
//printf("%d: %0.13e \n", i, (ph+i)->p3);
fprintf(fPtr4,"%0.13e\t", (ph+i)->r0);
//printf("%d: %0.13e \n", i, (ph+i)->r0);
fprintf(fPtr5,"%0.13e\t", (ph+i)->r1);
//printf("%d: %0.13e \n", i, (ph+i)->r1);
fprintf(fPtr6,"%0.13e\t", (ph+i)->r2);
//printf("%d: %0.13e \n", i, (ph+i)->r2);
//fprintf(fPtr7,"%0.13e\t", *(ph_num_scatt+i));
fprintf(fPtr7,"%e\t", (ph+i)->num_scatt);
//printf("%d: %0.13e \n", i, (ph+i)->num_scatt);
}
fclose(fPtr);
fclose(fPtr1);
fclose(fPtr2);
fclose(fPtr3);
fclose(fPtr4);
fclose(fPtr5);
fclose(fPtr6);
fclose(fPtr7);
//printf("%s\n%s\n%s\n", mc_file_p0, mc_file_r0, mc_file_ns);
}
void saveCheckpoint(char dir[200], int frame, int scatt_frame, int ph_num,double time_now, struct photon *ph, int last_frame )
{
//function to save data necessary to restart simulation if it ends
//need to save all photon data
FILE *fPtr=NULL;
char checkptfile[200];
char restart;
int i=0;
snprintf(checkptfile,sizeof(checkptfile),"%s%s",dir,"mc_chkpt.dat" );
fPtr=fopen(checkptfile, "wb");
if (scatt_frame!=last_frame)
{
restart='c';
fwrite(&restart, sizeof(char), 1, fPtr);
fwrite(&frame, sizeof(int), 1, fPtr);
fwrite(&scatt_frame, sizeof(int), 1, fPtr);
fwrite(&time_now, sizeof(double), 1, fPtr);
fwrite(&ph_num, sizeof(int), 1, fPtr);
//for(i=0;i<ph_num;i++)
//{
fwrite(ph, sizeof(struct photon), ph_num, fPtr); //write the ph_num photon structs, one element each of sizeof(struct photon)
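//checkpoint layout: restart flag, frame, scatt_frame, time_now, ph_num, then the photon
//structs themselves, matching the read order in readCheckpoint()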
//}
}
else
{
//just finished last iteration of scatt_frame
restart='r';
fwrite(&restart, sizeof(char), 1, fPtr);
fwrite(&frame, sizeof(int), 1, fPtr);
}
fclose(fPtr);
}
void readCheckpoint(char dir[200], struct photon **ph, int *framestart, int *scatt_framestart, int *ph_num, char *restart, double *time )
{
//function to read in data from checkpoint file
FILE *fPtr=NULL;
char checkptfile[200];
int i=0;
//int frame, scatt_frame, ph_num, i=0;
struct photon *phHolder=NULL; //pointer to struct to hold data read in from checkpoint file
snprintf(checkptfile,sizeof(checkptfile),"%s%s",dir,"mc_chkpt.dat" );
fPtr=fopen(checkptfile, "rb");
fread(restart, sizeof(char), 1, fPtr);
printf("%c\n", *restart);
fread(framestart, sizeof(int), 1, fPtr);
printf("%d\n", *framestart);
if((*restart)=='c')
{
fread(scatt_framestart, sizeof(int), 1, fPtr);
*scatt_framestart+=1; //add one to start at the next frame after the simulation was interrupted
printf("%d\n", *scatt_framestart);
fread(time, sizeof(double), 1, fPtr);
printf("%e\n", *time);
fread(ph_num, sizeof(int), 1, fPtr);
printf("%d\n", *ph_num);
phHolder=malloc(sizeof(struct photon));
(*ph)=malloc(sizeof(struct photon)*(*ph_num)); //allocate memory to hold photon data
for (i=0;i<(*ph_num);i++)
{
fread(phHolder, sizeof(struct photon), 1, fPtr);
//printf("%e,%e,%e, %e,%e,%e, %e, %e\n",(ph)->p0, (ph)->p1, (ph)->p2, ph->p3, (ph)->r0, (ph)->r1, (ph)->r2, ph->num_scatt );
(*ph)[i].p0=phHolder->p0;
(*ph)[i].p1=phHolder->p1;
(*ph)[i].p2=phHolder->p2;
(*ph)[i].p3=phHolder->p3;
(*ph)[i].r0= phHolder->r0;
(*ph)[i].r1=phHolder->r1 ;
(*ph)[i].r2=phHolder->r2;
(*ph)[i].num_scatt=phHolder->num_scatt;
}
free(phHolder);
}
else
{
*framestart+=1; //if the checkpoint file was saved and the program was interrupted before the frame variable had increased and before the scatt_frame iteration was saved, add one to the frame start
}
fclose(fPtr);
}
void readMcPar(char file[200], double *fps, double *theta_jmin, double *theta_j, double *inj_radius, int *frm0,int *last_frm, int *frm2, int *photon_num, double *ph_weight, char *spect, char *restart)
{
//function to read mc.par file
FILE *fptr=NULL;
char buf[100];
double theta_deg;
//open file
fptr=fopen(file,"r");
//read in frames per sec and other variables outlined in main()
fscanf(fptr, "%lf",fps);
//printf("%f\n", *fps );
fgets(buf, 100,fptr);
fscanf(fptr, "%d",frm0);
//printf("%d\n", *frm0 );
fgets(buf, 100,fptr);
fscanf(fptr, "%d",last_frm);
//printf("%d\n", *last_frm );
fgets(buf, 100,fptr);
fscanf(fptr, "%d",frm2);
*frm2+=*frm0; //frame to go to is what is given in the file plus the starting frame
//printf("%d\n", *frm2 );
fgets(buf, 100,fptr);
fscanf(fptr, "%d",photon_num);
//printf("%d\n", *photon_num );
fgets(buf, 100,fptr);
fscanf(fptr, "%lf",inj_radius);
//printf("%lf\n", *inj_radius );
fgets(buf, 100,fptr);
//theta jmin
fscanf(fptr, "%lf",&theta_deg);
*theta_jmin=theta_deg*M_PI/180;
//printf("%f\n", *theta_jmin );
fgets(buf, 100,fptr);
fscanf(fptr, "%lf",&theta_deg);
*theta_j=theta_deg*M_PI/180;
//printf("%f\n", *theta_j );
fgets(buf, 100,fptr);
fscanf(fptr, "%lf",ph_weight);
fgets(buf, 100,fptr);
*spect=getc(fptr);
fgets(buf, 100,fptr);
//printf("%c\n",*spect);
*restart=getc(fptr);
//close file
fclose(fptr);
}
void readAndDecimate(char flash_file[200], double r_inj, double **x, double **y, double **szx, double **szy, double **r,\
double **theta, double **velx, double **vely, double **dens, double **pres, double **gamma, double **dens_lab, double **temp, int *number)
{
//function to read in data from FLASH file
hid_t file,dset, space;
herr_t status;
hsize_t dims[2]={0,0}; //hold dimension size for coordinate data set (mostly interested in dims[0])
double **vel_x_buffer=NULL, **vel_y_buffer=NULL, **dens_buffer=NULL, **pres_buffer=NULL, **coord_buffer=NULL, **block_sz_buffer=NULL;
double *velx_unprc=NULL, *vely_unprc=NULL, *dens_unprc=NULL, *pres_unprc=NULL, *x_unprc=NULL, *y_unprc=NULL, *r_unprc=NULL, *szx_unprc=NULL, *szy_unprc=NULL;
int i,j,count,x1_count, y1_count, r_count, **node_buffer=NULL, num_nodes=0;
double x1[8]={-7.0/16,-5.0/16,-3.0/16,-1.0/16,1.0/16,3.0/16,5.0/16,7.0/16};
file = H5Fopen (flash_file, H5F_ACC_RDONLY, H5P_DEFAULT);
printf(">> mc.py: Reading positional, density, pressure, and velocity information...\n");
//printf("Reading coord\n");
dset = H5Dopen (file, "coordinates", H5P_DEFAULT);
//get dimensions of array and save it
space = H5Dget_space (dset);
H5Sget_simple_extent_dims(space, dims, NULL); //save dimensions in dims
/*
* Allocate array of pointers to rows. OPTIMIZE HERE: INITIALIZE ALL THE BUFFERS AT ONCE IN 1 FOR LOOP
*/
coord_buffer = (double **) malloc (dims[0] * sizeof (double *));
coord_buffer[0] = (double *) malloc (dims[0] * dims[1] * sizeof (double));
block_sz_buffer= (double **) malloc (dims[0] * sizeof (double *));
block_sz_buffer[0] = (double *) malloc (dims[0] * COORD_DIM1 * sizeof (double));
node_buffer= (int **) malloc (dims[0] * sizeof (int *));
node_buffer[0] = (int *) malloc (dims[0] * sizeof (int));
vel_x_buffer= (double **) malloc (dims[0] * sizeof (double *));
vel_x_buffer[0]= (double *) malloc (dims[0] * PROP_DIM1 *PROP_DIM2*PROP_DIM3* sizeof (double));
vel_y_buffer= (double **) malloc (dims[0] * sizeof (double *));
vel_y_buffer[0]= (double *) malloc (dims[0] * PROP_DIM1 *PROP_DIM2*PROP_DIM3* sizeof (double));
dens_buffer= (double **) malloc (dims[0] * sizeof (double *));
dens_buffer[0]= (double *) malloc (dims[0] * PROP_DIM1 *PROP_DIM2*PROP_DIM3* sizeof (double));
pres_buffer= (double **) malloc (dims[0] * sizeof (double *));
pres_buffer[0]= (double *) malloc (dims[0] * PROP_DIM1 *PROP_DIM2*PROP_DIM3* sizeof (double));
/*
* Set the rest of the pointers to rows to the correct addresses.
*/
for (i=1; i<dims[0]; i++)
{
coord_buffer[i] = coord_buffer[0] + i * dims[1];
block_sz_buffer[i] = block_sz_buffer[0] + i * COORD_DIM1;
node_buffer[i] = node_buffer[0] + i ;
vel_x_buffer[i] = vel_x_buffer[0] + i * PROP_DIM1*PROP_DIM2*PROP_DIM3;
vel_y_buffer[i] = vel_y_buffer[0] + i * PROP_DIM1*PROP_DIM2*PROP_DIM3;
dens_buffer[i] = dens_buffer[0] + i * PROP_DIM1*PROP_DIM2*PROP_DIM3;
pres_buffer[i] = pres_buffer[0] + i * PROP_DIM1*PROP_DIM2*PROP_DIM3;
}
// for (i=1; i<dims[0]; i++)
// coord_buffer[i] = coord_buffer[0] + i * dims[1];
//read data such that first column is x and second column is y
//printf("Reading Dataset\n");
status = H5Dread (dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT,coord_buffer[0]);
//close dataset
status = H5Sclose (space);
status = H5Dclose (dset);
//printf("Reading block size\n");
dset = H5Dopen (file, "block size", H5P_DEFAULT);
// block_sz_buffer= (double **) malloc (dims[0] * sizeof (double *));
//
// block_sz_buffer[0] = (double *) malloc (dims[0] * COORD_DIM1 * sizeof (double));
//
// for (i=1; i<dims[0]; i++){
// block_sz_buffer[i] = block_sz_buffer[0] + i * COORD_DIM1;
// }
//printf("Reading Dataset\n");
status = H5Dread (dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT,block_sz_buffer[0]);
// first column of buffer is x and second column is y
status = H5Dclose (dset);
//printf("Reading node type\n");
dset = H5Dopen (file, "node type", H5P_DEFAULT);
// node_buffer= (int **) malloc (dims[0] * sizeof (int *));
// node_buffer[0] = (int *) malloc (dims[0] * sizeof (int));
//
// for (i=1; i<dims[0]; i++){
// node_buffer[i] = node_buffer[0] + i ;
// }
//printf("Reading Dataset\n");
status = H5Dread (dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT,node_buffer[0]);
status = H5Dclose (dset);
//printf("Reading velx\n");
dset = H5Dopen (file, "velx", H5P_DEFAULT);
// vel_x_buffer= (double **) malloc (dims[0] * sizeof (double *));
// vel_x_buffer[0]= (double *) malloc (dims[0] * PROP_DIM1 *PROP_DIM2*PROP_DIM3* sizeof (double));
//
// for (i=1; i<dims[0]; i++)
// {
// vel_x_buffer[i] = vel_x_buffer[0] + i * PROP_DIM1*PROP_DIM2*PROP_DIM3;
// }
//printf("Reading Dataset\n");
status = H5Dread (dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT,vel_x_buffer[0]);
status = H5Dclose (dset);
//printf("Reading vely\n");
dset = H5Dopen (file, "vely", H5P_DEFAULT);
// vel_y_buffer= (double **) malloc (dims[0] * sizeof (double *));
// vel_y_buffer[0]= (double *) malloc (dims[0] * PROP_DIM1 *PROP_DIM2*PROP_DIM3* sizeof (double));
//
// for (i=1; i<dims[0]; i++)
// {
// vel_y_buffer[i] = vel_y_buffer[0] + i * PROP_DIM1*PROP_DIM2*PROP_DIM3;
// }
//printf("Reading Dataset\n");
status = H5Dread (dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT,vel_y_buffer[0]);
status = H5Dclose (dset);
//printf("Reading dens\n");
dset = H5Dopen (file, "dens", H5P_DEFAULT);
// dens_buffer= (double **) malloc (dims[0] * sizeof (double *));
// dens_buffer[0]= (double *) malloc (dims[0] * PROP_DIM1 *PROP_DIM2*PROP_DIM3* sizeof (double));
// for (i=1; i<dims[0]; i++)
// {
// dens_buffer[i] = dens_buffer[0] + i * PROP_DIM1*PROP_DIM2*PROP_DIM3;
// }
//printf("Reading Dataset\n");
status = H5Dread (dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT,dens_buffer[0]);
status = H5Dclose (dset);
//printf("Reading pres\n");
dset = H5Dopen (file, "pres", H5P_DEFAULT);
// pres_buffer= (double **) malloc (dims[0] * sizeof (double *));
// pres_buffer[0]= (double *) malloc (dims[0] * PROP_DIM1 *PROP_DIM2*PROP_DIM3* sizeof (double));
// for (i=1; i<dims[0]; i++)
// {
// pres_buffer[i] = pres_buffer[0] + i * PROP_DIM1*PROP_DIM2*PROP_DIM3;
// }
//printf("Reading Dataset\n");
status = H5Dread (dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT,pres_buffer[0]);
status = H5Dclose (dset);
status = H5Fclose (file);
printf(">> Selecting good node types (=1)\n");
//find out how many good nodes there are
for (i=0;i<dims[0];i++)
{
if (node_buffer[i][0]==1 ){
num_nodes++;
}
}
//allocate memory for arrays to hold unprocessed data
pres_unprc=malloc (num_nodes* PROP_DIM1 *PROP_DIM2*PROP_DIM3 * sizeof (double ));
dens_unprc=malloc (num_nodes* PROP_DIM1 *PROP_DIM2*PROP_DIM3 * sizeof (double ));
velx_unprc=malloc (num_nodes* PROP_DIM1 *PROP_DIM2*PROP_DIM3 * sizeof (double ));
vely_unprc=malloc (num_nodes* PROP_DIM1 *PROP_DIM2*PROP_DIM3 * sizeof (double ));
x_unprc=malloc (num_nodes* PROP_DIM1 *PROP_DIM2*PROP_DIM3 * sizeof (double ));
y_unprc=malloc (num_nodes* PROP_DIM1 *PROP_DIM2*PROP_DIM3 * sizeof (double ));
r_unprc=malloc (num_nodes* PROP_DIM1 *PROP_DIM2*PROP_DIM3 * sizeof (double ));
szx_unprc=malloc (num_nodes* PROP_DIM1 *PROP_DIM2*PROP_DIM3 * sizeof (double ));
szy_unprc=malloc (num_nodes* PROP_DIM1 *PROP_DIM2*PROP_DIM3 * sizeof (double ));
//find the values corresponding to the good nodes (=1) and save them to the previously allocated pointers, which are 1D arrays
//also create proper x and y arrays and block size arrays
//and then free up the buffer memory space
printf(">> Creating and reshaping arrays\n");
count=0;
for (i=0;i<dims[0];i++)
{
if (node_buffer[i][0]==1 )
{
x1_count=0;
y1_count=0;
for (j=0;j<(PROP_DIM1*PROP_DIM2*PROP_DIM3);j++)
{
*(pres_unprc+count)=pres_buffer[i][j];
*(dens_unprc+count)=dens_buffer[i][j];
*(velx_unprc+count)=vel_x_buffer[i][j];
*(vely_unprc+count)=vel_y_buffer[i][j];
*(szx_unprc+count)=((block_sz_buffer[i][0])/8)*1e9; //divide by 8 for resolution, multiply by 1e9 to scale properly?
*(szy_unprc+count)=((block_sz_buffer[i][1])/8)*1e9;
if (j%8==0)
{
x1_count=0;
}
if ((j%8==0) && (j!=0))
{
y1_count++;
}
*(x_unprc+count)=(coord_buffer[i][0]+block_sz_buffer[i][0]*x1[x1_count])*1e9;
*(y_unprc+count)=(coord_buffer[i][1]+block_sz_buffer[i][1]*x1[y1_count])*1e9;
//printf("%d,%d,%d,%d\n",count,j,x1_count,y1_count);
x1_count++;
count++;
}
}
}
free (pres_buffer[0]); free (dens_buffer[0]);free (vel_x_buffer[0]);free (vel_y_buffer[0]); free(coord_buffer[0]);free(block_sz_buffer[0]);free(node_buffer[0]);
free (pres_buffer);free(dens_buffer);free(vel_x_buffer);free(vel_y_buffer);free(coord_buffer);free(block_sz_buffer);free(node_buffer);
//fill in radius array and find in how many places r > injection radius
r_count=0;
for (i=0;i<count;i++)
{
*(r_unprc+i)=pow((pow(*(x_unprc+i),2)+pow(*(y_unprc+i),2)),0.5);
if (*(r_unprc+i)> (0.95*r_inj) )
{
r_count++;
}
}
/*
//find in how many places r > injection radius
r_count=0;
for (i=0;i<count;i++)
{
if (*(r_unprc+i)> (0.95*r_inj) )
{
r_count++;
}
}
*/
//allocate memory to hold processed data
(*pres)=malloc (r_count * sizeof (double ));
(*velx)=malloc (r_count * sizeof (double ));
(*vely)=malloc (r_count * sizeof (double ));
(*dens)=malloc (r_count * sizeof (double ));
(*x)=malloc (r_count * sizeof (double ));
(*y)=malloc (r_count * sizeof (double ));
(*r)=malloc (r_count * sizeof (double ));
(*theta)=malloc (r_count * sizeof (double ));
(*gamma)=malloc (r_count * sizeof (double ));
(*dens_lab)=malloc (r_count * sizeof (double ));
(*szx)=malloc (r_count * sizeof (double ));
(*szy)=malloc (r_count * sizeof (double ));
(*temp)=malloc (r_count * sizeof (double ));
//assign values based on r> 0.95*r_inj
j=0;
for (i=0;i<count;i++)
{
if (*(r_unprc+i)> (0.95*r_inj) )
{
(*pres)[j]=*(pres_unprc+i);
(*velx)[j]=*(velx_unprc+i);
(*vely)[j]=*(vely_unprc+i);
(*dens)[j]=*(dens_unprc+i);
(*x)[j]=*(x_unprc+i);
(*y)[j]=*(y_unprc+i);
(*r)[j]=*(r_unprc+i);
(*szx)[j]=*(szx_unprc+i);
(*szy)[j]=*(szy_unprc+i);
(*theta)[j]=atan2( *(x_unprc+i) , *(y_unprc+i) );//theta in radians in relation to jet axis
(*gamma)[j]=pow(pow(1.0-(pow(*(velx_unprc+i),2)+pow(*(vely_unprc+i),2)),0.5),-1); //v is in units of c
(*dens_lab)[j]= (*(dens_unprc+i)) * (pow(pow(1.0-(pow(*(velx_unprc+i),2)+pow(*(vely_unprc+i),2)),0.5),-1));
(*temp)[j]=pow(3*(*(pres_unprc+i))*pow(C_LIGHT,2.0)/(A_RAD) ,1.0/4.0);
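//this temperature presumably assumes a radiation-dominated gas, p = a_rad*T^4/3, giving
//T = (3*p*c^2/a_rad)^(1/4); the extra factor of c^2 appears to convert the FLASH pressure
//into cgs energy density before inverting the relation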
j++;
}
}
*number=j;
free(pres_unprc); free(velx_unprc);free(vely_unprc);free(dens_unprc);free(x_unprc); free(y_unprc);free(r_unprc);free(szx_unprc);free(szy_unprc);
}
void photonInjection( struct photon **ph, int *ph_num, double r_inj, double ph_weight, char spect, int array_length, double fps, double theta_min, double theta_max,\
double *x, double *y, double *szx, double *szy, double *r, double *theta, double *temps, double *vx, double *vy, gsl_rng * rand)
{
int i=0, block_cnt=0, *ph_dens=NULL, ph_tot=0, j=0,k=0;
double ph_dens_calc=0.0, fr_dum=0.0, y_dum=0.0, yfr_dum=0.0, fr_max=0, bb_norm=0, position_phi;
double com_v_phi, com_v_theta, *p_comv=NULL, *boost=NULL; //comoving phi, theta, comoving 4 momentum for a photon, and boost for photon(to go to lab frame)
double *l_boost=NULL; //pointer to hold array of lorentz boost, to lab frame, values
float num_dens_coeff;
if (spect=='w') //from MCRAT paper, w for wien spectrum
{
num_dens_coeff=8.44;
//printf("in wien spectrum\n");
}
else
{
num_dens_coeff=20.29; //this is for black body spectrum
//printf("in BB spectrum");
}
//find how many blocks are near the injection radius within the angles defined in mc.par, get temperatures and calculate number of photons to allocate memory for
//and then record which blocks have to have "x" amount of photons injected there
for(i=0;i<array_length;i++)
{
//look at all boxes in width delta r=c/fps and within angles we are interested in NEED TO IMPLEMENT
if ((*(r+i) > (r_inj - C_LIGHT/fps)) && (*(r+i) < (r_inj + C_LIGHT/fps) ) && (*(theta+i)< theta_max) && (*(theta+i) > theta_min) )
{
block_cnt++;
}
}
//printf("Blocks: %d\n", block_cnt);
//allocate memory to record density of photons for each block
ph_dens=malloc(block_cnt * sizeof(int));
//calculate the photon density for each block and save it to the array
j=0;
ph_tot=0;
for (i=0;i<array_length;i++)
{
//printf("%d\n",i);
//printf("%e, %e, %e, %e, %e, %e\n", *(r+i),(r_inj - C_LIGHT/fps), (r_inj + C_LIGHT/fps), *(theta+i) , theta_max, theta_min);
if ((*(r+i) > (r_inj - C_LIGHT/fps)) && (*(r+i) < (r_inj + C_LIGHT/fps) ) && (*(theta+i)< theta_max) && (*(theta+i) > theta_min) )
{
ph_dens_calc=num_dens_coeff*2.0*M_PI*(*(x+i))*pow(*(temps+i),3.0)*pow(*(szx+i),2.0) /(ph_weight) ; //a*T^3/(weight) dV, dV=2*PI*x*dx^2,
*(ph_dens+j)=gsl_ran_poisson(rand,ph_dens_calc) ; //choose from Poisson distribution with mean of ph_dens_calc
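//drawing from a Poisson distribution (rather than rounding ph_dens_calc) presumably lets the
//realized photon count in each block fluctuate about its expected value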
//printf("%d, %lf \n",*(ph_dens+j), ph_dens_calc);
//sum up all the densities to get total number of photons
ph_tot+=(*(ph_dens+j));
j++;
}
}
//printf("%d\n", ph_tot);
//allocate memory for that many photons and also allocate memory to hold comoving 4 momentum of each photon and the velocity of the fluid
(*ph)=malloc (ph_tot * sizeof (struct photon ));
p_comv=malloc(4*sizeof(double));
boost=malloc(3*sizeof(double));
l_boost=malloc(4*sizeof(double));
//go through blocks and assign random energies/locations to proper number of photons
ph_tot=0;
k=0;
for (i=0;i<array_length;i++)
{
if ((*(r+i) > (r_inj - C_LIGHT/fps)) && (*(r+i) < (r_inj + C_LIGHT/fps) ) && (*(theta+i)< theta_max) && (*(theta+i) > theta_min) )
{
//*(temps+i)=0.76*(*(temps+i));
for(j=0;j<(*(ph_dens+k));j++ )
{
//have to get random frequency for the photon comoving frequency
y_dum=1; //initialize loop
yfr_dum=0;
while (y_dum>yfr_dum)
{
fr_dum=gsl_rng_uniform_pos(rand)*6.3e11*(*(temps+i)); //in Hz
//printf("%lf, %lf ",gsl_rng_uniform_pos(rand), (*(temps+i)));
y_dum=gsl_rng_uniform_pos(rand);
//printf("%lf ",fr_dum);
if (spect=='w')
{
yfr_dum=(1.0/(1.29e31))*pow((fr_dum/(*(temps+i))),3.0)/(exp((PL_CONST*fr_dum)/(K_B*(*(temps+i)) ))-1); //curve is normalized to maximum
}
else
{
fr_max=(C_LIGHT*(*(temps+i)))/(0.29); //max frequency of bb
bb_norm=pow((fr_max/(*(temps+i))),2.0)/(exp(PL_CONST*fr_max/K_B/(*(temps+i)))-1); //find value of bb at fr_max
yfr_dum=(1.0/bb_norm)*pow((fr_dum/(*(temps+i))),2.0)/(exp((PL_CONST*fr_dum)/(K_B*(*(temps+i)) ))-1);//curve is normalized to value of bb @ max frequency
}
//printf("%lf, %lf,%lf,%e \n",(*(temps+i)),fr_dum, y_dum, yfr_dum);
}
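//the loop above is a rejection sampler: a trial frequency fr_dum is drawn uniformly up to
//~6.3e11*T Hz and accepted when the uniform deviate y_dum falls below the normalized Wien or
//blackbody curve, so the accepted frequencies follow the chosen comoving spectrum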
//printf("%lf\n ",fr_dum);
position_phi=gsl_rng_uniform(rand)*2*M_PI;
com_v_phi=gsl_rng_uniform(rand)*2*M_PI;
com_v_theta=acos((gsl_rng_uniform(rand)*2)-1);
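//acos(2u-1) with u uniform in [0,1) makes cos(com_v_theta) uniform in (-1,1], which together
//with the uniform com_v_phi gives an isotropic photon direction in the comoving frame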
//printf("%lf, %lf, %lf\n", position_phi, com_v_phi, com_v_theta);
//populate 4 momentum comoving array
*(p_comv+0)=PL_CONST*fr_dum/C_LIGHT;
*(p_comv+1)=(PL_CONST*fr_dum/C_LIGHT)*sin(com_v_theta)*cos(com_v_phi);
*(p_comv+2)=(PL_CONST*fr_dum/C_LIGHT)*sin(com_v_theta)*sin(com_v_phi);
*(p_comv+3)=(PL_CONST*fr_dum/C_LIGHT)*cos(com_v_theta);
//populate boost (velocity) vector, not sure why multiplying by -1, seems to give correct answer in old python code...
*(boost+0)=-1*(*(vx+i))*cos(position_phi);
*(boost+1)=-1*(*(vx+i))*sin(position_phi);
*(boost+2)=-1*(*(vy+i));
//printf("%lf, %lf, %lf\n", *(boost+0), *(boost+1), *(boost+2));
//boost to lab frame
lorentzBoost(boost, p_comv, l_boost, 'p');
//printf("Assignemnt: %e, %e, %e, %e\n", *(l_boost+0), *(l_boost+1), *(l_boost+2),*(l_boost+3));
(*ph)[ph_tot].p0=(*(l_boost+0));
(*ph)[ph_tot].p1=(*(l_boost+1));
(*ph)[ph_tot].p2=(*(l_boost+2));
(*ph)[ph_tot].p3=(*(l_boost+3));
(*ph)[ph_tot].r0= (*(x+i))*cos(position_phi); //put photons @ center of box that they are supposed to be in with random phi
(*ph)[ph_tot].r1=(*(x+i))*sin(position_phi) ;
(*ph)[ph_tot].r2=(*(y+i)); //y coordinate in flash becomes z coordinate in MCRaT
(*ph)[ph_tot].num_scatt=0;
//printf("%d\n",ph_tot);
ph_tot++;
}
k++;
}
}
*ph_num=ph_tot; //save number of photons
free(ph_dens); free(p_comv);free(boost); free(l_boost);
}
void lorentzBoost(double *boost, double *p_ph, double *result, char object)
{
//function to perform lorentz boost
//if doing boost for an electron last argument is 'e' and there wont be a check for zero norm
//if doing boost for a photon last argument is 'p' and there will be a check for zero norm
double beta=0, gamma=0, *boosted_p=NULL;
gsl_vector_view b=gsl_vector_view_array(boost, 3); //make boost pointer into vector
gsl_vector_view p=gsl_vector_view_array(p_ph, 4); //make boost pointer into vector
gsl_matrix *lambda1= gsl_matrix_calloc (4, 4); //create matrix thats 4x4 to do lorentz boost
gsl_vector *p_ph_prime =gsl_vector_calloc(4); //create vector to hold lorentz boosted vector
/*
printf("Boost: %e, %e, %e, %e\n",gsl_blas_dnrm2(&b.vector), *(boost+0), *(boost+1), *(boost+2));
printf("4 Momentum to Boost: %e, %e, %e, %e\n",*(p_ph+0), *(p_ph+1), *(p_ph+2), *(p_ph+3));
*/
//if magnitude of fluid velocity is != 0 do lorentz boost otherwise dont need to do a boost
if (gsl_blas_dnrm2(&b.vector) > 0)
{
//printf("in If\n");
beta=gsl_blas_dnrm2(&b.vector);
gamma=1.0/sqrt(1-pow(beta, 2.0));
//printf("Beta: %e\tGamma: %e\n",beta,gamma );
//initialize matrix values
gsl_matrix_set(lambda1, 0,0, gamma);
gsl_matrix_set(lambda1, 0,1, -1*gsl_vector_get(&b.vector,0)*gamma);
gsl_matrix_set(lambda1, 0,2, -1*gsl_vector_get(&b.vector,1)*gamma);
gsl_matrix_set(lambda1, 0,3, -1*gsl_vector_get(&b.vector,2)*gamma);
gsl_matrix_set(lambda1, 1,1, 1+((gamma-1)*pow(gsl_vector_get(&b.vector,0),2.0)/pow(beta,2.0) ) );
gsl_matrix_set(lambda1, 1,2, ((gamma-1)*(gsl_vector_get(&b.vector,0)* gsl_vector_get(&b.vector,1)/pow(beta,2.0) ) ));
gsl_matrix_set(lambda1, 1,3, ((gamma-1)*(gsl_vector_get(&b.vector,0)* gsl_vector_get(&b.vector,2)/pow(beta,2.0) ) ));
gsl_matrix_set(lambda1, 2,2, 1+((gamma-1)*pow(gsl_vector_get(&b.vector,1),2.0)/pow(beta,2.0) ) );
gsl_matrix_set(lambda1, 2,3, ((gamma-1)*(gsl_vector_get(&b.vector,1)* gsl_vector_get(&b.vector,2)/pow(beta,2.0)) ) );
gsl_matrix_set(lambda1, 3,3, 1+((gamma-1)*pow(gsl_vector_get(&b.vector,2),2.0)/pow(beta,2.0) ) );
gsl_matrix_set(lambda1, 1,0, gsl_matrix_get(lambda1,0,1));
gsl_matrix_set(lambda1, 2,0, gsl_matrix_get(lambda1,0,2));
gsl_matrix_set(lambda1, 3,0, gsl_matrix_get(lambda1,0,3));
gsl_matrix_set(lambda1, 2,1, gsl_matrix_get(lambda1,1,2));
gsl_matrix_set(lambda1, 3,1, gsl_matrix_get(lambda1,1,3));
gsl_matrix_set(lambda1, 3,2, gsl_matrix_get(lambda1,2,3));
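/* lambda1 is the standard symmetric Lorentz boost matrix:
   Lambda^0_0 = gamma, Lambda^0_i = -gamma*beta_i,
   Lambda^i_j = delta_ij + (gamma-1)*beta_i*beta_j/beta^2,
   applied to the 4-momentum by the single matrix-vector multiply below */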
gsl_blas_dgemv(CblasNoTrans, 1, lambda1, &p.vector, 0, p_ph_prime );
/*
printf("Lorentz Boost Matrix 0: %e,%e, %e, %e\n", gsl_matrix_get(lambda1, 0,0), gsl_matrix_get(lambda1, 0,1), gsl_matrix_get(lambda1, 0,2), gsl_matrix_get(lambda1, 0,3));
printf("Lorentz Boost Matrix 1: %e,%e, %e, %e\n", gsl_matrix_get(lambda1, 1,0), gsl_matrix_get(lambda1, 1,1), gsl_matrix_get(lambda1, 1,2), gsl_matrix_get(lambda1, 1,3));
printf("Lorentz Boost Matrix 2: %e,%e, %e, %e\n", gsl_matrix_get(lambda1, 2,0), gsl_matrix_get(lambda1, 2,1), gsl_matrix_get(lambda1, 2,2), gsl_matrix_get(lambda1, 2,3));
printf("Lorentz Boost Matrix 3: %e,%e, %e, %e\n", gsl_matrix_get(lambda1, 3,0), gsl_matrix_get(lambda1, 3,1), gsl_matrix_get(lambda1, 3,2), gsl_matrix_get(lambda1, 3,3));
printf("Before Check: %e %e %e %e\n ",gsl_vector_get(p_ph_prime, 0), gsl_vector_get(p_ph_prime, 1), gsl_vector_get(p_ph_prime, 2), gsl_vector_get(p_ph_prime, 3));
*/
//double check vector for 0 norm condition if photon
if (object == 'p')
{
//printf("In if\n");
boosted_p=zeroNorm(gsl_vector_ptr(p_ph_prime, 0));
}
else
{
boosted_p=gsl_vector_ptr(p_ph_prime, 0);
}
//printf("After Check: %e %e %e %e\n ", *(boosted_p+0),*(boosted_p+1),*(boosted_p+2),*(boosted_p+3) );
}
else
{
//printf("in else");
//double check vector for 0 norm condition
if (object=='p')
{
boosted_p=zeroNorm(p_ph);
}
else
{
//if 4 momentum isnt for photon and there is no boost to be done, we dont care about normality and just want back what was passed to lorentz boost
boosted_p=gsl_vector_ptr(&p.vector, 0);
}
}
//assign values to result
*(result+0)=*(boosted_p+0);
*(result+1)=*(boosted_p+1);
*(result+2)=*(boosted_p+2);
*(result+3)=*(boosted_p+3);
//free up memory
//free(boosted_p);
gsl_matrix_free (lambda1); gsl_vector_free(p_ph_prime);
}
double *zeroNorm(double *p_ph)
{
//ensures zero norm condition of photon 4 momentum is held
int i=0;
double normalizing_factor=0;
gsl_vector_view p=gsl_vector_view_array((p_ph+1), 3); //make last 3 elements of p_ph pointer into vector
if (*(p_ph+0) != gsl_blas_dnrm2(&p.vector ) )
{
normalizing_factor=(gsl_blas_dnrm2(&p.vector ));
//printf("in zero norm if\n");
//go through and correct 4 momentum assuming the energy is correct
/*
for (i=1;i<4;i++)
{
*(p_ph+i)= ((*(p_ph+i))/(normalizing_factor))*(*(p_ph+0));
}
*/
*(p_ph+1)= ((*(p_ph+1))/(normalizing_factor))*(*(p_ph+0));
*(p_ph+2)= ((*(p_ph+2))/(normalizing_factor))*(*(p_ph+0));
*(p_ph+3)= ((*(p_ph+3))/(normalizing_factor))*(*(p_ph+0));
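//rescaling the spatial components by p0/|p_vec| restores the photon's null condition
//|p_vec| = p0 while preserving its propagation direction and energy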
}
/*
if (pow((*(p_ph+0)),2) != ( pow((*(p_ph+1)),2)+pow((*(p_ph+2)),2)+pow((*(p_ph+3)),2) ) )
{
printf("This isnt normalized in the function\nThe difference is: %e\n", pow((*(p_ph+0)),2) - ( pow((*(p_ph+1)),2)+pow((*(p_ph+2)),2)+pow((*(p_ph+3)),2) ) );
}
*/ //normalized within a factor of 10^-53
return p_ph;
}
int findNearestPropertiesAndMinMFP( struct photon *ph, int num_ph, int array_num, double *time_step, double *x, double *y, double *velx, double *vely, double *dens_lab,\
double *temp, double *n_dens_lab, double *n_vx, double *n_vy,double *n_temp, gsl_rng * rand)
{
int i=0, j=0, min_index=0;
double ph_x=0, ph_y=0, ph_phi=0, dist=0, dist_min=1e12;
double fl_v_x=0, fl_v_y=0, fl_v_z=0; //to hold the fluid velocity in MCRaT coordinates
double ph_v_norm=0, fl_v_norm=0;
double n_cosangle=0, n_dens_lab_tmp=0,n_vx_tmp=0, n_vy_tmp=0, n_temp_tmp=0 ;
double rnd_tracker=0, n_dens_lab_min=0, n_vx_min=0, n_vy_min=0, n_temp_min=0;
int num_threads=omp_get_max_threads();
int index=0;
double mfp=0,min_mfp=0, beta=0;
//initialize gsl random number generator fo each thread
const gsl_rng_type *rng_t;
gsl_rng **rng;
gsl_rng_env_setup();
rng_t = gsl_rng_default;
rng = (gsl_rng **) malloc((num_threads ) * sizeof(gsl_rng *)); //one RNG pointer per thread; slot 0 reuses the caller's rand, which is already initialized, so only threads 1..num_threads-1 get new generators below
rng[0]=rand;
//#pragma omp parallel for num_threads(nt)
for(i=1;i<num_threads;i++)
{
rng[i] = gsl_rng_alloc (rng_t);
gsl_rng_set(rng[i],gsl_rng_get(rand));
}
//go through each photon and find the blocks around it and then get the distances to all of those blocks and choose the one thats the shortest distance away
//can optimize here, exchange the for loops and change condition to compare to each of the photons is the radius of the block is .95 (or 1.05) times the min (max) photon radius
//or just parallelize this part here
min_mfp=1e12;
#pragma omp parallel for firstprivate( ph_x, ph_y, ph_phi, dist_min, dist, j, min_index, n_dens_lab_tmp,n_vx_tmp, n_vy_tmp, n_temp_tmp, fl_v_x, fl_v_y, fl_v_z, fl_v_norm, ph_v_norm, n_cosangle, mfp, beta, rnd_tracker) private(i) shared(min_mfp )
for (i=0;i<num_ph; i++)
{
//printf("%e,%e\n", ((ph+i)->r0), ((ph+i)->r1));
ph_x=pow(pow(((ph+i)->r0),2.0)+pow(((ph+i)->r1),2.0), 0.5); //convert back to FLASH x coordinate
ph_y=((ph+i)->r2);
//printf("ph_x:%e, ph_y:%e\n", ph_x, ph_y);
ph_phi=atan2(((ph+i)->r1), ((ph+i)->r0));
dist_min=1e12;//set dist to impossible value to make sure at least the first distance calculated is saved
for(j=0;j<array_num;j++)
{
//if the distance between them is within 3e9, to restrict the number of possible calculations, calculate the total distance between the box and photon
if ( (fabs(ph_x- (*(x+j)))<3e9) && (fabs(ph_y- (*(y+j)))<3e9))
{
//printf("In if statement\n");
dist= pow(pow(ph_x- (*(x+j)), 2.0) + pow(ph_y- (*(y+j)) , 2.0),0.5);
//printf("Dist calculated as: %e, index: %d\n", dist, j);
//printf("In outer if statement, OLD: %e, %d\n", dist_min, min_index);
if((dist<dist_min))
{
//printf("In innermost if statement, OLD: %e, %d\n", dist_min, min_index);
dist_min=dist; //save new minimum distance
min_index=j; //save index
//printf("New Min dist: %e, New min Index: %d\n", dist_min, min_index);
}
}
}
//save values
/*
*(n_dens_lab+i)= (*(dens_lab+min_index));
*(n_vx+i)= (*(velx+min_index));
*(n_vy+i)= (*(vely+min_index));
*(n_temp+i)= (*(temp+min_index));
*/
(n_dens_lab_tmp)= (*(dens_lab+min_index));
(n_vx_tmp)= (*(velx+min_index));
(n_vy_tmp)= (*(vely+min_index));
(n_temp_tmp)= (*(temp+min_index));
fl_v_x=(*(velx+min_index))*cos(ph_phi);
fl_v_y=(*(velx+min_index))*sin(ph_phi);
fl_v_z=(*(vely+min_index));
fl_v_norm=pow(pow(fl_v_x, 2.0)+pow(fl_v_y, 2.0)+pow(fl_v_z, 2.0), 0.5);
ph_v_norm=pow(pow(((ph+i)->p1), 2.0)+pow(((ph+i)->p2), 2.0)+pow(((ph+i)->p3), 2.0), 0.5);
//(*(n_cosangle+i))=((fl_v_x* ((ph+i)->p1))+(fl_v_y* ((ph+i)->p2))+(fl_v_z* ((ph+i)->p3)))/(fl_v_norm*ph_v_norm ); //find cosine of the angle between the photon and the fluid velocities via a dot product
(n_cosangle)=((fl_v_x* ((ph+i)->p1))+(fl_v_y* ((ph+i)->p2))+(fl_v_z* ((ph+i)->p3)))/(fl_v_norm*ph_v_norm ); //make 1 for cylindrical otherwise its undefined
beta=pow((pow((n_vx_tmp),2)+pow((n_vy_tmp),2)),0.5);
//put this in to double check that random number is between 0 and 1 (exclusive) because there was a problem with this for parallel case
rnd_tracker=0;
//while ((rnd_tracker<=0) || (rnd_tracker>=1))
//{
rnd_tracker=gsl_rng_uniform_pos(rng[omp_get_thread_num()]);
//printf("Rnd_tracker: %e Thread number %d \n",rnd_tracker, omp_get_thread_num() );
//}
mfp=(-1)*(M_P/((n_dens_lab_tmp))/THOMP_X_SECT/(1.0-beta*((n_cosangle))))*log(rnd_tracker) ; //calculate the mfp and then multiply it by the ln of a random number to simulate the distribution of mean free paths
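/* exponential free-path sampling: with lambda = M_P/(rho_lab*sigma_T*(1 - beta*cos(angle)))
   as the angle-corrected mean free path, l = -lambda*ln(xi) for xi uniform in (0,1) draws
   the path length from the expected exponential distribution of free paths */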
//if (mfp<0)
//{
//printf("\nThread: %d Photon: %d mfp: %e cos_angle: %e beta: %e dens_lab: %e rnd_tracker: %e\n\n",omp_get_thread_num(), i, mfp, n_cosangle , beta,n_dens_lab_tmp, rnd_tracker );
//}
#pragma omp critical
if ( mfp<min_mfp)
{
min_mfp=mfp;
n_dens_lab_min= n_dens_lab_tmp;
n_vx_min= n_vx_tmp;
n_vy_min= n_vy_tmp;
n_temp_min= n_temp_tmp;
index=i;
//printf("Thread is %d. new min: %e for photon %d with block properties: %e, %e, %e\n", omp_get_thread_num(), mfp, index, n_vx_tmp, n_vy_tmp, n_temp_tmp);
#pragma omp flush(min_mfp)
}
}
//free rand number generator
for (i=1;i<num_threads;i++)
{
gsl_rng_free(rng[i]);
}
free(rng);
*(n_dens_lab)= n_dens_lab_min;
*(n_vx)= n_vx_min;
*(n_vy)= n_vy_min;
*(n_temp)= n_temp_min;
(*time_step)=min_mfp/C_LIGHT;
return index;
}
void updatePhotonPosition(struct photon *ph, int num_ph, double t)
{
//move photons by speed of light
int i=0;
double old_position=0, new_position=0;
for (i=0;i<num_ph;i++)
{
old_position= pow( pow((ph+i)->r0,2)+pow((ph+i)->r1,2)+pow((ph+i)->r2,2), 0.5 ); //position of photon i before the move
((ph+i)->r0)+=(((ph+i)->p1)/((ph+i)->p0))*C_LIGHT*t; //update x position
((ph+i)->r1)+=(((ph+i)->p2)/((ph+i)->p0))*C_LIGHT*t;//update y
((ph+i)->r2)+=(((ph+i)->p3)/((ph+i)->p0))*C_LIGHT*t;//update z
new_position= pow( pow((ph+i)->r0,2)+pow((ph+i)->r1,2)+pow((ph+i)->r2,2), 0.5 ); //position of photon i after the move, so the superluminal check below applies to each photon
if ((new_position-old_position)/t > C_LIGHT)
{
printf("PHOTON NUMBER %d IS SUPERLUMINAL. ITS SPEED IS %e c.\n", i, ((new_position-old_position)/t)/C_LIGHT);
}
//printf("In update function: %e, %e, %e, %e, %e, %e, %e\n",((ph+i)->r0), ((ph+i)->r1), ((ph+i)->r2), t, ((ph+i)->p1)/((ph+i)->p0), ((ph+i)->p2)/((ph+i)->p0), ((ph+i)->p3)/((ph+i)->p0) );
}
//printf("In update function: %e, %e, %e, %e\n",t, ((ph)->p1)/((ph)->p0), ((ph)->p2)/((ph)->p0), ((ph)->p3)/((ph)->p0) );
}
void photonScatter(struct photon *ph, double flash_vx, double flash_vy, double fluid_temp, gsl_rng * rand)
{
//function to perform single photon scattering
double ph_phi=0;
double *ph_p=malloc(4*sizeof(double)); //pointer to hold only photon 4 momentum @ start
double *el_p_comov=malloc(4*sizeof(double));//pointer to hold the electron 4 momenta in comoving frame
double *ph_p_comov=malloc(4*sizeof(double));//pointer to hold the comoving photon 4 momenta
double *fluid_beta=malloc(3*sizeof(double));//pointer to hold fluid velocity vector
double *negative_fluid_beta=malloc(3*sizeof(double));//pointer to hold negative fluid velocity vector
/*
printf("%p\n", ph_p);
ph_p=calloc(4*sizeof(double),0);
printf("%p\n", ph_p);
el_p_comov=calloc(4*sizeof(double),0);
printf("%p\n", el_p_comov);
ph_p_comov=calloc(4*sizeof(double),0);
printf("%p\n", ph_p_comov);
fluid_beta=calloc(3*sizeof(double),0);
printf("%p\n", fluid_beta);
negative_fluid_beta=calloc(3*sizeof(double),0);
printf("Done calling calloc\n");
*/
ph_phi=atan2((ph->r1), ((ph->r0)));
//printf("ph_phi=%e\n", ph_phi);
//convert flash coordinates into MCRaT coordinates
//printf("Getting fluid_beta\n");
(*(fluid_beta+0))=flash_vx*cos(ph_phi);
(*(fluid_beta+1))=flash_vx*sin(ph_phi);
(*(fluid_beta+2))=flash_vy;
//fill in photon 4 momentum
//printf("filling in 4 momentum in photonScatter\n");
*(ph_p+0)=(ph->p0);
*(ph_p+1)=(ph->p1);
*(ph_p+2)=(ph->p2);
*(ph_p+3)=(ph->p3);
/*
printf("Unscattered Photon in Lab frame: %e, %e, %e,%e\n", *(ph_p+0), *(ph_p+1), *(ph_p+2), *(ph_p+3));
printf("Fluid Beta: %e, %e, %e\n", *(fluid_beta+0),*(fluid_beta+1), *(fluid_beta+2));
*/
//first we bring the photon to the fluid's comoving frame
lorentzBoost(fluid_beta, ph_p, ph_p_comov, 'p');
//printf("Old: %e, %e, %e,%e\n", ph->p0, ph->p1, ph->p2, ph->p3);
/*
printf("Before Scattering, In Comov_frame:\n");
printf("ph_comov: %e, %e, %e,%e\n", *(ph_p_comov+0), *(ph_p_comov+1), *(ph_p_comov+2), *(ph_p_comov+3));
*/
//second we generate a thermal electron at the correct temperature
singleElectron(el_p_comov, fluid_temp, ph_p_comov, rand);
//printf("el_comov: %e, %e, %e,%e\n", *(el_p_comov+0), *(el_p_comov+1), *(el_p_comov+2), *(el_p_comov+3));
//third we perform the scattering and save the scattered photon 4 momentum in ph_p_comov @ end of function
singleComptonScatter(el_p_comov, ph_p_comov, rand);
//printf("After Scattering, After Lorentz Boost to Comov frame: %e, %e, %e,%e\n", *(ph_p_comov+0), *(ph_p_comov+1), *(ph_p_comov+2), *(ph_p_comov+3));
//fourth we bring the photon back to the lab frame
*(negative_fluid_beta+0)=-1*( *(fluid_beta+0));
*(negative_fluid_beta+1)=-1*( *(fluid_beta+1));
*(negative_fluid_beta+2)=-1*( *(fluid_beta+2));
lorentzBoost(negative_fluid_beta, ph_p_comov, ph_p, 'p');
//printf("Scattered Photon in Lab frame: %e, %e, %e,%e\n", *(ph_p+0), *(ph_p+1), *(ph_p+2), *(ph_p+3));
//printf("Old: %e, %e, %e,%e\n", ph->p0, ph->p1, ph->p2, ph->p3);
//printf("Old: %e, %e, %e,%e\n", *(ph_p_comov+0), *(ph_p_comov+1), *(ph_p_comov+2), *(ph_p_comov+3));
//assign the photon its new lab 4 momentum
(ph->p0)=(*(ph_p+0));
(ph->p1)=(*(ph_p+1));
(ph->p2)=(*(ph_p+2));
(ph->p3)=(*(ph_p+3));
//printf("Done assigning values to original struct\n");
free(el_p_comov);
//printf("done here\n");
free(ph_p_comov);
//printf("done here\n");
free(fluid_beta); // ?maybe not? getting an error checksum for freed object - object was probably modified after being freed.
//printf("done here\n");
free(negative_fluid_beta);
//printf("done here\n");
free(ph_p);
//printf("done here\n");
ph_p=NULL;negative_fluid_beta=NULL;ph_p_comov=NULL; el_p_comov=NULL;
}
void singleElectron(double *el_p, double temp, double *ph_p, gsl_rng * rand)
{
//generates an electron with random energy
double factor=0, gamma=0;
double y_dum=0, f_x_dum=0, x_dum=0, beta_x_dum=0, beta=0, phi=0, theta=0, ph_theta=0, ph_phi=0;
gsl_matrix *rot= gsl_matrix_calloc (3, 3); //create a 3x3 matrix to do the rotation
gsl_vector_view el_p_prime ; //create vector to hold rotated electron 4 momentum
gsl_vector *result=gsl_vector_alloc (3);
//printf("Temp in singleElectron: %e\n", temp);
if (temp>= 1e7)
{
//printf("In if\n");
factor=K_B*temp/(M_EL*pow(C_LIGHT,2.0));
y_dum=1; //initialize loop to get a random gamma from the distribution of electron velocities
f_x_dum=0;
while ((isnan(f_x_dum) !=0) || (y_dum>f_x_dum) )
{
x_dum=gsl_rng_uniform_pos(rand)*(1+100*factor);
beta_x_dum=pow(1-(pow(x_dum, -2.0)) ,0.5);
y_dum=gsl_rng_uniform(rand)/2.0;
f_x_dum=pow(x_dum,2)*(beta_x_dum/gsl_sf_bessel_Kn (2, 1.0/factor))*exp(-1*x_dum/factor); //not sure if this is right; it can give small values of gamma -> beta=nan
//printf("Choosing a Gamma: xdum: %e, f_x_dum: %e, y_dum: %e\n", x_dum, f_x_dum, y_dum);
}
gamma=x_dum;
}
else
{
//printf("In else\n");
factor=pow(K_B*temp/M_EL,0.5);
//calculate a random gamma from 3 random velocities drawn from a gaussian distribution with std deviation of "factor"
gamma=pow( 1- (pow(gsl_ran_gaussian(rand, factor)/C_LIGHT, 2)+ pow(gsl_ran_gaussian(rand, factor)/C_LIGHT, 2)+pow(gsl_ran_gaussian(rand, factor)/C_LIGHT, 2) ) ,-0.5);
}
//printf("Chosen Gamma: %e\n",gamma);
beta=pow( 1- (1/pow( gamma,2.0 )) ,0.5);
//printf("Beta is: %e in singleElectron\n", beta);
phi=gsl_rng_uniform(rand)*2*M_PI;
y_dum=1; //initialize loop to get a random theta
f_x_dum=0;
while (y_dum>f_x_dum)
{
y_dum=gsl_rng_uniform(rand)*1.3;
x_dum=gsl_rng_uniform(rand)*M_PI;
f_x_dum=sin(x_dum)*(1-(beta*cos(x_dum)));
}
theta=x_dum;
//printf("Beta: %e\tPhi: %e\tTheta: %e\n",beta,phi, theta);
//fill in electron 4 momentum; NOTE: not sure why the ordering is as such, it seems to be E/c, pz, py, px
*(el_p+0)=gamma*(M_EL)*(C_LIGHT);
*(el_p+1)=gamma*(M_EL)*(C_LIGHT)*beta*cos(theta);
*(el_p+2)=gamma*(M_EL)*(C_LIGHT)*beta*sin(theta)*sin(phi);
*(el_p+3)=gamma*(M_EL)*(C_LIGHT)*beta*sin(theta)*cos(phi);
//printf("Old: %e, %e, %e,%e\n", *(el_p+0), *(el_p+1), *(el_p+2), *(el_p+3));
el_p_prime=gsl_vector_view_array((el_p+1), 3);
//find angles of photon; NOTE: not sure why the reference frame changes here
ph_phi=atan2(*(ph_p+2), *(ph_p+3)); //Double Check
ph_theta=atan2(pow( pow(*(ph_p+2),2)+ pow(*(ph_p+3),2) , 0.5) , (*(ph_p+1)) );
//printf("Calculated Photon phi and theta in singleElectron:%e, %e\n", ph_phi, ph_theta);
//fill in rotation matrix to rotate around x axis to get rid of phi angle
gsl_matrix_set(rot, 1,1,1);
gsl_matrix_set(rot, 2,2,cos(ph_theta));
gsl_matrix_set(rot, 0,0,cos(ph_theta));
gsl_matrix_set(rot, 0,2,-sin(ph_theta));
gsl_matrix_set(rot, 2,0,sin(ph_theta));
gsl_blas_dgemv(CblasNoTrans, 1, rot, &el_p_prime.vector, 0, result);
/*
printf("Rotation Matrix 0: %e,%e, %e\n", gsl_matrix_get(rot, 0,0), gsl_matrix_get(rot, 0,1), gsl_matrix_get(rot, 0,2));
printf("Rotation Matrix 1: %e,%e, %e\n", gsl_matrix_get(rot, 1,0), gsl_matrix_get(rot, 1,1), gsl_matrix_get(rot, 1,2));
printf("Rotation Matrix 2: %e,%e, %e\n", gsl_matrix_get(rot, 2,0), gsl_matrix_get(rot, 2,1), gsl_matrix_get(rot, 2,2));
printf("Middle: %e, %e, %e,%e\n", *(el_p+0), gsl_vector_get(result,0), gsl_vector_get(result,1), gsl_vector_get(result,2));
*/
gsl_matrix_set_all(rot,0);
gsl_matrix_set(rot, 0,0,1);
gsl_matrix_set(rot, 1,1,cos(-ph_phi));
gsl_matrix_set(rot, 2,2,cos(-ph_phi));
gsl_matrix_set(rot, 1,2,-sin(-ph_phi));
gsl_matrix_set(rot, 2,1,sin(-ph_phi));
gsl_blas_dgemv(CblasNoTrans, 1, rot, result, 0, &el_p_prime.vector);
/*
printf("Rotation Matrix 0: %e,%e, %e\n", gsl_matrix_get(rot, 0,0), gsl_matrix_get(rot, 0,1), gsl_matrix_get(rot, 0,2));
printf("Rotation Matrix 1: %e,%e, %e\n", gsl_matrix_get(rot, 1,0), gsl_matrix_get(rot, 1,1), gsl_matrix_get(rot, 1,2));
printf("Rotation Matrix 2: %e,%e, %e\n", gsl_matrix_get(rot, 2,0), gsl_matrix_get(rot, 2,1), gsl_matrix_get(rot, 2,2));
printf("Final EL_P_vec: %e, %e, %e,%e\n", *(el_p+0), gsl_vector_get(&el_p_prime.vector,0), gsl_vector_get(&el_p_prime.vector,1), gsl_vector_get(&el_p_prime.vector,2));
*/
//gsl_rng_free (rand);
//printf("freeing pointers in singleElectron\n");
gsl_matrix_free (rot);gsl_vector_free(result);
//printf("Done freeing pointers in singleElectron\n");
}
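/*
 * Illustrative, standalone sketch (not part of MCRaT): the accept/reject loop above
 * draws the electron Lorentz factor from a relativistic Maxwell-Juttner distribution
 * when temp >= 1e7 K. The helper below mirrors that loop, with the candidate
 * restricted to gamma >= 1 so beta stays real; K_B_CGS, M_EL_CGS and C_CGS are local
 * stand-ins for the project's K_B, M_EL and C_LIGHT macros.
 * Compile separately with: gcc sketch.c -lgsl -lgslcblas -lm
 */
#include <stdio.h>
#include <math.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_sf_bessel.h>
static const double K_B_CGS = 1.380649e-16; /* Boltzmann constant, erg/K */
static const double M_EL_CGS = 9.1093837e-28; /* electron mass, g */
static const double C_CGS = 2.99792458e10; /* speed of light, cm/s */
static double sample_gamma(double temp, gsl_rng *rand)
{
double theta = K_B_CGS*temp/(M_EL_CGS*C_CGS*C_CGS); /* dimensionless temperature kT/(m_e c^2) */
double x, beta, f, y;
do
{
x = 1 + gsl_rng_uniform_pos(rand)*100*theta; /* candidate gamma in (1, 1+100*theta) */
beta = sqrt(1 - 1/(x*x));
y = gsl_rng_uniform(rand)/2.0; /* same envelope (0.5) used in singleElectron */
f = x*x*beta*exp(-x/theta)/gsl_sf_bessel_Kn(2, 1.0/theta);
} while (isnan(f) || (y > f));
return x;
}
int main(void)
{
gsl_rng *rand = gsl_rng_alloc(gsl_rng_mt19937);
int i=0;
for (i=0; i<5; i++)
{
printf("gamma = %e\n", sample_gamma(1e9, rand)); /* 1e9 K is comfortably above the 1e7 K threshold */
}
gsl_rng_free(rand);
return 0;
}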
void singleComptonScatter(double *el_comov, double *ph_comov, gsl_rng * rand)
{
//This routine performs a Compton scattering between a photon and a moving electron.
int i=0;
double *el_v=malloc(3*sizeof(double));
double *negative_el_v=malloc(3*sizeof(double));
double *ph_p_prime=malloc(4*sizeof(double));//use this to keep track of how the ph 4 momentum changes with each rotation
double *el_p_prime=malloc(4*sizeof(double));
double phi0=0, phi1=0, phi=0, theta=0;
double y_dum, f_x_dum, x_dum;
gsl_matrix *rot0= gsl_matrix_calloc (3, 3); //create 3x3 matrices to do the rotations
gsl_matrix *rot1= gsl_matrix_calloc (3, 3);
gsl_vector *result0=gsl_vector_alloc (3); //vectors to hold results of rotations
gsl_vector *result1=gsl_vector_alloc (3);
gsl_vector *result=gsl_vector_alloc (4);
gsl_vector *whole_ph_p=gsl_vector_alloc (4);
gsl_vector_view ph_p ; //create vector to hold comoving photon and electron 4 momentum
gsl_vector_view el_p ;
//fill in electron velocity array and photon 4 momentum
*(el_v+0)=(*(el_comov+1))/(*(el_comov+0));
*(el_v+1)=(*(el_comov+2))/(*(el_comov+0));
*(el_v+2)=(*(el_comov+3))/(*(el_comov+0));
//printf("el_v: %e, %e, %e\n", *(el_v+0), *(el_v+1), *(el_v+2));
//lorentz boost into frame where the electron is stationary
lorentzBoost(el_v, el_comov, el_p_prime, 'e');
lorentzBoost(el_v, ph_comov, ph_p_prime, 'p');
//printf("New ph_p in electron rest frame: %e, %e, %e,%e\n", *(ph_p_prime+0), *(ph_p_prime+1), *(ph_p_prime+2), *(ph_p_prime+3));
ph_p=gsl_vector_view_array((ph_p_prime+1), 3);
el_p=gsl_vector_view_array(el_p_prime,4);
phi0=atan2(*(ph_p_prime+2), *(ph_p_prime+1) );
//printf("Photon Phi: %e\n", phi0);
//rotate the axes so that the photon comes in along the x-axis
gsl_matrix_set(rot0, 2,2,1);
gsl_matrix_set(rot0, 0,0,cos(-phi0));
gsl_matrix_set(rot0, 1,1,cos(-phi0));
gsl_matrix_set(rot0, 0,1,-sin(-phi0));
gsl_matrix_set(rot0, 1,0,sin(-phi0));
gsl_blas_dgemv(CblasNoTrans, 1, rot0, &ph_p.vector, 0, result0);
/*
printf("Rotation Matrix 0: %e,%e, %e\n", gsl_matrix_get(rot0, 0,0), gsl_matrix_get(rot0, 0,1), gsl_matrix_get(rot0, 0,2));
printf("Rotation Matrix 1: %e,%e, %e\n", gsl_matrix_get(rot0, 1,0), gsl_matrix_get(rot0, 1,1), gsl_matrix_get(rot0, 1,2));
printf("Rotation Matrix 2: %e,%e, %e\n", gsl_matrix_get(rot0, 2,0), gsl_matrix_get(rot0, 2,1), gsl_matrix_get(rot0, 2,2));
*/
//set values of ph_p_prime equal to the result and get new phi from result
*(ph_p_prime+1)=gsl_vector_get(result0,0);
*(ph_p_prime+2)=0;//gsl_vector_get(result,1); //just directly setting it to 0 now?
*(ph_p_prime+3)=gsl_vector_get(result0,2);
phi1=atan2(gsl_vector_get(result0,2), gsl_vector_get(result0,0));
/*
printf("rotation 1: %e, %e, %e\n", *(ph_p_prime+1), *(ph_p_prime+2), *(ph_p_prime+3));
printf("Photon Phi: %e\n", phi1);
printf("make sure the vector view is good: %e, %e, %e,%e\n", *(ph_p_prime+0), gsl_vector_get(&ph_p.vector,0), gsl_vector_get(&ph_p.vector,1), gsl_vector_get(&ph_p.vector,2));
*/
//rotate around y to bring it all along x
gsl_matrix_set(rot1, 1,1,1);
gsl_matrix_set(rot1, 0,0,cos(-phi1));
gsl_matrix_set(rot1, 2,2,cos(-phi1));
gsl_matrix_set(rot1, 0,2,-sin(-phi1));
gsl_matrix_set(rot1, 2,0,sin(-phi1));
gsl_blas_dgemv(CblasNoTrans, 1, rot1, &ph_p.vector, 0, result1);
/*
printf("Rotation Matrix 0: %e,%e, %e\n", gsl_matrix_get(rot1, 0,0), gsl_matrix_get(rot1, 0,1), gsl_matrix_get(rot1, 0,2));
printf("Rotation Matrix 1: %e,%e, %e\n", gsl_matrix_get(rot1, 1,0), gsl_matrix_get(rot1, 1,1), gsl_matrix_get(rot1, 1,2));
printf("Rotation Matrix 2: %e,%e, %e\n", gsl_matrix_get(rot1, 2,0), gsl_matrix_get(rot1, 2,1), gsl_matrix_get(rot1, 2,2));
*/
//set values of ph_p_prime equal to the result and get new phi from result
*(ph_p_prime+1)=*(ph_p_prime+0);//for a photon |p| = p0 (= E/c), so after aligning with the x-axis p1 equals p0
*(ph_p_prime+2)=gsl_vector_get(result1,1);
*(ph_p_prime+3)=0; //just directly setting it to 0 now?
//printf("rotation 2: %e, %e, %e, %e\n", *(ph_p_prime+0), *(ph_p_prime+1), *(ph_p_prime+2), *(ph_p_prime+3));
//generate random theta and phi angles for scattering
phi=gsl_rng_uniform(rand)*2*M_PI;
//printf("Phi: %e\n", phi);
y_dum=1; //initialize loop to get a random theta
f_x_dum=0;
while (y_dum>f_x_dum)
{
y_dum=gsl_rng_uniform(rand)*1.09;
x_dum=gsl_rng_uniform(rand)*M_PI;
f_x_dum=sin(x_dum)*(1+pow(cos(x_dum),2));
}
theta=x_dum;
//printf("Theta: %e\n", theta);
//perform scattering and compute new 4-momenta of electron and photon
//scattered photon 4 momentum
gsl_vector_set(result, 0, (*(ph_p_prime+0))/(1+ (( (*(ph_p_prime+0))*(1-cos(theta)) )/(M_EL*C_LIGHT )) ) ); //Compton formula E' = E/(1 + (E/(m_e c^2))*(1-cos(theta))); p0 stores E/c, so dividing by M_EL*C_LIGHT gives E/(m_e c^2)
gsl_vector_set(result, 1, gsl_vector_get(result,0)*cos(theta) );
gsl_vector_set(result, 2, gsl_vector_get(result,0)*sin(theta)*sin(phi) );
gsl_vector_set(result, 3, gsl_vector_get(result,0)*sin(theta)*cos(phi) );
//printf("%e\n", gsl_vector_get(result,0));
//calculate electron 4 momentum via 4-momentum conservation (the for loop was replaced with explicit sets below)
//prescattered photon 4 momentum
gsl_vector_set(whole_ph_p, 0, (*(ph_p_prime+0)));
gsl_vector_set(whole_ph_p, 1, (*(ph_p_prime+1)));
gsl_vector_set(whole_ph_p, 2, (*(ph_p_prime+2)));
gsl_vector_set(whole_ph_p, 3, (*(ph_p_prime+3)));
/*
for (i=0;i<4;i++)
{
gsl_vector_set(whole_ph_p, i, (*(ph_p_prime+i)));
}
*/
gsl_vector_sub(whole_ph_p,result); //result is saved into whole_ph_p: unscattered minus scattered 4 momentum of the photon
gsl_vector_add(&el_p.vector ,whole_ph_p);
/*
printf("After scattering:\n");
printf("el_p: %e, %e, %e,%e\n", gsl_vector_get(&el_p.vector,0), gsl_vector_get(&el_p.vector,1), gsl_vector_get(&el_p.vector,2), gsl_vector_get(&el_p.vector,3));
printf("ph_p: %e, %e, %e,%e\n", gsl_vector_get(result,0), gsl_vector_get(result,1), gsl_vector_get(result,2), gsl_vector_get(result,3));
*/
//rotate back to comoving frame
*(ph_p_prime+0)=gsl_vector_get(result,0);
*(ph_p_prime+1)=gsl_vector_get(result,1); //set values of photon prime momentum from doing the scattering to use the vector view of it in dot product
*(ph_p_prime+2)=gsl_vector_get(result,2);
*(ph_p_prime+3)=gsl_vector_get(result,3);
gsl_matrix_set_all(rot1,0);
gsl_matrix_set(rot1, 1,1,1);
gsl_matrix_set(rot1, 0,0,cos(-phi1));
gsl_matrix_set(rot1, 2,2,cos(-phi1));
gsl_matrix_set(rot1, 0,2,sin(-phi1));
gsl_matrix_set(rot1, 2,0,-sin(-phi1));
gsl_blas_dgemv(CblasNoTrans, 1, rot1, &ph_p.vector, 0, result1);
/*
printf("Photon Phi: %e\n", phi1);
printf("Rotation Matrix 0: %e,%e, %e\n", gsl_matrix_get(rot1, 0,0), gsl_matrix_get(rot1, 0,1), gsl_matrix_get(rot1, 0,2));
printf("Rotation Matrix 1: %e,%e, %e\n", gsl_matrix_get(rot1, 1,0), gsl_matrix_get(rot1, 1,1), gsl_matrix_get(rot1, 1,2));
printf("Rotation Matrix 2: %e,%e, %e\n", gsl_matrix_get(rot1, 2,0), gsl_matrix_get(rot1, 2,1), gsl_matrix_get(rot1, 2,2));
*/
//set values of ph_p_prime to result1 from undoing 2nd rotation
*(ph_p_prime+1)=gsl_vector_get(result1,0);
*(ph_p_prime+2)=gsl_vector_get(result1,1);
*(ph_p_prime+3)=gsl_vector_get(result1,2);
//printf("Undo rotation 2: %e, %e, %e, %e\n", *(ph_p_prime+0), *(ph_p_prime+1), *(ph_p_prime+2), *(ph_p_prime+3));
//ignore the electron, we don't care about it; undo the first rotation
gsl_matrix_set_all(rot0,0);
gsl_matrix_set(rot0, 2,2,1);
gsl_matrix_set(rot0, 0,0,cos(-phi0));
gsl_matrix_set(rot0, 1,1,cos(-phi0));
gsl_matrix_set(rot0, 0,1,sin(-phi0));
gsl_matrix_set(rot0, 1,0,-sin(-phi0));
gsl_blas_dgemv(CblasNoTrans, 1, rot0, &ph_p.vector, 0, result0);
/*
printf("Photon Phi: %e\n", phi0);
printf("Rotation Matrix 0: %e,%e, %e\n", gsl_matrix_get(rot0, 0,0), gsl_matrix_get(rot0, 0,1), gsl_matrix_get(rot0, 0,2));
printf("Rotation Matrix 1: %e,%e, %e\n", gsl_matrix_get(rot0, 1,0), gsl_matrix_get(rot0, 1,1), gsl_matrix_get(rot0, 1,2));
printf("Rotation Matrix 2: %e,%e, %e\n", gsl_matrix_get(rot0, 2,0), gsl_matrix_get(rot0, 2,1), gsl_matrix_get(rot0, 2,2));
*/
*(ph_p_prime+1)=gsl_vector_get(result0,0);
*(ph_p_prime+2)=gsl_vector_get(result0,1);
*(ph_p_prime+3)=gsl_vector_get(result0,2);
//printf("Undo rotation 1: %e, %e, %e, %e\n", *(ph_p_prime+0), *(ph_p_prime+1), *(ph_p_prime+2), *(ph_p_prime+3));
//deboost photon to lab frame
*(negative_el_v+0)=(-1*(*(el_v+0)));
*(negative_el_v+1)=(-1*(*(el_v+1)));
*(negative_el_v+2)=(-1*(*(el_v+2)));
lorentzBoost(negative_el_v, ph_p_prime, ph_comov, 'p');
//printf("Undo boost 1: %e, %e, %e, %e\n", *(ph_comov+0), *(ph_comov+1), *(ph_comov+2), *(ph_comov+3));
gsl_matrix_free(rot0); gsl_matrix_free(rot1);gsl_vector_free(result0);gsl_vector_free(result1);gsl_vector_free(result);
//gsl_rng_free (rand);
gsl_vector_free(whole_ph_p);free(ph_p_prime);free(el_p_prime);free(el_v); free(negative_el_v);
}
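/*
 * Illustrative, standalone sketch (not part of MCRaT): a quick numerical check of the
 * Compton energy-shift formula used above, E' = E/(1 + (E/(m_e c^2))*(1 - cos(theta))).
 * Energies are in keV here purely for readability; in singleComptonScatter p0 stores E/c,
 * so dividing by M_EL*C_LIGHT plays the role of dividing the energy by m_e c^2.
 */
#include <stdio.h>
#include <math.h>
int main(void)
{
double m_e_c2=511.0; /* electron rest energy in keV */
double E=100.0; /* incoming photon energy in keV */
double theta=0;
for (theta=0; theta<=M_PI; theta+=M_PI/4.0)
{
printf("theta = %5.2f rad E' = %7.3f keV\n", theta, E/(1 + (E/m_e_c2)*(1 - cos(theta))));
}
return 0;
}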
double averagePhotonEnergy(struct photon *ph, int num_ph)
{
//to calculate average photon energy
int i=0;
double sum=0;
for (i=0;i<num_ph;i++)
{
sum+=((ph+i)->p0);
}
return (sum*C_LIGHT)/num_ph;
}
void phScattStats(struct photon *ph, int ph_num, int *max, int *min, double *avg )
{
int temp_max=0, temp_min=-1, i=0;
double sum=0;
for (i=0;i<ph_num;i++)
{
sum+=((ph+i)->num_scatt);
if (((ph+i)->num_scatt) > temp_max )
{
temp_max=((ph+i)->num_scatt);
//printf("The new max is: %d\n", temp_max);
}
if ((i==0) || (((ph+i)->num_scatt)<temp_min))
{
temp_min=((ph+i)->num_scatt);
//printf("The new min is: %d\n", temp_min);
}
}
*avg=sum/ph_num;
*max=temp_max;
*min=temp_min;
}
void cylindricalPrep(double *gamma, double *vx, double *vy, double *dens, double *dens_lab, double *pres, double *temp, int num_array)
{
double gamma_infinity=1, t_comov=1*pow(10, 7), ddensity=3e-9;// the asymptotic Lorentz factor, the comoving temperature in Kelvin, and the comoving density in g/cm^3
int i=0;
double vel=pow(1-pow(gamma_infinity, -2.0) ,0.5), lab_dens=gamma_infinity*ddensity;
for (i=0; i<num_array;i++)
{
*(gamma+i)=gamma_infinity;
*(vx+i)=0;
*(vy+i)=vel;
*(dens+i)=ddensity;
*(dens_lab+i)=lab_dens;
*(pres+i)=(A_RAD*pow(t_comov, 4.0))/(3*pow(C_LIGHT, 2.0));
*(temp+i)=pow(3*(*(pres+i))*pow(C_LIGHT,2.0)/(A_RAD) ,1.0/4.0); //this just recovers t_comov from the pressure
}
}
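/*
 * Illustrative, standalone sketch (not part of MCRaT): cylindricalPrep() sets
 * pres = A_RAD*T^4/(3*c^2) and then inverts it as T = (3*pres*c^2/A_RAD)^(1/4),
 * so the recovered temperature should equal t_comov. A_RAD_CGS and C_CGS are
 * local stand-ins for the project's A_RAD and C_LIGHT macros.
 */
#include <stdio.h>
#include <math.h>
int main(void)
{
double A_RAD_CGS=7.5657e-15; /* radiation constant, erg cm^-3 K^-4 */
double C_CGS=2.99792458e10; /* speed of light, cm/s */
double t_comov=1e7; /* K, same value used in cylindricalPrep */
double pres=(A_RAD_CGS*pow(t_comov, 4.0))/(3*pow(C_CGS, 2.0));
double temp=pow(3*pres*pow(C_CGS,2.0)/A_RAD_CGS, 1.0/4.0);
printf("t_comov = %e K, recovered temp = %e K\n", t_comov, temp);
return 0;
}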
void sphericalPrep(double *r, double *x, double *y, double *gamma, double *vx, double *vy, double *dens, double *dens_lab, double *pres, double *temp, int num_array)
{
double gamma_infinity=100, lumi=1e52, r00=1e8;
double vel=0;
int i=0;
for (i=0;i<num_array;i++)
{
if ((*(r+i)) >= (r00*gamma_infinity))
{
*(gamma+i)=gamma_infinity;
*(pres+i)=(lumi*pow(r00, 2.0/3.0)*pow(*(r+i), -8.0/3.0) )/(12.0*M_PI*C_LIGHT*pow(gamma_infinity, 4.0/3.0)*pow(C_LIGHT, 2.0));
}
else
{
*(gamma+i)=(*(r+i))/r00;
*(pres+i)=(lumi*pow(r00, 2.0))/(12.0*M_PI*C_LIGHT*pow(C_LIGHT, 2.0)*pow(*(r+i), 4.0) );
}
vel=pow(1-pow(*(gamma+i), -2.0) ,0.5);
*(vx+i)=(vel*(*(x+i)))/pow(pow(*(x+i), 2)+ pow(*(y+i), 2) ,0.5);
*(vy+i)=(vel*(*(y+i)))/pow(pow(*(x+i), 2)+ pow(*(y+i), 2) ,0.5);
*(dens+i)=lumi/(4*M_PI*pow(*(r+i), 2.0)*pow(C_LIGHT, 3.0)*gamma_infinity*(*(gamma+i)));
*(dens_lab+i)=*(dens+i)*(*(gamma+i));
}
}
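/*
 * Illustrative, standalone sketch (not part of MCRaT): the Lorentz factor profile
 * assumed by sphericalPrep() -- gamma grows as r/r00 during acceleration and
 * saturates at gamma_infinity once r >= r00*gamma_infinity.
 */
#include <stdio.h>
static double fireball_gamma(double r, double r00, double gamma_infinity)
{
if (r >= r00*gamma_infinity)
{
return gamma_infinity; /* coasting phase */
}
return r/r00; /* acceleration phase */
}
int main(void)
{
double r00=1e8, gamma_inf=100; /* values used in sphericalPrep */
double radii[4]={1e8, 1e9, 1e10, 1e11};
int i=0;
for (i=0; i<4; i++)
{
printf("r = %e cm -> gamma = %g\n", radii[i], fireball_gamma(radii[i], r00, gamma_inf));
}
return 0;
}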
|
softmax.c | /*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include <stdlib.h>
#include "MKLDNN.h"
#include "omp.h"
#include <math.h>
// Numerically stable softmax along the last dimension for an N x len buffer in
// NCHW-style layout: subtract each row's max before exponentiating, then normalize.
void SoftmaxNCHW(unsigned long long input, int N, long long len)
{
float* inPtr = (float*)input;
#pragma omp parallel for
for (int i = 0; i < N; ++i)
{
float *pTemp = inPtr + i * len;
float pMax = pTemp[0];
for(long long j = 0; j < len; ++j)
{
if (pMax < pTemp[j])
{
pMax = pTemp[j];
}
}
float pSum = 0.0f;
for(long long j=0; j<len; ++j)
{
pTemp[j] = exp(pTemp[j] - pMax);
pSum += pTemp[j];
}
for(long long j=0; j < len; ++j)
{
pTemp[j] = pTemp[j] / pSum;
}
}
}
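/*
 * Illustrative usage sketch (not part of this file): driving SoftmaxNCHW on a tiny
 * 2 x 3 buffer. It assumes this snippet is compiled and linked together with
 * softmax.c (built with -fopenmp); the prototype simply repeats the signature above,
 * and the pointer is passed through an unsigned long long to match that interface.
 */
#include <stdio.h>
#include <stdint.h>
void SoftmaxNCHW(unsigned long long input, int N, long long len);
int main(void)
{
float data[2][3] = {{1.0f, 2.0f, 3.0f}, {0.0f, 0.0f, 10.0f}};
SoftmaxNCHW((unsigned long long)(uintptr_t)&data[0][0], 2, 3);
for (int i = 0; i < 2; ++i)
{
printf("row %d: %.4f %.4f %.4f\n", i, data[i][0], data[i][1], data[i][2]); /* each row sums to 1 */
}
return 0;
}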
// Softmax for CHWN-style layout: each of the N columns is strided by N through the
// buffer, so element j of column i lives at inPtr[i + j*N].
void SoftmaxCHWN(unsigned long long input, int N, long long len)
{
float* inPtr = (float*)input;
#pragma omp parallel for
for (int i = 0; i < N; ++i)
{
float *pTemp = inPtr + i;
float pMax = pTemp[0];
for (long long j = 0; j < len; ++j)
{
if ( pMax<pTemp[j*N]) pMax = pTemp[j*N];
}
float pSum = 0.0f;
for (long long j = 0; j < len; ++j)
{
pTemp[j*N] = exp(pTemp[j*N] - pMax);
pSum += pTemp[j*N];
}
for(long long j = 0; j < len; ++j)
{
pTemp[j*N] = pTemp[j*N] / pSum;
}
}
}
|
bli_dotv_bgq_int.c | /*
BLIS
An object-based framework for developing high-performance BLAS-like
libraries.
Copyright (C) 2014, The University of Texas at Austin
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of The University of Texas at Austin nor the names
of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "blis.h"
void bli_ddotv_bgq_int
(
conj_t conjx,
conj_t conjy,
dim_t n,
double* restrict x, inc_t incx,
double* restrict y, inc_t incy,
double* restrict rho,
cntx_t* restrict cntx
)
{
bool_t use_ref = FALSE;
// If the vector lengths are zero, set rho to zero and return.
if ( bli_zero_dim1( n ) ) {
PASTEMAC(d,set0s)( rho );
return;
}
// If there is anything that would interfere with our use of aligned
// vector loads/stores, call the reference implementation.
if ( incx != 1 || incy != 1 || bli_is_unaligned_to( ( siz_t )x, 32 ) || bli_is_unaligned_to( ( siz_t )y, 32 ) )
use_ref = TRUE;
// Call the reference implementation if needed.
if ( use_ref ) {
BLIS_DDOTV_KERNEL_REF( conjx, conjy, n, x, incx, y, incy, rho, cntx );
return;
}
dim_t n_run = n / 4;
dim_t n_left = n % 4;
double rhos = 0.0;
#pragma omp parallel reduction(+:rhos)
{
dim_t n_threads;
dim_t t_id = omp_get_thread_num();
n_threads = omp_get_num_threads();
vector4double rhov = vec_splats( 0.0 );
vector4double xv, yv;
for ( dim_t i = t_id; i < n_run; i += n_threads )
{
xv = vec_lda( 0 * sizeof(double), &x[i*4] );
yv = vec_lda( 0 * sizeof(double), &y[i*4] );
rhov = vec_madd( xv, yv, rhov );
}
rhos += vec_extract( rhov, 0 );
rhos += vec_extract( rhov, 1 );
rhos += vec_extract( rhov, 2 );
rhos += vec_extract( rhov, 3 );
}
for ( dim_t i = 0; i < n_left; i++ )
{
rhos += x[4*n_run + i] * y[4*n_run + i];
}
*rho = rhos;
}
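/*
 * Illustrative sketch (not part of BLIS): a plain-C reference for the kernel above,
 * using the same OpenMP reduction pattern but ordinary scalar arithmetic instead of
 * the QPX vec_lda/vec_madd intrinsics. Handy as a correctness check on machines
 * without Blue Gene/Q vector support. Compile with -fopenmp (the pragma is simply
 * ignored otherwise).
 */
#include <stdio.h>
static double ddotv_ref(long n, const double* restrict x, const double* restrict y)
{
double rho = 0.0;
#pragma omp parallel for reduction(+:rho)
for ( long i = 0; i < n; i++ )
{
rho += x[i] * y[i]; // same accumulation the vectorized kernel performs
}
return rho;
}
int main(void)
{
double x[5] = { 1, 2, 3, 4, 5 };
double y[5] = { 5, 4, 3, 2, 1 };
printf( "dot = %g\n", ddotv_ref( 5, x, y ) ); // expected: 35
return 0;
}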
|
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
// And has the following additional copyright:
//
// (C) Copyright 2016-2020 Xilinx, Inc.
// All Rights Reserved.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/Availability.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/LoopHint.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class VersionTuple;
class OMPClause;
class ObjCTypeParamList;
class ObjCTypeParameter;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This token is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and
/// "bool" fast comparison. Only present if AltiVec or ZVector are enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
mutable IdentifierInfo *Ident_instancetype;
/// \brief Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// \brief Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// \brief Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// \brief Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// \brief Identifier for "message".
IdentifierInfo *Ident_message;
/// \brief Identifier for "strict".
IdentifierInfo *Ident_strict;
/// \brief Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++0x contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> XlxHLSHandler;
std::unique_ptr<PragmaHandler> XlxhlsHandler;
std::unique_ptr<PragmaHandler> XlxAPHandler;
std::unique_ptr<PragmaHandler> XlxapHandler;
std::unique_ptr<PragmaHandler> XlxAUTOPILOTHandler;
std::unique_ptr<PragmaHandler> XlxautopilotHandler;
std::unique_ptr<PragmaHandler> ModelComposerHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> STDCFENVHandler;
std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
std::unique_ptr<PragmaHandler> STDCUnknownHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// \brief When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// \brief RAII class that manages the template parameter depth.
class TemplateParameterDepthRAII {
unsigned &Depth;
unsigned AddedLevels;
public:
explicit TemplateParameterDepthRAII(unsigned &Depth)
: Depth(Depth), AddedLevels(0) {}
~TemplateParameterDepthRAII() {
Depth -= AddedLevels;
}
void operator++() {
++Depth;
++AddedLevels;
}
void addDepth(unsigned D) {
Depth += D;
AddedLevels += D;
}
unsigned getDepth() const { return Depth; }
};
/// Factory object for creating AttributeList objects.
AttributeFactory AttrFactory;
/// \brief Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
/// \brief Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }
const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
return Actions.incrementMSManglingNumber();
}
Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
typedef Sema::FullExprArg FullExprArg;
// Parsing methods.
/// Initialize - Warm up the parser.
///
void Initialize();
/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result);
bool ParseTopLevelDecl() {
DeclGroupPtrTy Result;
return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion,
/// annotation tokens and balanced tokens must be handled using the specific
/// consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
bool TryConsumeToken(tok::TokenKind Expected) {
if (Tok.isNot(Expected))
return false;
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return true;
}
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
if (!TryConsumeToken(Expected))
return false;
Loc = PrevTokLocation;
return true;
}
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
if (isTokenParen())
return ConsumeParen();
if (isTokenBracket())
return ConsumeBracket();
if (isTokenBrace())
return ConsumeBrace();
if (isTokenStringLiteral())
return ConsumeStringToken();
if (Tok.is(tok::code_completion))
return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
: handleUnexpectedCodeCompletionToken();
if (Tok.isAnnotation())
return ConsumeAnnotationToken();
return ConsumeToken();
}
SourceLocation getEndOfPreviousToken() {
return PP.getLocForEndOfToken(PrevTokLocation);
}
/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
return Actions.getNullabilityKeyword(nullability);
}
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
return Tok.getKind() == tok::l_paren || Tok.getKind() == tok::r_paren;
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
return Tok.getKind() == tok::l_square || Tok.getKind() == tok::r_square;
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
return Tok.getKind() == tok::l_brace || Tok.getKind() == tok::r_brace;
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
}
/// \brief Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();
/// \brief Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
Token Next = Tok;
PP.EnterToken(Consumed);
PP.Lex(Tok);
PP.EnterToken(Next);
}
SourceLocation ConsumeAnnotationToken() {
assert(Tok.isAnnotation() && "wrong consume method");
SourceLocation Loc = Tok.getLocation();
PrevTokLocation = Tok.getAnnotationEndLoc();
PP.Lex(Tok);
return Loc;
}
/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
assert(isTokenParen() && "wrong consume method");
if (Tok.getKind() == tok::l_paren)
++ParenCount;
else if (ParenCount)
--ParenCount; // Don't let unbalanced )'s drive the count negative.
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
assert(isTokenBracket() && "wrong consume method");
if (Tok.getKind() == tok::l_square)
++BracketCount;
else if (BracketCount)
--BracketCount; // Don't let unbalanced ]'s drive the count negative.
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
assert(isTokenBrace() && "wrong consume method");
if (Tok.getKind() == tok::l_brace)
++BraceCount;
else if (BraceCount)
--BraceCount; // Don't let unbalanced }'s drive the count negative.
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
assert(isTokenStringLiteral() &&
"Should only consume string literals with this method");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// \brief Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
assert(Tok.is(tok::code_completion));
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// \brief When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
/// \brief Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
if (PP.isCodeCompletionEnabled())
PP.setCodeCompletionReached();
// Cut off parsing by acting as if we reached the end-of-file.
Tok.setKind(tok::eof);
}
/// \brief Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// \brief Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;
/// \brief Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;
/// \brief Initialize all pragma handlers.
void initializePragmaHandlers();
/// \brief Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// \brief Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// \brief Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// \brief Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// \brief Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
/// \brief Handle the annotation token produced for
/// #pragma comment...
void HandlePragmaMSComment();
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
/// \brief Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// \brief Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// \brief Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// \brief Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// \brief Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// \brief Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();
/// \brief Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// \brief Handle the annotation token produced for
/// #pragma HLS|AP|AUTOPILOT ...
void HandleXlxPragma();
static bool HasDataflowAttribute(AttributeList *List);
void RemoveDataflowAttribute(ParsedAttributes &From);
struct ParsedAttributesWithRange;
void SinkParsedHLSUnrollPragmas(ParsedAttributesWithRange &To, Scope *P);
void SinkLabelAttributes(ParsedAttributesWithRange &To,
ParsedAttributesWithRange &From,
const Token &IdentTok);
/// \brief Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// \brief Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
bool ParsePragmaAttributeSubjectMatchRuleSet(
attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);
void HandlePragmaAttribute();
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
if (N == 0 || Tok.is(tok::eof)) return Tok;
return PP.LookAhead(N-1);
}
public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() {
return PP.LookAhead(0);
}
/// getTypeAnnotation - Read a parsed type out of an annotation token.
static ParsedType getTypeAnnotation(const Token &Tok) {
return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}
private:
static void setTypeAnnotation(Token &Tok, ParsedType T) {
Tok.setAnnotationValue(T.getAsOpaquePtr());
}
/// \brief Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(const Token &Tok) {
return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}
/// \brief Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
Tok.setAnnotationValue(ER.getAsOpaquePointer());
}
public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
private:
enum AnnotatedNameKind {
/// Annotation has failed and emitted an error.
ANK_Error,
/// The identifier is a tentatively-declared name.
ANK_TentativeDecl,
/// The identifier is a template name. FIXME: Add an annotation for that.
ANK_TemplateName,
/// The identifier can't be resolved.
ANK_Unresolved,
/// Annotation was successful.
ANK_Success
};
AnnotatedNameKind
TryAnnotateName(bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid) {
if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
return false;
if (Tok.getIdentifierInfo() != Ident_vector &&
Tok.getIdentifierInfo() != Ident_bool &&
(!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
return false;
return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
Tok.getIdentifierInfo() != Ident_vector) return false;
return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
assert(getLangOpts().ObjC1);
if (Tok.isAnnotation())
return false;
if (!Ident_instancetype)
Ident_instancetype = PP.getIdentifierInfo("instancetype");
return Tok.getIdentifierInfo() == Ident_instancetype;
}
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// \brief Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
/// TentativeParsingAction TPA(*this);
/// ConsumeToken();
/// ....
/// TPA.Revert();
///
class TentativeParsingAction {
Parser &P;
Token PrevTok;
size_t PrevTentativelyDeclaredIdentifierCount;
unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
bool isActive;
public:
explicit TentativeParsingAction(Parser& p) : P(p) {
PrevTok = P.Tok;
PrevTentativelyDeclaredIdentifierCount =
P.TentativelyDeclaredIdentifiers.size();
PrevParenCount = P.ParenCount;
PrevBracketCount = P.BracketCount;
PrevBraceCount = P.BraceCount;
P.PP.EnableBacktrackAtThisPos();
isActive = true;
}
void Commit() {
assert(isActive && "Parsing action was finished!");
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.PP.CommitBacktrackedTokens();
isActive = false;
}
void Revert() {
assert(isActive && "Parsing action was finished!");
P.PP.Backtrack();
P.Tok = PrevTok;
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.ParenCount = PrevParenCount;
P.BracketCount = PrevBracketCount;
P.BraceCount = PrevBraceCount;
isActive = false;
}
~TentativeParsingAction() {
assert(!isActive && "Forgot to call Commit or Revert!");
}
};
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
class RevertingTentativeParsingAction
: private Parser::TentativeParsingAction {
public:
RevertingTentativeParsingAction(Parser &P)
: Parser::TentativeParsingAction(P) {}
~RevertingTentativeParsingAction() { Revert(); }
};
class UnannotatedTentativeParsingAction;
/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
Parser &P;
Decl *DC;
SaveAndRestore<bool> WithinObjCContainer;
public:
explicit ObjCDeclContextSwitch(Parser &p)
: P(p), DC(p.getObjCDeclContext()),
WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
if (DC)
P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
}
~ObjCDeclContextSwitch() {
if (DC)
P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
}
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
unsigned Diag = diag::err_expected,
StringRef DiagMsg = "");
/// \brief The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
/// \brief The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
OutsideFunction = 0,
InsideStruct = 1,
InstanceVariableList = 2,
AfterMemberFunctionDefinition = 3
};
/// \brief Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, unsigned TST = TST_unspecified);
/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
Parser *Self;
ParseScope(const ParseScope &) = delete;
void operator=(const ParseScope &) = delete;
public:
// ParseScope - Construct a new object to manage a scope in the
// parser Self where the new Scope is created with the flags
// ScopeFlags, but only when we aren't about to enter a compound statement.
ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
bool BeforeCompoundStmt = false)
: Self(Self) {
if (EnteredScope && !BeforeCompoundStmt)
Self->EnterScope(ScopeFlags);
else {
if (BeforeCompoundStmt)
Self->incrementMSManglingNumber();
this->Self = nullptr;
}
}
// Exit - Exit the scope associated with this object now, rather
// than waiting until the object is destroyed.
void Exit(ParsedAttributes *ScopeAttr = nullptr) {
if (Self) {
Self->ExitScope(ScopeAttr);
Self = nullptr;
}
}
~ParseScope() {
Exit();
}
};
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope(ParsedAttributes *ScopeAttr = nullptr);
private:
/// \brief RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
Scope *CurScope;
unsigned OldFlags;
ParseScopeFlags(const ParseScopeFlags &) = delete;
void operator=(const ParseScopeFlags &) = delete;
public:
ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
DiagnosticBuilder Diag(unsigned DiagID) {
return Diag(Tok, DiagID);
}
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// \brief Control flags for SkipUntil functions.
enum SkipUntilFlags {
StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
/// \brief Stop skipping at specified token, but don't skip the token itself
StopBeforeMatch = 1 << 1,
StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
SkipUntilFlags R) {
return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
static_cast<unsigned>(R));
}
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2, T3};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));
/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
struct ParsingClass;
/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
virtual ~LateParsedDeclaration();
virtual void ParseLexedMethodDeclarations();
virtual void ParseLexedMemberInitializers();
virtual void ParseLexedMethodDefs();
virtual void ParseLexedAttributes();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
LateParsedClass(Parser *P, ParsingClass *C);
~LateParsedClass() override;
void ParseLexedMethodDeclarations() override;
void ParseLexedMemberInitializers() override;
void ParseLexedMethodDefs() override;
void ParseLexedAttributes() override;
private:
Parser *Self;
ParsingClass *Class;
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
Parser *Self;
CachedTokens Toks;
IdentifierInfo &AttrName;
SourceLocation AttrNameLoc;
SmallVector<Decl*, 2> Decls;
explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
SourceLocation Loc)
: Self(P), AttrName(Name), AttrNameLoc(Loc) {}
void ParseLexedAttributes() override;
void addDecl(Decl *D) { Decls.push_back(D); }
};
// A list of late-parsed attributes. Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }
bool parseSoon() { return ParseSoon; }
private:
bool ParseSoon; // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
Parser *Self;
Decl *D;
CachedTokens Toks;
/// \brief Whether this member function had an associated template
/// scope. When true, D is a template declaration;
/// otherwise, it is a member function declaration.
bool TemplateScope;
explicit LexedMethod(Parser* P, Decl *MD)
: Self(P), D(MD), TemplateScope(false) {}
void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
explicit LateParsedDefaultArgument(Decl *P,
std::unique_ptr<CachedTokens> Toks = nullptr)
: Param(P), Toks(std::move(Toks)) { }
/// Param - The parameter declaration for this parameter.
Decl *Param;
/// Toks - The sequence of tokens that comprises the default
/// argument expression, not including the '=' or the terminating
/// ')' or ','. This will be NULL for parameters that have no
/// default argument.
std::unique_ptr<CachedTokens> Toks;
};
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
: Self(P), Method(M), TemplateScope(false),
ExceptionSpecTokens(nullptr) {}
void ParseLexedMethodDeclarations() override;
Parser* Self;
/// Method - The method declaration.
Decl *Method;
/// \brief Whether this member function had an associated template
/// scope. When true, Method is a template declaration;
/// otherwise, it is a member function declaration.
bool TemplateScope;
/// DefaultArgs - Contains the parameters of the function and
/// their default arguments. At least one of the parameters will
/// have a default argument, but all of the parameters of the
/// method will be stored so that they can be reintroduced into
/// scope at the appropriate times.
SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;
/// \brief The set of tokens that make up an exception-specification that
/// has not yet been parsed.
CachedTokens *ExceptionSpecTokens;
};
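// Illustrative sketch (not part of the original header): the kind of member
// declaration that ends up as a LateParsedMethodDeclaration. The default
// argument names a member declared later, so its tokens can only be parsed
// once the class is complete (C++ [class.mem]p2). 'S', 'f' and 'g' are
// placeholder names:
//
//   struct S {
//     void f(int x = g());  // default-argument tokens are cached
//     static int g();       // declared after the use above
//   };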
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
LateParsedMemberInitializer(Parser *P, Decl *FD)
: Self(P), Field(FD) { }
void ParseLexedMemberInitializers() override;
Parser *Self;
/// Field - The field declaration.
Decl *Field;
/// CachedTokens - The sequence of tokens that comprises the initializer,
/// including any leading '='.
CachedTokens Toks;
};
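// Illustrative sketch (not part of the original header): a non-static data
// member initializer whose tokens must be cached in Toks because it names a
// member declared later in the class (C++11 [class.mem]p2). 'Widget' and
// 'nextId' are placeholder names:
//
//   struct Widget {
//     int id = nextId();    // initializer parsed after the class is complete
//     static int nextId();
//   };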
/// LateParsedDeclarationsContainer - During parsing of a top-level (non-nested)
/// C++ class, method declarations that contain parts which cannot be parsed
/// until after the definition is completed (C++ [class.mem]p2), along with any
/// attached inline definitions, are stored here together with the cached
/// tokens that will later be parsed to create those entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// \brief Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
: TopLevelClass(TopLevelClass), TemplateScope(false),
IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }
/// \brief Whether this is a "top-level" class, meaning that it is
/// not nested within another class.
bool TopLevelClass : 1;
/// \brief Whether this class had an associated template
/// scope. When true, TagOrTemplate is a template declaration;
/// otherwise, it is a tag declaration.
bool TemplateScope : 1;
/// \brief Whether this class is an __interface.
bool IsInterface : 1;
/// \brief The class or class template whose definition we are parsing.
Decl *TagOrTemplate;
/// LateParsedDeclarations - Method declarations, inline definitions and
/// nested classes that contain pieces whose parsing will be delayed until
/// the top-level class is fully defined.
LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// \brief The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;
ParsingClass &getCurrentClass() {
assert(!ClassStack.empty() && "No lexed method stacks!");
return *ClassStack.top();
}
/// \brief RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
Parser &P;
bool Popped;
Sema::ParsingClassState State;
public:
ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
bool IsInterface)
: P(P), Popped(false),
State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
}
/// \brief Pop this class off the stack.
void Pop() {
assert(!Popped && "Nested class has already been popped");
Popped = true;
P.PopParsingClass(State);
}
~ParsingClassDefinition() {
if (!Popped)
P.PopParsingClass(State);
}
};
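// Illustrative sketch (not part of the original header) of how the RAII
// object above is typically used from a member-parsing routine; 'TagDecl'
// and the surrounding function are placeholders:
//
//   void Parser::parseSomeClassBody(Decl *TagDecl) {
//     ParsingClassDefinition ParsingDef(*this, TagDecl,
//                                       /*TopLevelClass=*/true,
//                                       /*IsInterface=*/false);
//     // ... parse the member-specification ...
//     ParsingDef.Pop();  // or let the destructor pop the class on scope exit
//   }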
/// \brief Contains any template-specific information that has been
/// parsed prior to parsing the declaration specifiers.
struct ParsedTemplateInfo {
ParsedTemplateInfo()
: Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }
ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
bool isSpecialization,
bool lastParameterListWasEmpty = false)
: Kind(isSpecialization? ExplicitSpecialization : Template),
TemplateParams(TemplateParams),
LastParameterListWasEmpty(lastParameterListWasEmpty) { }
explicit ParsedTemplateInfo(SourceLocation ExternLoc,
SourceLocation TemplateLoc)
: Kind(ExplicitInstantiation), TemplateParams(nullptr),
ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
LastParameterListWasEmpty(false){ }
/// \brief The kind of template we are parsing.
enum {
/// \brief We are not parsing a template at all.
NonTemplate = 0,
/// \brief We are parsing a template declaration.
Template,
/// \brief We are parsing an explicit specialization.
ExplicitSpecialization,
/// \brief We are parsing an explicit instantiation.
ExplicitInstantiation
} Kind;
/// \brief The template parameter lists, for template declarations
/// and explicit specializations.
TemplateParameterLists *TemplateParams;
/// \brief The location of the 'extern' keyword, if any, for an explicit
/// instantiation.
SourceLocation ExternLoc;
/// \brief The location of the 'template' keyword, for an explicit
/// instantiation.
SourceLocation TemplateLoc;
/// \brief Whether the last template parameter list was empty.
bool LastParameterListWasEmpty;
SourceRange getSourceRange() const LLVM_READONLY;
};
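// Illustrative sketch (not part of the original header) of which constructor
// above produces which Kind; 'Params', 'ExternLoc' and 'TemplateLoc' are
// placeholder values:
//
//   ParsedTemplateInfo NotTmpl;                                   // NonTemplate
//   ParsedTemplateInfo Tmpl(&Params, /*isSpecialization=*/false); // Template
//   ParsedTemplateInfo Spec(&Params, /*isSpecialization=*/true);
//                                                     // ExplicitSpecialization
//   ParsedTemplateInfo Inst(ExternLoc, TemplateLoc);  // ExplicitInstantiation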
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
static void LateTemplateParserCleanupCallback(void *P);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
enum CachedInitKind {
CIK_DefaultArgument,
CIK_DefaultInitializer
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
AttributeList *AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers& VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
bool ConsumeAndStoreUntil(tok::TokenKind T1,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true) {
return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
struct ParsedAttributesWithRange : ParsedAttributes {
ParsedAttributesWithRange(AttributeFactory &factory)
: ParsedAttributes(factory) {}
void clear() {
ParsedAttributes::clear();
Range = SourceRange();
}
SourceRange Range;
};
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc, if non-NULL, is filled with the location of the last token of
// the simple-asm.
ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr);
ExprResult ParseAsmStringLiteral();
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives();
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
struct ObjCImplParsingDataRAII {
Parser &P;
Decl *Dcl;
bool HasCFunction;
typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
LateParsedObjCMethodContainer LateParsedObjCMethods;
ObjCImplParsingDataRAII(Parser &parser, Decl *D)
: P(parser), Dcl(D), HasCFunction(false) {
P.CurParsedObjCImpl = this;
Finished = false;
}
~ObjCImplParsingDataRAII();
void finish(SourceRange AtEnd);
bool isFinished() const { return Finished; }
private:
bool Finished;
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-C context-sensitive keyword recognition.
enum ObjCTypeQual {
objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
objc_nonnull, objc_nullable, objc_null_unspecified,
objc_NumQuals
};
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
NotTypeCast = 0,
MaybeTypeCast,
IsTypeCast
};
ExprResult ParseHLSVariableExpression(StringRef optionName, bool noVoid=true);
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstraintExpression();
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
bool IsUnevaluated);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast,
bool isVectorLiteral = false);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast,
bool isVectorLiteral = false);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(
SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
llvm::function_ref<void()> Completer = llvm::function_ref<void()>());
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
SimpleExpr, // Only parse '(' expression ')'
CompoundStmt, // Also allow '(' compound-statement ')'
CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
CastExpr // Also allow '(' type-name ')' <anything>
};
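// Illustrative examples (not part of the original header) of the constructs
// each ParenParseOption admits once a '(' has been seen:
//
//   (x + 1)              // SimpleExpr: parenthesized expression
//   ({ int y = 0; y; })  // CompoundStmt: GNU statement expression
//   (struct P){1, 2}     // CompoundLiteral: C99 compound literal
//   (int)x               // CastExpr: cast followed by any expression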
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr,
bool OnlyNamespace = false);
//===--------------------------------------------------------------------===//
// C++0x 5.1.2: Lambda expressions
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
Optional<unsigned> ParseLambdaIntroducer(LambdaIntroducer &Intro,
bool *SkippedInits = nullptr);
bool TryParseLambdaIntroducer(LambdaIntroducer &Intro);
ExprResult ParseLambdaExpressionAfterIntroducer(
LambdaIntroducer &Intro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// a simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while condition expression.
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
/// initializer: [C99 6.7.8]
/// assignment-expression
/// '{' ...
ExprResult ParseInitializer() {
if (Tok.isNot(tok::l_brace))
return ParseAssignmentExpression();
return ParseBraceInitializer();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator();
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used).
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used).
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
bool AllowOpenMPStandalone = false);
enum AllowedConstructsKind {
/// \brief Allow any declarations, statements, OpenMP directives.
ACK_Any,
/// \brief Allow only statements and non-standalone OpenMP directives.
ACK_StatementsOpenMPNonStandalone,
/// \brief Allow statements and all executable OpenMP directives.
ACK_StatementsOpenMPAnyExecutable
};
StmtResult
ParseStatementOrDeclaration(StmtVector &Stmts, AllowedConstructsKind Allowed,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
AllowedConstructsKind Allowed,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement();
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs);
StmtResult ParseCaseStatement(bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement();
StmtResult ParseDataflowCompoundStatement();
StmtResult
ParseCompoundStatement(bool isStmtExpr = false,
ParsedAttributesWithRange *ScopeAttr = nullptr);
StmtResult
ParseCompoundStatement(bool isStmtExpr, unsigned ScopeFlags,
ParsedAttributesWithRange *ScopeAttr = nullptr);
bool CheckLBraceForDataflowLoopBody();
void ParseCompoundStatementLeadingPragmas();
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc,
Sema::ConditionKind CK);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(ParsedAttributesWithRange &ScopeAttr,
SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement(ParsedAttributesWithRange &ScopeAttr);
StmtResult ParseForStatement(ParsedAttributesWithRange &ScopeAttr,
SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
AllowedConstructsKind Allowed,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// \brief Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
/// \brief Parse the block; this code is always used.
IEB_Parse,
/// \brief Skip the block entirely; this code is never used.
IEB_Skip,
/// \brief Parse the block as a dependent block, which may be used in
/// some template instantiations but not others.
IEB_Dependent
};
/// \brief Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
/// \brief The location of the initial keyword.
SourceLocation KeywordLoc;
/// \brief Whether this is an __if_exists block (rather than an
/// __if_not_exists block).
bool IsIfExists;
/// \brief Nested-name-specifier preceding the name.
CXXScopeSpec SS;
/// \brief The name we're looking for.
UnqualifiedId Name;
/// \brief The behavior this __if_exists or __if_not_exists block
/// should have.
IfExistsBehavior Behavior;
};
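// Illustrative sketch (not part of the original header) of the Microsoft
// construct that IfExistsCondition describes; 'T::member' is a placeholder
// name:
//
//   __if_exists (T::member) {
//     // body is parsed (IEB_Parse), skipped (IEB_Skip), or treated as a
//     // dependent block (IEB_Dependent), depending on the lookup result
//   }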
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
AccessSpecifier& CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out; there are other significant restrictions on specifiers that
/// would best be implemented in the parser.
enum class DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
DSC_top_level, // top-level/namespace declaration context
DSC_template_param, // template parameter context
DSC_template_type_arg, // template type argument context
DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
DSC_condition // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_condition:
return false;
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_type_specifier:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_alias_declaration:
return true;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_condition:
case DeclSpecContext::DSC_type_specifier:
return true;
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_alias_declaration:
return false;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
SourceLocation ColonLoc;
ExprResult RangeExpr;
bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
};
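// Illustrative sketch (not part of the original header): while parsing
//
//   for (int x : range) { ... }
//
// 'int x' is first parsed as an ordinary simple-declaration; once the ':' is
// seen, ColonLoc and RangeExpr above record the for-range-initializer
// ('range' is a placeholder expression).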
DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs);
DeclGroupPtrTy ParseSimpleDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
bool RequireSemi,
ForRangeInit *FRI = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// \brief When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(
DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
DeclaratorContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, unsigned TagType,
Decl *TagDecl);
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// \brief Return true if we know that we are definitely looking at a
/// decl-specifier that isn't part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
if (getLangOpts().CPlusPlus)
return isCXXDeclarationSpecifier() == TPResult::True;
return isDeclarationSpecifier(true);
}
/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
if (getLangOpts().CPlusPlus)
return isCXXDeclarationStatement();
return isDeclarationSpecifier(true);
}
/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
if (getLangOpts().CPlusPlus)
return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
return isDeclarationSpecifier(true);
}
/// \brief Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// \brief Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// \brief Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);
/// \brief Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
TypeIdInParens,
TypeIdUnambiguous,
TypeIdAsTemplateArgument
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
if (getLangOpts().CPlusPlus)
return isCXXTypeId(TypeIdInParens, isAmbiguous);
isAmbiguous = false;
return isTypeSpecifierQualifier();
}
bool isTypeIdInParens() {
bool isAmbiguous;
return isTypeIdInParens(isAmbiguous);
}
/// \brief Checks whether the current tokens form a type-id or an expression.
/// It is similar to isTypeIdInParens but does not assume that the type-id
/// is in parentheses.
bool isTypeIdUnambiguously() {
bool IsAmbiguous;
if (getLangOpts().CPlusPlus)
return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
return isTypeSpecifierQualifier();
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
Error ///< Can't be any of the above!
};
/// \brief Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt);
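// Illustrative examples (not part of the original header) of the three
// disambiguation results for the tokens following 'if ('; 'f' and 'x' are
// placeholder names:
//
//   if (x > 0)               // Expression
//   if (int x = f())         // ConditionDecl
//   if (int x = f(); x > 0)  // InitStmtDecl (C++17 init-statement)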
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
bool isAmbiguous;
return isCXXTypeId(Context, isAmbiguous);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
True, False, Ambiguous, Error
};
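// Illustrative examples (not part of the original header) of statements that
// require tentative parsing; 'T' names a type and 'a' is a placeholder
// identifier:
//
//   T(a);    // declaration of 'a', or a function-style cast of 'a'
//   T(*a)(); // declaration of a function pointer, or a cast of '*a' then a call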
/// \brief Based only on the given token kind, determine whether we know that
/// we're at the start of an expression or a type-specifier-seq (which may
/// be an expression, in C++).
///
/// This routine does not attempt to resolve any of the tricky cases, e.g.,
/// those involving lookup of identifiers.
///
/// \returns \c TPResult::True if this token starts an expression,
/// \c TPResult::False if this token starts a type-specifier-seq, or
/// \c TPResult::Ambiguous if it cannot tell.
TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *HasMissingTypename = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// \brief Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier=true);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
public:
TypeResult ParseTypeName(SourceRange *Range = nullptr,
DeclaratorContext Context
= DeclaratorContext::TypeNameContext,
AccessSpecifier AS = AS_none,
Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
const LangOptions &LO = getLangOpts();
return LO.DoubleSquareBracketAttributes;
}
// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
assert(Tok.is(tok::l_square));
if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
return false;
return DiagnoseProhibitedCXX11Attribute();
}
bool DiagnoseProhibitedCXX11Attribute();
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation) {
if (!standardAttributesAllowed())
return;
if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
Tok.isNot(tok::kw_alignas))
return;
DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
// FixItLoc = possible correct location for the attributes
void ProhibitAttributes(ParsedAttributesWithRange &attrs,
SourceLocation FixItLoc = SourceLocation()) {
if (!attrs.Range.isValid()) return;
DiagnoseProhibitedAttributes(attrs, FixItLoc);
attrs.clear();
}
void DiagnoseProhibitedAttributes(ParsedAttributesWithRange &attrs,
SourceLocation FixItLoc);
// Forbid C++11 and C2x attributes that appear in certain syntactic locations
// which the standard permits but we don't yet support, for example, attributes
// that appertain to decl-specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID);
/// \brief Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// \brief Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// \brief Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void MaybeParseGNUAttributes(Declarator &D,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute)) {
ParsedAttributes attrs(AttrFactory);
SourceLocation endLoc;
ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
D.takeAttributes(attrs, endLoc);
}
}
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute))
ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax,
Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void MaybeParseCXX11Attributes(Declarator &D) {
if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrs(AttrFactory);
SourceLocation endLoc;
ParseCXX11Attributes(attrs, &endLoc);
D.takeAttributes(attrs, endLoc);
}
}
void MaybeParseCXX11Attributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrsWithRange(AttrFactory);
ParseCXX11Attributes(attrsWithRange, endLoc);
attrs.takeAllFrom(attrsWithRange);
}
}
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *endLoc = nullptr,
bool OuterMightBeMessageSend = false) {
if (standardAttributesAllowed() &&
isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
ParseCXX11Attributes(attrs, endLoc);
}
void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// \brief Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square))
ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr) {
const auto &LO = getLangOpts();
if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec))
ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
/// \brief Parses the opencl_unroll_hint attribute if the language is OpenCL
/// v2.0 or higher.
/// \return false if an error occurs.
bool MaybeParseOpenCLLoopAttribute(ParsedAttributes &Attrs) {
if (getLangOpts().OpenCL || getLangOpts().HLSExt)
return ParseOpenCLLoopAttribute(Attrs);
return true;
}
/// \brief Parses the opencl_unroll_hint attribute.
/// \return false if an error occurs.
bool ParseOpenCLLoopAttribute(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
void ParseXCLDependenceAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseXCLArrayViewAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
Parser &P;
CXXScopeSpec &SS;
bool EnteredScope;
bool CreatedScope;
public:
DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
: P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
void EnterDeclaratorScope() {
assert(!EnteredScope && "Already entered the scope!");
assert(SS.isSet() && "C++ scope was not set!");
CreatedScope = true;
P.EnterScope(0); // Not a decl scope.
if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
EnteredScope = true;
}
~DeclaratorScopeObj() {
if (EnteredScope) {
assert(SS.isSet() && "C++ scope was cleared ?");
P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
}
if (CreatedScope)
P.ExitScope();
}
};
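// Illustrative sketch (not part of the original header) of typical use of
// DeclaratorScopeObj while parsing a qualified declarator such as
// 'int A::f()'; 'D' is assumed to be the Declarator being parsed:
//
//   DeclaratorScopeObj DeclScopeObj(*this, D.getCXXScopeSpec());
//   if (D.getCXXScopeSpec().isSet())
//     DeclScopeObj.EnterDeclaratorScope();  // enter A:: so its members resolve
//   // ... parse the rest of the declarator ...
//   // the scope is exited automatically by ~DeclaratorScopeObj()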
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1,
AR_CXX11AttributesParsed = 1 << 2,
AR_DeclspecAttributesParsed = 1 << 3,
AR_AllAttributesParsed = AR_GNUAttributesParsed |
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
AR_VendorAttributesParsed = AR_GNUAttributesParsed |
AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
Declarator &D,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
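// Illustrative examples (not part of the original header) of the ambiguity
// this enum captures when '[[' is seen in Objective-C++; 'obj', 'msg' and
// 'send' are placeholder names:
//
//   [[nodiscard]] int f();   // CAK_AttributeSpecifier
//   [[obj msg] send];        // nested message send, CAK_NotAttributeSpecifier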
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
void ParseInnerNamespace(std::vector<SourceLocation> &IdentLoc,
std::vector<IdentifierInfo *> &Ident,
std::vector<SourceLocation> &NamespaceLoc,
unsigned int index, SourceLocation &InlineLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
struct UsingDeclarator {
SourceLocation TypenameLoc;
CXXScopeSpec SS;
SourceLocation TemplateKWLoc;
UnqualifiedId Name;
SourceLocation EllipsisLoc;
void clear() {
TypenameLoc = TemplateKWLoc = EllipsisLoc = SourceLocation();
SS.clear();
Name.clear();
}
};
bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, AttributeList *Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// \brief Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// \brief Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses initializer for provided omp_priv declaration inside the reduction
/// initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);
/// \brief Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// \brief Parses declarative or executable directive.
///
/// \param Allowed ACK_Any, if any directives are allowed,
/// ACK_StatementsOpenMPAnyExecutable - if any executable directives are
/// allowed, ACK_StatementsOpenMPNonStandalone - if only non-standalone
/// executable directives are allowed.
///
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(AllowedConstructsKind Allowed);
/// \brief Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// \brief Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// \brief Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
/// \brief Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// \brief Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
/// \brief Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind, bool ParseOnly);
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
Expr *TailExpr = nullptr;
SourceLocation ColonLoc;
CXXScopeSpec ReductionIdScopeSpec;
DeclarationNameInfo ReductionId;
OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
OpenMPLinearClauseKind LinKind = OMPC_LINEAR_val;
OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown;
OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
bool IsMapTypeImplicit = false;
SourceLocation DepLinMapLoc;
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
bool AllowDestructorName,
bool AllowConstructorName,
bool AllowDeductionGuide,
ParsedType ObjectType,
SourceLocation& TemplateKWLoc,
UnqualifiedId &Result);
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none,
AttributeList *AccessAttrs = nullptr);
Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
SourceLocation &DeclEnd,
AccessSpecifier AS,
AttributeList *AccessAttrs);
Decl *ParseSingleDeclarationAfterTemplate(
DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams,
SourceLocation &DeclEnd,
AccessSpecifier AS=AS_none,
AttributeList *AccessAttrs = nullptr);
bool ParseTemplateParameters(unsigned Depth,
SmallVectorImpl<NamedDecl *> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams);
bool isStartOfTemplateTypeParameter();
NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true);
void AnnotateTemplateIdTokenAsType(bool IsClassName = false);
bool IsTemplateArgumentList(unsigned Skip = 0);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl();
Decl *ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
//===--------------------------------------------------------------------===//
// Embarcadero: Array and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteNaturalLanguage() override;
};
} // end namespace clang
#endif
|
GB_unaryop__lnot_int32_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int32_uint32
// op(A') function: GB_tran__lnot_int32_uint32
// C type: int32_t
// A type: uint32_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
int32_t z = (int32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT32 || GxB_NO_UINT32)
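// Illustrative sketch (hypothetical helper, not part of the generated code):
// the same cast-then-logical-not operation written without the GB_* macros,
// to show roughly what GB_CAST_OP does for this type pair.
static inline void GB_unop__lnot_int32_uint32_sketch
(
    int32_t *Cx,
    const uint32_t *Ax,
    int64_t anz
)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        int32_t x = (int32_t) Ax [p] ;   // cast: int32_t cij = (int32_t) aij
        Cx [p] = !(x != 0) ;             // unaryop: cij = !(aij != 0)
    }
}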
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_int32_uint32
(
int32_t *restrict Cx,
const uint32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_int32_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
stream.c | /*-----------------------------------------------------------------------*/
/* Program: STREAM */
/* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */
/* Original code developed by John D. McCalpin */
/* Programmers: John D. McCalpin */
/* Joe R. Zagar */
/* */
/* This program measures memory transfer rates in MB/s for simple */
/* computational kernels coded in C. */
/*-----------------------------------------------------------------------*/
/* Copyright 1991-2013: John D. McCalpin */
/*-----------------------------------------------------------------------*/
/* License: */
/* 1. You are free to use this program and/or to redistribute */
/* this program. */
/* 2. You are free to modify this program for your own use, */
/* including commercial use, subject to the publication */
/* restrictions in item 3. */
/* 3. You are free to publish results obtained from running this */
/* program, or from works that you derive from this program, */
/* with the following limitations: */
/* 3a. In order to be referred to as "STREAM benchmark results", */
/* published results must be in conformance to the STREAM */
/* Run Rules, (briefly reviewed below) published at */
/* http://www.cs.virginia.edu/stream/ref.html */
/* and incorporated herein by reference. */
/* As the copyright holder, John McCalpin retains the */
/* right to determine conformity with the Run Rules. */
/* 3b. Results based on modified source code or on runs not in */
/* accordance with the STREAM Run Rules must be clearly */
/* labelled whenever they are published. Examples of */
/* proper labelling include: */
/* "tuned STREAM benchmark results" */
/* "based on a variant of the STREAM benchmark code" */
/* Other comparable, clear, and reasonable labelling is */
/* acceptable. */
/* 3c. Submission of results to the STREAM benchmark web site */
/* is encouraged, but not required. */
/* 4. Use of this program or creation of derived works based on this */
/* program constitutes acceptance of these licensing restrictions. */
/* 5. Absolutely no warranty is expressed or implied. */
/*-----------------------------------------------------------------------*/
# include <stdio.h>
# include <unistd.h>
# include <math.h>
# include <float.h>
# include <limits.h>
# include <sys/time.h>
/*-----------------------------------------------------------------------
* INSTRUCTIONS:
*
* 1) STREAM requires different amounts of memory to run on different
* systems, depending on both the system cache size(s) and the
* granularity of the system timer.
* You should adjust the value of 'STREAM_ARRAY_SIZE' (below)
* to meet *both* of the following criteria:
* (a) Each array must be at least 4 times the size of the
* available cache memory. I don't worry about the difference
* between 10^6 and 2^20, so in practice the minimum array size
* is about 3.8 times the cache size.
* Example 1: One Xeon E3 with 8 MB L3 cache
* STREAM_ARRAY_SIZE should be >= 4 million, giving
* an array size of 30.5 MB and a total memory requirement
* of 91.5 MB.
* Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP)
* STREAM_ARRAY_SIZE should be >= 20 million, giving
* an array size of 153 MB and a total memory requirement
* of 458 MB.
* (b) The size should be large enough so that the 'timing calibration'
* output by the program is at least 20 clock-ticks.
* Example: most versions of Windows have a 10 millisecond timer
* granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds.
* If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec.
* This means the each array must be at least 1 GB, or 128M elements.
*
* Version 5.10 increases the default array size from 2 million
* elements to 10 million elements in response to the increasing
* size of L3 caches. The new default size is large enough for caches
* up to 20 MB.
* Version 5.10 changes the loop index variables from "register int"
* to "ssize_t", which allows array indices >2^32 (4 billion)
* on properly configured 64-bit systems. Additional compiler options
* (such as "-mcmodel=medium") may be required for large memory runs.
*
* Array size can be set at compile time without modifying the source
* code for the (many) compilers that support preprocessor definitions
* on the compile line. E.g.,
* gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M
* will override the default size of 10M with a new size of 100M elements
* per array.
*/
#ifndef STREAM_ARRAY_SIZE
# define STREAM_ARRAY_SIZE 1000000
#endif
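/*
 * Worked sizing example (illustrative note, not original STREAM text), for
 * rule (1a) above with STREAM_TYPE = double (8 bytes) and the default
 * STREAM_ARRAY_SIZE of 1,000,000 defined just above (note: smaller than the
 * 10M default described in the comments):
 *   one array    = 1,000,000 * 8 B  =  8,000,000 B ~  7.6 MiB
 *   three arrays = 3 * 8,000,000 B  = 24,000,000 B ~ 22.9 MiB
 *   rule (1a) is then satisfied only for caches up to ~1.9 MiB; for larger
 *   caches, raise the size on the compile line, e.g.
 *     gcc -O -DSTREAM_ARRAY_SIZE=20000000 stream.c -o stream
 */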
/* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result
* for any iteration after the first, therefore the minimum value
* for NTIMES is 2.
* There are no rules on maximum allowable values for NTIMES, but
* values larger than the default are unlikely to noticeably
* increase the reported performance.
* NTIMES can also be set on the compile line without changing the source
* code using, for example, "-DNTIMES=7".
*/
#ifdef NTIMES
#if NTIMES<=1
# define NTIMES 10
#endif
#endif
#ifndef NTIMES
# define NTIMES 10
#endif
/* Users are allowed to modify the "OFFSET" variable, which *may* change the
* relative alignment of the arrays (though compilers may change the
* effective offset by making the arrays non-contiguous on some systems).
* Use of non-zero values for OFFSET can be especially helpful if the
* STREAM_ARRAY_SIZE is set to a value close to a large power of 2.
* OFFSET can also be set on the compile line without changing the source
* code using, for example, "-DOFFSET=56".
*/
#ifndef OFFSET
# define OFFSET 0
#endif
/*
* 3) Compile the code with optimization. Many compilers generate
* unreasonably bad code before the optimizer tightens things up.
* If the results are unreasonably good, on the other hand, the
* optimizer might be too smart for me!
*
* For a simple single-core version, try compiling with:
* cc -O stream.c -o stream
* This is known to work on many, many systems....
*
* To use multiple cores, you need to tell the compiler to obey the OpenMP
* directives in the code. This varies by compiler, but a common example is
* gcc -O -fopenmp stream.c -o stream_omp
* The environment variable OMP_NUM_THREADS allows runtime control of the
* number of threads/cores used when the resulting "stream_omp" program
* is executed.
*
* To run with single-precision variables and arithmetic, simply add
* -DSTREAM_TYPE=float
* to the compile line.
* Note that this changes the minimum array sizes required --- see (1) above.
*
* The preprocessor directive "TUNED" does not do much -- it simply causes the
* code to call separate functions to execute each kernel. Trivial versions
* of these functions are provided, but they are *not* tuned -- they just
* provide predefined interfaces to be replaced with tuned code.
*
*
* 4) Optional: Mail the results to mccalpin@cs.virginia.edu
* Be sure to include info that will help me understand:
* a) the computer hardware configuration (e.g., processor model, memory type)
* b) the compiler name/version and compilation flags
* c) any run-time information (such as OMP_NUM_THREADS)
* d) all of the output from the test case.
*
* Thanks!
*
*-----------------------------------------------------------------------*/
# define HLINE "-------------------------------------------------------------\n"
# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif
#ifndef STREAM_TYPE
#define STREAM_TYPE double
#endif
static STREAM_TYPE a[STREAM_ARRAY_SIZE+OFFSET],
b[STREAM_ARRAY_SIZE+OFFSET],
c[STREAM_ARRAY_SIZE+OFFSET];
static double avgtime[4] = {0}, maxtime[4] = {0},
mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};
static char *label[4] = {"Copy: ", "Scale: ",
"Add: ", "Triad: "};
static double bytes[4] = {
2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE
};
extern double mysecond();
extern void checkSTREAMresults();
#ifdef TUNED
extern void tuned_STREAM_Copy();
extern void tuned_STREAM_Scale(STREAM_TYPE scalar);
extern void tuned_STREAM_Add();
extern void tuned_STREAM_Triad(STREAM_TYPE scalar);
#endif
#ifdef _OPENMP
extern int omp_get_num_threads();
#endif
int
main()
{
int quantum, checktick();
int BytesPerWord;
int k;
ssize_t j;
STREAM_TYPE scalar;
double t, times[4][NTIMES];
/* --- SETUP --- determine precision and check timing --- */
printf(HLINE);
printf("STREAM version $Revision: 5.10 $\n");
printf(HLINE);
BytesPerWord = sizeof(STREAM_TYPE);
printf("This system uses %d bytes per array element.\n",
BytesPerWord);
printf(HLINE);
#ifdef N
printf("***** WARNING: ******\n");
printf(" It appears that you set the preprocessor variable N when compiling this code.\n");
printf(" This version of the code uses the preprocesor variable STREAM_ARRAY_SIZE to control the array size\n");
printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE);
printf("***** WARNING: ******\n");
#endif
printf("Array size = %llu (elements), Offset = %d (elements)\n" , (unsigned long long) STREAM_ARRAY_SIZE, OFFSET);
printf("Memory per array = %.1f MiB (= %.1f GiB).\n",
BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0),
BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0));
printf("Total memory required = %.1f MiB (= %.1f GiB).\n",
(3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.),
(3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.));
printf("Each kernel will be executed %d times.\n", NTIMES);
printf(" The *best* time for each kernel (excluding the first iteration)\n");
printf(" will be used to compute the reported bandwidth.\n");
#ifdef _OPENMP
printf(HLINE);
#pragma omp parallel
{
#pragma omp master
{
k = omp_get_num_threads();
printf ("Number of Threads requested = %i\n",k);
}
}
#endif
#ifdef _OPENMP
k = 0;
#pragma omp parallel
#pragma omp atomic
k++;
printf ("Number of Threads counted = %i\n",k);
#endif
/* Get initial value for system clock. */
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
a[j] = 1.0;
b[j] = 2.0;
c[j] = 0.0;
}
printf(HLINE);
if ( (quantum = checktick()) >= 1)
printf("Your clock granularity/precision appears to be "
"%d microseconds.\n", quantum);
else {
printf("Your clock granularity appears to be "
"less than one microsecond.\n");
quantum = 1;
}
t = mysecond();
#pragma omp parallel for
for (j = 0; j < STREAM_ARRAY_SIZE; j++)
a[j] = 2.0E0 * a[j];
t = 1.0E6 * (mysecond() - t);
printf("Each test below will take on the order"
" of %d microseconds.\n", (int) t );
printf(" (= %d clock ticks)\n", (int) (t/quantum) );
printf("Increase the size of the arrays if this shows that\n");
printf("you are not getting at least 20 clock ticks per test.\n");
printf(HLINE);
printf("WARNING -- The above is only a rough guideline.\n");
printf("For best results, please be sure you know the\n");
printf("precision of your system timer.\n");
printf(HLINE);
/* --- MAIN LOOP --- repeat test cases NTIMES times --- */
scalar = 3.0;
for (k=0; k<NTIMES; k++)
{
times[0][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Copy();
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j];
#endif
times[0][k] = mysecond() - times[0][k];
times[1][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Scale(scalar);
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
b[j] = scalar*c[j];
#endif
times[1][k] = mysecond() - times[1][k];
times[2][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Add();
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j]+b[j];
#endif
times[2][k] = mysecond() - times[2][k];
times[3][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Triad(scalar);
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
a[j] = b[j]+scalar*c[j];
#endif
times[3][k] = mysecond() - times[3][k];
}
/* --- SUMMARY --- */
for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
{
for (j=0; j<4; j++)
{
avgtime[j] = avgtime[j] + times[j][k];
mintime[j] = MIN(mintime[j], times[j][k]);
maxtime[j] = MAX(maxtime[j], times[j][k]);
}
}
printf("Function Best Rate MB/s Avg time Min time Max time\n");
for (j=0; j<4; j++) {
avgtime[j] = avgtime[j]/(double)(NTIMES-1);
printf("%s%12.1f %11.6f %11.6f %11.6f\n", label[j],
1.0E-06 * bytes[j]/mintime[j],
avgtime[j],
mintime[j],
maxtime[j]);
}
printf(HLINE);
/* --- Check Results --- */
checkSTREAMresults();
printf(HLINE);
return 0;
}
# define M 20
int
checktick()
{
int i, minDelta, Delta;
double t1, t2, timesfound[M];
/* Collect a sequence of M unique time values from the system. */
for (i = 0; i < M; i++) {
t1 = mysecond();
while( ((t2=mysecond()) - t1) < 1.0E-6 )
;
timesfound[i] = t1 = t2;
}
/*
* Determine the minimum difference between these M values.
* This result will be our estimate (in microseconds) for the
* clock granularity.
*/
minDelta = 1000000;
for (i = 1; i < M; i++) {
Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1]));
minDelta = MIN(minDelta, MAX(Delta,0));
}
return(minDelta);
}
/* A gettimeofday routine to give access to the wall
clock timer on most UNIX-like systems. */
#include <sys/time.h>
double mysecond()
{
struct timeval tp;
struct timezone tzp;
int i;
i = gettimeofday(&tp,&tzp);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
#ifndef abs
#define abs(a) ((a) >= 0 ? (a) : -(a))
#endif
void checkSTREAMresults ()
{
STREAM_TYPE aj,bj,cj,scalar;
STREAM_TYPE aSumErr,bSumErr,cSumErr;
STREAM_TYPE aAvgErr,bAvgErr,cAvgErr;
double epsilon;
ssize_t j;
int k,ierr,err;
/* reproduce initialization */
aj = 1.0;
bj = 2.0;
cj = 0.0;
/* a[] is modified during timing check */
aj = 2.0E0 * aj;
/* now execute timing loop */
scalar = 3.0;
for (k=0; k<NTIMES; k++)
{
cj = aj;
bj = scalar*cj;
cj = aj+bj;
aj = bj+scalar*cj;
}
/* accumulate deltas between observed and expected results */
aSumErr = 0.0;
bSumErr = 0.0;
cSumErr = 0.0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
aSumErr += abs(a[j] - aj);
bSumErr += abs(b[j] - bj);
cSumErr += abs(c[j] - cj);
// if (j == 417) printf("Index 417: c[j]: %f, cj: %f\n",c[j],cj); // MCCALPIN
}
aAvgErr = aSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
bAvgErr = bSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
cAvgErr = cSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
if (sizeof(STREAM_TYPE) == 4) {
epsilon = 1.e-6;
}
else if (sizeof(STREAM_TYPE) == 8) {
epsilon = 1.e-13;
}
else {
printf("WEIRD: sizeof(STREAM_TYPE) = %lu\n",sizeof(STREAM_TYPE));
epsilon = 1.e-6;
}
err = 0;
if (abs(aAvgErr/aj) > epsilon) {
err++;
printf ("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,abs(aAvgErr)/aj);
ierr = 0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
if (abs(a[j]/aj-1.0) > epsilon) {
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
printf(" array a: index: %ld, expected: %e, observed: %e, relative error: %e\n",
j,aj,a[j],abs((aj-a[j])/aAvgErr));
}
#endif
}
}
printf(" For array a[], %d errors were found.\n",ierr);
}
if (abs(bAvgErr/bj) > epsilon) {
err++;
printf ("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,abs(bAvgErr)/bj);
printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
ierr = 0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
if (abs(b[j]/bj-1.0) > epsilon) {
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
printf(" array b: index: %ld, expected: %e, observed: %e, relative error: %e\n",
j,bj,b[j],abs((bj-b[j])/bAvgErr));
}
#endif
}
}
printf(" For array b[], %d errors were found.\n",ierr);
}
if (abs(cAvgErr/cj) > epsilon) {
err++;
printf ("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,abs(cAvgErr)/cj);
printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
ierr = 0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
if (abs(c[j]/cj-1.0) > epsilon) {
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
printf(" array c: index: %ld, expected: %e, observed: %e, relative error: %e\n",
j,cj,c[j],abs((cj-c[j])/cAvgErr));
}
#endif
}
}
printf(" For array c[], %d errors were found.\n",ierr);
}
if (err == 0) {
printf ("Solution Validates: avg error less than %e on all three arrays\n",epsilon);
}
#ifdef VERBOSE
printf ("Results Validation Verbose Results: \n");
printf (" Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj);
printf (" Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]);
printf (" Rel Errors on a, b, c: %e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj));
#endif
}
#ifdef TUNED
/* stubs for "tuned" versions of the kernels */
void tuned_STREAM_Copy()
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j];
}
void tuned_STREAM_Scale(STREAM_TYPE scalar)
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
b[j] = scalar*c[j];
}
void tuned_STREAM_Add()
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j]+b[j];
}
void tuned_STREAM_Triad(STREAM_TYPE scalar)
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
a[j] = b[j]+scalar*c[j];
}
/* end of stubs for the "tuned" versions of the kernels */
#endif
|
nest_lock.c | // RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
#include "callback.h"
#include <omp.h>
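// Descriptive note: the sequence below initializes a nested lock, acquires it
// twice, releases it twice, and destroys it; the FileCheck patterns at the end
// verify the matching OMPT events (init, wait/acquired_first, wait/acquired_next,
// release_prev, release_last, destroy) and that each reported codeptr_ra lines
// up with the address printed by print_fuzzy_address().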
int main()
{
// need to use an OpenMP construct so that OMPT will be initialized
#pragma omp parallel num_threads(1)
print_ids(0);
omp_nest_lock_t nest_lock;
printf("%" PRIu64 ": &nest_lock: %lli\n", ompt_get_thread_data()->value, (ompt_wait_id_t) &nest_lock);
omp_init_nest_lock(&nest_lock);
print_fuzzy_address(1);
omp_set_nest_lock(&nest_lock);
print_fuzzy_address(2);
omp_set_nest_lock(&nest_lock);
print_fuzzy_address(3);
omp_unset_nest_lock(&nest_lock);
print_fuzzy_address(4);
omp_unset_nest_lock(&nest_lock);
print_fuzzy_address(5);
omp_destroy_nest_lock(&nest_lock);
print_fuzzy_address(6);
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_nest_lock'
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_init_nest_lock: wait_id=[[WAIT_ID:[0-9]+]], hint={{[0-9]+}}, impl={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_nest_lock: wait_id=[[WAIT_ID]], hint={{[0-9]+}}, impl={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_acquired_nest_lock_first: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS]]{{[0-f][0-f]}}
// CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_nest_lock: wait_id=[[WAIT_ID]], hint={{[0-9]+}}, impl={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_acquired_nest_lock_next: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS]]
// CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_release_nest_lock_prev: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_release_nest_lock_last: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_destroy_nest_lock: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
return 0;
}
|
14.c | #include<stdio.h>
#include<omp.h>
#include<stdlib.h>
int main()
{
double serial_sum, sum;
int n, i, threadid, tval, NoOfthreads;
printf("Enter number of threads: ");
scanf("%d", &NoOfthreads);
printf("array size: ");
scanf("%d", &n);
if ((NoOfthreads!=1) && (NoOfthreads!=2) && (NoOfthreads!=4) && (NoOfthreads!=8) && (NoOfthreads!= 16))
{
printf("\n Number of threads should be 1,2,4,8 or 16 for the execution of program. \n\n");
exit(-1);
}
double array[n], check[n];
for(i=0; i<n; i++)
{
array[i] = i * 5;
check[i] = array[i];
}
sum = 0.0;
omp_set_num_threads(NoOfthreads);
#pragma omp parallel for
for(i=0; i<n; i++)
{
#pragma omp critical
sum = sum + array[i];
}
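/* Note (illustrative alternative, not used by this program): the idiomatic
 * OpenMP way to form this sum is a reduction clause, which avoids funneling
 * every addition through the critical section:
 *
 *   #pragma omp parallel for reduction(+:sum)
 *   for (i = 0; i < n; i++)
 *       sum = sum + array[i];
 *
 * The exact '==' comparison below works here only because each array[i] is a
 * small integer value (i * 5) that doubles represent exactly, so for modest
 * array sizes the result does not depend on the order of additions. */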
serial_sum = 0.0;
for(i=0; i<n; i++)
serial_sum = serial_sum + check[i];
if (serial_sum == sum)
printf("\n\n\t\t The Serial And Parallel Sums Are Equal\n");
else
{
printf("\n\\nt\t The Serial And Parallel Sums Are Unequal\n");
exit(1);
}
printf("\n\t\t The SumOfElements Of The array Using OpenMP Directives Is %lf\n", sum);
printf("\t\t The SumOfElements Of The array By Serial Calculation Is %lf\n\n", serial_sum);
printf("\n\t\t..........................................................................\n");
return 0;
}
|