source
stringlengths
3
92
c
stringlengths
26
2.25M
eltwise.h
// Copyright 2018 Xiaomi, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef MACE_KERNELS_ELTWISE_H_ #define MACE_KERNELS_ELTWISE_H_ #include <algorithm> #include <functional> #include <memory> #include <utility> #include <vector> #include "mace/core/future.h" #include "mace/core/tensor.h" #ifdef MACE_ENABLE_OPENCL #include "mace/core/runtime/opencl/cl2_header.h" #endif // MACE_ENABLE_OPENCL namespace mace { namespace kernels { enum EltwiseType { SUM = 0, SUB = 1, PROD = 2, DIV = 3, MIN = 4, MAX = 5, NEG = 6, ABS = 7, SQR_DIFF = 8, POW = 9, EQUAL = 10, NONE = 11, }; static bool IsLogicalType(EltwiseType type) { return type == EQUAL; } inline index_t GetIndex(const std::vector<index_t> &shape, const std::vector<index_t> &index) { index_t idx = 0; for (size_t i = 0; i < shape.size(); ++i) { if (shape[i] > 1) { idx = idx * shape[i] + index[i]; } } return idx; } inline void IncreaseIndex(const std::vector<index_t> &shape, std::vector<index_t> *index) { for (index_t i = static_cast<index_t>(shape.size()) - 1; i >= 0; --i) { ++(*index)[i]; if ((*index)[i] >= shape[i]) { (*index)[i] -= shape[i]; } else { break; } } } template <typename T, typename DstType> inline void TensorGeneralBroadcastEltwise( const EltwiseType type, const T *input0, const T *input1, const std::vector<float> &coeff, const bool swapped, const std::vector<index_t> &input0_shape, const std::vector<index_t> &input1_shape, const std::vector<index_t> &output_shape, DstType 
*output) { const index_t output_size = std::accumulate( output_shape.begin(), output_shape.end(), 1, std::multiplies<index_t>()); std::vector<index_t> out_index(output_shape.size(), 0); switch (type) { case SUM: if (coeff.empty()) { for (index_t i = 0; i < output_size; ++i) { const index_t idx0 = GetIndex(input0_shape, out_index); const index_t idx1 = GetIndex(input1_shape, out_index); output[i] = input0[idx0] + input1[idx1]; IncreaseIndex(output_shape, &out_index); } } else { std::vector<float> coeff_copy = coeff; if (swapped) { std::swap(coeff_copy[0], coeff_copy[1]); } for (index_t i = 0; i < output_size; ++i) { const index_t idx0 = GetIndex(input0_shape, out_index); const index_t idx1 = GetIndex(input1_shape, out_index); output[i] = input0[idx0] * coeff_copy[0] + input1[idx1] * coeff_copy[1]; IncreaseIndex(output_shape, &out_index); } } break; case SUB: if (!swapped) { for (index_t i = 0; i < output_size; ++i) { const index_t idx0 = GetIndex(input0_shape, out_index); const index_t idx1 = GetIndex(input1_shape, out_index); output[i] = input0[idx0] - input1[idx1]; IncreaseIndex(output_shape, &out_index); } } else { for (index_t i = 0; i < output_size; ++i) { const index_t idx0 = GetIndex(input0_shape, out_index); const index_t idx1 = GetIndex(input1_shape, out_index); output[i] = input1[idx1] - input0[idx0]; IncreaseIndex(output_shape, &out_index); } } break; case PROD: for (index_t i = 0; i < output_size; ++i) { const index_t idx0 = GetIndex(input0_shape, out_index); const index_t idx1 = GetIndex(input1_shape, out_index); output[i] = input0[idx0] * input1[idx1]; IncreaseIndex(output_shape, &out_index); } break; case DIV: if (!swapped) { for (index_t i = 0; i < output_size; ++i) { const index_t idx0 = GetIndex(input0_shape, out_index); const index_t idx1 = GetIndex(input1_shape, out_index); output[i] = input0[idx0] / input1[idx1]; IncreaseIndex(output_shape, &out_index); } } else { for (index_t i = 0; i < output_size; ++i) { const index_t idx0 = 
GetIndex(input0_shape, out_index); const index_t idx1 = GetIndex(input1_shape, out_index); output[i] = input1[idx1] / input0[idx0]; IncreaseIndex(output_shape, &out_index); } } break; case MIN: for (index_t i = 0; i < output_size; ++i) { const index_t idx0 = GetIndex(input0_shape, out_index); const index_t idx1 = GetIndex(input1_shape, out_index); output[i] = std::min(input1[idx1], input0[idx0]); IncreaseIndex(output_shape, &out_index); } break; case MAX: for (index_t i = 0; i < output_size; ++i) { const index_t idx0 = GetIndex(input0_shape, out_index); const index_t idx1 = GetIndex(input1_shape, out_index); output[i] = std::max(input1[idx1], input0[idx0]); IncreaseIndex(output_shape, &out_index); } break; case SQR_DIFF: for (index_t i = 0; i < output_size; ++i) { const index_t idx0 = GetIndex(input0_shape, out_index); const index_t idx1 = GetIndex(input1_shape, out_index); output[i] = std::pow(input1[idx1] - input0[idx0], 2.f); IncreaseIndex(output_shape, &out_index); } break; case POW: if (!swapped) { for (index_t i = 0; i < output_size; ++i) { const index_t idx0 = GetIndex(input0_shape, out_index); const index_t idx1 = GetIndex(input1_shape, out_index); output[i] = std::pow(input0[idx0], input1[idx1]); IncreaseIndex(output_shape, &out_index); } } else { for (index_t i = 0; i < output_size; ++i) { const index_t idx0 = GetIndex(input0_shape, out_index); const index_t idx1 = GetIndex(input1_shape, out_index); output[i] = std::pow(input1[idx1], input0[idx0]); IncreaseIndex(output_shape, &out_index); } } break; case EQUAL: for (index_t i = 0; i < output_size; ++i) { const index_t idx0 = GetIndex(input0_shape, out_index); const index_t idx1 = GetIndex(input1_shape, out_index); output[i] = input1[idx1] == input0[idx0]; IncreaseIndex(output_shape, &out_index); } break; default: LOG(FATAL) << "Eltwise op not support type " << type; } } template <typename T, typename DstType> inline void TensorBroadcastEltwise(const EltwiseType type, const T *input0, const T *input1, 
const std::vector<float> &coeff, const index_t diff_size, const index_t common_size, const bool swapped, DstType *output) { switch (type) { case SUM: if (coeff.empty()) { #pragma omp parallel for collapse(2) for (index_t d = 0; d < diff_size; ++d) { for (index_t i = 0; i < common_size; ++i) { output[i + d * common_size] = input0[i + d * common_size] + input1[i]; } } } else { std::vector<float> coeff_copy = coeff; if (swapped) { std::swap(coeff_copy[0], coeff_copy[1]); } #pragma omp parallel for collapse(2) for (index_t d = 0; d < diff_size; ++d) { for (index_t i = 0; i < common_size; ++i) { output[i + d * common_size] = input0[i + d * common_size] * coeff_copy[0] + input1[i] * coeff_copy[1]; } } } break; case SUB: if (!swapped) { #pragma omp parallel for collapse(2) for (index_t d = 0; d < diff_size; ++d) { for (index_t i = 0; i < common_size; ++i) { output[i + d * common_size] = input0[i + d * common_size] - input1[i]; } } } else { #pragma omp parallel for collapse(2) for (index_t d = 0; d < diff_size; ++d) { for (index_t i = 0; i < common_size; ++i) { output[i + d * common_size] = input1[i] - input0[i + d * common_size]; } } } break; case PROD: #pragma omp parallel for collapse(2) for (index_t d = 0; d < diff_size; ++d) { for (index_t i = 0; i < common_size; ++i) { output[i + d * common_size] = input0[i + d * common_size] * input1[i]; } } break; case DIV: if (!swapped) { #pragma omp parallel for collapse(2) for (index_t d = 0; d < diff_size; ++d) { for (index_t i = 0; i < common_size; ++i) { output[i + d * common_size] = input0[i + d * common_size] / input1[i]; } } } else { #pragma omp parallel for collapse(2) for (index_t d = 0; d < diff_size; ++d) { for (index_t i = 0; i < common_size; ++i) { output[i + d * common_size] = input1[i] / input0[i + d * common_size]; } } } break; case MIN: #pragma omp parallel for collapse(2) for (index_t d = 0; d < diff_size; ++d) { for (index_t i = 0; i < common_size; ++i) { output[i + d * common_size] = std::min(input0[i + d * 
common_size], input1[i]); } } break; case MAX: #pragma omp parallel for collapse(2) for (index_t d = 0; d < diff_size; ++d) { for (index_t i = 0; i < common_size; ++i) { output[i + d * common_size] = std::max(input0[i + d * common_size], input1[i]); } } break; case SQR_DIFF: #pragma omp parallel for collapse(2) for (index_t d = 0; d < diff_size; ++d) { for (index_t i = 0; i < common_size; ++i) { output[i + d * common_size] = std::pow(input0[i + d * common_size] - input1[i], 2.f); } } break; case POW: if (!swapped) { #pragma omp parallel for collapse(2) for (index_t d = 0; d < diff_size; ++d) { for (index_t i = 0; i < common_size; ++i) { output[i + d * common_size] = std::pow(input0[i + d * common_size], input1[i]); } } } else { #pragma omp parallel for collapse(2) for (index_t d = 0; d < diff_size; ++d) { for (index_t i = 0; i < common_size; ++i) { output[i + d * common_size] = std::pow(input1[i], input0[i + d * common_size]); } } } break; case NEG: #pragma omp parallel for for (index_t i = 0; i < diff_size * common_size; ++i) { output[i] = -input0[i]; } break; case ABS: #pragma omp parallel for for (index_t i = 0; i < diff_size * common_size; ++i) { output[i] = std::fabs(input0[i]); } break; case EQUAL: #pragma omp parallel for collapse(2) for (index_t d = 0; d < diff_size; ++d) { for (index_t i = 0; i < common_size; ++i) { output[i + d * common_size] = input0[i + d * common_size] == input1[i]; } } break; default: LOG(FATAL) << "Eltwise op not support type " << type; } } // Multiplication is costly, so we specialize the following case. 
template <typename T, typename DstType> inline void TensorEltwise(const EltwiseType type, const T *input0, const T *input1, const std::vector<float> &coeff, const index_t size, const bool swapped, DstType *output) { switch (type) { case SUM: if (coeff.empty()) { #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output[i] = input0[i] + input1[i]; } } else { std::vector<float> coeff_copy = coeff; if (swapped) { std::swap(coeff_copy[0], coeff_copy[1]); } #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output[i] = input0[i] * coeff_copy[0] + input1[i] * coeff_copy[1]; } } break; case SUB: if (!swapped) { #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output[i] = input0[i] - input1[i]; } } else { #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output[i] = input1[i] - input0[i]; } } break; case PROD: #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output[i] = input0[i] * input1[i]; } break; case DIV: if (!swapped) { #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output[i] = input0[i] / input1[i]; } } else { #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output[i] = input1[i] / input0[i]; } } break; case MIN: #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output[i] = std::min(input0[i], input1[i]); } break; case MAX: #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output[i] = std::max(input0[i], input1[i]); } break; case SQR_DIFF: #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output[i] = std::pow(input0[i] - input1[i], 2.f); } break; case POW: if (!swapped) { #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output[i] = std::pow(input0[i], input1[i]); } } else { for (index_t i = 0; i < size; ++i) { output[i] = std::pow(input1[i], input0[i]); } } break; case NEG: #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output[i] = -input0[i]; } break; case ABS: #pragma omp parallel for for (index_t i = 0; i < size; 
++i) { output[i] = std::fabs(input0[i]); } break; case EQUAL: #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output[i] = input0[i] == input1[i]; } break; default: LOG(FATAL) << "Eltwise op not support type " << type; } } // Multiplication is costly, so we specialize the following case. template <typename T, typename DstType> inline void TensorScalarEltwise(const EltwiseType type, const T *input0, const T input1, const std::vector<float> &coeff, const index_t size, const bool swapped, DstType *output) { switch (type) { case SUM: if (coeff.empty()) { #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output[i] = input0[i] + input1; } } else { std::vector<float> coeff_copy = coeff; if (swapped) { std::swap(coeff_copy[0], coeff_copy[1]); } #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output[i] = input0[i] * coeff_copy[0] + input1 * coeff_copy[1]; } } break; case SUB: if (!swapped) { #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output[i] = input0[i] - input1; } } else { #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output[i] = input1 - input0[i]; } } break; case PROD: #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output[i] = input0[i] * input1; } break; case DIV: if (!swapped) { #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output[i] = input0[i] / input1; } } else { #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output[i] = input1 / input0[i]; } } break; case MIN: #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output[i] = std::min(input0[i], input1); } break; case MAX: #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output[i] = std::max(input0[i], input1); } break; case SQR_DIFF: #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output[i] = std::pow(input0[i] - input1, 2.f); } break; case POW: if (!swapped) { #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output[i] = std::pow(input0[i], 
input1); } } else { for (index_t i = 0; i < size; ++i) { output[i] = std::pow(input1, input0[i]); } } break; case NEG: #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output[i] = -input0[i]; } break; case ABS: #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output[i] = std::fabs(input0[i]); } break; case EQUAL: #pragma omp parallel for for (index_t i = 0; i < size; ++i) { output[i] = input0[i] == input1; } break; default: LOG(FATAL) << "Eltwise op not support type " << type; } } template <typename T, typename DstType> inline void TensorEltwisePerChannel(const EltwiseType type, const T *input0, const T *input1, const std::vector<float> &coeff, const index_t batch0, const index_t batch1, const index_t channel, const index_t image_size, const bool swapped, DstType *output) { switch (type) { case SUM: if (coeff.empty()) { #pragma omp parallel for collapse(2) for (index_t b = 0; b < batch0; ++b) { for (index_t c = 0; c < channel; ++c) { const T *in0_ptr = input0 + ((b * channel) + c) * image_size; const T *in1_ptr = input1 + (batch1 > 1 ? b * channel : 0); DstType *out_ptr = output + ((b * channel) + c) * image_size; for (index_t i = 0; i < image_size; ++i) { out_ptr[i] = in0_ptr[i] + in1_ptr[c]; } } } } else { std::vector<float> coeff_copy = coeff; if (swapped) { std::swap(coeff_copy[0], coeff_copy[1]); } #pragma omp parallel for collapse(2) for (index_t b = 0; b < batch0; ++b) { for (index_t c = 0; c < channel; ++c) { const T *in0_ptr = input0 + ((b * channel) + c) * image_size; const T *in1_ptr = input1 + (batch1 > 1 ? 
b * channel : 0); DstType *out_ptr = output + ((b * channel) + c) * image_size; for (index_t i = 0; i < image_size; ++i) { out_ptr[i] = in0_ptr[i] * coeff_copy[0] + in1_ptr[c] * coeff_copy[1]; } } } } break; case SUB: if (!swapped) { #pragma omp parallel for collapse(2) for (index_t b = 0; b < batch0; ++b) { for (index_t c = 0; c < channel; ++c) { const T *in0_ptr = input0 + ((b * channel) + c) * image_size; const T *in1_ptr = input1 + (batch1 > 1 ? b * channel : 0); DstType *out_ptr = output + ((b * channel) + c) * image_size; for (index_t i = 0; i < image_size; ++i) { out_ptr[i] = in0_ptr[i] - in1_ptr[c]; } } } } else { #pragma omp parallel for collapse(2) for (index_t b = 0; b < batch0; ++b) { for (index_t c = 0; c < channel; ++c) { const T *in0_ptr = input0 + ((b * channel) + c) * image_size; const T *in1_ptr = input1 + (batch1 > 1 ? b * channel : 0); DstType *out_ptr = output + ((b * channel) + c) * image_size; for (index_t i = 0; i < image_size; ++i) { out_ptr[i] = in1_ptr[c] - in0_ptr[i]; } } } } break; case PROD: #pragma omp parallel for collapse(2) for (index_t b = 0; b < batch0; ++b) { for (index_t c = 0; c < channel; ++c) { const T *in0_ptr = input0 + ((b * channel) + c) * image_size; const T *in1_ptr = input1 + (batch1 > 1 ? b * channel : 0); DstType *out_ptr = output + ((b * channel) + c) * image_size; for (index_t i = 0; i < image_size; ++i) { out_ptr[i] = in0_ptr[i] * in1_ptr[c]; } } } break; case DIV: if (!swapped) { #pragma omp parallel for collapse(2) for (index_t b = 0; b < batch0; ++b) { for (index_t c = 0; c < channel; ++c) { const T *in0_ptr = input0 + ((b * channel) + c) * image_size; const T *in1_ptr = input1 + (batch1 > 1 ? 
b * channel : 0); DstType *out_ptr = output + ((b * channel) + c) * image_size; for (index_t i = 0; i < image_size; ++i) { out_ptr[i] = in0_ptr[i] / in1_ptr[c]; } } } } else { #pragma omp parallel for collapse(2) for (index_t b = 0; b < batch0; ++b) { for (index_t c = 0; c < channel; ++c) { const T *in0_ptr = input0 + ((b * channel) + c) * image_size; const T *in1_ptr = input1 + (batch1 > 1 ? b * channel : 0); DstType *out_ptr = output + ((b * channel) + c) * image_size; for (index_t i = 0; i < image_size; ++i) { out_ptr[i] = in1_ptr[c] / in0_ptr[i]; } } } } break; case MIN: #pragma omp parallel for collapse(2) for (index_t b = 0; b < batch0; ++b) { for (index_t c = 0; c < channel; ++c) { const T *in0_ptr = input0 + ((b * channel) + c) * image_size; const T *in1_ptr = input1 + (batch1 > 1 ? b * channel : 0); DstType *out_ptr = output + ((b * channel) + c) * image_size; for (index_t i = 0; i < image_size; ++i) { out_ptr[i] = std::min(in0_ptr[i], in1_ptr[c]); } } } break; case MAX: #pragma omp parallel for collapse(2) for (index_t b = 0; b < batch0; ++b) { for (index_t c = 0; c < channel; ++c) { const T *in0_ptr = input0 + ((b * channel) + c) * image_size; const T *in1_ptr = input1 + (batch1 > 1 ? b * channel : 0); DstType *out_ptr = output + ((b * channel) + c) * image_size; for (index_t i = 0; i < image_size; ++i) { out_ptr[i] = std::max(in0_ptr[i], in1_ptr[c]); } } } break; case SQR_DIFF: #pragma omp parallel for collapse(2) for (index_t b = 0; b < batch0; ++b) { for (index_t c = 0; c < channel; ++c) { const T *in0_ptr = input0 + ((b * channel) + c) * image_size; const T *in1_ptr = input1 + (batch1 > 1 ? 
b * channel : 0); DstType *out_ptr = output + ((b * channel) + c) * image_size; for (index_t i = 0; i < image_size; ++i) { out_ptr[i] = std::pow(in0_ptr[i] - in1_ptr[c], 2.f); } } } break; case POW: if (!swapped) { #pragma omp parallel for collapse(2) for (index_t b = 0; b < batch0; ++b) { for (index_t c = 0; c < channel; ++c) { const T *in0_ptr = input0 + ((b * channel) + c) * image_size; const T *in1_ptr = input1 + (batch1 > 1 ? b * channel : 0); DstType *out_ptr = output + ((b * channel) + c) * image_size; for (index_t i = 0; i < image_size; ++i) { out_ptr[i] = std::pow(in0_ptr[i], in1_ptr[c]); } } } } else { #pragma omp parallel for collapse(2) for (index_t b = 0; b < batch0; ++b) { for (index_t c = 0; c < channel; ++c) { const T *in0_ptr = input0 + ((b * channel) + c) * image_size; const T *in1_ptr = input1 + (batch1 > 1 ? b * channel : 0); DstType *out_ptr = output + ((b * channel) + c) * image_size; for (index_t i = 0; i < image_size; ++i) { out_ptr[i] = std::pow(in1_ptr[c], in0_ptr[i]); } } } } break; case NEG: #pragma omp parallel for for (index_t i = 0; i < batch0 * channel * image_size; ++i) { output[i] = -input0[i]; } break; case ABS: #pragma omp parallel for for (index_t i = 0; i < batch0 * channel * image_size; ++i) { output[i] = std::fabs(input0[i]); } break; case EQUAL: #pragma omp parallel for collapse(2) for (index_t b = 0; b < batch0; ++b) { for (index_t c = 0; c < channel; ++c) { const T *in0_ptr = input0 + ((b * channel) + c) * image_size; const T *in1_ptr = input1 + (batch1 > 1 ? 
b * channel : 0); DstType *out_ptr = output + ((b * channel) + c) * image_size; for (index_t i = 0; i < image_size; ++i) { out_ptr[i] = in0_ptr[i] == in1_ptr[c]; } } } break; default: LOG(FATAL) << "Eltwise op not support type " << type; } } struct EltwiseFunctorBase { EltwiseFunctorBase(const EltwiseType type, const std::vector<float> &coeff, const float scalar_input, const int32_t scalar_input_index, const DataFormat data_format) : type_(type), coeff_(coeff), scalar_input_(scalar_input), scalar_input_index_(scalar_input_index), data_format_(data_format) {} EltwiseType type_; std::vector<float> coeff_; float scalar_input_; int32_t scalar_input_index_; DataFormat data_format_; }; template <DeviceType D, typename T> struct EltwiseFunctor : EltwiseFunctorBase { EltwiseFunctor(const EltwiseType type, const std::vector<float> &coeff, const float scalar_input, // float as it comes from arg const int32_t scalar_input_index, const DataFormat data_format) : EltwiseFunctorBase(type, coeff, scalar_input, scalar_input_index, data_format) {} template <typename DstType> MaceStatus DoEltwise(const Tensor *input0, const Tensor *input1, Tensor *output) { bool swapped = false; if (input0->size() < input1->size()) { std::swap(input0, input1); swapped = true; } if (scalar_input_index_ == 0) { swapped = !swapped; } // check if we can broadcast tensor uint32_t rank_diff = static_cast<uint32_t>(input0->dim_size() - input1->dim_size()); if (data_format_ == NCHW) { MACE_CHECK( (input0->dim_size() == 4) && ((input1->dim_size() == 0) || (input1->dim_size() == 4 && input1->dim(1) == input0->dim(1) && (input1->dim(0) == input0->dim(0) || input1->dim(0) == 1)) || (input1->dim_size() == 1 && input1->dim(0) == input0->dim(1))), "only support broadcast channel dimension"); } else { for (uint32_t i = 0; i < input1->dim_size(); ++i) { MACE_CHECK(input0->dim(rank_diff + i) == 1 || input1->dim(i) == 1 || input0->dim(rank_diff + i) == input1->dim(i), "Element-Wise op only support tail dimensions 
broadcast"); } } Tensor::MappingGuard input0_guard(input0); Tensor::MappingGuard input1_guard(input1); const T *input0_ptr = input0->data<T>(); const T *input1_ptr = input1->data<T>(); if (data_format_ == NCHW && input1->dim_size() > 0 && input1->size() < input0->size()) { MACE_RETURN_IF_ERROR(output->ResizeLike(input0)); Tensor::MappingGuard output_guard(output); DstType *output_ptr = output->mutable_data<DstType>(); TensorEltwisePerChannel( type_, input0_ptr, input1_ptr, coeff_, input0->dim(0), input1->dim_size() == 1 ? 1 : input1->dim(0), input0->dim(1), input0->dim(2) * input0->dim(3), swapped, output_ptr); } else { const std::vector<index_t> &input0_shape = input0->shape(); std::vector<index_t> input1_shape(rank_diff, 1); input1_shape.insert(input1_shape.end(), input1->shape().begin(), input1->shape().end()); std::vector<index_t> output_shape(input0->dim_size(), 0); for (unsigned int i = 0; i < input0_shape.size(); ++i) { output_shape[i] = std::max(input0_shape[i], input1_shape[i]); } MACE_RETURN_IF_ERROR(output->Resize(output_shape)); Tensor::MappingGuard output_guard(output); DstType *output_ptr = output->mutable_data<DstType>(); bool need_general_broadcast = false; for (uint32_t i = 0; i < input1->dim_size(); ++i) { if ((input0->dim(rank_diff + i) == 1 && input1->dim(i) > 1) || (input0->dim(rank_diff + i) > 1 && input1->dim(i) == 1)) { need_general_broadcast = true; break; } } if (need_general_broadcast) { TensorGeneralBroadcastEltwise(type_, input0_ptr, input1_ptr, coeff_, swapped, input0_shape, input1_shape, output_shape, output_ptr); } else if (input1->size() == input0->size()) { TensorEltwise(type_, input0_ptr, input1_ptr, coeff_, input0->size(), swapped, output_ptr); } else if (input1->size() < input0->size()) { if (input1->size() > 1) { index_t common_size = input1->size(); index_t diff_size = input0->size() / common_size; TensorBroadcastEltwise(type_, input0_ptr, input1_ptr, coeff_, diff_size, common_size, swapped, output_ptr); } else { 
TensorScalarEltwise(type_, input0_ptr, input1_ptr[0], coeff_, input0->size(), swapped, output_ptr); } } } return MACE_SUCCESS; } MaceStatus operator()(const Tensor *input0, const Tensor *input1, Tensor *output, StatsFuture *future) { MACE_UNUSED(future); if (input1 == nullptr) { scalar_tensor_.Resize({}); Tensor::MappingGuard guard(&scalar_tensor_); auto scalar_data = scalar_tensor_.mutable_data<T>(); scalar_data[0] = static_cast<T>(scalar_input_); input1 = &scalar_tensor_; } if (IsLogicalType(type_)) { // as we do not have bool-type tensor, we use int type return DoEltwise<int32_t>(input0, input1, output); } else { return DoEltwise<T>(input0, input1, output); } } Tensor scalar_tensor_; }; #ifdef MACE_ENABLE_OPENCL template <typename T> struct EltwiseFunctor<DeviceType::GPU, T> : EltwiseFunctorBase { EltwiseFunctor(const EltwiseType type, const std::vector<float> &coeff, const float scalar_input, const int32_t scalar_input_index, const DataFormat data_format) : EltwiseFunctorBase(type, coeff, scalar_input, scalar_input_index, data_format) {} MaceStatus operator()(const Tensor *input0, const Tensor *input1, Tensor *output, StatsFuture *future); cl::Kernel kernel_; uint32_t kwg_size_; std::unique_ptr<BufferBase> kernel_error_; std::vector<index_t> input_shape_; }; #endif // MACE_ENABLE_OPENCL } // namespace kernels } // namespace mace #endif // MACE_KERNELS_ELTWISE_H_
vednnLinearBackwardData.c
#include <stdint.h> #include "vednnLinearBackwardData.h" #ifdef VEDNN_USE_OPENMP #include <omp.h> extern int __vednn_omp_num_threads ; #endif static inline vednnError_t vednnLinearBackwardData_wrapper( vednnLinearBackwardData_t pFunc, const uint64_t inDim, const uint64_t outDim, const uint64_t nBatch, const void *pDataGradOut, const void *pDataWeight, void *pDataGradIn ) { if(nBatch == 1) { return pFunc(inDim, outDim, nBatch, pDataGradOut, pDataWeight, pDataGradIn); } else { vednnError_t rc = VEDNN_SUCCESS ; #pragma omp parallel reduction(|:rc) { uint64_t nthreads = omp_get_num_threads() ; uint64_t threadid = omp_get_thread_num() ; uint64_t nBatchEach = nBatch / nthreads ; uint64_t remain = nBatch % nthreads ; uint64_t batchBegin = nBatchEach * threadid + ( threadid < remain ? threadid : remain ) ; uint64_t myBatch = nBatchEach + ( threadid < remain ? 1 : 0 ) ; if( myBatch == 0 ) { rc |= VEDNN_SUCCESS ; } else { float* _pDataGradOut = ((float *)pDataGradOut) + batchBegin * outDim ; float* _pDataGradIn = ((float *)pDataGradIn) + batchBegin * inDim ; rc |= pFunc(inDim, outDim, myBatch, _pDataGradOut, pDataWeight, _pDataGradIn); } } return rc; } } /* ----------------------------------------------------------------------- */ vednnError_t vednnLinearBackwardData( const uint64_t inDim, const uint64_t outDim, const uint64_t nBatch, const void *pDataGradOut, const void *pDataWeight, void *pDataGradIn ) { // [todo] add variations if( outDim<=128 && inDim >=256 ) { if( ((outDim&0x1))==0 && ((((uint64_t)pDataWeight)&0x7)==0) ) { return vednnLinearBackwardData_wrapper( vednnLinearBackwardData_o2XU128_waligned, inDim, outDim, nBatch, pDataGradOut, pDataWeight, pDataGradIn ) ; } else { return vednnLinearBackwardData_wrapper( vednnLinearBackwardData_oU128, inDim, outDim, nBatch, pDataGradOut, pDataWeight, pDataGradIn ) ; } } else if( outDim <= 256 ) { return vednnLinearBackwardData_wrapper( vednnLinearBackwardData_oU256, inDim, outDim, nBatch, pDataGradOut, pDataWeight, 
pDataGradIn ) ; } else if( ((outDim & 0x1) == 0) && ((((uint64_t)pDataWeight)&0x7)==0) && ((((uint64_t)pDataGradOut)&0x7)==0) ) { return vednnLinearBackwardData_wrapper( vednnLinearBackwardData_o2X_woaligned, inDim, outDim, nBatch, pDataGradOut, pDataWeight, pDataGradIn ) ; } else { return vednnLinearBackwardData_wrapper( vednnLinearBackwardData_default, inDim, outDim, nBatch, pDataGradOut, pDataWeight, pDataGradIn ) ; } }
mlp_mpi_example_f32.c
/****************************************************************************** * Copyright (c) Intel Corporation - All rights reserved. * * This file is part of the LIBXSMM library. * * * * For information on the license, see the LICENSE file. * * Further information: https://github.com/hfp/libxsmm/ * * SPDX-License-Identifier: BSD-3-Clause * ******************************************************************************/ /* Evangelos Georganas (Intel Corp.) ******************************************************************************/ #include <libxsmm_dnn.h> #include <dnn_common.h> #include <mpi.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include <math.h> #if defined(_OPENMP) # include <omp.h> #endif #define DETAILED_PROFILE #define N_PROF_THREADS 128 LIBXSMM_INLINE void my_init_buf_mlp(float* buf, size_t size, int initPos, int initOne) { int i; zero_buf(buf, size); for (i = 0; i < (int)size; ++i) { buf[i] = (float)((initOne != 0) ? 1.0 : ((initPos != 0) ? libxsmm_rng_f64() : (0.05 - libxsmm_rng_f64()/10.0))); } } int main(int argc, char* argv[]) { /* Initialize the MPI environment */ int provided; MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided); if(provided < MPI_THREAD_MULTIPLE) { printf("The threading support level is lesser than that demanded.\n"); MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE); } float **act_libxsmm, **ref_act_libxsmm, **fil_libxsmm, **delact_libxsmm, **ref_delact_libxsmm, **delfil_libxsmm; float **bias_libxsmm, **delbias_libxsmm; unsigned char **relumask_libxsmm; int *label_libxsmm; void* scratch = NULL; size_t scratch_size = 0; libxsmm_matdiff_info norms; libxsmm_matdiff_clear(&norms); MPI_Request request[2]; /* some parameters we can overwrite via cli, default is some inner layer of overfeat */ int n_procs = 4; int iters = 10; /* repetitions of benchmark */ int MB = 32; /* mini-batch size, "N" */ int global_MB = 32; int fuse_type = 0; /* 0: nothing fused, 1: relu fused, 2: elementwise fused, 3: relu and 
elementwise fused */ char type = 'A'; /* 'A': ALL, 'F': FP, 'B': BP */ int bn = 64; int bk = 64; int bc = 64; int *C; /* number of input feature maps, "C" */ int num_layers = 0; const char *const env_check = getenv("CHECK"); const double check = LIBXSMM_ABS(0 == env_check ? 1 : atof(env_check)); #if defined(_OPENMP) int nThreads = omp_get_max_threads(); /* number of threads */ #else int nThreads = 1; /* number of threads */ #endif unsigned long long l_start, l_end; double l_total = 0.0; double l_fwd_fc[N_PROF_THREADS]; double l_bwdupd_fc[N_PROF_THREADS]; double l_allreduce[N_PROF_THREADS]; double l_optimizer[N_PROF_THREADS]; double l_fwd_loss[N_PROF_THREADS]; double l_bwd_loss[N_PROF_THREADS]; double first_bwdupd_compute = 0.0; double gflop = 0.0; int i, j, rank; double fil_size = 0.0; double act_size = 0.0; float lr = 0.2f; float loss_weight = 0.1f; libxsmm_datatype in_dt, out_dt, comp_dt; libxsmm_dnn_fc_eltw_fuse my_fuse; libxsmm_dnn_fc_fwd_config* libxsmm_dnn_fc_fwd; libxsmm_dnn_fc_bwd_config* libxsmm_dnn_fc_bwd; libxsmm_dnn_opt_config* libxsmm_dnn_opt; libxsmm_dnn_smax_fwd_config libxsmm_dnn_smax_fwd; libxsmm_dnn_smax_bwd_config libxsmm_dnn_smax_bwd; for (i = 0; i < N_PROF_THREADS; i++) { l_fwd_fc[i] = 0.0; l_bwdupd_fc[i] = 0.0; l_allreduce[i] = 0.0; l_optimizer[i] = 0.0; l_fwd_loss[i] = 0.0; l_bwd_loss[i] = 0.0; } if (argc > 1 && !strncmp(argv[1], "-h", 3)) { printf("Usage: %s iters MB fuse_type type bn bk bc C1 C2 ... 
CN\n", argv[0]); return 0; } libxsmm_rng_set_seed(1); act_libxsmm = NULL; fil_libxsmm = NULL; delact_libxsmm = NULL; delfil_libxsmm = NULL; bias_libxsmm = NULL; delbias_libxsmm = NULL; relumask_libxsmm = NULL; label_libxsmm = NULL; /* reading new values from cli */ i = 1; num_layers = argc - 9; if (argc > i) iters = atoi(argv[i++]); if (argc > i) global_MB = atoi(argv[i++]); if (argc > i) fuse_type = atoi(argv[i++]); if (argc > i) type = *(argv[i++]); if (argc > i) bn = atoi(argv[i++]); if (argc > i) bk = atoi(argv[i++]); if (argc > i) bc = atoi(argv[i++]); /* Get the rank of the process */ MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &n_procs); MB = global_MB / n_procs; /* allocate the number of channles buffer */ if ( num_layers < 1 ) { printf("Usage: %s iters MB fuse_type type bn bk bc C1 C2 ... CN\n", argv[0]); return 0; } C = (int*)malloc((num_layers+2)*sizeof(int)); for (j = 0 ; i < argc; ++i, ++j ) { C[j] = atoi(argv[i]); } /* handle softmax config */ C[num_layers+1] = C[num_layers]; if (type != 'A' && type != 'F' && type != 'B') { printf("type needs to be 'A' (All), 'F' (FP only), 'B' (BP only)\n"); return -1; } if ( (fuse_type < 0) || (fuse_type > 5) ) { printf("fuse type needs to be 0 (None), 1 (Bias), 2 (ReLU), 3 (Sigmoid), 4 (Bias+ReLU), 5 (Bias+Sigmoid)\n"); return -1; } #if defined(__SSE3__) _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST); #endif in_dt = LIBXSMM_DATATYPE_F32; out_dt = LIBXSMM_DATATYPE_F32; comp_dt = LIBXSMM_DATATYPE_F32; /* print some summary */ if (rank == 0 ) { printf("##########################################\n"); printf("# Setting Up (Common) #\n"); printf("##########################################\n"); printf("PARAMS: N:%d\n", global_MB); printf("PARAMS: Layers: %d\n", num_layers); printf("PARAMS: ITERS:%d", iters); if (LIBXSMM_FEQ(0, check)) printf(" Threads:%d\n", nThreads); else printf("\n"); for (i = 0; i < 
num_layers; ++i ) { if (i == 0) { act_size += (double)(global_MB*C[i]*sizeof(float))/(1024.0*1024.0); printf("SIZE Activations %i (%dx%d): %10.2f MiB\n", i, global_MB, C[i], (double)(global_MB*C[i]*sizeof(float))/(1024.0*1024.0) ); } act_size += (double)(global_MB*C[i+1]*sizeof(float))/(1024.0*1024.0); fil_size += (double)(C[i]*C[i+1]*sizeof(float))/(1024.0*1024.0); printf("SIZE Filter %i (%dx%d): %10.2f MiB\n", i, C[i], C[i+1], (double)(C[i]*C[i+1]*sizeof(float))/(1024.0*1024.0) ); printf("SIZE Activations %i (%dx%d): %10.2f MiB\n", i+1, global_MB, C[i+1], (double)(global_MB*C[i+1]*sizeof(float))/(1024.0*1024.0) ); } act_size += (double)(global_MB*C[num_layers+1]*sizeof(float))/(1024.0*1024.0); printf("SIZE Activations softmax (%dx%d): %10.2f MiB\n", global_MB, C[num_layers+1], (double)(global_MB*C[num_layers+1]*sizeof(float))/(1024.0*1024.0) ); printf("\nTOTAL SIZE Activations: %10.2f MiB\n", act_size ); printf("TOTAL SIZE Filter: %10.2f MiB\n", fil_size ); printf("TOTAL SIZE delActivations: %10.2f MiB\n", act_size ); printf("TOTAL SIZE delFilter: %10.2f MiB\n", fil_size ); printf("TOTAL SIZE MLP: %10.2f MiB\n", (2.0*fil_size) + (2.0*act_size) ); } /* allocate data */ /* +2 because of the softwax layer */ act_libxsmm = (float**)malloc( (num_layers+2)*sizeof(float*) ); ref_act_libxsmm = (float**)malloc( (num_layers+2)*sizeof(float*) ); delact_libxsmm = (float**)malloc( (num_layers+1)*sizeof(float*) ); ref_delact_libxsmm = (float**)malloc( (num_layers+1)*sizeof(float*) ); for ( i = 0 ; i < num_layers+2; ++i ) { act_libxsmm[i] = (float*)libxsmm_aligned_malloc( MB*C[i]*sizeof(float), 2097152); /* softmax has no incoming gradients */ if ( i < num_layers+1 ) { delact_libxsmm[i] = (float*)libxsmm_aligned_malloc( MB*C[i]*sizeof(float), 2097152); } } fil_libxsmm = (float**)malloc( num_layers*sizeof(float*) ); delfil_libxsmm = (float**)malloc( num_layers*sizeof(float*) ); for ( i = 0 ; i < num_layers; ++i ) { fil_libxsmm[i] = (float*)libxsmm_aligned_malloc( 
C[i]*C[i+1]*sizeof(float), 2097152); delfil_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(float), 2097152); } bias_libxsmm = (float**)malloc( num_layers*sizeof(float*) ); delbias_libxsmm = (float**)malloc( num_layers*sizeof(float*) ); for ( i = 0 ; i < num_layers; ++i ) { bias_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i+1]*sizeof(float), 2097152); delbias_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i+1]*sizeof(float), 2097152); } relumask_libxsmm = (unsigned char**)malloc( num_layers*sizeof(unsigned char*) ); for ( i = 0 ; i < num_layers; ++i ) { relumask_libxsmm[i] = (unsigned char*)libxsmm_aligned_malloc( MB*C[i+1]*sizeof(unsigned char), 2097152); } label_libxsmm = (int*)libxsmm_aligned_malloc( MB*sizeof(int), 2097152); /* init data on every node for numa awarness */ for ( i = 0 ; i < num_layers+2; ++i ) { my_init_buf_mlp( act_libxsmm[i], MB*C[i], 0, 0 ); } for ( i = 0 ; i < num_layers; ++i ) { my_init_buf_mlp( fil_libxsmm[i], C[i]*C[i+1], 0, 0 ); } for ( i = 0 ; i < num_layers; ++i ) { my_init_buf_mlp( bias_libxsmm[i], C[i+1], 0, 0 ); } for ( i = 0 ; i < num_layers+1; ++i ) { my_init_buf_mlp( delact_libxsmm[i], MB*C[i], 0, 0 ); } /* Serial initialization of data on proc 0 */ if (rank == 0) { for ( i = 0 ; i < num_layers+2; ++i ) { ref_act_libxsmm[i] = (float*)libxsmm_aligned_malloc( global_MB*C[i]*sizeof(float), 2097152); /* softmax has no incoming gradients */ if ( i < num_layers+1 ) { ref_delact_libxsmm[i] = (float*)libxsmm_aligned_malloc( global_MB*C[i]*sizeof(float), 2097152); } } /* init data */ for ( i = 0 ; i < num_layers+2; ++i ) { my_init_buf_mlp( ref_act_libxsmm[i], global_MB*C[i], 0, 0 ); } for ( i = 0 ; i < num_layers; ++i ) { my_init_buf_mlp( fil_libxsmm[i], C[i]*C[i+1], 0, 0 ); } for ( i = 0 ; i < num_layers; ++i ) { my_init_buf_mlp( bias_libxsmm[i], C[i+1], 0, 0 ); } for ( i = 0 ; i < num_layers+1; ++i ) { my_init_buf_mlp( ref_delact_libxsmm[i], global_MB*C[i], 0, 0 ); } } /* Scatter the activations to all processes */ 
for ( i = 0 ; i < num_layers+2; ++i ) { MPI_Scatter(ref_act_libxsmm[i], MB * C[i], MPI_FLOAT, act_libxsmm[i], MB * C[i], MPI_FLOAT, 0, MPI_COMM_WORLD); } /* Scatter the del_activations to all processes */ for ( i = 0 ; i < num_layers+1; ++i ) { MPI_Scatter(ref_delact_libxsmm[i], MB * C[i], MPI_FLOAT, delact_libxsmm[i], MB * C[i], MPI_FLOAT, 0, MPI_COMM_WORLD); } /* Now broadcast weights tensors */ for ( i = 0 ; i < num_layers; ++i ) { MPI_Bcast(fil_libxsmm[i], C[i]*C[i+1], MPI_FLOAT, 0, MPI_COMM_WORLD); } /* Now broadcast bias tensors */ for ( i = 0 ; i < num_layers; ++i ) { MPI_Bcast(bias_libxsmm[i], C[i], MPI_FLOAT, 0, MPI_COMM_WORLD); } if (rank == 0) { printf("\n"); printf("##########################################\n"); printf("# Setting Up (custom-Storage) #\n"); printf("##########################################\n"); } if ( fuse_type == 0 ) { my_fuse = LIBXSMM_DNN_FC_ELTW_FUSE_NONE; } else if ( fuse_type == 1 ) { my_fuse = LIBXSMM_DNN_FC_ELTW_FUSE_BIAS; } else if ( fuse_type == 2 ) { my_fuse = LIBXSMM_DNN_FC_ELTW_FUSE_RELU_WITH_MASK; } else if ( fuse_type == 3 ) { my_fuse = LIBXSMM_DNN_FC_ELTW_FUSE_BIAS_RELU_WITH_MASK; } else { my_fuse = LIBXSMM_DNN_FC_ELTW_FUSE_NONE; } /* allocating handles */ libxsmm_dnn_fc_fwd = (libxsmm_dnn_fc_fwd_config*) malloc( num_layers*sizeof(libxsmm_dnn_fc_fwd_config) ); libxsmm_dnn_fc_bwd = (libxsmm_dnn_fc_bwd_config*) malloc( num_layers*sizeof(libxsmm_dnn_fc_bwd_config) ); libxsmm_dnn_opt = (libxsmm_dnn_opt_config*) malloc( num_layers*sizeof(libxsmm_dnn_opt_config) ); /* setting up handles + scratch */ for ( i = 0; i < num_layers; ++i ) { libxsmm_dnn_fc_fwd[i] = setup_libxsmm_dnn_fc_fwd(MB, C[i], C[i+1], (MB % bn == 0) ? bn : MB, (C[i ] % bc == 0) ? bc : C[i ], (C[i+1] % bk == 0) ? bk : C[i+1], nThreads, my_fuse, in_dt, out_dt, comp_dt ); libxsmm_dnn_fc_bwd[i] = setup_libxsmm_dnn_fc_bwd(MB, C[i], C[i+1], (MB % bn == 0) ? bn : MB, (C[i ] % bc == 0) ? bc : C[i ], (C[i+1] % bk == 0) ? 
bk : C[i+1], nThreads, my_fuse, in_dt, out_dt, comp_dt ); libxsmm_dnn_opt[i] = setup_libxsmm_dnn_opt( C[i], C[i+1], (C[i ] % bc == 0) ? bc : C[i ], (C[i+1] % bk == 0) ? bk : C[i+1], nThreads, lr, in_dt, out_dt, comp_dt ); /* let's allocate and bind scratch */ if ( libxsmm_dnn_fc_fwd[i].scratch_size > 0 || libxsmm_dnn_fc_bwd[i].scratch_size > 0 || libxsmm_dnn_opt[i].scratch_size > 0 ) { size_t alloc_size = LIBXSMM_MAX( LIBXSMM_MAX( libxsmm_dnn_fc_fwd[i].scratch_size, libxsmm_dnn_fc_bwd[i].scratch_size), libxsmm_dnn_opt[i].scratch_size ); if ( alloc_size > scratch_size ) { if ( scratch != NULL ) libxsmm_free( scratch ); scratch_size = alloc_size; scratch = libxsmm_aligned_malloc( scratch_size, 2097152 ); my_init_buf_mlp( (float*)(scratch), (scratch_size)/4, 0, 0 ); } } } /* softmax+loss is treated as N+! layer */ libxsmm_dnn_smax_fwd = setup_libxsmm_dnn_smax_fwd( MB, C[num_layers+1], (MB % bn == 0) ? bn : MB, (C[num_layers+1] % bk == 0) ? bk : C[num_layers+1], nThreads, in_dt, out_dt, comp_dt ); libxsmm_dnn_smax_bwd = setup_libxsmm_dnn_smax_bwd( MB, C[num_layers+1], (MB % bn == 0) ? bn : MB, (C[num_layers+1] % bk == 0) ? 
bk : C[num_layers+1], nThreads, loss_weight, in_dt, out_dt, comp_dt ); if ( libxsmm_dnn_smax_fwd.scratch_size > 0 || libxsmm_dnn_smax_bwd.scratch_size > 0 ) { size_t alloc_size = LIBXSMM_MAX( libxsmm_dnn_smax_fwd.scratch_size, libxsmm_dnn_smax_bwd.scratch_size ); if ( alloc_size > scratch_size ) { if ( scratch != NULL ) libxsmm_free( scratch ); scratch_size = alloc_size; scratch = libxsmm_aligned_malloc( scratch_size, 2097152 ); my_init_buf_mlp( (float*)(scratch), (scratch_size)/4, 0, 0 ); } } if (type == 'F') { if (rank == 0) { printf("##########################################\n"); printf("# Performance - FWD (custom-Storage) #\n"); printf("##########################################\n"); } MPI_Barrier(MPI_COMM_WORLD); l_start = libxsmm_timer_tick(); #if defined(_OPENMP) # pragma omp parallel private(i,j) #endif { #if defined(_OPENMP) const int tid = omp_get_thread_num(); #else const int tid = 0; #endif for (j = 0; j < iters; ++j) { for ( i = 0; i < num_layers; ++i) { libxsmm_dnn_fc_fwd_exec_f32( libxsmm_dnn_fc_fwd[i], fil_libxsmm[i], act_libxsmm[i], act_libxsmm[i+1], bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch ); } libxsmm_dnn_smax_fwd_exec_f32( libxsmm_dnn_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm, &loss_weight, 0, tid, scratch ); } } MPI_Barrier(MPI_COMM_WORLD); l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); gflop = 0.0; for ( i = 0; i < num_layers; ++i) { gflop += (2.0*(double)global_MB*(double)C[i]*(double)C[i+1]*(double)iters) / (1000.0*1000.0*1000.0); } if (rank == 0) { printf("GFLOP = %.5g\n", gflop/(double)iters); printf("fp time = %.5g\n", ((double)(l_total/iters))); printf("GFLOPS = %.5g\n", gflop/l_total); printf("PERFDUMP,FP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB ); for ( i = 0; i < num_layers; ++i ) { printf("%i,", C[i] ); } printf("%f,%f\n", ((double)(l_total/iters)), gflop/l_total); } } if (type == 'B') { if (rank == 0) { 
printf("##########################################\n"); printf("# Performance - BWD (custom-Storage) #\n"); printf("##########################################\n"); } MPI_Barrier(MPI_COMM_WORLD); l_start = libxsmm_timer_tick(); #if defined(_OPENMP) # pragma omp parallel private(i,j) #endif { #if defined(_OPENMP) const int tid = omp_get_thread_num(); #else const int tid = 0; #endif for (j = 0; j < iters; ++j) { libxsmm_dnn_smax_bwd_exec_f32( libxsmm_dnn_smax_bwd, delact_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm, 0, tid, scratch ); for ( i = num_layers-1; i > 0; --i) { libxsmm_dnn_fc_bwd_exec_f32( libxsmm_dnn_fc_bwd[i], fil_libxsmm[i], delact_libxsmm[i], delact_libxsmm[i+1], delfil_libxsmm[i], act_libxsmm[i], delbias_libxsmm[i], relumask_libxsmm[i], LIBXSMM_DNN_FC_PASS_BWD, 0, tid, scratch ); /* Thread 0 issues asynchronous all reduce */ if (tid == 0) { MPI_Iallreduce(MPI_IN_PLACE, delfil_libxsmm[i], C[i]*C[i+1], MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD, &request[i%2]); } if (i < num_layers-1) { /* Wait for the MPI_Iallreduce issued in the previous iteration to complete */ if (tid == 0) { MPI_Wait(&request[(i+1)%2], MPI_STATUS_IGNORE); } /* All threads wait for the all-reduce to complete in order to execute the optimizer... */ #pragma omp barrier libxsmm_dnn_opt_exec_f32( libxsmm_dnn_opt[i+1], fil_libxsmm[i+1], delfil_libxsmm[i+1], 0, tid, scratch ); } } /* Only UPD pass for first layer */ libxsmm_dnn_fc_bwd_exec_f32( libxsmm_dnn_fc_bwd[0], fil_libxsmm[0], delact_libxsmm[0], delact_libxsmm[0+1], delfil_libxsmm[0], act_libxsmm[0], delbias_libxsmm[0], relumask_libxsmm[0], LIBXSMM_DNN_FC_PASS_BWD_W, 0, tid, scratch ); if (tid == 0) { MPI_Iallreduce(MPI_IN_PLACE, delfil_libxsmm[0], C[0]*C[1], MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD, &request[0]); } if (tid == 0) { MPI_Wait(&request[1], MPI_STATUS_IGNORE); } /* All threads wait for the all-reduce to complete in order to execute the optimizer... 
*/ #pragma omp barrier libxsmm_dnn_opt_exec_f32( libxsmm_dnn_opt[1], fil_libxsmm[1], delfil_libxsmm[1], 0, tid, scratch ); if (tid == 0) { MPI_Wait(&request[0], MPI_STATUS_IGNORE); } /* All threads wait for the all-reduce to complete in order to execute the optimizer... */ #pragma omp barrier libxsmm_dnn_opt_exec_f32( libxsmm_dnn_opt[0], fil_libxsmm[0], delfil_libxsmm[0], 0, tid, scratch ); } } MPI_Barrier(MPI_COMM_WORLD); l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); gflop = 0.0; for ( i = num_layers-1; i > 0; --i) { gflop += (4.0*(double)global_MB*(double)C[i]*(double)C[i+1]*(double)iters) / (1000.0*1000.0*1000.0); } gflop += (2.0*(double)global_MB*(double)C[0]*(double)C[1]*(double)iters) / (1000.0*1000.0*1000.0); if (rank == 0) { printf("GFLOP = %.5g\n", gflop/(double)iters); printf("fp time = %.5g\n", ((double)(l_total/iters))); printf("GFLOPS = %.5g\n", gflop/l_total); printf("PERFDUMP,BP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB ); for ( i = 0; i < num_layers; ++i ) { printf("%i,", C[i] ); } printf("%f,%f\n", ((double)(l_total/iters)), gflop/l_total); } MPI_Barrier(MPI_COMM_WORLD); #if 1 if (rank == n_procs - 1) { for ( i = 0 ; i < num_layers; ++i ) { libxsmm_matdiff(&norms, LIBXSMM_DATATYPE_F32, C[i]*C[i+1], 1, delfil_libxsmm[i], delfil_libxsmm[i], 0, 0); printf("L1 of layer's %d dweights after training : %.25g\n", i, norms.l1_ref); libxsmm_matdiff_clear(&norms); } } #endif } if (type == 'A') { if (rank == 0) { printf("##########################################\n"); printf("# Performance - FWD-BWD (custom-Storage) #\n"); printf("##########################################\n"); } MPI_Barrier(MPI_COMM_WORLD); l_start = libxsmm_timer_tick(); #if defined(_OPENMP) # pragma omp parallel private(i,j) #endif { #if defined(_OPENMP) const int tid = omp_get_thread_num(); #else const int tid = 0; #endif unsigned long long t0, t1; for (j = 0; j < iters; ++j) { #ifdef DETAILED_PROFILE if (tid == 0) { t0 = libxsmm_timer_tick(); } #endif for 
( i = 0; i < num_layers; ++i) { libxsmm_dnn_fc_fwd_exec_f32( libxsmm_dnn_fc_fwd[i], fil_libxsmm[i], act_libxsmm[i], act_libxsmm[i+1], bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch ); } #ifdef DETAILED_PROFILE if (tid == 0) { t1 = libxsmm_timer_tick(); l_fwd_fc[0] += libxsmm_timer_duration(t0, t1); t0 = libxsmm_timer_tick(); } #endif libxsmm_dnn_smax_fwd_exec_f32( libxsmm_dnn_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm, &loss_weight, 0, tid, scratch ); #ifdef DETAILED_PROFILE if (tid == 0) { t1 = libxsmm_timer_tick(); l_fwd_loss[0] += libxsmm_timer_duration(t0, t1); t0 = libxsmm_timer_tick(); } #endif libxsmm_dnn_smax_bwd_exec_f32( libxsmm_dnn_smax_bwd, delact_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm, 0, tid, scratch ); #ifdef DETAILED_PROFILE if (tid == 0) { t1 = libxsmm_timer_tick(); l_bwd_loss[0] += libxsmm_timer_duration(t0, t1); } #endif for ( i = num_layers-1; i > 0; --i) { #ifdef DETAILED_PROFILE if (tid == 0) { t0 = libxsmm_timer_tick(); } #endif libxsmm_dnn_fc_bwd_exec_f32( libxsmm_dnn_fc_bwd[i], fil_libxsmm[i], delact_libxsmm[i], delact_libxsmm[i+1], delfil_libxsmm[i], act_libxsmm[i], delbias_libxsmm[i], relumask_libxsmm[i], LIBXSMM_DNN_FC_PASS_BWD, 0, tid, scratch ); #ifdef DETAILED_PROFILE if (tid == 0) { t1 = libxsmm_timer_tick(); l_bwdupd_fc[0] += libxsmm_timer_duration(t0, t1); if (i == num_layers-1) { first_bwdupd_compute += libxsmm_timer_duration(t0, t1); } } #endif /* Thread 0 issues asynchronous all reduce */ if (tid == 0) { MPI_Iallreduce(MPI_IN_PLACE, delfil_libxsmm[i], C[i]*C[i+1], MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD, &request[i%2]); } if (i < num_layers-1) { /* Wait for the MPI_Iallreduce issued in the previous iteration to complete */ if (tid == 0) { MPI_Wait(&request[(i+1)%2], MPI_STATUS_IGNORE); } /* All threads wait for the all-reduce to complete in order to execute the optimizer... 
*/ #pragma omp barrier libxsmm_dnn_opt_exec_f32( libxsmm_dnn_opt[i+1], fil_libxsmm[i+1], delfil_libxsmm[i+1], 0, tid, scratch ); } } /* Only UPD pass for first layer */ libxsmm_dnn_fc_bwd_exec_f32( libxsmm_dnn_fc_bwd[0], fil_libxsmm[0], delact_libxsmm[0], delact_libxsmm[0+1], delfil_libxsmm[0], act_libxsmm[0], delbias_libxsmm[0], relumask_libxsmm[0], LIBXSMM_DNN_FC_PASS_BWD_W, 0, tid, scratch ); if (tid == 0) { MPI_Iallreduce(MPI_IN_PLACE, delfil_libxsmm[0], C[0]*C[1], MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD, &request[0]); } if (tid == 0) { MPI_Wait(&request[1], MPI_STATUS_IGNORE); } /* All threads wait for the all-reduce to complete in order to execute the optimizer... */ #pragma omp barrier libxsmm_dnn_opt_exec_f32( libxsmm_dnn_opt[1], fil_libxsmm[1], delfil_libxsmm[1], 0, tid, scratch ); if (tid == 0) { MPI_Wait(&request[0], MPI_STATUS_IGNORE); } /* All threads wait for the all-reduce to complete in order to execute the optimizer... */ #pragma omp barrier libxsmm_dnn_opt_exec_f32( libxsmm_dnn_opt[0], fil_libxsmm[0], delfil_libxsmm[0], 0, tid, scratch ); } } MPI_Barrier(MPI_COMM_WORLD); l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); gflop = 0.0; for ( i = num_layers-1; i > 0; --i) { gflop += (6.0*(double)global_MB*(double)C[i]*(double)C[i+1]*(double)iters) / (1000.0*1000.0*1000.0); } gflop += (4.0*(double)global_MB*(double)C[0]*(double)C[1]*(double)iters) / (1000.0*1000.0*1000.0); if (rank == 0) { printf("GFLOP = %.5g\n", gflop/(double)iters); printf("fp time = %.5g\n", ((double)(l_total/iters))); printf("GFLOPS = %.5g\n", gflop/l_total); printf("PERFDUMP,BP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB ); for ( i = 0; i < num_layers; ++i ) { printf("%i,", C[i] ); } printf("%f,%f\n", ((double)(l_total/iters)), gflop/l_total); #ifdef DETAILED_PROFILE double tot = /*l_allreduce[0] + l_optimizer[0] +*/ l_fwd_fc[0] + l_bwdupd_fc[0] + l_fwd_loss[0] + l_bwd_loss[0]; printf("FC time compute/loss = %.5g\n", ((double)(tot/iters))); printf("Bwdupd 
compute FIRST time overlaped = %.5g\n", ((double)((first_bwdupd_compute)/iters))); printf("Bwdupd compute time overlaped = %.5g\n", ((double)((l_bwdupd_fc[0]-first_bwdupd_compute)/iters))); #endif } MPI_Barrier(MPI_COMM_WORLD); #if 0 if (rank == n_procs - 1) { for ( i = 0 ; i < num_layers; ++i ) { libxsmm_matdiff(&norms, LIBXSMM_DATATYPE_F32, C[i]*C[i+1], 1, delfil_libxsmm[i], delfil_libxsmm[i], 0, 0); printf("L1 of layer's %d dweights after training : %.25g\n", i, norms.l1_ref); libxsmm_matdiff_clear(&norms); } } #endif } /* deallocate data */ if ( scratch != NULL ) { libxsmm_free(scratch); } for ( i = 0; i < num_layers; ++i ) { if ( i == 0 ) { libxsmm_free(act_libxsmm[i]); libxsmm_free(delact_libxsmm[i]); } libxsmm_free(act_libxsmm[i+1]); libxsmm_free(delact_libxsmm[i+1]); libxsmm_free(fil_libxsmm[i]); libxsmm_free(delfil_libxsmm[i]); libxsmm_free(bias_libxsmm[i]); libxsmm_free(delbias_libxsmm[i]); libxsmm_free(relumask_libxsmm[i]); } libxsmm_free(act_libxsmm[num_layers+1]); libxsmm_free(label_libxsmm); free( act_libxsmm ); free( delact_libxsmm ); free( fil_libxsmm ); free( delfil_libxsmm ); free( bias_libxsmm ); free( delbias_libxsmm ); free( relumask_libxsmm ); free( libxsmm_dnn_opt ); free( libxsmm_dnn_fc_fwd ); free( libxsmm_dnn_fc_bwd ); free( C ); if (rank == 0) { for ( i = 0 ; i < num_layers+2; ++i ) { libxsmm_free(ref_act_libxsmm[i]); } free(ref_act_libxsmm); for ( i = 0 ; i < num_layers+1; ++i ) { libxsmm_free(ref_delact_libxsmm[i]); } free(ref_delact_libxsmm); } /* Finalize the MPI environment */ MPI_Finalize(); return 0; }
re_model_template.h
/*! * This file is part of GPBoost a C++ library for combining * boosting with Gaussian process and mixed effects models * * Copyright (c) 2020 Fabio Sigrist. All rights reserved. * * Licensed under the Apache License Version 2.0. See LICENSE file in the project root for license information. */ #ifndef GPB_RE_MODEL_TEMPLATE_H_ #define GPB_RE_MODEL_TEMPLATE_H_ #define _USE_MATH_DEFINES // for M_PI #include <cmath> #include <GPBoost/log.h> #include <GPBoost/type_defs.h> #include <GPBoost/re_comp.h> #include <GPBoost/sparse_matrix_utils.h> #include <GPBoost/Vecchia_utils.h> #include <GPBoost/GP_utils.h> //#include <Eigen/src/misc/lapack.h> #include <memory> #include <mutex> #include <vector> #include <algorithm> // std::shuffle #include <random> // std::default_random_engine //#include <typeinfo> // Only needed for debugging //#include <chrono> // only needed for debugging //#include <thread> // only needed for debugging //std::this_thread::sleep_for(std::chrono::milliseconds(200));// Only for debugging //std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now();// Only for debugging //std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();// Only for debugging //double el_time = (double)(std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count()) / 1000000.;// Only for debugging //Log::Info("Time for : %g", el_time);// Only for debugging #ifndef M_PI #define M_PI 3.1415926535897932384626433832795029 #endif namespace GPBoost { /*! * \brief Template class used in the wrapper class REModel * The template parameters T1 and T2 can either be <sp_mat_t, chol_sp_mat_t> or <den_mat_t, chol_den_mat_t> */ template<typename T1, typename T2> class REModelTemplate { public: /*! \brief Null costructor */ REModelTemplate(); /*! 
* \brief Constructor
* \param num_data Number of data points
* \param cluster_ids_data IDs / labels indicating independent realizations of random effects / Gaussian processes (same values = same process realization)
* \param re_group_data Labels of group levels for the grouped random effects in column-major format (i.e. first the levels for the first effect, then for the second, etc.). Every group label needs to end with the null character '\0'
* \param num_re_group Number of grouped (intercept) random effects
* \param re_group_rand_coef_data Covariate data for grouped random coefficients
* \param ind_effect_group_rand_coef Indices that relate every random coefficient to a "base" intercept grouped random effect. Counting starts at 1.
* \param num_re_group_rand_coef Number of grouped random coefficients
* \param num_gp Number of (intercept) Gaussian processes
* \param gp_coords_data Coordinates (features) for Gaussian process
* \param dim_gp_coords Dimension of the coordinates (=number of features) for Gaussian process
* \param gp_rand_coef_data Covariate data for Gaussian process random coefficients
* \param num_gp_rand_coef Number of Gaussian process random coefficients
* \param cov_fct Type of covariance (kernel) function for Gaussian process. We follow the notation and parametrization of Diggle and Ribeiro (2007) except for the Matern covariance where we follow Rasmussen and Williams (2006)
* \param cov_fct_shape Shape parameter of covariance function (=smoothness parameter for Matern covariance, irrelevant for some covariance functions such as the exponential or Gaussian)
* \param vecchia_approx If true, the Vecchia approximation is used for the Gaussian process
* \param num_neighbors The number of neighbors used in the Vecchia approximation
* \param vecchia_ordering Ordering used in the Vecchia approximation. "none" = no ordering, "random" = random ordering
* \param vecchia_pred_type Type of Vecchia approximation for making predictions. "order_obs_first_cond_obs_only" = observed data is ordered first and neighbors are only observed points, "order_obs_first_cond_all" = observed data is ordered first and neighbors are selected among all points (observed + predicted), "order_pred_first" = predicted data is ordered first for making predictions, "latent_order_obs_first_cond_obs_only" = Vecchia approximation for the latent process and observed data is ordered first and neighbors are only observed points, "latent_order_obs_first_cond_all" = Vecchia approximation for the latent process and observed data is ordered first and neighbors are selected among all points
* \param num_neighbors_pred The number of neighbors used in the Vecchia approximation for making predictions
*/
REModelTemplate(data_size_t num_data,
	const gp_id_t* cluster_ids_data = nullptr,
	const char* re_group_data = nullptr,
	data_size_t num_re_group = 0,
	const double* re_group_rand_coef_data = nullptr,
	const int32_t* ind_effect_group_rand_coef = nullptr,
	data_size_t num_re_group_rand_coef = 0,
	data_size_t num_gp = 0,
	const double* gp_coords_data = nullptr,
	int dim_gp_coords = 2,
	const double* gp_rand_coef_data = nullptr,
	data_size_t num_gp_rand_coef = 0,
	const char* cov_fct = nullptr,
	double cov_fct_shape = 0.,
	bool vecchia_approx = false,
	int num_neighbors = 30,
	const char* vecchia_ordering = nullptr,
	const char* vecchia_pred_type = nullptr,
	int num_neighbors_pred = 30) {
	num_cov_par_ = 1;//the first covariance parameter is always the nugget effect (error term) variance
	CHECK(num_data > 0);
	num_data_ = num_data;
	vecchia_approx_ = vecchia_approx;
	//Set up GP IDs: partition the data into independent clusters / realizations
	SetUpGPIds(num_data_, cluster_ids_data, num_data_per_cluster_, data_indices_per_cluster_, unique_clusters_, num_clusters_);
	//Indices of parameters of individual components in joint parameter vector
	ind_par_.push_back(num_cov_par_);// 1 is starting point of parameter for first component since the first parameter is the nugget effect variance
	num_comps_total_ = 0;
	//Do some checks for grouped RE components and set meta data (number of components etc.)
	std::vector<std::vector<string_t>> re_group_levels;//Matrix with group levels for the grouped random effects (re_group_levels[j] contains the levels for RE number j)
	if (num_re_group > 0) {
		if (vecchia_approx) {
			Log::Fatal("The Veccia approximation cannot be used when there are grouped random effects (in the current implementation).");
		}
		num_re_group_ = num_re_group;
		CHECK(re_group_data != nullptr);
		if (num_re_group_rand_coef > 0) {
			num_re_group_rand_coef_ = num_re_group_rand_coef;
			CHECK(re_group_rand_coef_data != nullptr);
			CHECK(ind_effect_group_rand_coef != nullptr);
			//Indices relating random coefficients to their base intercept effect are 1-based and must point to an existing grouped RE
			for (int j = 0; j < num_re_group_rand_coef_; ++j) {
				CHECK(0 < ind_effect_group_rand_coef[j] && ind_effect_group_rand_coef[j] <= num_re_group_);
			}
			ind_effect_group_rand_coef_ = std::vector<int>(ind_effect_group_rand_coef, ind_effect_group_rand_coef + num_re_group_rand_coef_);
		}
		num_re_group_total_ = num_re_group_ + num_re_group_rand_coef_;
		num_cov_par_ += num_re_group_total_;//every grouped RE component adds one covariance parameter
		num_comps_total_ += num_re_group_total_;
		//Add indices of parameters of individual components in joint parameter vector
		for (int j = 0; j < num_re_group_total_; ++j) {
			ind_par_.push_back(ind_par_.back() + 1);//end points of parameter indices of components
		}
		// Convert characters in 'const char* re_group_data' to matrix (num_re_group_ x num_data_) with strings of group labels
		re_group_levels = std::vector<std::vector<string_t>>(num_re_group_, std::vector<string_t>(num_data_));
		if (num_re_group_ > 0) {
			ConvertCharToStringGroupLevels(num_data_, num_re_group_, re_group_data, re_group_levels);
		}
	}
	//Do some checks for GP components and set meta data (number of components etc.)
	if (num_gp > 0) {
		if (num_gp > 1) {
			Log::Fatal("num_gp can only be either 0 or 1 in the current implementation");
		}
		num_gp_ = num_gp;
		ind_intercept_gp_ = num_comps_total_;
		CHECK(dim_gp_coords > 0);
		CHECK(gp_coords_data != nullptr);
		CHECK(cov_fct != nullptr);
		dim_gp_coords_ = dim_gp_coords;
		cov_fct_ = std::string(cov_fct);
		cov_fct_shape_ = cov_fct_shape;
		if (vecchia_approx) {
			//Validate and store Vecchia approximation settings (falling back to defaults when not provided)
			Log::Info("Starting nearest neighbor search for Vecchia approximation");
			CHECK(num_neighbors > 0);
			num_neighbors_ = num_neighbors;
			CHECK(num_neighbors_pred > 0);
			num_neighbors_pred_ = num_neighbors_pred;
			if (vecchia_ordering == nullptr) {
				vecchia_ordering_ = "none";
			}
			else {
				vecchia_ordering_ = std::string(vecchia_ordering);
				CHECK(vecchia_ordering_ == "none" || vecchia_ordering_ == "random");
			}
			if (vecchia_pred_type == nullptr) {
				vecchia_pred_type_ = "order_obs_first_cond_obs_only";
			}
			else {
				vecchia_pred_type_ = std::string(vecchia_pred_type);
				if (SUPPORTED_VECCHIA_PRED_TYPES_.find(vecchia_pred_type_) == SUPPORTED_VECCHIA_PRED_TYPES_.end()) {
					Log::Fatal("Prediction type '%s' is not supported for the Veccia approximation.", vecchia_pred_type_.c_str());
				}
			}
		}
		if (num_gp_rand_coef > 0) {//Random slopes
			CHECK(gp_rand_coef_data != nullptr);
			num_gp_rand_coef_ = num_gp_rand_coef;
		}
		num_gp_total_ = num_gp_ + num_gp_rand_coef_;
		num_cov_par_ += (2 * num_gp_total_);//every GP component adds two covariance parameters
		num_comps_total_ += num_gp_total_;
		//Add indices of parameters of individual components in joint parameter vector
		for (int j = 0; j < num_gp_total_; ++j) {
			ind_par_.push_back(ind_par_.back() + 2);//end points of parameter indices of components
		}
		if (vecchia_approx) {
			//Rough upper-bound estimate of memory needed by the Vecchia approximation; warn above ~8 GB
			double num_mem_d = ((double)num_gp_total_) * ((double)num_data_) * ((double)num_neighbors_) * ((double)num_neighbors_);
			int mem_size = (int)(num_mem_d * 8. / 1000000.);
			if (mem_size > 8000) {
				Log::Warning("The current implementation of the Vecchia approximation is not optimized for memory usage. In your case (num. obs. = %d and num. neighbors = %d), at least approximately %d mb of memory is needed. If this is a problem, contact the developer of this package and ask to implement this feature.", num_data_, num_neighbors_, mem_size);
			}
		}
	}
	if (num_re_group_ > 0 && num_gp_total_ == 0) {
		do_symbolic_decomposition_ = true;//Symbolic decomposition is only done if sparse matrices are used
		use_woodbury_identity_ = true;//Faster to use Woodbury identity since the dimension of the random effects is typically much smaller than the number of data points
		//Note: the use of the Woodbury identity is currently only implemented for grouped random effects (which is also the only use of it).
		// If this should be applied to GPs in the future, adaptions need to be made e.g. in the calculations of the gradient (see y_tilde2_)
	}
	else {
		do_symbolic_decomposition_ = false;
		use_woodbury_identity_ = false;
	}
	//Create RE/GP component models, once per independent cluster / realization
	for (const auto& cluster_i : unique_clusters_) {
		std::vector<std::shared_ptr<RECompBase<T1>>> re_comps_cluster_i;
		if (vecchia_approx_) {
			//Build nearest-neighbor structures and sparse-matrix skeletons for the Vecchia approximation of this cluster
			std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_data_per_cluster_[cluster_i]);
			std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_data_per_cluster_[cluster_i]);
			std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_data_per_cluster_[cluster_i]);
			std::vector<Triplet_t> entries_init_B_cluster_i;
			std::vector<Triplet_t> entries_init_B_grad_cluster_i;
			std::vector<std::vector<den_mat_t>> z_outer_z_obs_neighbors_cluster_i(num_data_per_cluster_[cluster_i]);
			CreateREComponentsVecchia(num_data_, data_indices_per_cluster_, cluster_i, num_data_per_cluster_, gp_coords_data, dim_gp_coords_, gp_rand_coef_data, num_gp_rand_coef_, cov_fct_, cov_fct_shape_, re_comps_cluster_i, nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, entries_init_B_cluster_i, entries_init_B_grad_cluster_i, z_outer_z_obs_neighbors_cluster_i, vecchia_ordering_, num_neighbors_);
			nearest_neighbors_.insert({ cluster_i, nearest_neighbors_cluster_i });
			dist_obs_neighbors_.insert({ cluster_i, dist_obs_neighbors_cluster_i });
			dist_between_neighbors_.insert({ cluster_i, dist_between_neighbors_cluster_i });
			entries_init_B_.insert({ cluster_i, entries_init_B_cluster_i });
			entries_init_B_grad_.insert({ cluster_i, entries_init_B_grad_cluster_i });
			z_outer_z_obs_neighbors_.insert({ cluster_i, z_outer_z_obs_neighbors_cluster_i });
		}//end vecchia_approx_
		else {//not vecchia_approx_
			CreateREComponents(num_data_, num_re_group_, data_indices_per_cluster_, cluster_i, re_group_levels, num_data_per_cluster_, num_re_group_rand_coef_, re_group_rand_coef_data, ind_effect_group_rand_coef_, num_gp_, gp_coords_data, dim_gp_coords_, gp_rand_coef_data, num_gp_rand_coef_, cov_fct_, cov_fct_shape_, ind_intercept_gp_, !use_woodbury_identity_, re_comps_cluster_i);
			if (use_woodbury_identity_) {//Create matrices Z and ZtZ if Woodbury identity is used (used only if there are only grouped REs and no GPs)
				CHECK(num_comps_total_ == num_re_group_total_);
				std::vector<data_size_t> cum_num_rand_eff_cluster_i(num_comps_total_ + 1);
				cum_num_rand_eff_cluster_i[0] = 0;
				//Determine number of rows and non-zero entries of Z
				int non_zeros = 0;
				int ncols = 0;
				for (int j = 0; j < num_comps_total_; ++j) {
					sp_mat_t* Z_j = re_comps_cluster_i[j]->GetZ();
					ncols += (int)Z_j->cols();
					non_zeros += (int)Z_j->nonZeros();
					cum_num_rand_eff_cluster_i[j + 1] = ncols;
				}
				//Create matrix Z (horizontal concatenation of all Z_j) and calculate sum(Z_j^2) = trace(Z_j^T * Z_j)
				std::vector<Triplet_t> triplets;
				triplets.reserve(non_zeros);
				std::vector<double> Zj_square_sum_cluster_i(num_comps_total_);
				int ncol_prev = 0;
				for (int j = 0; j < num_comps_total_; ++j) {
					sp_mat_t* Z_j = re_comps_cluster_i[j]->GetZ();
					for (int k = 0; k < Z_j->outerSize(); ++k) {
						for (sp_mat_t::InnerIterator it(*Z_j, k); it; ++it) {
							triplets.emplace_back(it.row(), ncol_prev + it.col(), it.value());//shift columns of Z_j to its position in the concatenated Z
						}
					}
					ncol_prev += (int)Z_j->cols();
					Zj_square_sum_cluster_i[j] = Z_j->squaredNorm();
				}
				sp_mat_t Z_cluster_i(num_data_per_cluster_[cluster_i], ncols);
				Z_cluster_i.setFromTriplets(triplets.begin(), triplets.end());
				sp_mat_t Zt_cluster_i = Z_cluster_i.transpose();
				sp_mat_t ZtZ_cluster_i = Zt_cluster_i * Z_cluster_i;
				//Calculate Z^T * Z_j
				std::vector<sp_mat_t> ZtZj_cluster_i(num_comps_total_);
				for (int j = 0; j < num_comps_total_; ++j) {
					sp_mat_t* Z_j = re_comps_cluster_i[j]->GetZ();
					ZtZj_cluster_i[j] = Zt_cluster_i * (*Z_j);
				}
				//Save all quantities
				Zt_.insert({ cluster_i, Zt_cluster_i });
				ZtZ_.insert({ cluster_i, ZtZ_cluster_i });
				cum_num_rand_eff_.insert({ cluster_i, cum_num_rand_eff_cluster_i });
				Zj_square_sum_.insert({ cluster_i, Zj_square_sum_cluster_i });
				ZtZj_.insert({ cluster_i, ZtZj_cluster_i });
			}//end use_woodbury_identity_
			ConstructI<T1>(cluster_i);//Identity matrices needed for computing inverses of covariance matrices used in gradient descent
		}//end not vecchia_approx_
		re_comps_.insert({ cluster_i, re_comps_cluster_i });
	}//end loop over clusters
	if (vecchia_approx_) {
		Log::Info("Nearest neighbors for Vecchia approximation found");
	}
	////NOTE(review): a large block of commented-out debug output (printing meta data such as num_data_,
	////num_clusters_, component counts, ind_par_, and per-cluster group data) was condensed into this note
	////for readability; re-add ad-hoc Log::Info statements here when debugging the component setup.
}
/*! \brief Destructor */
~REModelTemplate() {
}
/*! \brief Disable copy */
REModelTemplate& operator=(const REModelTemplate&) = delete;
/*! \brief Disable copy */
REModelTemplate(const REModelTemplate&) = delete;
/*!
* \brief Find linear regression coefficients and covariance parameters that minimize the negative log-likelihood (=MLE) using (Nesterov accelerated) gradient descent
*		Note: You should pre-allocate memory for optim_cov_pars and optim_coef. Their length equal the number of covariance parameters and the number of regression coefficients
*			If calc_std_dev=true, you also need to pre-allocate memory for std_dev_cov_par and std_dev_coef of the same length for the standard deviations
* \param y_data Response variable data
* \param covariate_data Covariate data (=independent variables, features). Set to nullptr if there is no covariate data
* \param num_covariates Number of covariates
* \param[out] optim_cov_pars Optimal covariance parameters
* \param[out] optim_coef Optimal regression coefficients
* \param[out] num_it Number of iterations
* \param init_cov_pars Initial values for covariance parameters of RE components
* \param init_coef Initial values for the regression coefficients
* \param lr_coef Learning rate for fixed-effect linear coefficients
* \param lr_cov Learning rate for covariance parameters. If <= 0, default values are used. Default value = 0.01 for "gradient_descent" and 1. for "fisher_scoring"
* \param acc_rate_coef Acceleration rate for coefficients for Nesterov acceleration (only relevant if nesterov_schedule_version == 0)
* \param acc_rate_cov Acceleration rate for covariance parameters for Nesterov acceleration (only relevant if nesterov_schedule_version == 0)
* \param momentum_offset Number of iterations for which no momentum is applied in the beginning
* \param max_iter Maximal number of iterations
* \param delta_rel_conv Convergence criterion: stop iteration if relative change in parameters is below this value
* \param use_nesterov_acc Indicates whether Nesterov acceleration is used in the gradient descent for finding the covariance parameters. Default = true
* \param nesterov_schedule_version Which version of Nesterov schedule should be used. Default = 0
* \param optimizer_cov Optimizer for covariance parameters. Options: "gradient_descent" or "fisher_scoring" (default)
* \param optimizer_coef Optimizer for coefficients. Options: "gradient_descent" or "wls" (coordinate descent using weighted least squares, default)
* \param[out] std_dev_cov_par Standard deviations for the covariance parameters
* \param[out] std_dev_coef Standard deviations for the coefficients
* \param calc_std_dev If true, asymptotic standard deviations for the MLE of the covariance parameters are calculated as the diagonal of the inverse Fisher information
* \param convergence_criterion The convergence criterion used for terminating the optimization algorithm. Options: "relative_change_in_log_likelihood" (default) or "relative_change_in_parameters"
*/
void OptimLinRegrCoefCovPar(const double* y_data, const double* covariate_data, int num_covariates, double* optim_cov_pars, double* optim_coef, int& num_it, double* init_cov_pars, double* init_coef = nullptr, double lr_coef = 0.01, double lr_cov = -1., double acc_rate_coef = 0.1, double acc_rate_cov = 0.5, int momentum_offset = 2, int max_iter = 1000, double delta_rel_conv = 1.0e-6, bool use_nesterov_acc = true, int nesterov_schedule_version = 0, string_t optimizer_cov = "fisher_scoring", string_t optimizer_coef = "wls", double* std_dev_cov_par = nullptr, double* std_dev_coef = nullptr, bool calc_std_dev = false, string_t convergence_criterion = "relative_change_in_log_likelihood") {
	// If optim_learning_rate_halving==true, the learning rate is halved when the negative log-likelihood increases.
	// TODO: enable some sort of safeguard also when Nesterov acceleration is used
	bool optim_learning_rate_halving = !use_nesterov_acc;
	// Some checks
	if (covariate_data == nullptr) {
		has_covariates_ = false;
	}
	else {
		has_covariates_ = true;
	}
	if (SUPPORTED_OPTIM_COV_PAR_.find(optimizer_cov) == SUPPORTED_OPTIM_COV_PAR_.end()) {
		Log::Fatal("Optimizer option '%s' is not supported for covariance parameters.", optimizer_cov.c_str());
	}
	if (SUPPORTED_OPTIM_COEF_.find(optimizer_coef) == SUPPORTED_OPTIM_COEF_.end() && has_covariates_) {
		Log::Fatal("Optimizer option '%s' is not supported for regression coefficients.", optimizer_coef.c_str());
	}
	CHECK(SUPPORTED_CONF_CRIT_.find(convergence_criterion) != SUPPORTED_CONF_CRIT_.end());
	// Definition and initialization of regression coefficients related variables
	vec_t beta, beta_lag1, beta_acc, resid;
	if (has_covariates_) {
		num_coef_ = num_covariates;
		X_ = Eigen::Map<const den_mat_t>(covariate_data, num_data_, num_coef_);
		// Check whether one of the columns contains only 1's (an intercept) and if not, give out a warning
		vec_t vec_ones(num_data_);
		vec_ones.setOnes();
		bool has_intercept = false;
		for (int icol = 0; icol < num_coef_; ++icol) {
			if ((X_.col(icol) - vec_ones).cwiseAbs().sum() < 0.001) {
				has_intercept = true;
				break;
			}
		}
		if (!has_intercept) {
			Log::Warning("The covariate data contains no column of ones. This means that there is no intercept included.");
		}
		y_vec_ = Eigen::Map<const vec_t>(y_data, num_data_);
		beta = vec_t(num_covariates);
		if (init_coef == nullptr) {
			beta.setZero();
		}
		else {
			beta = Eigen::Map<const vec_t>(init_coef, num_covariates);
		}
		beta_lag1 = vec_t(num_covariates);
		beta_acc = vec_t(num_covariates);
	}
	// Definition and initialization of covariance parameters related variables
	if (lr_cov <= 0.) {
		// Use optimizer-specific default learning rates
		if (optimizer_cov == "fisher_scoring") {
			lr_cov = 1.;
		}
		else if (optimizer_cov == "gradient_descent") {
			lr_cov = 0.01;
		}
	}
	double lr_cov_init = lr_cov;// remembered so lr_cov can be reset per iteration for Fisher scoring
	if (!has_covariates_) {
		SetY(y_data);
	}
	vec_t cov_pars = Eigen::Map<const vec_t>(init_cov_pars, num_cov_par_);
	vec_t cov_pars_lag1 = vec_t(num_cov_par_);
	vec_t cov_pars_acc = vec_t(num_cov_par_);
	Log::Debug("Initial covariance parameters");
	for (int i = 0; i < (int)cov_pars.size(); ++i) { Log::Debug("cov_pars[%d]: %g", i, cov_pars[i]); }
	// Initialization of remaining variables
	double neg_log_like = 1e99;
	double neg_log_like_lag1 = neg_log_like;
	bool terminate_optim = false;
	bool CalcCovFactor_already_done = false;
	num_it = max_iter;// overwritten with the actual iteration count on early termination
	// Start optimization
	for (int it = 0; it < max_iter; ++it) {
		if (convergence_criterion == "relative_change_in_log_likelihood" || optim_learning_rate_halving) {
			neg_log_like_lag1 = neg_log_like;
		}
		// Factorize the covariance matrix
		// Note: this is typically done here only once in the first iteration, afterwards this has already been done when calculating the likelihood in the previous iteration
		if (!CalcCovFactor_already_done) {
			SetCovParsComps(cov_pars);
			CalcCovFactor(vecchia_approx_, true, 1., false);
		}
		// Update linear regression coefficients using gradient descent or generalized least squares
		if (has_covariates_) {
			// Apply momentum step to regression coefficients
			if (use_nesterov_acc && optimizer_coef == "gradient_descent" && it > 0) {
				ApplyMomentumStep(it, beta, beta_lag1, beta_acc, acc_rate_coef, nesterov_schedule_version, false, momentum_offset, false);
				beta_lag1 = beta;
				beta = beta_acc;
			}
			else {
				beta_lag1 = beta;
			}
			if (optimizer_coef == "gradient_descent") {// one step of gradient descent
				resid = y_vec_ - (X_ * beta);
				SetY(resid.data());
				CalcYAux();
				UpdateCoefGradOneIter(lr_coef, cov_pars[0], X_, beta);
			}
			else if (optimizer_coef == "wls") {// coordinate descent using generalized least squares
				SetY(y_vec_.data());
				CalcYAux();
				UpdateCoefGLS(X_, beta);
			}
			// Set resid for updating covariance parameters
			resid = y_vec_ - (X_ * beta);
			SetY(resid.data());
		}// end update regression coefficients
		// Update covariance parameters using gradient descent or Fisher scoring
		// Apply Nesterov momentum
		if (use_nesterov_acc && it > 0) {
			ApplyMomentumStep(it, cov_pars, cov_pars_lag1, cov_pars_acc, acc_rate_cov, nesterov_schedule_version, true, momentum_offset, true);
			cov_pars_lag1 = cov_pars;
			cov_pars = cov_pars_acc;
		}
		else {
			cov_pars_lag1 = cov_pars;
		}
		// Calculate y_aux = Psi^-1 * y (if not use_woodbury_identity_) or y_tilde and y_tilde2 (if use_woodbury_identity_) for covariance parameter gradient calculation
		// Note: this is typically done here only once in the first iteration (if convergence_criterion == "relative_change_in_log_likelihood"), afterwards this has already been done when calculating the likelihood in the previous iteration
		if (!CalcCovFactor_already_done || has_covariates_) {
			if (use_woodbury_identity_) {
				CalcYtilde<T1>(true);//y_tilde = L^-1 * Z^T * y and y_tilde2 = Z * L^-T * L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z)
			}
			else {
				CalcYAux();//y_aux = Psi^-1 * y
			}
		}
		// Calculate gradient or natural gradient = FI^-1 * grad (for Fisher scoring)
		vec_t nat_grad;// nat_grad = grad for gradient descent and nat_grad = FI^-1 * grad for Fisher scoring (="natural" gradient)
		if (optimizer_cov == "gradient_descent") {//gradient descent
			// First, profile out sigma (=use closed-form expression for error / nugget variance) since this is better for gradient descent
			// (the parameters usually live on different scales and the nugget needs a small learning rate but the others not...)
			CalcYTPsiIInvY<T1>(cov_pars[0], true, 1, true, true);
			cov_pars[0] /= num_data_;
			sigma2_ = cov_pars[0];
			CalcCovParGrad(cov_pars, nat_grad, false, false);
		}
		else if (optimizer_cov == "fisher_scoring") {//Fisher scoring
			// We don't profile out sigma (=don't use closed-form expression for error / nugget variance) since this is better for Fisher scoring (otherwise much more iterations are needed)
			vec_t grad;
			den_mat_t FI;
			CalcCovParGrad(cov_pars, grad, true, true);
			CalcFisherInformation(cov_pars, FI, true, true, true);
			nat_grad = FI.llt().solve(grad);
		}
		// Safeguard against too large steps by applying step halving to the learning rate
		if (optim_learning_rate_halving) {
			vec_t cov_pars_new(num_cov_par_);
			if (optimizer_cov == "gradient_descent") {
				cov_pars_new[0] = cov_pars[0];// nugget variance was already profiled out above for gradient descent
			}
			bool decrease_found = false;
			bool halving_done = false;
			for (int ih = 0; ih < MAX_NUMBER_HALVING_STEPS_; ++ih) {
				if (optimizer_cov == "gradient_descent") {
					cov_pars_new.segment(1, num_cov_par_ - 1) = (cov_pars.segment(1, num_cov_par_ - 1).array().log() - lr_cov * nat_grad.array()).exp().matrix();//make update on log-scale
				}
				else if (optimizer_cov == "fisher_scoring") {
					cov_pars_new = (cov_pars.array().log() - lr_cov * nat_grad.array()).exp().matrix();//make update on log-scale
				}
				SetCovParsComps(cov_pars_new);
				CalcCovFactor(vecchia_approx_, true, 1., false);//Create covariance matrix and factorize it (and also calculate derivatives if Vecchia approximation is used)
				if (use_woodbury_identity_) {
					CalcYtilde<T1>(true);//y_tilde = L^-1 * Z^T * y and y_tilde2 = Z * L^-T * L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z)
				}
				else {
					CalcYAux();//y_aux = Psi^-1 * y
				}
				EvalNegLogLikelihood(nullptr, cov_pars_new.data(), neg_log_like, true, true, true);
				if (neg_log_like <= neg_log_like_lag1) {
					decrease_found = true;
					break;
				}
				else {
					halving_done = true;
					lr_cov *= 0.5;
					if (has_covariates_ && optimizer_coef == "gradient_descent") {
						lr_coef *= 0.5;
					}
				}
			}
			if (halving_done) {
				if (optimizer_cov == "fisher_scoring") {
					Log::Debug("GPModel covariance parameter estimation: No decrease in negative log-likelihood in iteration number %d. The learning rate has been decreased in this iteration.", it + 1);
				}
				else if (optimizer_cov == "gradient_descent") {
					Log::Info("GPModel covariance parameter estimation: No decrease in negative log-likelihood in iteration number %d. The learning rate has been decreased permanently. New learning rate = %g", it + 1, lr_cov);
				}
			}
			if (!decrease_found) {
				Log::Warning("GPModel covariance parameter estimation: No decrease in negative log-likelihood in iteration number %d after the maximal number of halving steps (%d).", it + 1, MAX_NUMBER_HALVING_STEPS_);
			}
			if (halving_done && optimizer_cov == "fisher_scoring") {
				// reset lr_cov to initial value for Fisher scoring for next iteration. I.e., step halving is done newly in every iteration of Fisher scoring
				lr_cov = lr_cov_init;
			}
			cov_pars = cov_pars_new;
			CalcCovFactor_already_done = true;
		}// end optim_learning_rate_halving
		else {// no safeguard against too large steps
			if (optimizer_cov == "gradient_descent") {
				cov_pars.segment(1, num_cov_par_ - 1) = (cov_pars.segment(1, num_cov_par_ - 1).array().log() - lr_cov * nat_grad.array()).exp().matrix();//make update on log-scale
			}
			else if (optimizer_cov == "fisher_scoring") {
				cov_pars = (cov_pars.array().log() - lr_cov * nat_grad.array()).exp().matrix();//make update on log-scale
			}
			// Calculate new negative log-likelihood
			if (convergence_criterion == "relative_change_in_log_likelihood") {
				SetCovParsComps(cov_pars);
				CalcCovFactor(vecchia_approx_, true, 1., false);//Create covariance matrix and factorize it (and also calculate derivatives if Vecchia approximation is used)
				if (use_woodbury_identity_) {
					CalcYtilde<T1>(true);//y_tilde = L^-1 * Z^T * y and y_tilde2 = Z * L^-T * L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z)
				}
				else {
					CalcYAux();//y_aux = Psi^-1 * y
				}
				CalcCovFactor_already_done = true;
				EvalNegLogLikelihood(nullptr, cov_pars.data(), neg_log_like, true, true, true);
				if (neg_log_like > neg_log_like_lag1 && use_nesterov_acc) {
					Log::Warning("GPModel covariance parameter estimation: No decrease in negative log-likelihood in iteration number %d. There is no safeguard (halving of the learning rate) in place when applying Nesterov acceleration ", it + 1);
				}
			}
		}
		CheckNaNInf(cov_pars);
		//Check convergence
		bool likelihood_is_na = std::isnan(neg_log_like) || std::isinf(neg_log_like);//if the likelihood is NA, we monitor the parameters instead of the likelihood
		if (convergence_criterion == "relative_change_in_parameters" || likelihood_is_na) {
			if (has_covariates_) {
				// Both the coefficients and the covariance parameters need to have converged
				if (((beta - beta_lag1).norm() / beta_lag1.norm() < delta_rel_conv) && ((cov_pars - cov_pars_lag1).norm() / cov_pars_lag1.norm() < delta_rel_conv)) {
					terminate_optim = true;
				}
			}
			else {
				if ((cov_pars - cov_pars_lag1).norm() / cov_pars_lag1.norm() < delta_rel_conv) {
					terminate_optim = true;
				}
			}
		}
		else if (convergence_criterion == "relative_change_in_log_likelihood") {
			if (std::abs((neg_log_like - neg_log_like_lag1) / neg_log_like_lag1) < delta_rel_conv) {
				terminate_optim = true;
			}
		}
		// Output for debugging (log every early iteration, then with decreasing frequency)
		if (it < 10 || ((it + 1) % 10 == 0 && (it + 1) < 100) || ((it + 1) % 100 == 0 && (it + 1) < 1000) || ((it + 1) % 1000 == 0 && (it + 1) < 10000) || ((it + 1) % 10000 == 0)) {
			Log::Debug("Covariance parameter optimization iteration number %d", it + 1);
			for (int i = 0; i < (int)cov_pars.size(); ++i) { Log::Debug("cov_pars[%d]: %g", i, cov_pars[i]); }
			for (int i = 0; i < std::min((int)beta.size(), 3); ++i) { Log::Debug("beta[%d]: %g", i, beta[i]); }
			if (convergence_criterion == "relative_change_in_log_likelihood" || optim_learning_rate_halving) {
				Log::Debug("Negative log-likelihood: %g", neg_log_like);
			}
		}
		// Check whether to terminate
		if (terminate_optim) {
			num_it = it + 1;
			break;
		}
	}
	if (num_it == max_iter) {
		Log::Warning("GPModel covariance parameter estimation: no convergence after the maximal number of iterations");
	}
	// Write results back to the caller-provided output buffers
	for (int i = 0; i < num_cov_par_; ++i) {
		optim_cov_pars[i] = cov_pars[i];
	}
	if (calc_std_dev) {
		vec_t std_dev_cov(num_cov_par_);
		CalcStdDevCovPar(cov_pars, std_dev_cov);
		for (int i = 0; i < num_cov_par_; ++i) {
			std_dev_cov_par[i] = std_dev_cov[i];
		}
	}
	if (has_covariates_) {
		for (int i = 0; i < num_covariates; ++i) {
			optim_coef[i] = beta[i];
		}
		if (calc_std_dev) {
			vec_t std_dev_beta(num_covariates);
			CalcStdDevCoef(cov_pars, X_, std_dev_beta);
			for (int i = 0; i < num_covariates; ++i) {
				std_dev_coef[i] = std_dev_beta[i];
			}
		}
	}
}

/*!
* \brief Calculate the value of the negative log-likelihood
* \param y_data Response variable data
* \param cov_pars Values for covariance parameters of RE components
* \param[out] negll Negative log-likelihood
* \param CalcCovFactor_already_done If true, it is assumed that the covariance matrix has already been factorized
* \param CalcYAux_already_done If true, it is assumed that y_aux_=Psi^-1y_ has already been calculated (only relevant for not use_woodbury_identity_)
* \param CalcYtilde_already_done If true, it is assumed that y_tilde = L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z), has already been calculated (only relevant for use_woodbury_identity_)
*/
void EvalNegLogLikelihood(const double* y_data, double* cov_pars, double& negll, bool CalcCovFactor_already_done = false, bool CalcYAux_already_done = false, bool CalcYtilde_already_done = false) {
	CHECK(!(CalcYAux_already_done && !CalcCovFactor_already_done));// CalcYAux_already_done && !CalcCovFactor_already_done makes no sense
	if (y_data != nullptr) {
		SetY(y_data);
	}
	else {
		// No new response data given: a previously calculated y_aux_ or y_tilde must then exist
		CHECK(CalcYAux_already_done || CalcYtilde_already_done);
	}
	if (!CalcCovFactor_already_done) {
		vec_t cov_pars_vec = Eigen::Map<vec_t>(cov_pars, num_cov_par_);
		SetCovParsComps(cov_pars_vec);
		CalcCovFactor(false, true, 1., false);//Create covariance matrix and factorize it
	}
	//Calculate quadratic form y^T Psi^-1 y
	double yTPsiInvy;
	CalcYTPsiIInvY<T1>(yTPsiInvy, true, 1, CalcYAux_already_done, CalcYtilde_already_done);
	//Calculate log determinant of Psi (per independent cluster)
	double log_det = 0;
	for (const auto& cluster_i : unique_clusters_) {
		if (vecchia_approx_) {
			// Vecchia: log|Psi| = -sum(log(diag(D^-1))) from the factorization Psi^-1 = B^T D^-1 B
			log_det -= D_inv_[cluster_i].diagonal().array().log().sum();
		}
		else {
			if (use_woodbury_identity_) {
				// Woodbury: log|Psi| = 2*sum(log(diag(chol(Sigma^-1 + Z^T Z)))) + sum over components of n_j*log(sigma_j^2)
				log_det += (2. * chol_facts_[cluster_i].diagonal().array().log().sum());
				for (int j = 0; j < num_comps_total_; ++j) {
					int num_rand_eff = cum_num_rand_eff_[cluster_i][j + 1] - cum_num_rand_eff_[cluster_i][j];
					log_det += (num_rand_eff * std::log(re_comps_[cluster_i][j]->cov_pars_[0]));
				}
			}
			else {
				// Standard Cholesky factor of Psi
				log_det += (2. * chol_facts_[cluster_i].diagonal().array().log().sum());
			}
		}
	}
	negll = yTPsiInvy / 2. / cov_pars[0] + log_det / 2. + num_data_ / 2. * (std::log(cov_pars[0]) + std::log(2 * M_PI));
}

/*!
* \brief Set the data used for making predictions (useful if the same data is used repeatedly, e.g., in validation of GPBoost)
* \param num_data_pred Number of data points for which predictions are made
* \param cluster_ids_data_pred IDs / labels indicating independent realizations of Gaussian processes (same values = same process realization) for which predictions are to be made
* \param re_group_data_pred Labels of group levels for the grouped random effects in column-major format (i.e. first the levels for the first effect, then for the second, etc.).
Every group label needs to end with the null character '\0' * \param re_group_rand_coef_data_pred Covariate data for grouped random coefficients * \param gp_coords_data_pred Coordinates (features) for Gaussian process * \param gp_rand_coef_data_pred Covariate data for Gaussian process random coefficients * \param covariate_data_pred Covariate data (=independent variables, features) for prediction */ void SetPredictionData(int num_data_pred, const gp_id_t* cluster_ids_data_pred = nullptr, const char* re_group_data_pred = nullptr, const double* re_group_rand_coef_data_pred = nullptr, double* gp_coords_data_pred = nullptr, const double* gp_rand_coef_data_pred = nullptr, const double* covariate_data_pred = nullptr) { if (cluster_ids_data_pred == nullptr) { cluster_ids_data_pred_.clear(); } else { cluster_ids_data_pred_ = std::vector<gp_id_t>(cluster_ids_data_pred, cluster_ids_data_pred + num_data_pred); } if (re_group_data_pred == nullptr) { re_group_levels_pred_.clear(); if (num_re_group_ > 0) { Log::Fatal("No group data is provided for making predictions"); } } else { //For grouped random effecst: create matrix 're_group_levels_pred' (vector of vectors, dimension: num_re_group_ x num_data_) with strings of group levels from characters in 'const char* re_group_data_pred' re_group_levels_pred_ = std::vector<std::vector<string_t>>(num_re_group_, std::vector<string_t>(num_data_pred)); ConvertCharToStringGroupLevels(num_data_pred, num_re_group_, re_group_data_pred, re_group_levels_pred_); } if (re_group_rand_coef_data_pred == nullptr) { re_group_rand_coef_data_pred_.clear(); } else { re_group_rand_coef_data_pred_ = std::vector<double>(re_group_rand_coef_data_pred, re_group_rand_coef_data_pred + num_data_pred * num_re_group_rand_coef_); } if (gp_coords_data_pred == nullptr) { gp_coords_data_pred_.clear(); } else { gp_coords_data_pred_ = std::vector<double>(gp_coords_data_pred, gp_coords_data_pred + num_data_pred * dim_gp_coords_); } if (gp_rand_coef_data_pred == nullptr) { 
gp_rand_coef_data_pred_.clear(); } else { gp_rand_coef_data_pred_ = std::vector<double>(gp_rand_coef_data_pred, gp_rand_coef_data_pred + num_data_pred * num_gp_rand_coef_); } if (covariate_data_pred == nullptr) { covariate_data_pred_.clear(); } else { covariate_data_pred_ = std::vector<double>(covariate_data_pred, covariate_data_pred + num_data_pred * num_coef_); } } /*! * \brief Make predictions: calculate conditional mean and covariance matrix * Note: You should pre-allocate memory for out_predict * Its length is equal to num_data_pred if only the conditional mean is predicted (predict_cov_mat=false) * or num_data_pred * (1 + num_data_pred) if both the conditional mean and covariance matrix are predicted (predict_cov_mat=true) * \param cov_pars_pred Covariance parameters of components * \param y_obs Response variable for observed data * \param num_data_pred Number of data points for which predictions are made * \param[out] out_predict Conditional mean at prediciton points (="predicted value") followed by (if predict_cov_mat=true) the conditional covariance matrix at in column-major format * \param calc_cov_factor If true, the covariance matrix of the observed data is factorized otherwise a previously done factorization is used (default=true) * \param predict_cov_mat If true, the conditional covariance matrix is calculated (default=false) * \param covariate_data_pred Covariate data (=independent variables, features) for prediction * \param coef_pred Coefficients for linear covariates * \param cluster_ids_data_pred IDs / labels indicating independent realizations of Gaussian processes (same values = same process realization) for which predictions are to be made * \param re_group_data_pred Labels of group levels for the grouped random effects in column-major format (i.e. first the levels for the first effect, then for the second, etc.). 
Every group label needs to end with the null character '\0' * \param re_group_rand_coef_data_pred Covariate data for grouped random coefficients * \param gp_coords_data_pred Coordinates (features) for Gaussian process * \param gp_rand_coef_data_pred Covariate data for Gaussian process random coefficients * \param use_saved_data If true, saved data is used and some arguments are ignored * \param vecchia_pred_type Type of Vecchia approximation for making predictions. "order_obs_first_cond_obs_only" = observed data is ordered first and neighbors are only observed points, "order_obs_first_cond_all" = observed data is ordered first and neighbors are selected among all points (observed + predicted), "order_pred_first" = predicted data is ordered first for making predictions, "latent_order_obs_first_cond_obs_only" = Vecchia approximation for the latent process and observed data is ordered first and neighbors are only observed points, "latent_order_obs_first_cond_all" = Vecchia approximation for the latent process and observed data is ordered first and neighbors are selected among all points * \param num_neighbors_pred The number of neighbors used in the Vecchia approximation for making predictions (-1 means that the value already set at initialization is used) */ void Predict(const double* cov_pars_pred, const double* y_obs, data_size_t num_data_pred, double* out_predict, bool calc_cov_factor = true, bool predict_cov_mat = false, const double* covariate_data_pred = nullptr, const double* coef_pred = nullptr, const gp_id_t* cluster_ids_data_pred = nullptr, const char* re_group_data_pred = nullptr, const double* re_group_rand_coef_data_pred = nullptr, double* gp_coords_data_pred = nullptr, const double* gp_rand_coef_data_pred = nullptr, bool use_saved_data = false, const char* vecchia_pred_type = nullptr, int num_neighbors_pred = -1) { //Should previously set data be used? 
std::vector<std::vector<string_t>> re_group_levels_pred;//Matrix with group levels for the grouped random effects (re_group_levels_pred[j] contains the levels for RE number j) if (use_saved_data) { re_group_levels_pred = re_group_levels_pred_; if (cluster_ids_data_pred_.empty()) { cluster_ids_data_pred = nullptr; } else { cluster_ids_data_pred = cluster_ids_data_pred_.data(); } if (re_group_rand_coef_data_pred_.empty()) { re_group_rand_coef_data_pred = nullptr; } else { re_group_rand_coef_data_pred = re_group_rand_coef_data_pred_.data(); } if (gp_coords_data_pred_.empty()) { gp_coords_data_pred = nullptr; } else { gp_coords_data_pred = gp_coords_data_pred_.data(); } if (gp_rand_coef_data_pred_.empty()) { gp_rand_coef_data_pred = nullptr; } else { gp_rand_coef_data_pred = gp_rand_coef_data_pred_.data(); } if (covariate_data_pred_.empty()) { covariate_data_pred = nullptr; } else { covariate_data_pred = covariate_data_pred_.data(); } } else { if (num_re_group_ > 0) { if (re_group_data_pred == nullptr) { Log::Fatal("No group data is provided for making predictions"); } else { //For grouped random effecst: create matrix 're_group_levels_pred' (vector of vectors, dimension: num_re_group_ x num_data_) with strings of group levels from characters in 'const char* re_group_data_pred' re_group_levels_pred = std::vector<std::vector<string_t>>(num_re_group_, std::vector<string_t>(num_data_pred)); ConvertCharToStringGroupLevels(num_data_pred, num_re_group_, re_group_data_pred, re_group_levels_pred); } } } //Some checks CHECK(num_data_pred > 0); if (has_covariates_) { CHECK(covariate_data_pred != nullptr); CHECK(coef_pred != nullptr); } if (y_obs == nullptr) { if (y_.empty()) { Log::Fatal("Observed data is not provided and has not been set before"); } } //Check whether some data is missing if (re_group_rand_coef_data_pred == nullptr && num_re_group_rand_coef_ > 0) { Log::Fatal("No covariate data for grouped random coefficients is provided for making predictions"); } if 
(gp_coords_data_pred == nullptr && num_gp_ > 0) { Log::Warning("No coordinate data for the Gaussian process is provided for making predictions"); } if (gp_rand_coef_data_pred == nullptr && num_gp_rand_coef_ > 0) { Log::Warning("No covariate data for Gaussian process random coefficients is provided for making predictions"); } if (num_data_pred > 10000 && predict_cov_mat) { double num_mem_d = ((double)num_data_pred) * ((double)num_data_pred); int mem_size = (int)(num_mem_d * 8. / 1000000.); Log::Warning("The covariance matrix can be very large for large sample sizes which might lead to memory limitations. In your case (n = %d), the covariance needs at least approximately %d mb of memory. If you only need variances or covariances for linear combinations, contact the developer of this package and ask to implement this feature.", num_data_pred, mem_size); } if (vecchia_approx_) { if (vecchia_pred_type != nullptr) { string_t vecchia_pred_type_S = std::string(vecchia_pred_type); if (SUPPORTED_VECCHIA_PRED_TYPES_.find(vecchia_pred_type_S) == SUPPORTED_VECCHIA_PRED_TYPES_.end()) { Log::Fatal("Prediction type '%s' is not supported for the Veccia approximation.", vecchia_pred_type_S.c_str()); } vecchia_pred_type_ = vecchia_pred_type_S; } if (num_neighbors_pred > 0) { num_neighbors_pred_ = num_neighbors_pred; } } // Add linear regression term to mean vec_t coef; if (has_covariates_) { coef = Eigen::Map<const vec_t>(coef_pred, num_coef_); den_mat_t X_pred = Eigen::Map<const den_mat_t>(covariate_data_pred, num_data_pred, num_coef_); vec_t mu = X_pred * coef; #pragma omp parallel for schedule(static) for (int i = 0; i < num_data_pred; ++i) { out_predict[i] = mu[i]; } } vec_t cov_pars = Eigen::Map<const vec_t>(cov_pars_pred, num_cov_par_); //Set up cluster IDs std::map<gp_id_t, int> num_data_per_cluster_pred; std::map<gp_id_t, std::vector<int>> data_indices_per_cluster_pred; std::vector<gp_id_t> unique_clusters_pred; data_size_t num_clusters_pred; SetUpGPIds(num_data_pred, 
cluster_ids_data_pred, num_data_per_cluster_pred, data_indices_per_cluster_pred, unique_clusters_pred, num_clusters_pred); //Check whether predictions are made for existing clusters or if only for new independet clusters predictions are made bool pred_for_observed_data = false; for (const auto& cluster_i : unique_clusters_pred) { if (std::find(unique_clusters_.begin(), unique_clusters_.end(), cluster_i) != unique_clusters_.end()) { pred_for_observed_data = true; break; } } //Factorize covariance matrix and calculate Psi^{-1}y_obs (if required for prediction) if (pred_for_observed_data) {//TODO (low prio): this acutally needs to be done only for the GP realizations for which predictions are made (currently it is done for all of them in unique_clusters_pred) if (has_covariates_) { vec_t resid; if (y_obs != nullptr) { vec_t y = Eigen::Map<const vec_t>(y_obs, num_data_); resid = y - (X_ * coef); } else { resid = y_vec_ - (X_ * coef); } SetY(resid.data()); } else { if (y_obs != nullptr) { SetY(y_obs); } } SetCovParsComps(cov_pars); if (!vecchia_approx_) {// no need to call CalcCovFactor here for the Vecchia approximation, is done in the prediction steps if (calc_cov_factor) { CalcCovFactor(false, true, 1., false); } CalcYAux();//note: in some cases a call to CalcYAux() could be avoided (e.g. no covariates and not GPBoost algorithm)... } }//end if(pred_for_observed_data) //Initialize covariance matrix if (predict_cov_mat) {//TODO: avoid unnecessary initialization (only set to 0 for covariances accross different realizations of GPs) #pragma omp parallel for schedule(static) for (int i = 0; i < (num_data_pred * num_data_pred); ++i) { out_predict[i + num_data_pred] = 0.; } } for (const auto& cluster_i : unique_clusters_pred) { //Case 1: no data observed for this Gaussian process with ID 'cluster_i'. 
Thus use prior mean (0) and prior covariance matrix if (std::find(unique_clusters_.begin(), unique_clusters_.end(), cluster_i) == unique_clusters_.end()) { if (!has_covariates_) { #pragma omp parallel for schedule(static) for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) { out_predict[data_indices_per_cluster_pred[cluster_i][i]] = 0.; } } if (predict_cov_mat) { T1 psi; std::vector<std::shared_ptr<RECompBase<T1>>> re_comps_cluster_i; if (vecchia_approx_) { std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_data_per_cluster_pred[cluster_i]); std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_data_per_cluster_pred[cluster_i]); std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_data_per_cluster_pred[cluster_i]); std::vector<Triplet_t> entries_init_B_cluster_i; std::vector<Triplet_t> entries_init_B_grad_cluster_i; std::vector<std::vector<den_mat_t>> z_outer_z_obs_neighbors_cluster_i(num_data_per_cluster_pred[cluster_i]); CreateREComponentsVecchia(num_data_pred, data_indices_per_cluster_pred, cluster_i, num_data_per_cluster_pred, gp_coords_data_pred, dim_gp_coords_, gp_rand_coef_data_pred, num_gp_rand_coef_, cov_fct_, cov_fct_shape_, re_comps_cluster_i, nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, entries_init_B_cluster_i, entries_init_B_grad_cluster_i, z_outer_z_obs_neighbors_cluster_i, "none", num_neighbors_pred_);//TODO: maybe also use ordering for making predictions? 
(need to check that there are not errors) for (int j = 0; j < num_comps_total_; ++j) { const vec_t pars = cov_pars.segment(ind_par_[j], ind_par_[j + 1] - ind_par_[j]); re_comps_cluster_i[j]->SetCovPars(pars); } sp_mat_t B_cluster_i; sp_mat_t D_inv_cluster_i; std::vector<sp_mat_t> B_grad_cluster_i;//not used, but needs to be passed to function std::vector<sp_mat_t> D_grad_cluster_i;//not used, but needs to be passed to function CalcCovFactorVecchia(num_data_per_cluster_pred[cluster_i], false, re_comps_cluster_i, nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, entries_init_B_cluster_i, entries_init_B_grad_cluster_i, z_outer_z_obs_neighbors_cluster_i, B_cluster_i, D_inv_cluster_i, B_grad_cluster_i, D_grad_cluster_i); //Calculate Psi sp_mat_t D_sqrt(num_data_per_cluster_pred[cluster_i], num_data_per_cluster_pred[cluster_i]); D_sqrt.setIdentity(); D_sqrt.diagonal().array() = D_inv_cluster_i.diagonal().array().pow(-0.5); sp_mat_t B_inv_D_sqrt; eigen_sp_Lower_sp_RHS_cs_solve(B_cluster_i, D_sqrt, B_inv_D_sqrt, true); psi = B_inv_D_sqrt * B_inv_D_sqrt.transpose(); }//end vecchia_approx_ else {//not vecchia_approx_ psi.resize(num_data_per_cluster_pred[cluster_i], num_data_per_cluster_pred[cluster_i]); psi.setIdentity();//nugget effect CreateREComponents(num_data_pred, num_re_group_, data_indices_per_cluster_pred, cluster_i, re_group_levels_pred, num_data_per_cluster_pred, num_re_group_rand_coef_, re_group_rand_coef_data_pred, ind_effect_group_rand_coef_, num_gp_, gp_coords_data_pred, dim_gp_coords_, gp_rand_coef_data_pred, num_gp_rand_coef_, cov_fct_, cov_fct_shape_, ind_intercept_gp_, true, re_comps_cluster_i); for (int j = 0; j < num_comps_total_; ++j) { const vec_t pars = cov_pars.segment(ind_par_[j], ind_par_[j + 1] - ind_par_[j]); re_comps_cluster_i[j]->SetCovPars(pars); re_comps_cluster_i[j]->CalcSigma(); psi += (*(re_comps_cluster_i[j]->GetZSigmaZt().get())); } }//end not vecchia_approx_ psi *= cov_pars[0]; //write on 
output #pragma omp parallel for schedule(static) for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {//column index for (int j = 0; j < num_data_per_cluster_pred[cluster_i]; ++j) {//row index out_predict[data_indices_per_cluster_pred[cluster_i][i] * num_data_pred + data_indices_per_cluster_pred[cluster_i][j] + num_data_pred] = psi.coeff(j, i); } } }//end predict_cov_mat }//end cluster_i with no observed data else {//Case 2: there exists observed data for this cluster_i (= typically the case) den_mat_t gp_coords_mat_pred; if (num_gp_ > 0) { std::vector<double> gp_coords_pred; for (int j = 0; j < dim_gp_coords_; ++j) { for (const auto& id : data_indices_per_cluster_pred[cluster_i]) { gp_coords_pred.push_back(gp_coords_data_pred[j * num_data_pred + id]); } } gp_coords_mat_pred = Eigen::Map<den_mat_t>(gp_coords_pred.data(), num_data_per_cluster_pred[cluster_i], dim_gp_coords_); } vec_t mean_pred_id(num_data_per_cluster_pred[cluster_i]); T1 cov_mat_pred_id; if (predict_cov_mat) { cov_mat_pred_id = T1(num_data_per_cluster_pred[cluster_i], num_data_per_cluster_pred[cluster_i]); } if (vecchia_approx_) {//vecchia_approx_ std::shared_ptr<RECompGP<T1>> re_comp = std::dynamic_pointer_cast<RECompGP<T1>>(re_comps_[cluster_i][ind_intercept_gp_]); int num_data_tot = num_data_per_cluster_[cluster_i] + num_data_per_cluster_pred[cluster_i]; double num_mem_d = ((double)num_neighbors_pred_) * ((double)num_neighbors_pred_) * (double)(num_data_tot)+(double)(num_neighbors_pred_) * (double)(num_data_tot); int mem_size = (int)(num_mem_d * 8. / 1000000.); if (mem_size > 4000) { Log::Warning("The current implementation of the Vecchia approximation needs a lot of memory if the number of neighbors is large. In your case (nb. of neighbors = %d, nb. of observations = %d, nb. of predictions = %d), this needs at least approximately %d mb of memory. 
If this is a problem for you, contact the developer of this package and ask to change this.", num_neighbors_pred_, num_data_per_cluster_[cluster_i], num_data_per_cluster_pred[cluster_i], mem_size); } if (vecchia_pred_type_ == "order_obs_first_cond_obs_only") { CalcPredVecchiaObservedFirstOrder(true, cluster_i, num_data_pred, num_data_per_cluster_pred, data_indices_per_cluster_pred, re_comp->coords_, gp_coords_mat_pred, gp_rand_coef_data_pred, predict_cov_mat, mean_pred_id, cov_mat_pred_id); } else if (vecchia_pred_type_ == "order_obs_first_cond_all") { CalcPredVecchiaObservedFirstOrder(false, cluster_i, num_data_pred, num_data_per_cluster_pred, data_indices_per_cluster_pred, re_comp->coords_, gp_coords_mat_pred, gp_rand_coef_data_pred, predict_cov_mat, mean_pred_id, cov_mat_pred_id); } else if (vecchia_pred_type_ == "order_pred_first") { CalcPredVecchiaPredictedFirstOrder(cluster_i, num_data_pred, num_data_per_cluster_pred, data_indices_per_cluster_pred, re_comp->coords_, gp_coords_mat_pred, gp_rand_coef_data_pred, predict_cov_mat, mean_pred_id, cov_mat_pred_id); } else if (vecchia_pred_type_ == "latent_order_obs_first_cond_obs_only") { CalcPredVecchiaLatentObservedFirstOrder(true, cluster_i, num_data_per_cluster_pred, re_comp->coords_, gp_coords_mat_pred, predict_cov_mat, mean_pred_id, cov_mat_pred_id); } else if (vecchia_pred_type_ == "latent_order_obs_first_cond_all") { CalcPredVecchiaLatentObservedFirstOrder(false, cluster_i, num_data_per_cluster_pred, re_comp->coords_, gp_coords_mat_pred, predict_cov_mat, mean_pred_id, cov_mat_pred_id); } }//end vecchia_approx_ else {// not vecchia_approx_ CalcPred(cluster_i, num_data_pred, num_data_per_cluster_pred, data_indices_per_cluster_pred, re_group_levels_pred, re_group_rand_coef_data_pred, gp_coords_mat_pred, gp_rand_coef_data_pred, predict_cov_mat, mean_pred_id, cov_mat_pred_id); }//end not vecchia_approx_ //write on output #pragma omp parallel for schedule(static) for (int i = 0; i < 
num_data_per_cluster_pred[cluster_i]; ++i) {
	// Write predicted means to the output: if there are covariates, add the RE/GP mean
	// to the already-written linear predictor, otherwise the RE/GP mean is the prediction
	if (has_covariates_) {
		out_predict[data_indices_per_cluster_pred[cluster_i][i]] += mean_pred_id[i];
	}
	else {
		out_predict[data_indices_per_cluster_pred[cluster_i][i]] = mean_pred_id[i];
	}
}
if (predict_cov_mat) {
	cov_mat_pred_id *= cov_pars[0];
	// Covariances are written column-major after the first num_data_pred mean entries
#pragma omp parallel for schedule(static)
	for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {//column index
		for (int j = 0; j < num_data_per_cluster_pred[cluster_i]; ++j) {//row index
			out_predict[data_indices_per_cluster_pred[cluster_i][i] * num_data_pred + data_indices_per_cluster_pred[cluster_i][j] + num_data_pred] = cov_mat_pred_id.coeff(j, i);//cov_mat_pred_id_den(j, i);
		}
	}
}
}//end cluster_i with data
}//end loop over cluster
}

/*!
* \brief Find "reasonable" default values for the initial values of the covariance parameters (on transformed scale)
*		Note: You should pre-allocate memory for init_cov_pars (length = number of covariance parameters)
* \param y_data Response variable data
* \param[out] init_cov_pars Initial values for covariance parameters of RE components
*/
void FindInitCovPar(const double* y_data, double* init_cov_pars) {
	// Use the empirical variance of y as initial value for the nugget / error variance
	double mean = 0;
	for (int i = 0; i < num_data_; ++i) {
		mean += y_data[i];
	}
	mean /= num_data_;
	double var = 0;
	for (int i = 0; i < num_data_; ++i) {
		var += (y_data[i] - mean) * (y_data[i] - mean);
	}
	var /= (num_data_ - 1);
	init_cov_pars[0] = var;
	int ind_par = 1;
	if (vecchia_approx_) {//Neither distances nor coordinates are saved for random coefficient GPs in the Vecchia approximation -> cannot find initial parameters -> just copy the ones from the intercept GP
		// find initial values for intercept process
		int num_par_j = ind_par_[1] - ind_par_[0];
		vec_t pars = vec_t(num_par_j);
		re_comps_[unique_clusters_[0]][0]->FindInitCovPar(pars);
		for (int jj = 0; jj < num_par_j; ++jj) {
			init_cov_pars[ind_par] = pars[jj];
			ind_par++;
		}
		//set the same values to random coefficient processes
		for (int j = 1; j < num_gp_total_; ++j) {
			num_par_j = ind_par_[j + 1] - ind_par_[j];
			for (int jj = 0; jj < num_par_j; ++jj) {
				init_cov_pars[ind_par] = pars[jj];
				ind_par++;
			}
		}
	}
	else {
		// Ask every RE/GP component (of the first cluster) for its own initial parameters
		for (int j = 0; j < num_comps_total_; ++j) {
			int num_par_j = ind_par_[j + 1] - ind_par_[j];
			vec_t pars = vec_t(num_par_j);
			re_comps_[unique_clusters_[0]][j]->FindInitCovPar(pars);
			for (int jj = 0; jj < num_par_j; ++jj) {
				init_cov_pars[ind_par] = pars[jj];
				ind_par++;
			}
		}
	}
}

/*! \brief Returns the total number of covariance parameters */
int num_cov_par() {
	return(num_cov_par_);
}

/*!
* \brief Calculate the leaf values when performing a Newton update step after the tree structure has been found in tree-boosting
*		Note: only used in GPBoost for combined Gaussian process tree-boosting (this is called from 'objective_function_->NewtonUpdateLeafValues'). It is assumed that 'CalcYAux' has been called before (from 'objective_function_->GetGradients').
* \param data_leaf_index Leaf index for every data point (array of size num_data)
* \param num_leaves Number of leaves
* \param[out] leaf_values Leaf values when performing a Newton update step (array of size num_leaves)
* \param marg_variance The marginal variance. Default = 1. Can be used to multiply values by it since Newton updates do not depend on it but 'CalcYAux' might have been called using marg_variance!=1.
*/
void NewtonUpdateLeafValues(const int* data_leaf_index, const int num_leaves, double* leaf_values, double marg_variance = 1.) {
	CHECK(y_aux_has_been_calculated_);//y_aux_ has already been calculated when calculating the gradient for finding the tree structure from 'GetGradients' in 'regression_objective.hpp'
	// Solves (H^T Psi^-1 H) b = H^T Psi^-1 y for the leaf values b, accumulated over all independent clusters
	den_mat_t HTPsiInvH(num_leaves, num_leaves);
	vec_t HTYAux(num_leaves);
	HTPsiInvH.setZero();
	HTYAux.setZero();
	for (const auto& cluster_i : unique_clusters_) {
		//Entries for matrix H_cluster_i = incidence matrix H that relates tree leaves to observations for cluster_i
		std::vector<Triplet_t> entries_H_cluster_i(num_data_per_cluster_[cluster_i]);
#pragma omp parallel for schedule(static)
		for (int i = 0; i < num_data_per_cluster_[cluster_i]; ++i) {
			entries_H_cluster_i[i] = Triplet_t(i, data_leaf_index[data_indices_per_cluster_[cluster_i][i]], 1.);
		}
		den_mat_t HTPsiInvH_cluster_i;
		if (vecchia_approx_) {
			sp_mat_t H_cluster_i(num_data_per_cluster_[cluster_i], num_leaves);//row major format is needed for Vecchia approx.
			H_cluster_i.setFromTriplets(entries_H_cluster_i.begin(), entries_H_cluster_i.end());
			HTYAux -= H_cluster_i.transpose() * y_aux_[cluster_i];//minus sign since y_aux_ has been calculated on the gradient = F-y (and not y-F)
			sp_mat_t BH = B_[cluster_i] * H_cluster_i;
			HTPsiInvH_cluster_i = den_mat_t(BH.transpose() * D_inv_[cluster_i] * BH);
		}
		else {
			sp_mat_t H_cluster_i(num_data_per_cluster_[cluster_i], num_leaves);
			H_cluster_i.setFromTriplets(entries_H_cluster_i.begin(), entries_H_cluster_i.end());
			HTYAux -= H_cluster_i.transpose() * y_aux_[cluster_i];//minus sign since y_aux_ has been calculated on the gradient = F-y (and not y-F)
			if (use_woodbury_identity_) {
				sp_mat_t ZtH_cluster_i = Zt_[cluster_i] * H_cluster_i;
				T1 MInvSqrtZtH;
				if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
					MInvSqrtZtH = chol_facts_[cluster_i].diagonal().array().inverse().matrix().asDiagonal() * ZtH_cluster_i;
				}
				else {
					CalcPsiInvSqrtH(ZtH_cluster_i, MInvSqrtZtH, cluster_i, true);
				}
				// Woodbury: H^T Psi^-1 H = H^T H - (M^-1/2 Z^T H)^T (M^-1/2 Z^T H)
				HTPsiInvH_cluster_i = H_cluster_i.transpose() * H_cluster_i - MInvSqrtZtH.transpose() * MInvSqrtZtH;
			}
			else {
				T1 PsiInvSqrtH;
				CalcPsiInvSqrtH(H_cluster_i, PsiInvSqrtH, cluster_i, true);
				HTPsiInvH_cluster_i = PsiInvSqrtH.transpose() * PsiInvSqrtH;
			}
		}
		HTPsiInvH += HTPsiInvH_cluster_i;
	}
	HTYAux *= marg_variance;
	vec_t new_leaf_values = HTPsiInvH.llt().solve(HTYAux);
	for (int i = 0; i < num_leaves; ++i) {
		leaf_values[i] = new_leaf_values[i];
	}
}

private:
/*! \brief Number of data points */
data_size_t num_data_;

// CLUSTERs of INDEPENDENT REALIZATIONS
/*! \brief Keys: Labels of independent realizations of REs/GPs, values: vectors with indices for data points */
std::map<gp_id_t, std::vector<int>> data_indices_per_cluster_;
/*! \brief Keys: Labels of independent realizations of REs/GPs, values: number of data points per independent realization */
std::map<gp_id_t, int> num_data_per_cluster_;
/*! \brief Number of independent realizations of the REs/GPs */
data_size_t num_clusters_;
/*! \brief Unique labels of independent realizations */
std::vector<gp_id_t> unique_clusters_;

// GROUPED RANDOM EFFECTS
/*! \brief Number of grouped (intercept) random effects */
data_size_t num_re_group_ = 0;
/*! \brief Number of grouped random coefficients */
data_size_t num_re_group_rand_coef_ = 0;
/*! \brief Indices that relate every random coefficients to a "base" intercept grouped random effect. Counting starts at 1 (and ends at the number of base intercept random effects). Length of vector = num_re_group_rand_coef_. */
std::vector<int> ind_effect_group_rand_coef_;
/*! \brief Total number of grouped random effects (random intercepts plus random coefficients (slopes)) */
data_size_t num_re_group_total_ = 0;

// GAUSSIAN PROCESS
/*! \brief 1 if there is a Gaussian process 0 otherwise */
data_size_t num_gp_ = 0;
/*! \brief Type of GP. 0 = classical (spatial) GP, 1 = spatio-temporal GP */
//TODO: remove?
int8_t GP_type_ = 0;
/*! \brief Number of random coefficient GPs */
data_size_t num_gp_rand_coef_ = 0;
/*!
\brief Total number of GPs (random intercepts plus random coefficients) */
data_size_t num_gp_total_ = 0;
/*! \brief Index in the vector of random effect components (in the values of 're_comps_') of the intercept GP associated with the random coefficient GPs */
int ind_intercept_gp_;
/*! \brief Dimension of the coordinates (=number of features) for Gaussian process */
int dim_gp_coords_ = 2;//required to save since it is needed in the Predict() function when predictions are made for new independent realizations of GPs
/*! \brief Type of covariance (kernel) function for Gaussian processes */
string_t cov_fct_ = "exponential";//required to also save here since it is needed in the Predict() function when predictions are made for new independent realizations of GPs
/*! \brief Shape parameter of covariance function (=smoothness parameter for Matern covariance) */
double cov_fct_shape_ = 0.;
/*! \brief Keys: labels of independent realizations of REs/GPs, values: vectors with individual RE/GP components */
std::map<gp_id_t, std::vector<std::shared_ptr<RECompBase<T1>>>> re_comps_;
/*! \brief Indices of parameters of RE components in global parameter vector cov_pars. ind_par_[i] and ind_par_[i+1] - 1 are the indices of the first and last parameter of component number i (counting starts at 1) */
std::vector<data_size_t> ind_par_;
/*! \brief Number of covariance parameters */
data_size_t num_cov_par_;
/*! \brief Total number of random effect components (grouped REs plus other GPs) */
data_size_t num_comps_total_ = 0;
/*! \brief Key: labels of independent realizations of REs/GPs, values: Cholesky decomposition solver of covariance matrices Psi */
std::map<gp_id_t, T2> chol_facts_solve_;
/*! \brief Key: labels of independent realizations of REs/GPs, values: Cholesky factors of Psi matrices */
//TODO: above needed or can pattern be saved somewhere else?
std::map<gp_id_t, T1> chol_facts_;
/*! \brief Key: labels of independent realizations of REs/GPs, values: Identity matrices used for calculation of inverse covariance matrix */
//TODO: remove and construct on demand?
std::map<gp_id_t, T1> Id_;
/*! \brief Key: labels of independent realizations of REs/GPs, values: Identity matrices (CSparse format) used for calculation of inverse covariance matrix */
std::map<gp_id_t, cs> Id_cs_;
/*! \brief Key: labels of independent realizations of REs/GPs, value: data y */
std::map<gp_id_t, vec_t> y_;
/*! \brief Key: labels of independent realizations of REs/GPs, value: Psi^-1*y_ (used for various computations) */
std::map<gp_id_t, vec_t> y_aux_;
/*! \brief Key: labels of independent realizations of REs/GPs, value: L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z) (used for various computations when use_woodbury_identity_==true) */
std::map<gp_id_t, vec_t> y_tilde_;
/*! \brief Key: labels of independent realizations of REs/GPs, value: Z * L ^ -T * L ^ -1 * Z ^ T * y, L = chol(Sigma^-1 + Z^T * Z) (used for various computations when use_woodbury_identity_==true) */
std::map<gp_id_t, vec_t> y_tilde2_;
/*! \brief Indicates whether y_aux_ has been calculated */
bool y_aux_has_been_calculated_ = false;
/*! \brief Collects inverse covariance matrices Psi^{-1} (usually not saved, but used e.g. in Fisher scoring without the Vecchia approximation) */
std::map<gp_id_t, T1> psi_inv_;
/*! \brief Copy of response data (used only in case there are also linear covariates since then y_ is modified during the algorithm) */
vec_t y_vec_;
/*! \brief If true, a symbolic decomposition is first done when calculating the Cholesky factor of the covariance matrix (only for sparse matrices) */
bool do_symbolic_decomposition_ = true;
/*! \brief If true, the Woodbury, Sherman and Morrison matrix inversion formula is used for calculating the inverse of the covariance matrix (only used if there are only grouped REs and no Gaussian processes) */
bool use_woodbury_identity_ = false;
/*! \brief Collects matrices Z^T (usually not saved, only saved when use_woodbury_identity_=true i.e. when there are only grouped random effects, otherwise these matrices are saved only in the independent RE components) */
std::map<gp_id_t, sp_mat_t> Zt_;
/*! \brief Collects matrices Z^TZ (usually not saved, only saved when use_woodbury_identity_=true i.e. when there are only grouped random effects, otherwise these matrices are saved only in the independent RE components) */
std::map<gp_id_t, sp_mat_t> ZtZ_;
/*! \brief Collects vectors Z^Ty (usually not saved, only saved when use_woodbury_identity_=true i.e. when there are only grouped random effects) */
std::map<gp_id_t, vec_t> Zty_;
/*! \brief Cumulative number of random effects for components (usually not saved, only saved when use_woodbury_identity_=true i.e. when there are only grouped random effects, otherwise these matrices are saved only in the independent RE components) */
std::map<gp_id_t, std::vector<data_size_t>> cum_num_rand_eff_;//The random effects of component j start at cum_num_rand_eff_[0][j]+1 and end at cum_num_rand_eff_[0][j+1]
/*! \brief Sum of squared entries of Z_j for every random effect component (usually not saved, only saved when use_woodbury_identity_=true i.e. when there are only grouped random effects) */
std::map<gp_id_t, std::vector<double>> Zj_square_sum_;
/*! \brief Collects matrices Z^T * Z_j for every random effect component (usually not saved, only saved when use_woodbury_identity_=true i.e. when there are only grouped random effects) */
std::map<gp_id_t, std::vector<sp_mat_t>> ZtZj_;
/*! \brief Collects matrices L^-1 * Z^T * Z_j for every random effect component (usually not saved, only saved when use_woodbury_identity_=true i.e. when there are only grouped random effects and when Fisher scoring is done) */
std::map<gp_id_t, std::vector<T1>> LInvZtZj_;
/*! \brief Inverse covariance matrices Sigma^-1 of random effects. This is only used if use_woodbury_identity_==true (if there are only grouped REs). */
std::map<gp_id_t, sp_mat_t> SigmaI_;

// COVARIATE DATA for linear regression term
/*! \brief If true, the model linearly includes covariates */
bool has_covariates_ = false;
/*! \brief Number of covariates */
int num_coef_;
/*! \brief Covariate data */
den_mat_t X_;

// OPTIMIZER PROPERTIES
/*! \brief List of supported optimizers for covariance parameters */
const std::set<string_t> SUPPORTED_OPTIM_COV_PAR_{ "gradient_descent", "fisher_scoring" };
/*! \brief List of supported optimizers for regression coefficients */
const std::set<string_t> SUPPORTED_OPTIM_COEF_{ "gradient_descent", "wls" };
/*! \brief List of supported convergence criteria used for terminating the optimization algorithm */
const std::set<string_t> SUPPORTED_CONF_CRIT_{ "relative_change_in_parameters", "relative_change_in_log_likelihood" };
/*! \brief Maximal number of steps for which step halving for the learning rate is done */
int MAX_NUMBER_HALVING_STEPS_ = 30;

// VECCHIA APPROXIMATION for GP
/*! \brief If true, the Vecchia approximation is used for the Gaussian process */
bool vecchia_approx_ = false;
/*! \brief If true, a memory optimized version of the Vecchia approximation is used (at the expense of being slightly slower). THIS IS CURRENTLY NOT IMPLEMENTED */
bool vecchia_approx_optim_memory = false;
/*! \brief The number of neighbors used in the Vecchia approximation */
int num_neighbors_;
/*! \brief Ordering used in the Vecchia approximation. "none" = no ordering, "random" = random ordering */
string_t vecchia_ordering_ = "none";
/*! \brief The number of neighbors used in the Vecchia approximation for making predictions */
int num_neighbors_pred_;
/*! \brief Ordering used in the Vecchia approximation for making predictions. "order_obs_first_cond_obs_only" = observed data is ordered first and neighbors are only observed points, "order_obs_first_cond_all" = observed data is ordered first and neighbors are selected among all points (observed + predicted), "order_pred_first" = predicted data is ordered first for making predictions */
string_t vecchia_pred_type_ = "order_obs_first_cond_obs_only";//This is saved here and not simply set in the prediction function since it needs to be used repeatedly in the GPBoost algorithm when making predictions in "regression_metric.hpp" and the way predictions are done for the Vecchia approximation should be decoupled from the boosting algorithm
/*! \brief List of supported prediction types for the Vecchia approximation */
const std::set<string_t> SUPPORTED_VECCHIA_PRED_TYPES_{ "order_obs_first_cond_obs_only", "order_obs_first_cond_all", "order_pred_first", "latent_order_obs_first_cond_obs_only", "latent_order_obs_first_cond_all" };
/*! \brief Collects indices of nearest neighbors (used for Vecchia approximation) */
std::map<gp_id_t, std::vector<std::vector<int>>> nearest_neighbors_;
/*! \brief Distances between locations and their nearest neighbors (this is used only if the Vecchia approximation is used, otherwise the distances are saved directly in the base GP component) */
std::map<gp_id_t, std::vector<den_mat_t>> dist_obs_neighbors_;
/*! \brief Distances between nearest neighbors for all locations (this is used only if the Vecchia approximation is used, otherwise the distances are saved directly in the base GP component) */
std::map<gp_id_t, std::vector<den_mat_t>> dist_between_neighbors_;//TODO: this contains duplicate information (i.e. distances might be saved redundantly several times). But there is a trade-off between storage and computational speed. I currently don't see a way for saving unique distances without copying them when using them.
/*! \brief Outer product of covariate vector at observations and neighbors with itself. First index = cluster, second index = data point i, third index = GP number j (this is used only if the Vecchia approximation is used, otherwise this is saved directly in the GP component using Z_) */
std::map<gp_id_t, std::vector<std::vector<den_mat_t>>> z_outer_z_obs_neighbors_;
/*! \brief Collects matrices B = I - A (=Cholesky factor of inverse covariance) for Vecchia approximation */
std::map<gp_id_t, sp_mat_t> B_;
/*! \brief Collects diagonal matrices D^-1 for Vecchia approximation */
std::map<gp_id_t, sp_mat_t> D_inv_;
/*! \brief Collects derivatives of matrices B ( = derivative of matrix -A) for Vecchia approximation */
std::map<gp_id_t, std::vector<sp_mat_t>> B_grad_;
/*! \brief Collects derivatives of matrices D for Vecchia approximation */
std::map<gp_id_t, std::vector<sp_mat_t>> D_grad_;
/*! \brief Triplets for initializing the matrices B */
std::map<gp_id_t, std::vector<Triplet_t>> entries_init_B_;
/*! \brief Triplets for initializing the matrices B_grad */
std::map<gp_id_t, std::vector<Triplet_t>> entries_init_B_grad_;
/*! \brief Variance of idiosyncratic error term (nugget effect) */
double sigma2_;

// PREDICTION
/*! \brief Cluster IDs for prediction */
std::vector<gp_id_t> cluster_ids_data_pred_;
/*! \brief Levels of grouped RE for prediction */
std::vector<std::vector<string_t>> re_group_levels_pred_;
/*! \brief Covariate data for grouped random RE for prediction */
std::vector<double> re_group_rand_coef_data_pred_;
/*! \brief Coordinates for GP for prediction */
std::vector<double> gp_coords_data_pred_;
/*! \brief Covariate data for random GP for prediction */
std::vector<double> gp_rand_coef_data_pred_;
/*! \brief Covariate data for linear regression term */
std::vector<double> covariate_data_pred_;
/*!
\brief Nesterov schedule: returns the momentum rate to use at a given iteration */
double NesterovSchedule(int iter, int momentum_schedule_version = 0,
	double nesterov_acc_rate = 0.5, int momentum_offset = 2) {
	// No momentum during the first 'momentum_offset' iterations
	if (iter < momentum_offset) {
		return(0.);
	}
	else {
		if (momentum_schedule_version == 0) {
			// Constant acceleration rate
			return(nesterov_acc_rate);
		}
		else if (momentum_schedule_version == 1) {
			// Increasing rate 1 - 3/(6 + iter)
			return(1. - (3. / (6. + iter)));
		}
		else {
			return(0.);
		}
	}
}

/*! \brief mutex for threading safe call */
std::mutex mutex_;

/*! \brief Constructs identity matrices if sparse matrices are used (used for calculating inverse covariance matrix) */
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void ConstructI(gp_id_t cluster_i) {
	int dim_I = use_woodbury_identity_ ? cum_num_rand_eff_[cluster_i][num_comps_total_] : num_data_per_cluster_[cluster_i];
	T3 I(dim_I, dim_I);//identity matrix for calculating precision matrix
	I.setIdentity();
	Id_.insert({ cluster_i, I });
	// Also build a CSparse view onto the same data. NOTE: Id_cs aliases the buffers of
	// Id_[cluster_i] (no ownership), so Id_ must outlive Id_cs_ and must not be reallocated.
	cs Id_cs = cs();//same for cs type //TODO: construct this independently of Id_ , but then care need to be taken for deleting the pointer objects.
	Id_cs.nzmax = dim_I;
	Id_cs.m = dim_I;
	Id_cs.n = dim_I;
	Id_[cluster_i].makeCompressed();
	Id_cs.p = reinterpret_cast<csi*>(Id_[cluster_i].outerIndexPtr());
	Id_cs.i = reinterpret_cast<csi*>(Id_[cluster_i].innerIndexPtr());
	Id_cs.x = Id_[cluster_i].valuePtr();
	Id_cs.nz = -1;
	Id_cs_.insert({ cluster_i, Id_cs });
}

/*! \brief Constructs identity matrices if dense matrices are used (used for calculating inverse covariance matrix) */
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void ConstructI(gp_id_t cluster_i) {
	int dim_I = use_woodbury_identity_ ? cum_num_rand_eff_[cluster_i][num_comps_total_] : num_data_per_cluster_[cluster_i];
	T3 I(dim_I, dim_I);//identity matrix for calculating precision matrix
	I.setIdentity();
	Id_.insert({ cluster_i, I });
}

/*!
* \brief Set response variable data y_ (and calculate Z^T * y if use_woodbury_identity_ == true)
* \param y_data Response variable data
*/
void SetY(const double* y_data) {
	if (num_clusters_ == 1 && vecchia_ordering_ == "none") {
		// Single cluster, no reordering: map the data directly
		y_[unique_clusters_[0]] = Eigen::Map<const vec_t>(y_data, num_data_);
	}
	else {
		// Scatter the data into per-cluster vectors
		for (const auto& cluster_i : unique_clusters_) {
			y_[cluster_i] = vec_t(num_data_per_cluster_[cluster_i]);//TODO: Is there a more efficient way that avoids copying?
			for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
				y_[cluster_i][j] = y_data[data_indices_per_cluster_[cluster_i][j]];
			}
		}
	}
	if (use_woodbury_identity_) {
		CalcZtY();
	}
}

/*!
* \brief Calculate Z^T*y (use only when use_woodbury_identity_ == true)
*/
void CalcZtY() {
	for (const auto& cluster_i : unique_clusters_) {
		Zty_[cluster_i] = Zt_[cluster_i] * y_[cluster_i];
	}
}

/*!
* \brief Get y_aux = Psi^-1*y
* \param[out] y_aux Psi^-1*y (=y_aux_). Array needs to be pre-allocated of length num_data_
*/
void GetYAux(double* y_aux) {
	CHECK(y_aux_has_been_calculated_);
	if (num_clusters_ == 1 && vecchia_ordering_ == "none") {
		for (int j = 0; j < num_data_; ++j) {
			y_aux[j] = y_aux_[unique_clusters_[0]][j];
		}
	}
	else {
		// Gather per-cluster results back into the original data order
		for (const auto& cluster_i : unique_clusters_) {
			for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
				y_aux[data_indices_per_cluster_[cluster_i][j]] = y_aux_[cluster_i][j];
			}
		}
	}
}

/*!
* \brief Get y_aux = Psi^-1*y
* \param[out] y_aux Psi^-1*y (=y_aux_). This vector needs to be pre-allocated of length num_data_
*/
void GetYAux(vec_t& y_aux) {
	CHECK(y_aux_has_been_calculated_);
	if (num_clusters_ == 1 && vecchia_ordering_ == "none") {
		y_aux = y_aux_[unique_clusters_[0]];
	}
	else {
		for (const auto& cluster_i : unique_clusters_) {
			y_aux(data_indices_per_cluster_[cluster_i]) = y_aux_[cluster_i];
		}
	}
}

/*!
* \brief Do Cholesky decomposition and save in chol_facts_ (actual matrix) and chol_facts_solve_ (Eigen solver) if sparse matrices are used
* \param psi Covariance matrix for which the Cholesky decomposition should be done
* \param cluster_i Cluster index for which the Cholesky factor is calculated
* \param analyze_pattern If true, the pattern is analyzed as well (only for sparse matrices)
*/
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void CalcChol(T3& psi, gp_id_t cluster_i, bool analyze_pattern) {
	if (analyze_pattern) {
		// Symbolic factorization: only needs to be redone when the sparsity pattern changes
		chol_facts_solve_[cluster_i].analyzePattern(psi);
	}
	chol_facts_solve_[cluster_i].factorize(psi);
	chol_facts_[cluster_i] = chol_facts_solve_[cluster_i].matrixL();
	chol_facts_[cluster_i].makeCompressed();
}

/*!
* \brief Do Cholesky decomposition and save in chol_facts_ (actual matrix) and chol_facts_solve_ (Eigen solver) if dense matrices are used
* \param psi Covariance matrix for which the Cholesky decomposition should be done
* \param cluster_i Cluster index for which the Cholesky factor is calculated
* \param analyze_pattern If true, the pattern is analyzed as well (only for sparse matrices)
*/
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void CalcChol(T3& psi, gp_id_t cluster_i, bool analyze_pattern) {
	if (analyze_pattern) {
		Log::Warning("Pattern of Cholesky factor is not analyzed when dense matrices are used.");
	}
	chol_facts_solve_[cluster_i].compute(psi);
	chol_facts_[cluster_i] = chol_facts_solve_[cluster_i].matrixL();
}
/*!
* \brief Caclulate Psi^(-1) if sparse matrices are used * \param psi_inv[out] Inverse covariance matrix * \param cluster_i Cluster index for which Psi^(-1) is calculated */ template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr > void CalcPsiInv(T3& psi_inv, gp_id_t cluster_i) { if (use_woodbury_identity_) { sp_mat_t MInvSqrtZt; if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal MInvSqrtZt = chol_facts_[cluster_i].diagonal().array().inverse().matrix().asDiagonal() * Zt_[cluster_i]; } else { sp_mat_t L_inv; eigen_sp_Lower_sp_RHS_cs_solve(chol_facts_[cluster_i], Id_[cluster_i], L_inv, true); MInvSqrtZt = L_inv * Zt_[cluster_i]; ////Alternative option (crashes when eigen_sp_Lower_sp_RHS_cs_solve uses sp_Lower_sp_RHS_cs_solve / cs_spsolve due to Eigen bug) //eigen_sp_Lower_sp_RHS_cs_solve(chol_facts_[cluster_i], Zt_[cluster_i], MInvSqrtZt, true); } psi_inv = -MInvSqrtZt.transpose() * MInvSqrtZt;//this is slow since n can be large (O(n^2*m)) psi_inv.diagonal().array() += 1.0; } else { //Using CSparse function 'cs_spsolve' cs L_cs = cs();//Prepare LHS L_cs.nzmax = (int)chol_facts_[cluster_i].nonZeros(); L_cs.m = num_data_per_cluster_[cluster_i]; L_cs.n = num_data_per_cluster_[cluster_i]; L_cs.p = reinterpret_cast<csi*>(chol_facts_[cluster_i].outerIndexPtr()); L_cs.i = reinterpret_cast<csi*>(chol_facts_[cluster_i].innerIndexPtr()); L_cs.x = chol_facts_[cluster_i].valuePtr(); L_cs.nz = -1; //Invert Cholesky factor sp_mat_t L_inv; sp_Lower_sp_RHS_cs_solve(&L_cs, &Id_cs_[cluster_i], L_inv, true); psi_inv = L_inv.transpose() * L_inv; ////Version 2: doing sparse solving "by hand" but ignoring sparse RHS //const double* val = chol_facts_[cluster_i].valuePtr(); //const int* row_idx = chol_facts_[cluster_i].innerIndexPtr(); //const int* col_ptr = chol_facts_[cluster_i].outerIndexPtr(); //den_mat_t L_inv_dens = den_mat_t(Id_[cluster_i]); //for (int j = 0; j < 
num_data_per_cluster_[cluster_i]; ++j) { // sp_L_solve(val, row_idx, col_ptr, num_data_per_cluster_[cluster_i], L_inv_dens.data() + j * num_data_per_cluster_[cluster_i]); //} //const sp_mat_t L_inv = L_inv_dens.sparseView(); //psi_inv = L_inv.transpose() * L_inv; ////Version 1: let Eigen do the solving //cpsi_inv = chol_facts_solve_[cluster_i].solve(Id_[cluster_i]); } } /*! * \brief Caclulate Psi^(-1) if dense matrices are used * \param psi_inv[out] Inverse covariance matrix * \param cluster_i Cluster index for which Psi^(-1) is calculated */ template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr > void CalcPsiInv(T3& psi_inv, gp_id_t cluster_i) { if (use_woodbury_identity_) {//should currently not be called as use_woodbury_identity_ is only true for grouped REs only i.e. sparse matrices T3 MInvSqrtZt; if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal MInvSqrtZt = chol_facts_[cluster_i].diagonal().array().inverse().matrix().asDiagonal() * Zt_[cluster_i]; } else { MInvSqrtZt = Zt_[cluster_i]; #pragma omp parallel for schedule(static)//TODO: maybe sometimes faster without parallelization? for (int j = 0; j < (int)MInvSqrtZt.cols(); ++j) { L_solve(chol_facts_[cluster_i].data(), (int)chol_facts_[cluster_i].cols(), MInvSqrtZt.data() + j * (int)MInvSqrtZt.cols()); } } psi_inv = -MInvSqrtZt.transpose() * MInvSqrtZt; psi_inv.diagonal().array() += 1.0; } else { ////Version 1 //psi_inv = chol_facts_solve_[cluster_i].solve(Id_[cluster_i]); //Version 2: solving by hand T3 L_inv = Id_[cluster_i]; #pragma omp parallel for schedule(static)//TODO: maybe sometimes faster without parallelization? 
for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
	//column j of the n x n matrix L_inv starts at offset j * n (column-major storage)
	L_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], L_inv.data() + j * num_data_per_cluster_[cluster_i]);
}
//chol_facts_[cluster_i].triangularView<Eigen::Lower>().solveInPlace(L_inv); //slower
psi_inv = L_inv.transpose() * L_inv;
// Using dpotri from LAPACK does not work since LAPACK is not installed
//int info = 0;
//int n = num_data_per_cluster_[cluster_i];
//int lda = num_data_per_cluster_[cluster_i];
//char* uplo = "L";
//den_mat_t M = chol_facts_[cluster_i];
//BLASFUNC(dpotri)(uplo, &n, M.data(), &lda, &info);
	}
}
/*!
* \brief Calculate Psi^(-0.5)H if sparse matrices are used. Used in 'NewtonUpdateLeafValues' and if use_woodbury_identity_ == true
* \param H Right-hand side matrix H
* \param PsiInvSqrtH[out] Psi^(-0.5)H = solve(chol(Psi),H)
* \param cluster_i Cluster index for which Psi^(-0.5)H is calculated
* \param lower true if chol_facts_[cluster_i] is a lower triangular matrix
*/
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void CalcPsiInvSqrtH(sp_mat_t& H, T3& PsiInvSqrtH, gp_id_t cluster_i, bool lower = true) {
	eigen_sp_Lower_sp_RHS_solve(chol_facts_[cluster_i], H, PsiInvSqrtH, lower);
	//TODO: use eigen_sp_Lower_sp_RHS_cs_solve -> faster? (currently this crashes due to Eigen bug, see the definition of sp_Lower_sp_RHS_cs_solve for more details)
}
/*!
* \brief Calculate Psi^(-0.5)H if dense matrices are used.
Used in 'NewtonUpdateLeafValues' and if use_woodbury_identity_ == true
* \param H Right-hand side matrix H
* \param PsiInvSqrtH[out] Psi^(-0.5)H = solve(chol(Psi),H)
* \param cluster_i Cluster index for which Psi^(-0.5)H is calculated
* \param lower true if chol_facts_[cluster_i] is a lower triangular matrix
*/
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void CalcPsiInvSqrtH(sp_mat_t& H, T3& PsiInvSqrtH, gp_id_t cluster_i, bool lower = true) {
	PsiInvSqrtH = den_mat_t(H);
	//Triangular solve column by column; columns are independent, hence the parallel loop
#pragma omp parallel for schedule(static)
	for (int j = 0; j < H.cols(); ++j) {
		if (lower) {
			L_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], PsiInvSqrtH.data() + j * num_data_per_cluster_[cluster_i]);
		}
		else {
			L_t_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], PsiInvSqrtH.data() + j * num_data_per_cluster_[cluster_i]);
		}
	}
}
///*!
//* \brief Caclulate X^TPsi^(-1)X
//* \param X Covariate data matrix X
//* \param[out] XT_psi_inv_X X^TPsi^(-1)X
//*/
//	template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
//	void CalcXTPsiInvX(const den_mat_t& X, den_mat_t& XT_psi_inv_X) {
//		den_mat_t BX;
//		if (num_clusters_ == 1) {
//			gp_id_t cluster0 = unique_clusters_[0];
//			if (vecchia_approx_) {
//				BX = B_[cluster0] * X;
//				XT_psi_inv_X = BX.transpose() * D_inv_[cluster0] * BX;
//			}
//			else {
//				BX = X;
//#pragma omp parallel for schedule(static)
//				for (int j = 0; j < num_data_per_cluster_[cluster0]; ++j) {
//					L_solve(chol_facts_[cluster0].data(), num_data_per_cluster_[cluster0], BX.data() + j * num_data_per_cluster_[cluster0]);
//				}
//				XT_psi_inv_X = BX.transpose() * BX;
//			}
//		}
//		else {
//			XT_psi_inv_X = den_mat_t(X.cols(), X.cols());
//			XT_psi_inv_X.setZero();
//			for (const auto& cluster_i : unique_clusters_) {
//				if (vecchia_approx_) {
//					BX = B_[cluster_i] * X(data_indices_per_cluster_[cluster_i], Eigen::all);
//					XT_psi_inv_X += BX.transpose() * D_inv_[cluster_i] * BX;
//				}
//				else {
//					BX = X(data_indices_per_cluster_[cluster_i], Eigen::all);
//#pragma omp parallel for schedule(static)
//					for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
//						L_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], BX.data() + j * num_data_per_cluster_[cluster_i]);
//					}
//					XT_psi_inv_X += (BX.transpose() * BX);
//				}
//			}
//		}
//	}
//	//same for sparse matrices
//	template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
//	void CalcXTPsiInvX(const den_mat_t& X, den_mat_t& XT_psi_inv_X) {
//		den_mat_t BX;
//		if (num_clusters_ == 1) {
//			gp_id_t cluster0 = unique_clusters_[0];
//			if (vecchia_approx_) {
//				BX = B_[cluster0] * X;
//				XT_psi_inv_X = BX.transpose() * D_inv_[cluster0] * BX;
//			}
//			else {
//				BX = X;
//#pragma omp parallel for schedule(static)
//				for (int j = 0; j < num_data_per_cluster_[cluster0]; ++j) {
//					sp_L_solve(chol_facts_[cluster0].valuePtr(), chol_facts_[cluster0].innerIndexPtr(), chol_facts_[cluster0].outerIndexPtr(),
//						num_data_per_cluster_[cluster0], BX.data() + j * num_data_per_cluster_[cluster0]);
//				}
//				XT_psi_inv_X = BX.transpose() * BX;
//			}
//		}
//		else {
//			XT_psi_inv_X = den_mat_t(X.cols(), X.cols());
//			XT_psi_inv_X.setZero();
//			for (const auto& cluster_i : unique_clusters_) {
//				if (vecchia_approx_) {
//					BX = B_[cluster_i] * X(data_indices_per_cluster_[cluster_i], Eigen::all);
//					XT_psi_inv_X += BX.transpose() * D_inv_[cluster_i] * BX;
//				}
//				else {
//					BX = X(data_indices_per_cluster_[cluster_i], Eigen::all);
//#pragma omp parallel for schedule(static)
//					for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
//						sp_L_solve(chol_facts_[cluster_i].valuePtr(), chol_facts_[cluster_i].innerIndexPtr(), chol_facts_[cluster_i].outerIndexPtr(),
//							num_data_per_cluster_[cluster_i], BX.data() + j * num_data_per_cluster_[cluster_i]);
//					}
//					XT_psi_inv_X += (BX.transpose() * BX);
//				}
//			}
//		}
//	}
/*!
* \brief Calculate X^TPsi^(-1)X
* \param X Covariate data matrix X
* \param[out] XT_psi_inv_X X^TPsi^(-1)X
*/
void CalcXTPsiInvX(const den_mat_t& X, den_mat_t& XT_psi_inv_X) {
	if (num_clusters_ == 1 && vecchia_ordering_ == "none") {//only one cluster / independent GP realization
		if (vecchia_approx_) {
			den_mat_t BX = B_[unique_clusters_[0]] * X;
			XT_psi_inv_X = BX.transpose() * D_inv_[unique_clusters_[0]] * BX;
		}
		else {
			if (use_woodbury_identity_) {
				den_mat_t ZtX = Zt_[unique_clusters_[0]] * X;
				if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
					den_mat_t MInvSqrtZtX = chol_facts_[unique_clusters_[0]].diagonal().array().inverse().matrix().asDiagonal() * ZtX;
					XT_psi_inv_X = X.transpose() * X - MInvSqrtZtX.transpose() * MInvSqrtZtX;
				}
				else {
					//TODO: use only one forward solve (sp_L_solve for sparse and sp_L_solve for dense matrices) instead of using Eigens solver which does two solves. But this requires a template function since the Cholesky factor is T1
					XT_psi_inv_X = X.transpose() * X - ZtX.transpose() * chol_facts_solve_[unique_clusters_[0]].solve(ZtX);
				}
			}
			else {
				XT_psi_inv_X = X.transpose() * chol_facts_solve_[unique_clusters_[0]].solve(X);
			}
		}
	}
	else {//more than one cluster / independent GP realization: accumulate the contribution of every cluster
		XT_psi_inv_X = den_mat_t(X.cols(), X.cols());
		XT_psi_inv_X.setZero();
		den_mat_t BX;
		for (const auto& cluster_i : unique_clusters_) {
			if (vecchia_approx_) {
				BX = B_[cluster_i] * X(data_indices_per_cluster_[cluster_i], Eigen::all);
				XT_psi_inv_X += BX.transpose() * D_inv_[cluster_i] * BX;
			}
			else {
				if (use_woodbury_identity_) {
					den_mat_t ZtX = Zt_[cluster_i] * (den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all);
					if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
						den_mat_t MInvSqrtZtX = chol_facts_[cluster_i].diagonal().array().inverse().matrix().asDiagonal() * ZtX;
						XT_psi_inv_X += ((den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all)).transpose() * (den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all) - MInvSqrtZtX.transpose() * MInvSqrtZtX;
					}
					else {
						XT_psi_inv_X += ((den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all)).transpose() * (den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all) - ZtX.transpose() * chol_facts_solve_[cluster_i].solve(ZtX);
					}
				}
				else {
					XT_psi_inv_X += ((den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all)).transpose() * chol_facts_solve_[cluster_i].solve((den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all));
				}
			}
		}
	}
}
/*!
* \brief Initialize data structures for handling independent realizations of the Gaussian processes. Answers written on arguments.
* \param num_data Number of data points
* \param cluster_ids_data IDs / labels indicating independent realizations of Gaussian processes (same values = same process realization)
* \param[out] num_data_per_cluster Keys: labels of independent clusters, values: number of data points per independent realization
* \param[out] data_indices_per_cluster Keys: labels of independent clusters, values: vectors with indices for data points that belong to every cluster
* \param[out] unique_clusters Unique labels of independent realizations
* \param[out] num_clusters Number of independent clusters
*/
void SetUpGPIds(data_size_t num_data, const gp_id_t* cluster_ids_data,
	std::map<gp_id_t, int>& num_data_per_cluster, std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster,
	std::vector<gp_id_t>& unique_clusters, data_size_t& num_clusters) {
	if (cluster_ids_data != nullptr) {
		for (int i = 0; i < num_data; ++i) {
			if (num_data_per_cluster.find(cluster_ids_data[i]) == num_data_per_cluster.end()) {//first occurrence of cluster_ids_data[i]
				unique_clusters.push_back(cluster_ids_data[i]);
				num_data_per_cluster.insert({ cluster_ids_data[i], 1 });
				std::vector<int> id;
				id.push_back(i);
				data_indices_per_cluster.insert({ cluster_ids_data[i], id });
			}
			else {
				num_data_per_cluster[cluster_ids_data[i]] += 1;
data_indices_per_cluster[cluster_ids_data[i]].push_back(i);
			}
		}
		num_clusters = (data_size_t)unique_clusters.size();
	}
	else {//no cluster IDs provided -> all data belongs to one cluster with label 0
		unique_clusters.push_back(0);
		num_data_per_cluster.insert({ 0, num_data });
		num_clusters = 1;
		std::vector<int> gp_id_vec(num_data);
		for (int i = 0; i < num_data; ++i) {
			gp_id_vec[i] = i;
		}
		data_indices_per_cluster.insert({ 0, gp_id_vec });
	}
}
/*!
* \brief Convert characters in 'const char* re_group_data' to matrix (num_re_group x num_data) with strings of group labels
* \param num_data Number of data points
* \param num_re_group Number of grouped random effects
* \param re_group_data Labels of group levels for the grouped random effects in column-major format (i.e. first the levels for the first effect, then for the second, etc.). Every group label needs to end with the null character '\0'
* \param[out] re_group_levels Matrix of dimension num_re_group x num_data with strings of group labels for levels of grouped random effects
*/
void ConvertCharToStringGroupLevels(data_size_t num_data, data_size_t num_re_group,
	const char* re_group_data, std::vector<std::vector<string_t>>& re_group_levels) {
	int char_start = 0;
	for (int ire = 0; ire < num_re_group; ++ire) {//TODO: catch / report potential error if format of re_group_data is not correct
		for (int id = 0; id < num_data; ++id) {
			//advance past the current null-terminated label
			int number_chars = 0;
			while (re_group_data[char_start + number_chars] != '\0') {
				number_chars++;
			}
			re_group_levels[ire][id] = std::string(re_group_data + char_start);
			char_start += number_chars + 1;
		}
	}
}
/*!
* \brief Initialize individual component models and collect them in a container
* \param num_data Number of data points
* \param num_re_group Number of grouped random effects
* \param data_indices_per_cluster Keys: Labels of independent realizations of REs/GPs, values: vectors with indices for data points
* \param cluster_i Index / label of the realization of the Gaussian process for which the components should be constructed
* \param re_group_levels Group levels for every grouped random effect
* \param num_data_per_cluster Keys: Labels of independent realizations of REs/GPs, values: number of data points per independent realization
* \param num_re_group_rand_coef Number of grouped random coefficients
* \param re_group_rand_coef_data Covariate data for grouped random coefficients
* \param ind_effect_group_rand_coef Indices that relate every random coefficients to a "base" intercept grouped random effect. Counting start at 1.
* \param num_gp Number of Gaussian processes (intercept only, random coefficients not counting)
* \param gp_coords_data Coordinates (features) for Gaussian process
* \param dim_gp_coords Dimension of the coordinates (=number of features) for Gaussian process
* \param gp_rand_coef_data Covariate data for Gaussian process random coefficients
* \param num_gp_rand_coef Number of Gaussian process random coefficients
* \param cov_fct Type of covariance (kernel) function for Gaussian processes
* \param cov_fct_shape Shape parameter of covariance function (=smoothness parameter for Matern covariance)
* \param ind_intercept_gp Index in the vector of random effect components (in the values of 're_comps_') of the intercept GP associated with the random coefficient GPs
* \param calculateZZt If true, the matrix Z*Z^T is calculated for grouped random effects and saved (usually not needed if Woodbury identity is used)
* \param[out] re_comps_cluster_i Container that collects the individual component models
*/
void CreateREComponents(data_size_t num_data, data_size_t num_re_group,
	std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster, gp_id_t cluster_i,
	std::vector<std::vector<string_t>>& re_group_levels, std::map<gp_id_t, int>& num_data_per_cluster,
	data_size_t num_re_group_rand_coef, const double* re_group_rand_coef_data,
	std::vector<int>& ind_effect_group_rand_coef, data_size_t num_gp,
	const double* gp_coords_data, int dim_gp_coords, const double* gp_rand_coef_data,
	data_size_t num_gp_rand_coef, const string_t cov_fct, double cov_fct_shape,
	int ind_intercept_gp, bool calculateZZt,
	std::vector<std::shared_ptr<RECompBase<T1>>>& re_comps_cluster_i) {
	//Grouped REs
	if (num_re_group > 0) {
		for (int j = 0; j < num_re_group; ++j) {
			//gather the group labels of this cluster's data points for effect j
			std::vector<re_group_t> group_data;
			for (const auto& id : data_indices_per_cluster[cluster_i]) {
				group_data.push_back(re_group_levels[j][id]);//group_data_.push_back(std::string(re_group_data[j * num_data_ + id]));
			}
			re_comps_cluster_i.push_back(std::shared_ptr<RECompGroup<T1>>(new RECompGroup<T1>(group_data, calculateZZt)));
		}
		//Random slopes
		if (num_re_group_rand_coef > 0) {
			for (int j = 0; j < num_re_group_rand_coef; ++j) {
				std::vector<double> rand_coef_data;
				for (const auto& id : data_indices_per_cluster[cluster_i]) {
					rand_coef_data.push_back(re_group_rand_coef_data[j * num_data + id]);
				}
				std::shared_ptr<RECompGroup<T1>> re_comp = std::dynamic_pointer_cast<RECompGroup<T1>>(re_comps_cluster_i[ind_effect_group_rand_coef[j] - 1]);//Subtract -1 since ind_effect_group_rand_coef[j] starts counting at 1 not 0
				re_comps_cluster_i.push_back(std::shared_ptr<RECompGroup<T1>>(new RECompGroup<T1>(
					re_comp->group_data_, re_comp->map_group_label_index_, re_comp->num_group_, rand_coef_data, calculateZZt)));
			}
		}
	}
	//GPs
	if (num_gp > 0) {
		//collect coordinates of this cluster's data points in column-major order
		std::vector<double> gp_coords;
		for (int j = 0; j < dim_gp_coords; ++j) {
			for (const auto& id : data_indices_per_cluster[cluster_i]) {
				gp_coords.push_back(gp_coords_data[j * num_data + id]);
			}
		}
		den_mat_t gp_coords_mat = Eigen::Map<den_mat_t>(gp_coords.data(), num_data_per_cluster[cluster_i], dim_gp_coords);
		re_comps_cluster_i.push_back(std::shared_ptr<RECompGP<T1>>(new RECompGP<T1>(gp_coords_mat, cov_fct, cov_fct_shape, true)));
		//Random slopes
		if (num_gp_rand_coef > 0) {
			for (int j = 0; j < num_gp_rand_coef; ++j) {
				std::vector<double> rand_coef_data;
				for (const auto& id : data_indices_per_cluster[cluster_i]) {
					rand_coef_data.push_back(gp_rand_coef_data[j * num_data + id]);
				}
				//random coefficient GPs reuse the distance matrix / incidence matrix of the intercept GP
				std::shared_ptr<RECompGP<T1>> re_comp = std::dynamic_pointer_cast<RECompGP<T1>>(re_comps_cluster_i[ind_intercept_gp]);
				re_comps_cluster_i.push_back(std::shared_ptr<RECompGP<T1>>(new RECompGP<T1>(re_comp->dist_, re_comp->has_Z_, &re_comp->Z_, rand_coef_data, cov_fct, cov_fct_shape)));
			}
		}
	}
}
/*!
* \brief Initialize individual component models and collect them in a container when the Vecchia approximation is used
* \param num_data Number of data points
* \param data_indices_per_cluster Keys: Labels of independent realizations of REs/GPs, values: vectors with indices for data points
* \param cluster_i Index / label of the realization of the Gaussian process for which the components should be constructed
* \param num_data_per_cluster Keys: Labels of independent realizations of REs/GPs, values: number of data points per independent realization
* \param gp_coords_data Coordinates (features) for Gaussian process
* \param dim_gp_coords Dimension of the coordinates (=number of features) for Gaussian process
* \param gp_rand_coef_data Covariate data for Gaussian process random coefficients
* \param num_gp_rand_coef Number of Gaussian process random coefficients
* \param cov_fct Type of covariance (kernel) function for Gaussian processes
* \param cov_fct_shape Shape parameter of covariance function (=smoothness parameter for Matern covariance)
* \param[out] re_comps_cluster_i Container that collects the individual component models
* \param[out] nearest_neighbors_cluster_i Collects indices of nearest neighbors
* \param[out] dist_obs_neighbors_cluster_i
Distances between locations and their nearest neighbors
* \param[out] dist_between_neighbors_cluster_i Distances between nearest neighbors for all locations
* \param[out] entries_init_B_cluster_i Triplets for initializing the matrices B
* \param[out] entries_init_B_grad_cluster_i Triplets for initializing the matrices B_grad
* \param[out] z_outer_z_obs_neighbors_cluster_i Outer product of covariate vector at observations and neighbors with itself for random coefficients. First index = data point i, second index = GP number j
* \param vecchia_ordering Ordering used in the Vecchia approximation. "none" = no ordering, "random" = random ordering
* \param num_neighbors The number of neighbors used in the Vecchia approximation
*/
void CreateREComponentsVecchia(data_size_t num_data, std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster,
	gp_id_t cluster_i, std::map<gp_id_t, int>& num_data_per_cluster,
	const double* gp_coords_data, int dim_gp_coords,
	const double* gp_rand_coef_data, data_size_t num_gp_rand_coef,
	const string_t cov_fct, double cov_fct_shape,
	std::vector<std::shared_ptr<RECompBase<T1>>>& re_comps_cluster_i,
	std::vector<std::vector<int>>& nearest_neighbors_cluster_i,
	std::vector<den_mat_t>& dist_obs_neighbors_cluster_i,
	std::vector<den_mat_t>& dist_between_neighbors_cluster_i,
	std::vector<Triplet_t >& entries_init_B_cluster_i,
	std::vector<Triplet_t >& entries_init_B_grad_cluster_i,
	std::vector<std::vector<den_mat_t>>& z_outer_z_obs_neighbors_cluster_i,
	string_t vecchia_ordering = "none", int num_neighbors = 30) {
	if (vecchia_ordering == "random") {
		//fixed seed so that the random ordering is reproducible across calls
		unsigned seed = 0;
		std::shuffle(data_indices_per_cluster[cluster_i].begin(), data_indices_per_cluster[cluster_i].end(), std::default_random_engine(seed));
	}
	//collect coordinates of this cluster's data points in column-major order
	std::vector<double> gp_coords;
	for (int j = 0; j < dim_gp_coords; ++j) {
		for (const auto& id : data_indices_per_cluster[cluster_i]) {
			gp_coords.push_back(gp_coords_data[j * num_data + id]);
		}
	}
	den_mat_t gp_coords_mat = Eigen::Map<den_mat_t>(gp_coords.data(), num_data_per_cluster[cluster_i], dim_gp_coords);
	re_comps_cluster_i.push_back(std::shared_ptr<RECompGP<T1>>(new RECompGP<T1>(gp_coords_mat, cov_fct, cov_fct_shape, false)));
	find_nearest_neighbors_Veccia_fast(gp_coords_mat, num_data_per_cluster[cluster_i], num_neighbors,
		nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, 0, -1);
	for (int i = 0; i < num_data_per_cluster[cluster_i]; ++i) {
		for (int j = 0; j < (int)nearest_neighbors_cluster_i[i].size(); ++j) {
			entries_init_B_cluster_i.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][j], 0.));
			entries_init_B_grad_cluster_i.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][j], 0.));
		}
		entries_init_B_cluster_i.push_back(Triplet_t(i, i, 1.));//Put 1's on the diagonal since B = I - A
	}
	//Random coefficients
	if (num_gp_rand_coef > 0) {
		for (int j = 0; j < num_gp_rand_coef; ++j) {
			std::vector<double> rand_coef_data;
			for (const auto& id : data_indices_per_cluster[cluster_i]) {
				rand_coef_data.push_back(gp_rand_coef_data[j * num_data + id]);
			}
			re_comps_cluster_i.push_back(std::shared_ptr<RECompGP<T1>>(new RECompGP<T1>(rand_coef_data, cov_fct, cov_fct_shape)));
			//save random coefficient data in the form of outer product matrices
			//NOTE(review): this is an orphaned 'omp for' with no enclosing 'parallel' region, so it runs sequentially;
			//presumably '#pragma omp parallel for' was intended (iterations are independent) - confirm
#pragma omp for schedule(static)
			for (int i = 0; i < num_data_per_cluster[cluster_i]; ++i) {
				if (j == 0) {
					z_outer_z_obs_neighbors_cluster_i[i] = std::vector<den_mat_t>(num_gp_rand_coef);
				}
				//coef_vec holds the covariate value at point i followed by the values at its neighbors
				int dim_z = (i == 0) ? 1 : ((int)nearest_neighbors_cluster_i[i].size() + 1);
				vec_t coef_vec(dim_z);
				coef_vec(0) = rand_coef_data[i];
				if (i > 0) {
					for (int ii = 1; ii < dim_z; ++ii) {
						coef_vec(ii) = rand_coef_data[nearest_neighbors_cluster_i[i][ii - 1]];
					}
				}
				z_outer_z_obs_neighbors_cluster_i[i][j] = coef_vec * coef_vec.transpose();
			}
		}
	}
}
/*!
* \brief Set the covariance parameters of the components
* \param cov_pars Covariance parameters
*/
void SetCovParsComps(const vec_t& cov_pars) {
	CHECK(cov_pars.size() == num_cov_par_);
	sigma2_ = cov_pars[0];
	for (const auto& cluster_i : unique_clusters_) {
		for (int j = 0; j < num_comps_total_; ++j) {
			//ind_par_ holds the start indices of every component's parameters in cov_pars
			const vec_t pars = cov_pars.segment(ind_par_[j], ind_par_[j + 1] - ind_par_[j]);
			re_comps_[cluster_i][j]->SetCovPars(pars);
		}
	}
}
/*!
* \brief Transform the covariance parameters to the scale on which the MLE is found
* \param cov_pars Covariance parameters
* \param[out] cov_pars_trans Transformed covariance parameters
*/
void TransformCovPars(const vec_t& cov_pars, vec_t& cov_pars_trans) {
	CHECK(cov_pars.size() == num_cov_par_);
	cov_pars_trans = vec_t(num_cov_par_);
	cov_pars_trans[0] = cov_pars[0];
	for (int j = 0; j < num_comps_total_; ++j) {
		const vec_t pars = cov_pars.segment(ind_par_[j], ind_par_[j + 1] - ind_par_[j]);
		vec_t pars_trans = pars;
		re_comps_[unique_clusters_[0]][j]->TransformCovPars(cov_pars[0], pars, pars_trans);
		cov_pars_trans.segment(ind_par_[j], ind_par_[j + 1] - ind_par_[j]) = pars_trans;
	}
}
/*!
* \brief Back-transform the covariance parameters to the original scale
* \param cov_pars Covariance parameters
* \param[out] cov_pars_orig Back-transformed, original covariance parameters
*/
void TransformBackCovPars(const vec_t& cov_pars, vec_t& cov_pars_orig) {
	CHECK(cov_pars.size() == num_cov_par_);
	cov_pars_orig = vec_t(num_cov_par_);
	cov_pars_orig[0] = cov_pars[0];
	for (int j = 0; j < num_comps_total_; ++j) {
		const vec_t pars = cov_pars.segment(ind_par_[j], ind_par_[j + 1] - ind_par_[j]);
		vec_t pars_orig = pars;
		re_comps_[unique_clusters_[0]][j]->TransformBackCovPars(cov_pars[0], pars, pars_orig);
		cov_pars_orig.segment(ind_par_[j], ind_par_[j + 1] - ind_par_[j]) = pars_orig;
	}
}
/*!
* \brief Calculate covariance matrices of the components
*/
void CalcSigmaComps() {
	for (const auto& cluster_i : unique_clusters_) {
		for (int j = 0; j < num_comps_total_; ++j) {
			re_comps_[cluster_i][j]->CalcSigma();
		}
	}
}
/*!
* \brief Construct inverse covariance matrix Sigma^-1 if there are only grouped random effects (this is then a diagonal matrix)
* \param[out] SigmaI Inverse covariance matrix of random effects (a diagonal matrix)
* \param cluster_i Cluster index for which SigmaI is constructed
*/
void CalcSigmaIGroupedREsOnly(sp_mat_t& SigmaI, gp_id_t cluster_i) {
	std::vector<Triplet_t> triplets;
	triplets.reserve(cum_num_rand_eff_[cluster_i][num_comps_total_]);
	for (int j = 0; j < num_comps_total_; ++j) {
		//diagonal entries for component j are 1 / (its variance parameter)
		double sigmaI = re_comps_[cluster_i][j]->cov_pars_[0];
		sigmaI = 1.0 / sigmaI;
		for (int i = cum_num_rand_eff_[cluster_i][j]; i < cum_num_rand_eff_[cluster_i][j + 1]; ++i) {
			triplets.emplace_back(i, i, sigmaI);
		}
	}
	SigmaI = sp_mat_t(cum_num_rand_eff_[cluster_i][num_comps_total_], cum_num_rand_eff_[cluster_i][num_comps_total_]);
	SigmaI.setFromTriplets(triplets.begin(), triplets.end());
}
/*!
* \brief Calculate matrices A and D_inv as well as their derivatives for the Vecchia approximation for one cluster (independent realization of GP)
* \param num_data_cluster_i Number of data points
* \param calc_gradient If true, the gradient is also calculated (only for Vecchia approximation)
* \param re_comps_cluster_i Container that collects the individual component models
* \param nearest_neighbors_cluster_i Collects indices of nearest neighbors
* \param dist_obs_neighbors_cluster_i Distances between locations and their nearest neighbors
* \param dist_between_neighbors_cluster_i Distances between nearest neighbors for all locations
* \param entries_init_B_cluster_i Triplets for initializing the matrices B
* \param entries_init_B_grad_cluster_i Triplets for initializing the matrices B_grad
* \param z_outer_z_obs_neighbors_cluster_i Outer product of covariate vector at observations and neighbors with itself for random coefficients. First index = data point i, second index = GP number j
* \param[out] B_cluster_i Matrix A = I - B (= Cholesky factor of inverse covariance) for Vecchia approximation
* \param[out] D_inv_cluster_i Diagonal matrices D^-1 for Vecchia approximation
* \param[out] B_grad_cluster_i Derivatives of matrices A ( = derivative of matrix -B) for Vecchia approximation
* \param[out] D_grad_cluster_i Derivatives of matrices D for Vecchia approximation
* \param transf_scale If true, the derivatives are taken on the transformed scale otherwise on the original scale. Default = true
* \param nugget_var Nugget effect variance parameter sigma^2 (used only if transf_scale = false to transform back)
* \param calc_gradient_nugget If true, derivatives are also taken with respect to the nugget / noise variance
*/
void CalcCovFactorVecchia(int num_data_cluster_i, bool calc_gradient,//TODO: make arguments const
	std::vector<std::shared_ptr<RECompBase<T1>>>& re_comps_cluster_i,
	std::vector<std::vector<int>>& nearest_neighbors_cluster_i,
	std::vector<den_mat_t>& dist_obs_neighbors_cluster_i,
	std::vector<den_mat_t>& dist_between_neighbors_cluster_i,
	std::vector<Triplet_t >& entries_init_B_cluster_i,
	std::vector<Triplet_t >& entries_init_B_grad_cluster_i,
	std::vector<std::vector<den_mat_t>>& z_outer_z_obs_neighbors_cluster_i,
	sp_mat_t& B_cluster_i, sp_mat_t& D_inv_cluster_i,
	std::vector<sp_mat_t>& B_grad_cluster_i, std::vector<sp_mat_t>& D_grad_cluster_i,
	bool transf_scale = true, double nugget_var = 1., bool calc_gradient_nugget = false) {
	int num_par_comp = re_comps_cluster_i[ind_intercept_gp_]->num_cov_par_;
	int num_par_gp = num_par_comp * num_gp_total_ + calc_gradient_nugget;
	//Initialize matrices B = I - A and D^-1 as well as their derivatives (in order that the code below can be run in parallel)
	B_cluster_i = sp_mat_t(num_data_cluster_i, num_data_cluster_i);//B = I - A
	B_cluster_i.setFromTriplets(entries_init_B_cluster_i.begin(), entries_init_B_cluster_i.end());//Note: 1's are put on the diagonal
	D_inv_cluster_i = sp_mat_t(num_data_cluster_i, num_data_cluster_i);//D^-1. Note: we first calculate D, and then take the inverse below
	D_inv_cluster_i.setIdentity();//Put 1's on the diagonal for nugget effect (entries are not overridden but added below)
	if (!transf_scale) {
		D_inv_cluster_i.diagonal().array() *= nugget_var;//nugget effect is not 1 if not on transformed scale
	}
	if (calc_gradient) {
		B_grad_cluster_i = std::vector<sp_mat_t>(num_par_gp);//derivative of B = derivative of (-A)
		D_grad_cluster_i = std::vector<sp_mat_t>(num_par_gp);//derivative of D
		for (int ipar = 0; ipar < num_par_gp; ++ipar) {
			B_grad_cluster_i[ipar] = sp_mat_t(num_data_cluster_i, num_data_cluster_i);
			B_grad_cluster_i[ipar].setFromTriplets(entries_init_B_grad_cluster_i.begin(), entries_init_B_grad_cluster_i.end());
			D_grad_cluster_i[ipar] = sp_mat_t(num_data_cluster_i, num_data_cluster_i);
			D_grad_cluster_i[ipar].setIdentity();//Put 0 on the diagonal
			D_grad_cluster_i[ipar].diagonal().array() = 0.;//TODO: maybe change initialization of this matrix by also using triplets -> faster?
		}
	}//end initialization
#pragma omp parallel for schedule(static)
	for (int i = 0; i < num_data_cluster_i; ++i) {
		int num_nn = (int)nearest_neighbors_cluster_i[i].size();
		//calculate covariance matrices between observations and neighbors and among neighbors as well as their derivatives
		den_mat_t cov_mat_obs_neighbors(1, num_nn);
		den_mat_t cov_mat_between_neighbors(num_nn, num_nn);
		std::vector<den_mat_t> cov_grad_mats_obs_neighbors(num_par_gp);//covariance matrix plus derivative wrt to every parameter
		std::vector<den_mat_t> cov_grad_mats_between_neighbors(num_par_gp);
		if (i > 0) {
			for (int j = 0; j < num_gp_total_; ++j) {
				int ind_first_par = j * num_par_comp;//index of first parameter (variance) of component j in gradient vectors
				if (j == 0) {
					re_comps_cluster_i[ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i], cov_mat_obs_neighbors,
						cov_grad_mats_obs_neighbors[ind_first_par], cov_grad_mats_obs_neighbors[ind_first_par + 1],
						calc_gradient, transf_scale, nugget_var);//write on matrices directly for first GP component
					re_comps_cluster_i[ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i], cov_mat_between_neighbors,
						cov_grad_mats_between_neighbors[ind_first_par], cov_grad_mats_between_neighbors[ind_first_par + 1],
						calc_gradient, transf_scale, nugget_var);
				}
				else {//random coefficient GPs
					den_mat_t cov_mat_obs_neighbors_j;
					den_mat_t cov_mat_between_neighbors_j;
					re_comps_cluster_i[ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i], cov_mat_obs_neighbors_j,
						cov_grad_mats_obs_neighbors[ind_first_par], cov_grad_mats_obs_neighbors[ind_first_par + 1],
						calc_gradient, transf_scale, nugget_var);
					re_comps_cluster_i[ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i], cov_mat_between_neighbors_j,
						cov_grad_mats_between_neighbors[ind_first_par], cov_grad_mats_between_neighbors[ind_first_par + 1],
						calc_gradient, transf_scale, nugget_var);
					//multiply by coefficient matrix
					cov_mat_obs_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();//cov_mat_obs_neighbors_j.cwiseProduct()
					cov_mat_between_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
					cov_mat_obs_neighbors += cov_mat_obs_neighbors_j;
					cov_mat_between_neighbors += cov_mat_between_neighbors_j;
					if (calc_gradient) {
						cov_grad_mats_obs_neighbors[ind_first_par].array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();
						cov_grad_mats_obs_neighbors[ind_first_par + 1].array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();
						cov_grad_mats_between_neighbors[ind_first_par].array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
						cov_grad_mats_between_neighbors[ind_first_par + 1].array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
					}
				}
			}//end loop over components j
		}//end if(i>0)
		//Calculate matrices B and D as well as their derivatives
		//1. add first summand of matrix D (ZCZ^T_{ii}) and its derivatives
		for (int j = 0; j < num_gp_total_; ++j) {
			double d_comp_j = re_comps_cluster_i[ind_intercept_gp_ + j]->cov_pars_[0];
			if (!transf_scale) {
				d_comp_j *= nugget_var;
			}
			if (j > 0) {//random coefficient
				d_comp_j *= z_outer_z_obs_neighbors_cluster_i[i][j - 1](0, 0);
			}
			D_inv_cluster_i.coeffRef(i, i) += d_comp_j;
			if (calc_gradient) {
				if (transf_scale) {
					D_grad_cluster_i[j * num_par_comp].coeffRef(i, i) = d_comp_j;//derivative of the covariance function wrt the variance. derivative of the covariance function wrt to range is zero on the diagonal
				}
				else {
					if (j == 0) {
						D_grad_cluster_i[j * num_par_comp].coeffRef(i, i) = 1.;//1's on the diagonal on the original scale
					}
					else {
						D_grad_cluster_i[j * num_par_comp].coeffRef(i, i) = z_outer_z_obs_neighbors_cluster_i[i][j - 1](0, 0);
					}
				}
			}
		}
		if (calc_gradient && calc_gradient_nugget) {
			D_grad_cluster_i[num_par_gp - 1].coeffRef(i, i) = 1.;
		}
		//2. remaining terms
		if (i > 0) {
			if (transf_scale) {
				cov_mat_between_neighbors.diagonal().array() += 1.;//add nugget effect
			}
			else {
				cov_mat_between_neighbors.diagonal().array() += nugget_var;
			}
			den_mat_t A_i(1, num_nn);
			den_mat_t cov_mat_between_neighbors_inv;
			den_mat_t A_i_grad_sigma2;
			if (calc_gradient) {
				// Note: it is faster (approx. 1.5-2 times) to first calculate cov_mat_between_neighbors_inv and then multiply this with the matrices below
				// instead of always using the Cholesky factor of cov_mat_between_neighbors to calculate cov_mat_between_neighbors_inv * (a matrix)
				den_mat_t I(num_nn, num_nn);
				I.setIdentity();
				cov_mat_between_neighbors_inv = cov_mat_between_neighbors.llt().solve(I);
				A_i = cov_mat_obs_neighbors * cov_mat_between_neighbors_inv;
				if (calc_gradient_nugget) {
					A_i_grad_sigma2 = -A_i * cov_mat_between_neighbors_inv;
				}
			}
			else {
				A_i = (cov_mat_between_neighbors.llt().solve(cov_mat_obs_neighbors.transpose())).transpose();
			}
			for (int inn = 0; inn < num_nn; ++inn) {
				B_cluster_i.coeffRef(i, nearest_neighbors_cluster_i[i][inn]) = -A_i(0, inn);
			}
			D_inv_cluster_i.coeffRef(i, i) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
			if (calc_gradient) {
				den_mat_t A_i_grad(1, num_nn);
				for (int j = 0; j < num_gp_total_; ++j) {
					int ind_first_par = j * num_par_comp;
					for (int ipar = 0; ipar < num_par_comp; ++ipar) {
						A_i_grad = (cov_grad_mats_obs_neighbors[ind_first_par + ipar] * cov_mat_between_neighbors_inv) -
							(cov_mat_obs_neighbors * cov_mat_between_neighbors_inv * cov_grad_mats_between_neighbors[ind_first_par + ipar] * cov_mat_between_neighbors_inv);
						for (int inn = 0; inn < num_nn; ++inn) {
							B_grad_cluster_i[ind_first_par + ipar].coeffRef(i, nearest_neighbors_cluster_i[i][inn]) = -A_i_grad(0, inn);
						}
						if (ipar == 0) {
							D_grad_cluster_i[ind_first_par + ipar].coeffRef(i, i) -= ((A_i_grad * cov_mat_obs_neighbors.transpose())(0, 0) +
								(A_i * cov_grad_mats_obs_neighbors[ind_first_par + ipar].transpose())(0, 0));//add to derivative of diagonal elements for marginal variance
						}
						else {
							D_grad_cluster_i[ind_first_par + ipar].coeffRef(i, i) = -((A_i_grad * cov_mat_obs_neighbors.transpose())(0, 0) +
								(A_i * cov_grad_mats_obs_neighbors[ind_first_par + ipar].transpose())(0, 0));//don't add to existing values since derivative of diagonal is zero for range
						}
					}
				}
				if (calc_gradient_nugget) {
					for (int inn = 0; inn < num_nn; ++inn) {
						B_grad_cluster_i[num_par_gp - 1].coeffRef(i, nearest_neighbors_cluster_i[i][inn]) = -A_i_grad_sigma2(0, inn);
					}
					D_grad_cluster_i[num_par_gp - 1].coeffRef(i, i) -= (A_i_grad_sigma2 * cov_mat_obs_neighbors.transpose())(0, 0);
				}
			}//end calc_gradient
		}//end if i > 0
		D_inv_cluster_i.coeffRef(i, i) = 1. / D_inv_cluster_i.coeffRef(i, i);//invert D (it is diagonal)
	}//end loop over data i
}
/*!
* \brief Create the covariance matrix Psi and factorize it (either calculate a Cholesky factor or the inverse covariance matrix)
* \param calc_gradient If true, the gradient is also calculated (only for Vecchia approximation)
* \param transf_scale If true, the derivatives are taken on the transformed scale otherwise on the original scale. Default = true (only for Vecchia approximation)
* \param nugget_var Nugget effect variance parameter sigma^2 (used only if vecchia_approx_==true and transf_scale ==false to transform back, normally this is equal to one, since the variance parameter is modelled separately and factored out)
* \param calc_gradient_nugget If true, derivatives are also taken with respect to the nugget / noise variance (only for Vecchia approximation)
*/
void CalcCovFactor(bool calc_gradient = false, bool transf_scale = true, double nugget_var = 1., bool calc_gradient_nugget = false) {
	if (vecchia_approx_) {
		for (const auto& cluster_i : unique_clusters_) {
			int num_data_cl_i = num_data_per_cluster_[cluster_i];
			CalcCovFactorVecchia(num_data_cl_i, calc_gradient, re_comps_[cluster_i], nearest_neighbors_[cluster_i],
				dist_obs_neighbors_[cluster_i], dist_between_neighbors_[cluster_i],
				entries_init_B_[cluster_i], entries_init_B_grad_[cluster_i], z_outer_z_obs_neighbors_[cluster_i],
				B_[cluster_i], D_inv_[cluster_i], B_grad_[cluster_i], D_grad_[cluster_i], transf_scale, nugget_var, calc_gradient_nugget);
		}
	}
	else {
		CalcSigmaComps();
		for (const auto& cluster_i : unique_clusters_) {
			if (use_woodbury_identity_) {//Use Woodbury matrix inversion formula: used only if there are only grouped REs
if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal CalcSigmaIGroupedREsOnly(SigmaI_[cluster_i], cluster_i); chol_facts_[cluster_i] = (SigmaI_[cluster_i].diagonal().array() + ZtZ_[cluster_i].diagonal().array()).sqrt().matrix().asDiagonal(); } else { sp_mat_t SigmaI; CalcSigmaIGroupedREsOnly(SigmaI, cluster_i); T1 SigmaIplusZtZ = SigmaI + ZtZ_[cluster_i]; CalcChol<T1>(SigmaIplusZtZ, cluster_i, do_symbolic_decomposition_); } }//end use_woodbury_identity_ else { T1 psi; psi.resize(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]); psi.setIdentity();//nugget effect for (int j = 0; j < num_comps_total_; ++j) { psi += (*(re_comps_[cluster_i][j]->GetZSigmaZt())); } CalcChol<T1>(psi, cluster_i, do_symbolic_decomposition_); } } do_symbolic_decomposition_ = false;//Symbolic decompostion done only once (if sparse matrices are used) } } /*! * \brief Calculate Psi^-1*y (and save in y_aux_) * \param marg_variance The marginal variance. Default = 1. */ void CalcYAux(double marg_variance = 1.) { for (const auto& cluster_i : unique_clusters_) { if (y_.find(cluster_i) == y_.end()) { Log::Fatal("Response variable data (y_) for random effects model has not been set. Call 'SetY' first."); } if (vecchia_approx_) { if (B_.find(cluster_i) == B_.end()) { Log::Fatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first."); } y_aux_[cluster_i] = B_[cluster_i].transpose() * D_inv_[cluster_i] * B_[cluster_i] * y_[cluster_i]; }//end vecchia_approx_ else {//not vecchia_approx_ if (chol_facts_.find(cluster_i) == chol_facts_.end()) { Log::Fatal("Factorisation of covariance matrix has not been done. 
Call 'CalcCovFactor' first."); } if (use_woodbury_identity_) { vec_t MInvZty; if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal MInvZty = (Zty_[cluster_i].array() / (chol_facts_[cluster_i].diagonal().array().square())).matrix(); } else { MInvZty = chol_facts_solve_[cluster_i].solve(Zty_[cluster_i]); } y_aux_[cluster_i] = y_[cluster_i] - Zt_[cluster_i].transpose() * MInvZty; } else { //Version 1: let Eigen do the computation y_aux_[cluster_i] = chol_facts_solve_[cluster_i].solve(y_[cluster_i]); //// Version 2 'do-it-yourself' (for sparse matrices) //y_aux_[cluster_i] = y_[cluster_i]; //const double* val = chol_facts_[cluster_i].valuePtr(); //const int* row_idx = chol_facts_[cluster_i].innerIndexPtr(); //const int* col_ptr = chol_facts_[cluster_i].outerIndexPtr(); //sp_L_solve(val, row_idx, col_ptr, num_data_per_cluster_[cluster_i], y_aux_[cluster_i].data()); //sp_L_t_solve(val, row_idx, col_ptr, num_data_per_cluster_[cluster_i], y_aux_[cluster_i].data()); } }//end non-Vecchia if (marg_variance != 1.) { y_aux_[cluster_i] /= marg_variance; } } y_aux_has_been_calculated_ = true; } /*! * \brief Calculate y_tilde = L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z) (and save in y_tilde_) if sparse matrices are used * \param also_calculate_ytilde2 If true y_tilde2 = Z * L^-T * L^-1 * Z^T * y is also calculated */ template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr > void CalcYtilde(bool also_calculate_ytilde2 = false) { for (const auto& cluster_i : unique_clusters_) { if (y_.find(cluster_i) == y_.end()) { Log::Fatal("Response variable data (y_) for random effects model has not been set. 
Call 'SetY' first."); } if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal y_tilde_[cluster_i] = (Zty_[cluster_i].array() / chol_facts_[cluster_i].diagonal().array()).matrix(); if (also_calculate_ytilde2) { y_tilde2_[cluster_i] = Zt_[cluster_i].transpose() * ((y_tilde_[cluster_i].array() / chol_facts_[cluster_i].diagonal().array()).matrix()); } } else { y_tilde_[cluster_i] = Zty_[cluster_i]; const double* val = chol_facts_[cluster_i].valuePtr(); const int* row_idx = chol_facts_[cluster_i].innerIndexPtr(); const int* col_ptr = chol_facts_[cluster_i].outerIndexPtr(); sp_L_solve(val, row_idx, col_ptr, cum_num_rand_eff_[cluster_i][num_comps_total_], y_tilde_[cluster_i].data()); if (also_calculate_ytilde2) { vec_t ytilde_aux = y_tilde_[cluster_i]; sp_L_t_solve(val, row_idx, col_ptr, cum_num_rand_eff_[cluster_i][num_comps_total_], ytilde_aux.data()); y_tilde2_[cluster_i] = Zt_[cluster_i].transpose() * ytilde_aux; } } } } /*! * \brief Calculate y_tilde = L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z) (and save in y_tilde_) if dense matrices are used * \param also_calculate_ytilde2 If true y_tilde2 = Z * L^-T * L^-1 * Z^T * y is also calculated */ template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr > void CalcYtilde(bool also_calculate_ytilde2 = false) { for (const auto& cluster_i : unique_clusters_) { if (y_.find(cluster_i) == y_.end()) { Log::Fatal("Response variable data (y_) for random effects model has not been set. 
Call 'SetY' first."); } if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal y_tilde_[cluster_i] = y_tilde_[cluster_i] = (Zty_[cluster_i].array() / chol_facts_[cluster_i].diagonal().array()).matrix(); if (also_calculate_ytilde2) { y_tilde2_[cluster_i] = Zt_[cluster_i].transpose() * ((y_tilde_[cluster_i].array() / chol_facts_[cluster_i].diagonal().array()).matrix()); } } else { y_tilde_[cluster_i] = Zty_[cluster_i]; L_solve(chol_facts_[cluster_i].data(), cum_num_rand_eff_[cluster_i][num_comps_total_], y_tilde_[cluster_i].data()); if (also_calculate_ytilde2) { vec_t ytilde_aux = y_tilde_[cluster_i]; L_t_solve(chol_facts_[cluster_i].data(), cum_num_rand_eff_[cluster_i][num_comps_total_], ytilde_aux.data()); y_tilde2_[cluster_i] = Zt_[cluster_i].transpose() * ytilde_aux; } } } } /*! * \brief Calculate y^T*Psi^-1*y if sparse matrices are used * \param[out] yTPsiInvy y^T*Psi^-1*y * \param all_clusters If true, then y^T*Psi^-1*y is calculated for all clusters / data and cluster_ind is ignored * \param cluster_ind Cluster index * \param CalcYAux_already_done If true, it is assumed that y_aux_=Psi^-1y_ has already been calculated (only relevant for not use_woodbury_identity_) * \param CalcYtilde_already_done If true, it is assumed that y_tilde = L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z), has already been calculated (only relevant for use_woodbury_identity_) */ template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr > void CalcYTPsiIInvY(double& yTPsiInvy, bool all_clusters = true, gp_id_t cluster_ind = 1, bool CalcYAux_already_done = false, bool CalcYtilde_already_done = false) { yTPsiInvy = 0; std::vector<gp_id_t> clusters_iterate; if (all_clusters) { clusters_iterate = unique_clusters_; } else { clusters_iterate = std::vector<gp_id_t>(1); clusters_iterate[0] = cluster_ind; } for (const auto& cluster_i : clusters_iterate) { if (y_.find(cluster_i) == y_.end()) { Log::Fatal("Response 
variable data (y_) for random effects model has not been set. Call 'SetY' first."); } if (vecchia_approx_) { if (CalcYAux_already_done) { yTPsiInvy += (y_[cluster_i].transpose() * y_aux_[cluster_i])(0, 0); } else { if (B_.find(cluster_i) == B_.end()) { Log::Fatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first."); } vec_t y_aux_sqrt = B_[cluster_i] * y_[cluster_i]; yTPsiInvy += (y_aux_sqrt.transpose() * D_inv_[cluster_i] * y_aux_sqrt)(0, 0); } }//end vecchia_approx_ else {//not vecchia_approx_ if (chol_facts_.find(cluster_i) == chol_facts_.end()) { Log::Fatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first."); } if (use_woodbury_identity_) { if (!CalcYtilde_already_done) { CalcYtilde<T1>(false);//y_tilde = L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z) } else if ((int)y_tilde_[cluster_i].size() != cum_num_rand_eff_[cluster_i][num_comps_total_]) { Log::Fatal("y_tilde = L^-1 * Z^T * y has not the correct number of data points. Call 'CalcYtilde' first."); } yTPsiInvy += (y_[cluster_i].transpose() * y_[cluster_i])(0, 0) - (y_tilde_[cluster_i].transpose() * y_tilde_[cluster_i])(0, 0); }//end use_woodbury_identity_ else {//not use_woodbury_identity_ if (CalcYAux_already_done) { yTPsiInvy += (y_[cluster_i].transpose() * y_aux_[cluster_i])(0, 0); } else { vec_t y_aux_sqrt = y_[cluster_i]; const double* val = chol_facts_[cluster_i].valuePtr(); const int* row_idx = chol_facts_[cluster_i].innerIndexPtr(); const int* col_ptr = chol_facts_[cluster_i].outerIndexPtr(); sp_L_solve(val, row_idx, col_ptr, num_data_per_cluster_[cluster_i], y_aux_sqrt.data()); yTPsiInvy += (y_aux_sqrt.transpose() * y_aux_sqrt)(0, 0); } }//end not use_woodbury_identity_ }//end not vecchia_approx_ } } /*! 
* \brief Calculate y^T*Psi^-1*y if dense matrices are used
* \param[out] yTPsiInvy y^T*Psi^-1*y
* \param all_clusters If true, then y^T*Psi^-1*y is calculated for all clusters / data and cluster_ind is ignored
* \param cluster_ind Cluster index
* \param CalcYAux_already_done If true, it is assumed that y_aux_=Psi^-1y_ has already been calculated (only relevant for not use_woodbury_identity_)
* \param CalcYtilde_already_done If true, it is assumed that y_tilde = L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z), has already been calculated (only relevant for use_woodbury_identity_)
*/
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void CalcYTPsiIInvY(double& yTPsiInvy, bool all_clusters = true, gp_id_t cluster_ind = 1, bool CalcYAux_already_done = false, bool CalcYtilde_already_done = false) {
	yTPsiInvy = 0;
	//determine the clusters over which the quadratic form is aggregated
	std::vector<gp_id_t> clusters_iterate;
	if (all_clusters) {
		clusters_iterate = unique_clusters_;
	}
	else {
		clusters_iterate = std::vector<gp_id_t>(1);
		clusters_iterate[0] = cluster_ind;
	}
	for (const auto& cluster_i : clusters_iterate) {
		if (y_.find(cluster_i) == y_.end()) {
			Log::Fatal("Response variable data (y_) for random effects model has not been set. Call 'SetY' first.");
		}
		if (vecchia_approx_) {
			if (CalcYAux_already_done) {
				yTPsiInvy += (y_[cluster_i].transpose() * y_aux_[cluster_i])(0, 0);
			}
			else {
				if (B_.find(cluster_i) == B_.end()) {
					Log::Fatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first.");
				}
				//y^T Psi^-1 y = (B y)^T D^-1 (B y) for the Vecchia approximation
				vec_t y_aux_sqrt = B_[cluster_i] * y_[cluster_i];
				yTPsiInvy += (y_aux_sqrt.transpose() * D_inv_[cluster_i] * y_aux_sqrt)(0, 0);
			}
		}//end vecchia_approx_
		else {//not vecchia_approx_
			if (chol_facts_.find(cluster_i) == chol_facts_.end()) {
				Log::Fatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first.");
			}
			if (use_woodbury_identity_) {
				if (!CalcYtilde_already_done) {
					CalcYtilde<T1>(false);//y_tilde = L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z)
				}
				else if ((int)y_tilde_[cluster_i].size() != cum_num_rand_eff_[cluster_i][num_comps_total_]) {
					Log::Fatal("y_tilde = L^-1 * Z^T * y has not the correct number of data points. Call 'CalcYtilde' first.");
				}
				yTPsiInvy += (y_[cluster_i].transpose() * y_[cluster_i])(0, 0) - (y_tilde_[cluster_i].transpose() * y_tilde_[cluster_i])(0, 0);
			}//end use_woodbury_identity_
			else {//not use_woodbury_identity_
				if (CalcYAux_already_done) {
					yTPsiInvy += (y_[cluster_i].transpose() * y_aux_[cluster_i])(0, 0);
				}
				else {
					//forward solve with the dense Cholesky factor, then take the squared norm
					vec_t y_aux_sqrt = y_[cluster_i];
					L_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], y_aux_sqrt.data());
					yTPsiInvy += (y_aux_sqrt.transpose() * y_aux_sqrt)(0, 0);
				}
			}//end not use_woodbury_identity_
		}//end not vecchia_approx_
	}
}
/*!
* \brief Calculate gradient for covariance parameters
* This assumes that the covariance matrix has been factorized (by 'CalcCovFactor') and that y_aux or y_tilde/y_tilde2 (if use_woodbury_identity_) have been calculated (by 'CalcYAux' or 'CalcYtilde')
* \param cov_pars Covariance parameters
* \param[out] cov_grad Gradient w.r.t. covariance parameters
* \param include_error_var If true, the gradient for the marginal variance parameter (=error, nugget effect) is also calculated, otherwise not (set this to true if the nugget effect is not calculated by using the closed-form solution)
* \param save_psi_inv If true, the inverse covariance matrix Psi^-1 is saved for reuse later (e.g. when calculating the Fisher information in Fisher scoring). This option is ignored if the Vecchia approximation is used.
*/
void CalcCovParGrad(vec_t& cov_pars, vec_t& cov_grad, bool include_error_var = false, bool save_psi_inv = false) {
	if (include_error_var) {
		cov_grad = vec_t::Zero(num_cov_par_);
	}
	else {
		cov_grad = vec_t::Zero(num_cov_par_ - 1);
	}
	//index offset: slot 0 holds the error-variance gradient when it is included
	int first_cov_par = include_error_var ? 1 : 0;
	for (const auto& cluster_i : unique_clusters_) {
		if (vecchia_approx_) {//Vechia approximation
			vec_t u(num_data_per_cluster_[cluster_i]);
			vec_t uk(num_data_per_cluster_[cluster_i]);
			if (include_error_var) {
				u = B_[cluster_i] * y_[cluster_i];
				cov_grad[0] += -1. * ((double)(u.transpose() * D_inv_[cluster_i] * u)) / sigma2_ / 2. + num_data_per_cluster_[cluster_i] / 2.;
				u = D_inv_[cluster_i] * u;
			}
			else {
				u = D_inv_[cluster_i] * B_[cluster_i] * y_[cluster_i];//TODO: this is already calculated in CalcYAux -> save it there and re-use here?
			}
			for (int j = 0; j < num_comps_total_; ++j) {
				int num_par_comp = re_comps_[cluster_i][j]->num_cov_par_;
				for (int ipar = 0; ipar < num_par_comp; ++ipar) {
					uk = B_grad_[cluster_i][num_par_comp * j + ipar] * y_[cluster_i];
					//quadratic-form term plus trace term of the log-likelihood gradient
					cov_grad[first_cov_par + ind_par_[j] - 1 + ipar] += ((uk.dot(u) - 0.5 * u.dot(D_grad_[cluster_i][num_par_comp * j + ipar] * u)) / sigma2_ +
						0.5 * (D_inv_[cluster_i].diagonal()).dot(D_grad_[cluster_i][num_par_comp * j + ipar].diagonal()));
				}
			}
		}//end vecchia_approx_
		else {//not vecchia_approx_
			if (use_woodbury_identity_) {
				if (include_error_var) {
					double yTPsiInvy;
					CalcYTPsiIInvY<T1>(yTPsiInvy, false, cluster_i, true, true);
					cov_grad[0] += -1. * yTPsiInvy / sigma2_ / 2. + num_data_per_cluster_[cluster_i] / 2.;
				}
				std::vector<T1> LInvZtZj_cluster_i;
				if (save_psi_inv) {
					LInvZtZj_[cluster_i].clear();
					LInvZtZj_cluster_i = std::vector<T1>(num_comps_total_);
				}
				for (int j = 0; j < num_comps_total_; ++j) {
					sp_mat_t* Z_j = re_comps_[cluster_i][j]->GetZ();
					vec_t y_tilde_j = (*Z_j).transpose() * y_[cluster_i];
					vec_t y_tilde2_j = (*Z_j).transpose() * y_tilde2_[cluster_i];
					double yTPsiIGradPsiPsiIy = y_tilde_j.transpose() * y_tilde_j - 2. * (double)(y_tilde_j.transpose() * y_tilde2_j) + y_tilde2_j.transpose() * y_tilde2_j;
					yTPsiIGradPsiPsiIy *= cov_pars[j + 1];
					T1 LInvZtZj;
					if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ == ZtZj_ and L_inv are diagonal
						LInvZtZj = ZtZ_[cluster_i];
						LInvZtZj.diagonal().array() /= chol_facts_[cluster_i].diagonal().array();
					}
					else {
						CalcPsiInvSqrtH(ZtZj_[cluster_i][j], LInvZtZj, cluster_i, true);
					}
					if (save_psi_inv) {//save for latter use when e.g. calculating the Fisher information
						LInvZtZj_cluster_i[j] = LInvZtZj;
					}
					double trace_PsiInvGradPsi = Zj_square_sum_[cluster_i][j] - LInvZtZj.squaredNorm();
					trace_PsiInvGradPsi *= cov_pars[j + 1];
					cov_grad[first_cov_par + j] += -1. * yTPsiIGradPsiPsiIy / sigma2_ / 2. + trace_PsiInvGradPsi / 2.;
				}
				if (save_psi_inv) {
					LInvZtZj_[cluster_i] = LInvZtZj_cluster_i;
				}
			}//end use_woodbury_identity_
			else {//not use_woodbury_identity_
				T1 psi_inv;
				CalcPsiInv(psi_inv, cluster_i);
				if (save_psi_inv) {//save for latter use when e.g. calculating the Fisher information
					psi_inv_[cluster_i] = psi_inv;
				}
				if (include_error_var) {
					cov_grad[0] += -1. * ((double)(y_[cluster_i].transpose() * y_aux_[cluster_i])) / sigma2_ / 2. + num_data_per_cluster_[cluster_i] / 2.;
				}
				for (int j = 0; j < num_comps_total_; ++j) {
					for (int ipar = 0; ipar < re_comps_[cluster_i][j]->num_cov_par_; ++ipar) {
						std::shared_ptr<T1> gradPsi = re_comps_[cluster_i][j]->GetZSigmaZtGrad(ipar, true, 1.);
						//-(1/2) y_aux^T dPsi y_aux / sigma2 + (1/2) tr(Psi^-1 dPsi)
						cov_grad[first_cov_par + ind_par_[j] - 1 + ipar] += -1. * ((double)(y_aux_[cluster_i].transpose() * (*gradPsi) * y_aux_[cluster_i])) / sigma2_ / 2. +
							((double)(((*gradPsi).cwiseProduct(psi_inv)).sum())) / 2.;
					}
				}
			}//end not use_woodbury_identity_
		}//end not vecchia_approx_
	}// end loop over clusters
}
/*!
* \brief Apply a momentum step
* \param it Iteration number
* \param pars Parameters
* \param pars_lag1 Parameters from last iteration
* \param[out] pars_acc Accelerated parameters
* \param nesterov_acc_rate Acceleration rate for the Nesterov momentum. Default = 0.5
* \param nesterov_schedule_version Which version of Nesterov schedule should be used. Default = 0
* \param exclude_first_log_scale If true, no momentum is applied to the first value and the momentum step is done on the log-scale for the other values. Default = true
* \param momentum_offset Number of iterations for which no momentum is applied in the beginning
* \param log_scale If true, the momentum step is done on the log-scale
*/
void ApplyMomentumStep(int it, vec_t& pars, vec_t& pars_lag1, vec_t& pars_acc, double nesterov_acc_rate = 0.5,
	int nesterov_schedule_version = 0, bool exclude_first_log_scale = true, int momentum_offset = 2, bool log_scale = false) {
	double mu = NesterovSchedule(it, nesterov_schedule_version, nesterov_acc_rate, momentum_offset);
	int num_par = (int)pars.size();
	if (exclude_first_log_scale) {
		//keep the first parameter (e.g. the error variance) unchanged
		pars_acc[0] = pars[0];
		pars_acc.segment(1, num_par - 1) = ((mu + 1.) * (pars.segment(1, num_par - 1).array().log()) - mu * (pars_lag1.segment(1, num_par - 1).array().log())).exp().matrix();//Momentum is added on the log scale
	}
	else {
		if (log_scale) {
			pars_acc = ((mu + 1.) * (pars.array().log()) - mu * (pars_lag1.array().log())).exp().matrix();
		}
		else {
			pars_acc = (mu + 1) * pars - mu * pars_lag1;
		}
	}
}
/*!
* \brief Update linear fixed-effect coefficients doing one gradient descent step
* \param lr Learning rate
* \param marg_var Marginal variance parameters sigma^2
* \param X Covariate data for linear fixed-effect
* \param[out] beta Linear regression coefficients
*/
void UpdateCoefGradOneIter(double lr, double marg_var, den_mat_t& X, vec_t& beta) {
	vec_t y_aux(num_data_);
	GetYAux(y_aux);
	//gradient of the GLS objective wrt beta is X^T Psi^-1 (y - X beta) / marg_var
	beta += lr * (1. / marg_var) * (X.transpose()) * y_aux;
}
/*!
* \brief Update linear fixed-effect coefficients using generalized least squares (GLS)
* \param X Covariate data for linear fixed-effect
* \param[out] beta Linear regression coefficients
*/
void UpdateCoefGLS(den_mat_t& X, vec_t& beta) {
	vec_t y_aux(num_data_);
	GetYAux(y_aux);
	den_mat_t XT_psi_inv_X;
	CalcXTPsiInvX(X, XT_psi_inv_X);
	//GLS solution: beta = (X^T Psi^-1 X)^-1 X^T Psi^-1 y
	beta = XT_psi_inv_X.llt().solve(X.transpose() * y_aux);
}
/*!
* \brief Check whether NaN's are present
* \param par Vector of parameters that should be checked
*/
void CheckNaNInf(vec_t& par) {
	//note: only the first element is inspected here
	if (std::isnan(par[0]) || std::isinf(par[0])) {
		Log::Fatal("NaN or Inf occurred. If this is a problem, consider doing the following. If you have used Fisher scoring, try using gradient descent. If you have used gradient descent, consider using a smaller learning rate.");
	}
}
/*!
* \brief Calculate the Fisher information for covariance parameters. Note: you need to call CalcCovFactor first
* \param cov_pars Covariance parameters
* \param[out] FI Fisher information
* \param transf_scale If true, the derivative is taken on the transformed scale otherwise on the original scale. Default = true
* \param include_error_var If true, the marginal variance parameter is also included, otherwise not
* \param use_saved_psi_inv If false, the inverse covariance matrix Psi^-1 is calculated, otherwise a saved version is used
*/
void CalcFisherInformation(const vec_t& cov_pars, den_mat_t& FI, bool transf_scale = true,
	bool include_error_var = false, bool use_saved_psi_inv = false) {
	if (include_error_var) {
		FI = den_mat_t(num_cov_par_, num_cov_par_);
	}
	else {
		FI = den_mat_t(num_cov_par_ - 1, num_cov_par_ - 1);
	}
	FI.setZero();
	int start_cov_pars = include_error_var ?
1 : 0;
for (const auto& cluster_i : unique_clusters_) {
	if (vecchia_approx_) {
		//Note: if transf_scale==false, then all matrices and derivatives have been calculated on the original scale for the Vecchia approximation, that is why there is no adjustment here
		//Calculate auxiliary matrices for use below
		sp_mat_t Identity(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);
		Identity.setIdentity();
		sp_mat_t B_inv;
		eigen_sp_Lower_sp_RHS_solve(B_[cluster_i], Identity, B_inv, true);//No noticeable difference in (n=500, nn=100/30) compared to using eigen_sp_Lower_sp_RHS_cs_solve()
		//eigen_sp_Lower_sp_RHS_cs_solve(B_[cluster_i], Identity, B_inv, true);
		//D = D_inv^-1 and D_inv_2 = D_inv^2 (both diagonal)
		sp_mat_t D = sp_mat_t(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);
		D.setIdentity();
		D.diagonal().array() = D_inv_[cluster_i].diagonal().array().pow(-1);
		sp_mat_t D_inv_2 = sp_mat_t(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);
		D_inv_2.setIdentity();
		D_inv_2.diagonal().array() = D_inv_[cluster_i].diagonal().array().pow(2);
		//Calculate derivative(B) * B^-1
		std::vector<sp_mat_t> B_grad_B_inv(num_cov_par_ - 1);
		for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
			B_grad_B_inv[par_nb] = B_grad_[cluster_i][par_nb] * B_inv;
		}
		//Calculate Fisher information
		sp_mat_t D_inv_B_grad_B_inv, B_grad_B_inv_D;
		if (include_error_var) {
			//First calculate terms for nugget effect / noise variance parameter
			if (transf_scale) {//Optimization is done on transformed scale (in particular, log-scale)
				//The derivative for the nugget variance on the log scale is the original covariance matrix Psi, i.e. psi_inv_grad_psi_sigma2 is the identity matrix.
				FI(0, 0) += num_data_per_cluster_[cluster_i] / 2.;
				for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
					FI(0, par_nb + 1) += (double)((D_inv_[cluster_i].diagonal().array() * D_grad_[cluster_i][par_nb].diagonal().array()).sum()) / 2.;
				}
			}
			else {//Original scale for asymptotic covariance matrix
				int ind_grad_nugget = num_cov_par_ - 1;
				D_inv_B_grad_B_inv = D_inv_[cluster_i] * B_grad_[cluster_i][ind_grad_nugget] * B_inv;
				B_grad_B_inv_D = B_grad_[cluster_i][ind_grad_nugget] * B_inv * D;
				double diag = (double)((D_inv_2.diagonal().array() * D_grad_[cluster_i][ind_grad_nugget].diagonal().array() * D_grad_[cluster_i][ind_grad_nugget].diagonal().array()).sum());
				FI(0, 0) += ((double)(B_grad_B_inv_D.cwiseProduct(D_inv_B_grad_B_inv)).sum() + diag / 2.);
				for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
					B_grad_B_inv_D = B_grad_B_inv[par_nb] * D;
					diag = (double)((D_inv_2.diagonal().array() * D_grad_[cluster_i][ind_grad_nugget].diagonal().array() * D_grad_[cluster_i][par_nb].diagonal().array()).sum());
					FI(0, par_nb + 1) += ((double)(B_grad_B_inv_D.cwiseProduct(D_inv_B_grad_B_inv)).sum() + diag / 2.);
				}
			}
		}
		//Remaining covariance parameters (upper triangle only; symmetrized at the end)
		for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
			D_inv_B_grad_B_inv = D_inv_[cluster_i] * B_grad_B_inv[par_nb];
			for (int par_nb_cross = par_nb; par_nb_cross < num_cov_par_ - 1; ++par_nb_cross) {
				B_grad_B_inv_D = B_grad_B_inv[par_nb_cross] * D;
				double diag = (double)((D_inv_2.diagonal().array() * D_grad_[cluster_i][par_nb].diagonal().array() * D_grad_[cluster_i][par_nb_cross].diagonal().array()).sum());
				FI(par_nb + start_cov_pars, par_nb_cross + start_cov_pars) += ((double)(B_grad_B_inv_D.cwiseProduct(D_inv_B_grad_B_inv)).sum() + diag / 2.);
			}
		}
	}//end vecchia_approx_
	else {//not vecchia_approx_
		if (use_woodbury_identity_) {
			//Notation used below: M = Sigma^-1 + ZtZ, Sigma = cov(b) b=latent random effects, L=chol(M) i.e. M=LLt, MInv = M^-1 = L^-TL^-1
			if (!use_saved_psi_inv) {
				LInvZtZj_[cluster_i] = std::vector<T1>(num_comps_total_);
				if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ == ZtZj_ and L_inv are diagonal
					LInvZtZj_[cluster_i][0] = ZtZ_[cluster_i];
					LInvZtZj_[cluster_i][0].diagonal().array() /= chol_facts_[cluster_i].diagonal().array();
				}
				else {
					for (int j = 0; j < num_comps_total_; ++j) {
						CalcPsiInvSqrtH(ZtZj_[cluster_i][j], LInvZtZj_[cluster_i][j], cluster_i, true);
					}
				}
			}
			if (include_error_var) {
				if (transf_scale) {//Optimization is done on transformed scale (error variance factored out and log-scale)
					//The derivative for the nugget variance on the transformed scale is the original covariance matrix Psi, i.e. psi_inv_grad_psi_sigma2 is the identity matrix.
					FI(0, 0) += num_data_per_cluster_[cluster_i] / 2.;
					for (int j = 0; j < num_comps_total_; ++j) {
						double trace_PsiInvGradPsi = Zj_square_sum_[cluster_i][j] - LInvZtZj_[cluster_i][j].squaredNorm();
						FI(0, j + 1) += trace_PsiInvGradPsi * cov_pars[j + 1] / 2.;
					}
				}//end transf_scale
				else {//not transf_scale
					T1 MInv_ZtZ;//=(Sigma_inv + ZtZ)^-1 * ZtZ
					if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ == ZtZj_ and L_inv are diagonal
						MInv_ZtZ = T1(ZtZ_[cluster_i].rows(), ZtZ_[cluster_i].cols());
						MInv_ZtZ.setIdentity();//initialize
						MInv_ZtZ.diagonal().array() = ZtZ_[cluster_i].diagonal().array() / (chol_facts_[cluster_i].diagonal().array().square());
					}
					else {
						T1 ZtZ = T1(ZtZ_[cluster_i]);//TODO: this step is not needed for sparse matrices (i.e. copying is not required)
						MInv_ZtZ = chol_facts_solve_[cluster_i].solve(ZtZ);
					}
					T1 MInv_ZtZ_t = MInv_ZtZ.transpose();//TODO: possible without saving MInv_ZtZ.transpose()? -> compiler problem in MInv_ZtZ.cwiseProduct(MInv_ZtZ.transpose())
					FI(0, 0) += (num_data_per_cluster_[cluster_i] - 2.
* MInv_ZtZ.diagonal().sum() + (double)(MInv_ZtZ.cwiseProduct(MInv_ZtZ_t)).sum()) / (cov_pars[0] * cov_pars[0] * 2.);
for (int j = 0; j < num_comps_total_; ++j) {
	T1 ZjZ_MInv_ZtZ_t = MInv_ZtZ_t * ZtZj_[cluster_i][j];
	T1 ZtZj = T1(ZtZj_[cluster_i][j]);
	double trace_PsiInvGradPsi;
	if (num_comps_total_ > 1) {
		T1 MInv_ZtZj = chol_facts_solve_[cluster_i].solve(ZtZj);
		trace_PsiInvGradPsi = Zj_square_sum_[cluster_i][j] - 2. * (double)(LInvZtZj_[cluster_i][j].squaredNorm()) + (double)(ZjZ_MInv_ZtZ_t.cwiseProduct(MInv_ZtZj)).sum();
	}
	else {
		//single component: MInv_ZtZj equals MInv_ZtZ, so reuse it
		trace_PsiInvGradPsi = Zj_square_sum_[cluster_i][j] - 2. * (double)(LInvZtZj_[cluster_i][j].squaredNorm()) + (double)(ZjZ_MInv_ZtZ_t.cwiseProduct(MInv_ZtZ)).sum();
	}
	FI(0, j + 1) += trace_PsiInvGradPsi / (cov_pars[0] * cov_pars[0] * 2.);
}
}//end not transf_scale
}//end include_error_var
//Remaining covariance parameters
for (int j = 0; j < num_comps_total_; ++j) {
	sp_mat_t* Z_j = re_comps_[cluster_i][j]->GetZ();
	for (int k = j; k < num_comps_total_; ++k) {
		sp_mat_t* Z_k = re_comps_[cluster_i][k]->GetZ();
		sp_mat_t Zjt_Zk = (*Z_j).transpose() * (*Z_k);
		T1 LInvZtZj_t_LInvZtZk = LInvZtZj_[cluster_i][j].transpose() * LInvZtZj_[cluster_i][k];
		double FI_jk = Zjt_Zk.squaredNorm() + LInvZtZj_t_LInvZtZk.squaredNorm() - 2. * (double)(Zjt_Zk.cwiseProduct(LInvZtZj_t_LInvZtZk)).sum();
		if (transf_scale) {
			FI_jk *= cov_pars[j + 1] * cov_pars[k + 1];
		}
		else {
			FI_jk /= cov_pars[0] * cov_pars[0];
		}
		FI(j + start_cov_pars, k + start_cov_pars) += FI_jk / 2.;
	}
}
}//end use_woodbury_identity_
else {//not use_woodbury_identity_
	T1 psi_inv;
	if (use_saved_psi_inv) {
		psi_inv = psi_inv_[cluster_i];
	}
	else {
		CalcPsiInv(psi_inv, cluster_i);
	}
	if (!transf_scale) {
		psi_inv /= cov_pars[0];//psi_inv has been calculated with a transformed parametrization, so we need to divide everything by cov_pars[0] to obtain the covariance matrix
	}
	//Calculate Psi^-1 * derivative(Psi)
	std::vector<T1> psi_inv_deriv_psi(num_cov_par_ - 1);
	int deriv_par_nb = 0;
	for (int j = 0; j < num_comps_total_; ++j) {//there is currently no possibility to loop over the parameters directly
		for (int jpar = 0; jpar < re_comps_[cluster_i][j]->num_cov_par_; ++jpar) {
			psi_inv_deriv_psi[deriv_par_nb] = psi_inv * *(re_comps_[cluster_i][j]->GetZSigmaZtGrad(jpar, transf_scale, cov_pars[0]));
			deriv_par_nb++;
		}
	}
	//Calculate Fisher information
	if (include_error_var) {
		//First calculate terms for nugget effect / noise variance parameter
		if (transf_scale) {//Optimization is done on transformed scale (error variance factored out and log-scale)
			//The derivative for the nugget variance on the transformed scale is the original covariance matrix Psi, i.e. psi_inv_grad_psi_sigma2 is the identity matrix.
			FI(0, 0) += num_data_per_cluster_[cluster_i] / 2.;
			for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
				FI(0, par_nb + 1) += psi_inv_deriv_psi[par_nb].diagonal().sum() / 2.;
			}
		}
		else {//Original scale for asymptotic covariance matrix
			//The derivative for the nugget variance is the identity matrix, i.e. psi_inv_grad_psi_sigma2 = psi_inv.
			FI(0, 0) += ((double)(psi_inv.cwiseProduct(psi_inv)).sum()) / 2.;
			for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
				FI(0, par_nb + 1) += ((double)(psi_inv.cwiseProduct(psi_inv_deriv_psi[par_nb])).sum()) / 2.;
			}
		}
	}
	//Remaining covariance parameters (upper triangle only; symmetrized below)
	for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
		T1 psi_inv_grad_psi_par_nb_T = psi_inv_deriv_psi[par_nb].transpose();
		FI(par_nb + start_cov_pars, par_nb + start_cov_pars) += ((double)(psi_inv_grad_psi_par_nb_T.cwiseProduct(psi_inv_deriv_psi[par_nb])).sum()) / 2.;
		for (int par_nb_cross = par_nb + 1; par_nb_cross < num_cov_par_ - 1; ++par_nb_cross) {
			FI(par_nb + start_cov_pars, par_nb_cross + start_cov_pars) += ((double)(psi_inv_grad_psi_par_nb_T.cwiseProduct(psi_inv_deriv_psi[par_nb_cross])).sum()) / 2.;
		}
		psi_inv_deriv_psi[par_nb].resize(0, 0);//not needed anymore
		psi_inv_grad_psi_par_nb_T.resize(0, 0);
	}
}//end not use_woodbury_identity_
}//end not vecchia_approx_
}//end loop over clusters
//mirror the upper triangle into the lower triangle (FI is symmetric)
FI.triangularView<Eigen::StrictlyLower>() = FI.triangularView<Eigen::StrictlyUpper>().transpose();
//for (int i = 0; i < std::min((int)FI.rows(),4); ++i) {//For debugging only
//	for (int j = i; j < std::min((int)FI.cols(),4); ++j) {
//		Log::Info("FI(%d,%d) %g", i, j, FI(i, j));
//	}
//}
}
/*!
* \brief Calculate the standard deviations for the MLE of the covariance parameters as the diagonal of the inverse Fisher information (on the orignal scale and not the transformed scale used in the optimization)
* \param cov_pars MLE of covariance parameters
* \param[out] std_dev Standard deviations
*/
void CalcStdDevCovPar(const vec_t& cov_pars, vec_t& std_dev) {
	SetCovParsComps(cov_pars);
	//factorize with gradients on the original scale, including the nugget-variance derivative
	CalcCovFactor(true, false, cov_pars[0], true);
	den_mat_t FI;
	CalcFisherInformation(cov_pars, FI, false, true, false);
	std_dev = FI.inverse().diagonal().array().sqrt().matrix();
}
/*!
* \brief Calculate the standard deviations for the MLE of the regression coefficients as the diagonal of the inverse Fisher information * \param cov_pars MLE of covariance parameters * \param X Covariate data for linear fixed-effect * \param[out] std_dev Standard deviations */ void CalcStdDevCoef(vec_t& cov_pars, const den_mat_t& X, vec_t& std_dev) { if ((int)std_dev.size() >= num_data_) { Log::Warning("Sample size too small to calculate standard deviations for coefficients"); for (int i = 0; i < (int)std_dev.size(); ++i) { std_dev[i] = std::numeric_limits<double>::quiet_NaN(); } } else { SetCovParsComps(cov_pars); CalcCovFactor(false, true, 1., false); den_mat_t FI((int)X.cols(), (int)X.cols()); CalcXTPsiInvX(X, FI); FI /= cov_pars[0]; std_dev = FI.inverse().diagonal().array().sqrt().matrix(); } } /*! * \brief Calculate predictions (conditional mean and covariance matrix) for one cluster * \param cluster_i Cluster index for which prediction are made * \param num_data_pred Number of prediction locations * \param num_data_per_cluster_pred Keys: Labels of independent realizations of REs/GPs, values: number of prediction locations per independent realization * \param data_indices_per_cluster_pred Keys: labels of independent clusters, values: vectors with indices for data points that belong to the every cluster * \param re_group_levels_pred Group levels for the grouped random effects (re_group_levels_pred[j] contains the levels for RE number j) * \param re_group_rand_coef_data_pred Random coefficient data for grouped REs * \param gp_coords_mat_pred Coordinates for prediction locations * \param gp_rand_coef_data_pred Random coefficient data for GPs * \param predict_cov_mat If true, the covariance matrix is also calculated * \param[out] mean_pred_id Predicted mean * \param[out] cov_mat_pred_id Predicted covariance matrix */ void CalcPred(gp_id_t cluster_i, int num_data_pred, std::map<gp_id_t, int>& num_data_per_cluster_pred, std::map<gp_id_t, std::vector<int>>& 
data_indices_per_cluster_pred, const std::vector<std::vector<string_t>>& re_group_levels_pred, const double* re_group_rand_coef_data_pred, const den_mat_t& gp_coords_mat_pred, const double* gp_rand_coef_data_pred, bool predict_cov_mat, vec_t& mean_pred_id, T1& cov_mat_pred_id) { // Vector which contains covariance matrices needed for making predictions in the following order: // 0. Ztilde*Sigma*Z^T, 1. Zstar*Sigmatilde^T*Z^T, 2. Ztilde*Sigma*Ztilde^T, 3. Ztilde*Sigmatilde*Zstar^T, 4. Zstar*Sigmastar*Zstar^T std::vector<T1> pred_mats(5); //Define which covariance matrices are zero ('false') or non-zero ('true') std::vector<bool> active_mats{ false, false, false, false, false }; if (num_re_group_total_ > 0) { active_mats[0] = true; active_mats[2] = true; active_mats[4] = true; } if (num_gp_total_ > 0) { active_mats[1] = true; active_mats[4] = true; } //Initialize covariance matrices for (int i = 0; i < 2; ++i) { if (active_mats[i]) { pred_mats[i].resize(num_data_per_cluster_pred[cluster_i], num_data_per_cluster_[cluster_i]); pred_mats[i].setZero(); } } if (predict_cov_mat) { for (int i = 2; i < 5; ++i) { if (active_mats[i]) { pred_mats[i].resize(num_data_per_cluster_pred[cluster_i], num_data_per_cluster_pred[cluster_i]); pred_mats[i].setZero(); } } } //Calculate covariance matrices int cn = 0;//component number //Grouped random effects if (num_re_group_ > 0) { for (int j = 0; j < num_re_group_; ++j) { std::shared_ptr<RECompGroup<T1>> re_comp = std::dynamic_pointer_cast<RECompGroup<T1>>(re_comps_[cluster_i][cn]); std::vector<re_group_t> group_data; for (const auto& id : data_indices_per_cluster_pred[cluster_i]) { group_data.push_back(re_group_levels_pred[j][id]); } re_comp->AddPredCovMatrices(group_data, pred_mats, predict_cov_mat); cn += 1; } if (num_re_group_rand_coef_ > 0) { //Random coefficient grouped random effects for (int j = 0; j < num_re_group_rand_coef_; ++j) { std::shared_ptr<RECompGroup<T1>> re_comp = 
std::dynamic_pointer_cast<RECompGroup<T1>>(re_comps_[cluster_i][cn]); std::vector<re_group_t> group_data; std::vector<double> rand_coef_data; for (const auto& id : data_indices_per_cluster_pred[cluster_i]) { rand_coef_data.push_back(re_group_rand_coef_data_pred[j * num_data_pred + id]); group_data.push_back(re_group_levels_pred[ind_effect_group_rand_coef_[j] - 1][id]);//subtract 1 since counting starts at one for this index } re_comp->AddPredCovMatrices(group_data, pred_mats, predict_cov_mat, rand_coef_data.data()); cn += 1; } } } //Gaussian process if (num_gp_ > 0) { std::shared_ptr<RECompGP<T1>> re_comp_base = std::dynamic_pointer_cast<RECompGP<T1>>(re_comps_[cluster_i][cn]); re_comp_base->AddPredCovMatrices(re_comp_base->coords_, gp_coords_mat_pred, pred_mats, predict_cov_mat); cn += 1; if (num_gp_rand_coef_ > 0) { std::shared_ptr<RECompGP<T1>> re_comp; //Random coefficient Gaussian processes for (int j = 0; j < num_gp_rand_coef_; ++j) { re_comp = std::dynamic_pointer_cast<RECompGP<T1>>(re_comps_[cluster_i][cn]); std::vector<double> rand_coef_data; for (const auto& id : data_indices_per_cluster_pred[cluster_i]) { rand_coef_data.push_back(gp_rand_coef_data_pred[j * num_data_pred + id]); } re_comp->AddPredCovMatrices(re_comp_base->coords_, gp_coords_mat_pred, pred_mats, predict_cov_mat, rand_coef_data.data()); cn += 1; } } } T1 M_aux(num_data_per_cluster_pred[cluster_i], num_data_per_cluster_[cluster_i]);//Ztilde*Sigma*Z^T + Zstar*Sigmatilde^T*Z^T M_aux.setZero(); for (int i = 0; i < 2; ++i) { if (active_mats[i]) { M_aux += pred_mats[i]; } } mean_pred_id = M_aux * y_aux_[cluster_i]; if (predict_cov_mat) { cov_mat_pred_id.setIdentity(); for (int i = 2; i < 5; ++i) { if (active_mats[i]) { cov_mat_pred_id += pred_mats[i]; if (i == 3) {//Ztilde*Sigmatilde*Zstar^T cov_mat_pred_id += T1(pred_mats[i].transpose()); } } } if (use_woodbury_identity_) { T1 ZtM_aux = T1(Zt_[cluster_i] * M_aux.transpose()); if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one 
random effect -> ZtZ_ is diagonal ZtM_aux = chol_facts_[cluster_i].diagonal().array().inverse().matrix().asDiagonal() * ZtM_aux; cov_mat_pred_id -= (M_aux * T1(M_aux.transpose()) - ZtM_aux.transpose() * ZtM_aux); } else { cov_mat_pred_id -= (M_aux * T1(M_aux.transpose()) - ZtM_aux.transpose() * chol_facts_solve_[cluster_i].solve(ZtM_aux)); } } else { cov_mat_pred_id -= (M_aux * (chol_facts_solve_[cluster_i].solve(T1(M_aux.transpose())))); } } } /*! * \brief Calculate predictions (conditional mean and covariance matrix) using the Vecchia approximation for the covariance matrix of the observable process when observed locations appear first in the ordering * \param CondObsOnly If true, the nearest neighbors for the predictions are found only among the observed data * \param cluster_i Cluster index for which prediction are made * \param num_data_pred Number of prediction locations * \param num_data_per_cluster_pred Keys: Labels of independent realizations of REs/GPs, values: number of prediction locations per independent realization * \param data_indices_per_cluster_pred Keys: labels of independent clusters, values: vectors with indices for data points that belong to the every cluster * \param gp_coords_mat_obs Coordinates for observed locations * \param gp_coords_mat_pred Coordinates for prediction locations * \param gp_rand_coef_data_pred Random coefficient data for GPs * \param predict_cov_mat If true, the covariance matrix is also calculated * \param[out] mean_pred_id Predicted mean * \param[out] cov_mat_pred_id Predicted covariance matrix */ void CalcPredVecchiaObservedFirstOrder(bool CondObsOnly, gp_id_t cluster_i, int num_data_pred, std::map<gp_id_t, int>& num_data_per_cluster_pred, std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster_pred, const den_mat_t& gp_coords_mat_obs, const den_mat_t& gp_coords_mat_pred, const double* gp_rand_coef_data_pred, bool predict_cov_mat, vec_t& mean_pred_id, T1& cov_mat_pred_id) { int num_data_cli = 
num_data_per_cluster_[cluster_i]; int num_data_pred_cli = num_data_per_cluster_pred[cluster_i]; //Find nearest neighbors den_mat_t coords_all(num_data_cli + num_data_pred_cli, dim_gp_coords_); coords_all << gp_coords_mat_obs, gp_coords_mat_pred; std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_data_pred_cli); std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_data_pred_cli); std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_data_pred_cli); if (CondObsOnly) { find_nearest_neighbors_Veccia_fast(coords_all, num_data_cli + num_data_pred_cli, num_neighbors_pred_, nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, num_data_cli, num_data_cli - 1); } else {//find neighbors among both the observed and prediction locations find_nearest_neighbors_Veccia_fast(coords_all, num_data_cli + num_data_pred_cli, num_neighbors_pred_, nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, num_data_cli, -1); } //Random coefficients std::vector<std::vector<den_mat_t>> z_outer_z_obs_neighbors_cluster_i(num_data_pred_cli); if (num_gp_rand_coef_ > 0) { for (int j = 0; j < num_gp_rand_coef_; ++j) { std::vector<double> rand_coef_data = re_comps_[cluster_i][ind_intercept_gp_ + j + 1]->rand_coef_data_;//First entries are the observed data, then the predicted data for (const auto& id : data_indices_per_cluster_pred[cluster_i]) {//TODO: maybe do the following in parallel? 
(see CalcPredVecchiaPredictedFirstOrder) rand_coef_data.push_back(gp_rand_coef_data_pred[j * num_data_pred + id]); } #pragma omp for schedule(static) for (int i = 0; i < num_data_pred_cli; ++i) { if (j == 0) { z_outer_z_obs_neighbors_cluster_i[i] = std::vector<den_mat_t>(num_gp_rand_coef_); } int dim_z = (int)nearest_neighbors_cluster_i[i].size() + 1; vec_t coef_vec(dim_z); coef_vec(0) = rand_coef_data[num_data_cli + i]; if ((num_data_cli + i) > 0) { for (int ii = 1; ii < dim_z; ++ii) { coef_vec(ii) = rand_coef_data[nearest_neighbors_cluster_i[i][ii - 1]]; } } z_outer_z_obs_neighbors_cluster_i[i][j] = coef_vec * coef_vec.transpose(); } } } // Determine Triplet for initializing Bpo and Bp std::vector<Triplet_t> entries_init_Bpo, entries_init_Bp; for (int i = 0; i < num_data_pred_cli; ++i) { entries_init_Bp.push_back(Triplet_t(i, i, 1.));//Put 1 on the diagonal for (int inn = 0; inn < (int)nearest_neighbors_cluster_i[i].size(); ++inn) { if (nearest_neighbors_cluster_i[i][inn] < num_data_cli) {//nearest neighbor belongs to observed data entries_init_Bpo.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][inn], 0.)); } else {//nearest neighbor belongs to predicted data entries_init_Bp.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][inn] - num_data_cli, 0.)); } } } sp_mat_t Bpo(num_data_pred_cli, num_data_cli); sp_mat_t Bp(num_data_pred_cli, num_data_pred_cli); Bpo.setFromTriplets(entries_init_Bpo.begin(), entries_init_Bpo.end());//initialize matrices (in order that the code below can be run in parallel) Bp.setFromTriplets(entries_init_Bp.begin(), entries_init_Bp.end()); sp_mat_t Dp(num_data_pred_cli, num_data_pred_cli); Dp.setIdentity();//Put 1 on the diagonal (for nugget effect) #pragma omp parallel for schedule(static) for (int i = 0; i < num_data_pred_cli; ++i) { int num_nn = (int)nearest_neighbors_cluster_i[i].size(); //define covariance and gradient matrices den_mat_t cov_mat_obs_neighbors(1, num_nn);//dim = 1 x nn den_mat_t 
cov_mat_between_neighbors(num_nn, num_nn);//dim = nn x nn den_mat_t cov_grad_mats_obs_neighbors, cov_grad_mats_between_neighbors; //not used, just as mock argument for functions below for (int j = 0; j < num_gp_total_; ++j) { if (j == 0) { re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i], cov_mat_obs_neighbors, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);//write on matrices directly for first GP component re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i], cov_mat_between_neighbors, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false); } else {//random coefficient GPs den_mat_t cov_mat_obs_neighbors_j; den_mat_t cov_mat_between_neighbors_j; re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i], cov_mat_obs_neighbors_j, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false); re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i], cov_mat_between_neighbors_j, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false); //multiply by coefficient matrix cov_mat_obs_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array(); cov_mat_between_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array(); cov_mat_obs_neighbors += cov_mat_obs_neighbors_j; cov_mat_between_neighbors += cov_mat_between_neighbors_j; } }//end loop over components j //Calculate matrices A and D as well as their derivatives //1. add first summand of matrix D (ZCZ^T_{ii}) for (int j = 0; j < num_gp_total_; ++j) { double d_comp_j = re_comps_[cluster_i][ind_intercept_gp_ + j]->cov_pars_[0]; if (j > 0) {//random coefficient d_comp_j *= z_outer_z_obs_neighbors_cluster_i[i][j - 1](0, 0); } Dp.coeffRef(i, i) += d_comp_j; } //2. 
remaining terms cov_mat_between_neighbors.diagonal().array() += 1.;//add nugget effect den_mat_t A_i(1, num_nn);//dim = 1 x nn A_i = (cov_mat_between_neighbors.llt().solve(cov_mat_obs_neighbors.transpose())).transpose(); for (int inn = 0; inn < num_nn; ++inn) { if (nearest_neighbors_cluster_i[i][inn] < num_data_cli) {//nearest neighbor belongs to observed data Bpo.coeffRef(i, nearest_neighbors_cluster_i[i][inn]) -= A_i(0, inn); } else { Bp.coeffRef(i, nearest_neighbors_cluster_i[i][inn] - num_data_cli) -= A_i(0, inn); } } Dp.coeffRef(i, i) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0); }//end loop over data i mean_pred_id = -Bpo * y_[cluster_i]; if (!CondObsOnly) { sp_L_solve(Bp.valuePtr(), Bp.innerIndexPtr(), Bp.outerIndexPtr(), num_data_pred_cli, mean_pred_id.data()); } if (predict_cov_mat) { if (CondObsOnly) { cov_mat_pred_id = Dp; } else { sp_mat_t Identity(num_data_pred_cli, num_data_pred_cli); Identity.setIdentity(); sp_mat_t Bp_inv; eigen_sp_Lower_sp_RHS_cs_solve(Bp, Identity, Bp_inv, true); cov_mat_pred_id = T1(Bp_inv * Dp * Bp_inv.transpose()); } } } /*! 
* \brief Calculate predictions (conditional mean and covariance matrix) using the Vecchia approximation for the covariance matrix of the observable proces when prediction locations appear first in the ordering * \param cluster_i Cluster index for which prediction are made * \param num_data_pred Number of prediction locations * \param num_data_per_cluster_pred Keys: Labels of independent realizations of REs/GPs, values: number of prediction locations per independent realization * \param data_indices_per_cluster_pred Keys: labels of independent clusters, values: vectors with indices for data points that belong to the every cluster * \param gp_coords_mat_obs Coordinates for observed locations * \param gp_coords_mat_pred Coordinates for prediction locations * \param gp_rand_coef_data_pred Random coefficient data for GPs * \param predict_cov_mat If true, the covariance matrix is also calculated * \param[out] mean_pred_id Predicted mean * \param[out] cov_mat_pred_id Predicted covariance matrix */ void CalcPredVecchiaPredictedFirstOrder(gp_id_t cluster_i, int num_data_pred, std::map<gp_id_t, int>& num_data_per_cluster_pred, std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster_pred, const den_mat_t& gp_coords_mat_obs, const den_mat_t& gp_coords_mat_pred, const double* gp_rand_coef_data_pred, bool predict_cov_mat, vec_t& mean_pred_id, T1& cov_mat_pred_id) { int num_data_cli = num_data_per_cluster_[cluster_i]; int num_data_pred_cli = num_data_per_cluster_pred[cluster_i]; int num_data_tot = num_data_cli + num_data_pred_cli; //Find nearest neighbors den_mat_t coords_all(num_data_tot, dim_gp_coords_); coords_all << gp_coords_mat_pred, gp_coords_mat_obs; std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_data_tot); std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_data_tot); std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_data_tot); find_nearest_neighbors_Veccia_fast(coords_all, num_data_tot, num_neighbors_pred_, nearest_neighbors_cluster_i, 
dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, 0, -1); //Prepare data for random coefficients std::vector<std::vector<den_mat_t>> z_outer_z_obs_neighbors_cluster_i(num_data_tot); if (num_gp_rand_coef_ > 0) { for (int j = 0; j < num_gp_rand_coef_; ++j) { std::vector<double> rand_coef_data(num_data_tot);//First entries are the predicted data, then the observed data #pragma omp for schedule(static) for (int i = 0; i < num_data_pred_cli; ++i) { rand_coef_data[i] = gp_rand_coef_data_pred[j * num_data_pred + data_indices_per_cluster_pred[cluster_i][i]]; } #pragma omp for schedule(static) for (int i = 0; i < num_data_cli; ++i) { rand_coef_data[num_data_pred_cli + i] = re_comps_[cluster_i][ind_intercept_gp_ + j + 1]->rand_coef_data_[i]; } #pragma omp for schedule(static) for (int i = 0; i < num_data_tot; ++i) { if (j == 0) { z_outer_z_obs_neighbors_cluster_i[i] = std::vector<den_mat_t>(num_gp_rand_coef_); } int dim_z = (int)nearest_neighbors_cluster_i[i].size() + 1; vec_t coef_vec(dim_z); coef_vec(0) = rand_coef_data[i]; if (i > 0) { for (int ii = 1; ii < dim_z; ++ii) { coef_vec(ii) = rand_coef_data[nearest_neighbors_cluster_i[i][ii - 1]]; } } z_outer_z_obs_neighbors_cluster_i[i][j] = coef_vec * coef_vec.transpose(); } } } // Determine Triplet for initializing Bo, Bop, and Bp std::vector<Triplet_t> entries_init_Bo, entries_init_Bop, entries_init_Bp; for (int i = 0; i < num_data_pred_cli; ++i) { entries_init_Bp.push_back(Triplet_t(i, i, 1.));//Put 1 on the diagonal for (int inn = 0; inn < (int)nearest_neighbors_cluster_i[i].size(); ++inn) { entries_init_Bp.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][inn], 0.)); } } for (int i = 0; i < num_data_cli; ++i) { entries_init_Bo.push_back(Triplet_t(i, i, 1.));//Put 1 on the diagonal for (int inn = 0; inn < (int)nearest_neighbors_cluster_i[i + num_data_pred_cli].size(); ++inn) { if (nearest_neighbors_cluster_i[i + num_data_pred_cli][inn] < num_data_pred_cli) {//nearest neighbor belongs to predicted data 
entries_init_Bop.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i + num_data_pred_cli][inn], 0.)); } else {//nearest neighbor belongs to predicted data entries_init_Bo.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i + num_data_pred_cli][inn] - num_data_pred_cli, 0.)); } } } sp_mat_t Bo(num_data_cli, num_data_cli); sp_mat_t Bop(num_data_cli, num_data_pred_cli); sp_mat_t Bp(num_data_pred_cli, num_data_pred_cli); Bo.setFromTriplets(entries_init_Bo.begin(), entries_init_Bo.end());//initialize matrices (in order that the code below can be run in parallel) Bop.setFromTriplets(entries_init_Bop.begin(), entries_init_Bop.end()); Bp.setFromTriplets(entries_init_Bp.begin(), entries_init_Bp.end()); sp_mat_t Do_inv(num_data_cli, num_data_cli); sp_mat_t Dp_inv(num_data_pred_cli, num_data_pred_cli); Do_inv.setIdentity();//Put 1 on the diagonal (for nugget effect) Dp_inv.setIdentity(); #pragma omp parallel for schedule(static) for (int i = 0; i < num_data_tot; ++i) { int num_nn = (int)nearest_neighbors_cluster_i[i].size(); //define covariance and gradient matrices den_mat_t cov_mat_obs_neighbors(1, num_nn);//dim = 1 x nn den_mat_t cov_mat_between_neighbors(num_nn, num_nn);//dim = nn x nn den_mat_t cov_grad_mats_obs_neighbors, cov_grad_mats_between_neighbors; //not used, just as mock argument for functions below if (i > 0) { for (int j = 0; j < num_gp_total_; ++j) { if (j == 0) { re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i], cov_mat_obs_neighbors, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);//write on matrices directly for first GP component re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i], cov_mat_between_neighbors, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false); } else {//random coefficient GPs den_mat_t cov_mat_obs_neighbors_j; den_mat_t cov_mat_between_neighbors_j; re_comps_[cluster_i][ind_intercept_gp_ + 
j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i], cov_mat_obs_neighbors_j, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false); re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i], cov_mat_between_neighbors_j, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false); //multiply by coefficient matrix cov_mat_obs_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array(); cov_mat_between_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array(); cov_mat_obs_neighbors += cov_mat_obs_neighbors_j; cov_mat_between_neighbors += cov_mat_between_neighbors_j; } }//end loop over components j } //Calculate matrices A and D as well as their derivatives //1. add first summand of matrix D (ZCZ^T_{ii}) for (int j = 0; j < num_gp_total_; ++j) { double d_comp_j = re_comps_[cluster_i][ind_intercept_gp_ + j]->cov_pars_[0]; if (j > 0) {//random coefficient d_comp_j *= z_outer_z_obs_neighbors_cluster_i[i][j - 1](0, 0); } if (i < num_data_pred_cli) { Dp_inv.coeffRef(i, i) += d_comp_j; } else { Do_inv.coeffRef(i - num_data_pred_cli, i - num_data_pred_cli) += d_comp_j; } } //2. 
remaining terms if (i > 0) { cov_mat_between_neighbors.diagonal().array() += 1.;//add nugget effect den_mat_t A_i(1, num_nn);//dim = 1 x nn A_i = (cov_mat_between_neighbors.llt().solve(cov_mat_obs_neighbors.transpose())).transpose(); for (int inn = 0; inn < num_nn; ++inn) { if (i < num_data_pred_cli) { Bp.coeffRef(i, nearest_neighbors_cluster_i[i][inn]) -= A_i(0, inn); } else { if (nearest_neighbors_cluster_i[i][inn] < num_data_pred_cli) {//nearest neighbor belongs to predicted data Bop.coeffRef(i - num_data_pred_cli, nearest_neighbors_cluster_i[i][inn]) -= A_i(0, inn); } else { Bo.coeffRef(i - num_data_pred_cli, nearest_neighbors_cluster_i[i][inn] - num_data_pred_cli) -= A_i(0, inn); } } } if (i < num_data_pred_cli) { Dp_inv.coeffRef(i, i) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0); } else { Do_inv.coeffRef(i - num_data_pred_cli, i - num_data_pred_cli) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0); } } if (i < num_data_pred_cli) { Dp_inv.coeffRef(i, i) = 1 / Dp_inv.coeffRef(i, i); } else { Do_inv.coeffRef(i - num_data_pred_cli, i - num_data_pred_cli) = 1 / Do_inv.coeffRef(i - num_data_pred_cli, i - num_data_pred_cli); } }//end loop over data i sp_mat_t cond_prec = Bp.transpose() * Dp_inv * Bp + Bop.transpose() * Do_inv * Bop; chol_sp_mat_t CholFact; CholFact.compute(cond_prec); if (predict_cov_mat) { sp_mat_t Identity(num_data_pred_cli, num_data_pred_cli); Identity.setIdentity(); sp_mat_t cond_prec_chol = CholFact.matrixL(); sp_mat_t cond_prec_chol_inv; eigen_sp_Lower_sp_RHS_cs_solve(cond_prec_chol, Identity, cond_prec_chol_inv, true); cov_mat_pred_id = T1(cond_prec_chol_inv.transpose() * cond_prec_chol_inv); mean_pred_id = -cov_mat_pred_id * Bop.transpose() * Do_inv * Bo * y_[cluster_i]; } else { mean_pred_id = -CholFact.solve(Bop.transpose() * Do_inv * Bo * y_[cluster_i]); } } /*! 
* \brief Calculate predictions (conditional mean and covariance matrix) using the Vecchia approximation for the latent process when observed locations appear first in the ordering * \param CondObsOnly If true, the nearest neighbors for the predictions are found only among the observed data * \param cluster_i Cluster index for which prediction are made * \param num_data_per_cluster_pred Keys: Labels of independent realizations of REs/GPs, values: number of prediction locations per independent realization * \param gp_coords_mat_obs Coordinates for observed locations * \param gp_coords_mat_pred Coordinates for prediction locations * \param predict_cov_mat If true, the covariance matrix is also calculated * \param[out] mean_pred_id Predicted mean * \param[out] cov_mat_pred_id Predicted covariance matrix */ void CalcPredVecchiaLatentObservedFirstOrder(bool CondObsOnly, gp_id_t cluster_i, std::map<gp_id_t, int>& num_data_per_cluster_pred, const den_mat_t& gp_coords_mat_obs, const den_mat_t& gp_coords_mat_pred, bool predict_cov_mat, vec_t& mean_pred_id, T1& cov_mat_pred_id) { if (num_gp_rand_coef_ > 0) { Log::Fatal("The Vecchia approximation for latent process(es) is currently not implemented when having random coefficients"); } int num_data_cli = num_data_per_cluster_[cluster_i]; int num_data_pred_cli = num_data_per_cluster_pred[cluster_i]; int num_data_tot = num_data_cli + num_data_pred_cli; //Find nearest neighbors den_mat_t coords_all(num_data_cli + num_data_pred_cli, dim_gp_coords_); coords_all << gp_coords_mat_obs, gp_coords_mat_pred; //Determine number of unique observartion locations std::vector<int> uniques;//unique points std::vector<int> unique_idx;//used for constructing incidence matrix Z_ if there are duplicates DetermineUniqueDuplicateCoords(gp_coords_mat_obs, num_data_cli, uniques, unique_idx); int num_coord_unique_obs = (int)uniques.size(); //Determine unique locations (observed and predicted) DetermineUniqueDuplicateCoords(coords_all, num_data_tot, 
uniques, unique_idx); int num_coord_unique = (int)uniques.size(); den_mat_t coords_all_unique; if ((int)uniques.size() == num_data_tot) {//no multiple observations at the same locations -> no incidence matrix needed coords_all_unique = coords_all; } else { coords_all_unique = coords_all(uniques, Eigen::all); } //Determine incidence matrices sp_mat_t Z_o = sp_mat_t(num_data_cli, uniques.size()); sp_mat_t Z_p = sp_mat_t(num_data_pred_cli, uniques.size()); for (int i = 0; i < num_data_tot; ++i) { if (i < num_data_cli) { Z_o.insert(i, unique_idx[i]) = 1.; } else { Z_p.insert(i - num_data_cli, unique_idx[i]) = 1.; } } std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_coord_unique); std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_coord_unique); std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_coord_unique); if (CondObsOnly) {//find neighbors among both the observed locations only find_nearest_neighbors_Veccia_fast(coords_all_unique, num_coord_unique, num_neighbors_pred_, nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, 0, num_coord_unique_obs - 1); } else {//find neighbors among both the observed and prediction locations find_nearest_neighbors_Veccia_fast(coords_all_unique, num_coord_unique, num_neighbors_pred_, nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, 0, -1); } // Determine Triplet for initializing Bpo and Bp std::vector<Triplet_t> entries_init_B; for (int i = 0; i < num_coord_unique; ++i) { entries_init_B.push_back(Triplet_t(i, i, 1.));//Put 1 on the diagonal for (int inn = 0; inn < (int)nearest_neighbors_cluster_i[i].size(); ++inn) { entries_init_B.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][inn], 0.)); } } sp_mat_t B(num_coord_unique, num_coord_unique); B.setFromTriplets(entries_init_B.begin(), entries_init_B.end());//initialize matrices (in order that the code below can be run in parallel) sp_mat_t D(num_coord_unique, 
num_coord_unique); D.setIdentity(); D.diagonal().array() = 0.; #pragma omp parallel for schedule(static) for (int i = 0; i < num_coord_unique; ++i) { int num_nn = (int)nearest_neighbors_cluster_i[i].size(); //define covariance and gradient matrices den_mat_t cov_mat_obs_neighbors(1, num_nn);//dim = 1 x nn den_mat_t cov_mat_between_neighbors(num_nn, num_nn);//dim = nn x nn den_mat_t cov_grad_mats_obs_neighbors, cov_grad_mats_between_neighbors; //not used, just as mock argument for functions below if (i > 0) { re_comps_[cluster_i][ind_intercept_gp_]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i], cov_mat_obs_neighbors, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);//write on matrices directly for first GP component re_comps_[cluster_i][ind_intercept_gp_]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i], cov_mat_between_neighbors, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false); } //Calculate matrices A and D as well as their derivatives //1. add first summand of matrix D (ZCZ^T_{ii}) D.coeffRef(i, i) = re_comps_[cluster_i][ind_intercept_gp_]->cov_pars_[0]; //2. 
remaining terms if (i > 0) { den_mat_t A_i(1, num_nn);//dim = 1 x nn A_i = (cov_mat_between_neighbors.llt().solve(cov_mat_obs_neighbors.transpose())).transpose(); for (int inn = 0; inn < num_nn; ++inn) { B.coeffRef(i, nearest_neighbors_cluster_i[i][inn]) -= A_i(0, inn); } D.coeffRef(i, i) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0); } }//end loop over data i //Calculate D_inv and B_inv in order to calcualte Sigma and Sigma^-1 sp_mat_t D_inv(num_coord_unique, num_coord_unique); D_inv.setIdentity(); D_inv.diagonal().array() = D.diagonal().array().pow(-1); sp_mat_t Identity_all(num_coord_unique, num_coord_unique); Identity_all.setIdentity(); sp_mat_t B_inv; eigen_sp_Lower_sp_RHS_cs_solve(B, Identity_all, B_inv, true); //Calculate inverse of covariance matrix for observed data using the Woodbury identity sp_mat_t Z_o_T = Z_o.transpose(); sp_mat_t M_aux_Woodbury = B.transpose() * D_inv * B + Z_o_T * Z_o; chol_sp_mat_t CholFac_M_aux_Woodbury; CholFac_M_aux_Woodbury.compute(M_aux_Woodbury); if (predict_cov_mat) { //Using Eigen's solver sp_mat_t M_aux_Woodbury2 = CholFac_M_aux_Woodbury.solve(Z_o_T); sp_mat_t Identity_obs(num_data_cli, num_data_cli); Identity_obs.setIdentity(); sp_mat_t ZoSigmaZoT_plusI_Inv = -Z_o * M_aux_Woodbury2 + Identity_obs; sp_mat_t ZpSigmaZoT = Z_p * B_inv * D * B_inv.transpose() * Z_o_T; sp_mat_t M_aux = ZpSigmaZoT * ZoSigmaZoT_plusI_Inv; mean_pred_id = M_aux * y_[cluster_i]; sp_mat_t Identity_pred(num_data_pred_cli, num_data_pred_cli); Identity_pred.setIdentity(); cov_mat_pred_id = T1(Z_p * B_inv * D * B_inv.transpose() * Z_p.transpose() + Identity_pred - M_aux * ZpSigmaZoT.transpose()); } else { vec_t resp_aux = Z_o_T * y_[cluster_i]; vec_t resp_aux2 = CholFac_M_aux_Woodbury.solve(resp_aux); resp_aux = y_[cluster_i] - Z_o * resp_aux2; mean_pred_id = Z_p * B_inv * D * B_inv.transpose() * Z_o_T * resp_aux; } } friend class REModel; }; } // namespace GPBoost #endif // GPB_RE_MODEL_TEMPLATE_H_
residualbased_elimination_builder_and_solver_slip.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // // #if !defined(KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SLIP ) #define KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SLIP /* System includes */ #include <set> #ifdef _OPENMP #include <omp.h> #endif #include "utilities/openmp_utils.h" /* External includes */ /* Project includes */ #include "includes/define.h" #include "solving_strategies/builder_and_solvers/builder_and_solver.h" namespace Kratos { /**@name Kratos Globals */ /*@{ */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ /**@name Enum's */ /*@{ */ /*@} */ /**@name Functions */ /*@{ */ /*@} */ /**@name Kratos Classes */ /*@{ */ /** Short class definition. Detail class definition. This is a specialization of the standard buliding strategy to the case in which a single variable is to be used in the building. 
the creation of the DofList and the construction of the system matrix is in this case much faster as the neighborhood relationships are considered to be known \URL[Example of use html]{ extended_documentation/no_ex_of_use.html} \URL[Example of use pdf]{ extended_documentation/no_ex_of_use.pdf} \URL[Example of use doc]{ extended_documentation/no_ex_of_use.doc} \URL[Example of use ps]{ extended_documentation/no_ex_of_use.ps} \URL[Extended documentation html]{ extended_documentation/no_ext_doc.html} \URL[Extended documentation pdf]{ extended_documentation/no_ext_doc.pdf} \URL[Extended documentation doc]{ extended_documentation/no_ext_doc.doc} \URL[Extended documentation ps]{ extended_documentation/no_ext_doc.ps} */ template<class TSparseSpace, class TDenseSpace, class TLinearSolver, class TVariableType > class ResidualBasedEliminationBuilderAndSolverSlip : public ResidualBasedEliminationBuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver > { public: /**@name Type Definitions */ /*@{ */ KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedEliminationBuilderAndSolverSlip); typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef ResidualBasedEliminationBuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver > ResidualBasedEliminationBuilderAndSolverType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; typedef typename BaseType::NodesArrayType NodesArrayType; typedef typename BaseType::ElementsArrayType 
ElementsArrayType; typedef typename BaseType::ConditionsArrayType ConditionsArrayType; typedef typename BaseType::ElementsContainerType ElementsContainerType; ///@} ///@name Life Cycle ///@{ /** * @brief Default constructor. (with parameters) */ explicit ResidualBasedEliminationBuilderAndSolverSlip( typename TLinearSolver::Pointer pNewLinearSystemSolver, Parameters ThisParameters ) : ResidualBasedEliminationBuilderAndSolverType(pNewLinearSystemSolver) { // Validate default parameters Parameters default_parameters = Parameters(R"( { "name" : "ResidualBasedEliminationBuilderAndSolverSlip", "domain_size" : 3, "variable_x" : "VELOCITY_X", "variable_y" : "VELOCITY_Y", "variable_z" : "VELOCITY_Z" })" ); ThisParameters.ValidateAndAssignDefaults(default_parameters); mdim = ThisParameters["domain_size"].GetInt(); mrVar_x = KratosComponents<TVariableType>::Get(ThisParameters["variable_x"].GetString()); mrVar_y = KratosComponents<TVariableType>::Get(ThisParameters["variable_y"].GetString()); mrVar_z = KratosComponents<TVariableType>::Get(ThisParameters["variable_z"].GetString()); } /** * @brief Default constructor. */ ResidualBasedEliminationBuilderAndSolverSlip( typename TLinearSolver::Pointer pNewLinearSystemSolver, unsigned int dim, TVariableType const& Var_x, TVariableType const& Var_y, TVariableType const& Var_z) : ResidualBasedEliminationBuilderAndSolverType(pNewLinearSystemSolver) , mdim(dim), mrVar_x(Var_x), mrVar_y(Var_y), mrVar_z(Var_z) { /* std::cout << "using the standard builder and solver " << std::endl; */ } /** Destructor. 
*/ virtual ~ResidualBasedEliminationBuilderAndSolverSlip() { } /*@} */ /**@name Operators */ /*@{ */ //************************************************************************** //************************************************************************** void Build( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& b) { KRATOS_TRY if (!pScheme) KRATOS_THROW_ERROR(std::runtime_error, "No scheme provided!", ""); //getting the elements from the model //ElementsArrayType& pElements = r_model_part.Elements(); //getting the array of the conditions //ConditionsArrayType& ConditionsArray = r_model_part.Conditions(); //resetting to zero the vector of reactions TSparseSpace::SetToZero(*(BaseType::mpReactionsVector)); //create a partition of the element array int number_of_threads = OpenMPUtils::GetNumThreads(); double start_prod = OpenMPUtils::GetCurrentTime(); #ifdef _OPENMP //creating an array of lock variables of the size of the system matrix std::vector< omp_lock_t > lock_array(A.size1()); int A_size = A.size1(); for (int i = 0; i < A_size; i++) omp_init_lock(&lock_array[i]); ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); //COMPONENT X CurrentProcessInfo[FRACTIONAL_STEP] = 1; ParallelBuildComponent(mrVar_x, lock_array, pScheme, r_model_part, A, b); //COMPONENT Y CurrentProcessInfo[FRACTIONAL_STEP] = 2; ParallelBuildComponent(mrVar_y, lock_array, pScheme, r_model_part, A, b); //COMPONENT Z if (mdim == 3) { CurrentProcessInfo[FRACTIONAL_STEP] = 3; ParallelBuildComponent(mrVar_z, lock_array, pScheme, r_model_part, A, b); } #else ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); //COMPONENT X CurrentProcessInfo[FRACTIONAL_STEP] = 1; ScalarBuildComponent(mrVar_x, pScheme, r_model_part, A, b); //COMPONENT Y CurrentProcessInfo[FRACTIONAL_STEP] = 2; ScalarBuildComponent(mrVar_y, pScheme, r_model_part, A, b); //COMPONENT Z if (mdim == 3) { CurrentProcessInfo[FRACTIONAL_STEP] = 3; 
ScalarBuildComponent(mrVar_z, pScheme, r_model_part, A, b); } #endif //assemble slip component vector<unsigned int> nodes_partition; CreatePartition(number_of_threads, mActiveNodes.size(), nodes_partition); #pragma omp parallel for firstprivate(number_of_threads) schedule(static,1) for (int k = 0; k < number_of_threads; k++) { // KRATOS_WATCH("insdie the loop!!!!!"); // KRATOS_WATCH(nodes_partition); //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(mdim, mdim); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(mdim); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; //const ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); GlobalPointersVector< Node < 3 > >::iterator it_begin = mActiveNodes.begin() + nodes_partition[k]; GlobalPointersVector< Node < 3 > >::iterator it_end = mActiveNodes.begin() + nodes_partition[k + 1]; for (GlobalPointersVector< Node < 3 > >::iterator it = it_begin; it != it_end; it++) { // KRATOS_WATCH(it->GetValue(IS_STRUCTURE)); if (it->GetValue(IS_STRUCTURE) == 1.0) //slip node! 
{ if (EquationId.size() != mdim) EquationId.resize(mdim, false); // KRATOS_WATCH(it->Id()); EquationId[0] = it->GetDof(mrVar_x).EquationId(); EquationId[1] = it->GetDof(mrVar_y).EquationId(); if (mdim == 3) EquationId[2] = it->GetDof(mrVar_z).EquationId(); array_1d<double, 3 > diags; for (unsigned int i = 0; i < mdim; i++) diags[i] = A(EquationId[i], EquationId[i]); double large_diag = diags[0]; for (unsigned int i = 1; i < mdim; i++) if (fabs(large_diag) < fabs(diags[i])) large_diag = diags[i]; const array_1d<double, 3 > & n = it->GetValue(NORMAL); double norm_v2 = inner_prod(n, n); //double Area = sqrt(norm_v2); //double h = sqrt(Area); double factor = 10.0 * large_diag / norm_v2; // double factor = 1000.0*(Area/h) /norm_v2; // double factor = 1.0*(Area/h) /norm_v2; noalias(LHS_Contribution) = ZeroMatrix(mdim, mdim); const array_1d<double, 3 > vel = it->FastGetSolutionStepValue(FRACT_VEL); double scalar_prod = inner_prod(n, vel); for (unsigned int i = 0; i < mdim; i++) { for (unsigned int j = 0; j < mdim; j++) LHS_Contribution(i, j) = factor * n[i] * n[j]; RHS_Contribution[i] = -factor * n[i] * scalar_prod; // RHS_Contribution[i] += n[i]*it->FastGetSolutionStepValue(PRESSURE); } #ifdef USE_LOCKS_IN_ASSEMBLY this->Assemble(A,b,LHS_Contribution,RHS_Contribution,EquationId,lock_array); #else this->Assemble(A,b,LHS_Contribution,RHS_Contribution,EquationId); #endif } } } if (this->GetEchoLevel() > 0) { double stop_prod = OpenMPUtils::GetCurrentTime(); std::cout << "parallel building time: " << stop_prod - start_prod << std::endl; } #ifdef _OPENMP for (int i = 0; i < A_size; i++) omp_destroy_lock(&lock_array[i]); #endif KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void SetUpDofSet( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part ) { KRATOS_TRY // KRATOS_WATCH("entering in setup dofset"); //fills a list of "active" nodes defined 
as nodes which have neighbours // AND no fixed pressure mActiveNodes.clear(); mActiveNodes.reserve(r_model_part.Nodes().size()); for (typename NodesArrayType::iterator it = r_model_part.NodesBegin(); it != r_model_part.NodesEnd(); ++it) { if ((it->GetValue(NEIGHBOUR_NODES)).size() != 0) { mActiveNodes.push_back(*(it.base())); } } // KRATOS_WATCH(mActiveNodes.size()); //fills the DofList and give a unique progressive tag to each node BaseType::mDofSet.clear(); BaseType::mDofSet.reserve(mActiveNodes.size()); for (GlobalPointersVector< Node < 3 > >::iterator iii = mActiveNodes.begin(); iii != mActiveNodes.end(); iii++) { BaseType::mDofSet.push_back(iii->pGetDof(mrVar_x).get()); BaseType::mDofSet.push_back(iii->pGetDof(mrVar_y).get()); if (mdim == 3) BaseType::mDofSet.push_back(iii->pGetDof(mrVar_z).get()); } //throws an execption if there are no Degrees of freedom involved in the analysis if (BaseType::mDofSet.size() == 0) KRATOS_THROW_ERROR(std::logic_error, "No degrees of freedom!", ""); BaseType::mDofSetIsInitialized = true; // KRATOS_WATCH("finished setup dofset"); // If reactions are to be calculated, we check if all the dofs have reactions defined // This is tobe done only in debug mode #ifdef KRATOS_DEBUG if(BaseType::GetCalculateReactionsFlag()) { for(auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) { KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " <<std::endl << "Node : "<<dof_iterator->Id()<< std::endl << "Dof : "<<(*dof_iterator)<<std::endl<<"Not possible to calculate reactions."<<std::endl; } } #endif KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void ResizeAndInitializeVectors( typename TSchemeType::Pointer pScheme, TSystemMatrixPointerType& pA, TSystemVectorPointerType& pDx, TSystemVectorPointerType& pb, ModelPart& 
rModelPart ) { KRATOS_TRY if (pA == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0)); pA.swap(pNewA); } if (pDx == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0)); pDx.swap(pNewDx); } if (pb == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0)); pb.swap(pNewb); } if (BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0)); BaseType::mpReactionsVector.swap(pNewReactionsVector); } TSystemMatrixType& A = *pA; TSystemVectorType& Dx = *pDx; TSystemVectorType& b = *pb; // KRATOS_WATCH("builder 436") //resizing the system vectors and matrix if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized { A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false); ParallelConstructGraph(A); } else { if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize) { //KRATOS_WATCH("it should not come here!!!!!!!! ... this is SLOW"); KRATOS_ERROR <<"The equation system size has changed during the simulation. 
This is not permited."<<std::endl; A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true); ParallelConstructGraph(A); } } if (Dx.size() != BaseType::mEquationSystemSize) { Dx.resize(BaseType::mEquationSystemSize, false); } TSparseSpace::SetToZero(Dx); if (b.size() != BaseType::mEquationSystemSize) { b.resize(BaseType::mEquationSystemSize, false); } TSparseSpace::SetToZero(b); //if needed resize the vector for the calculation of reactions if (BaseType::mCalculateReactionsFlag == true) { unsigned int ReactionsVectorSize = BaseType::mDofSet.size(); if (BaseType::mpReactionsVector->size() != ReactionsVectorSize) BaseType::mpReactionsVector->resize(ReactionsVectorSize, false); } KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void Clear() { this->mDofSet.clear(); // = DofsArrayType(); if (this->mpReactionsVector != NULL) { TSparseSpace::Clear((this->mpReactionsVector)); } // *(this->mpReactionsVector) = TSystemVectorType(); if (this->GetEchoLevel() > 1) { KRATOS_WATCH("ResidualBasedEliminationBuilderAndSolver Clear Function called"); } } /*@} */ /**@name Operations */ /*@{ */ /*@} */ /**@name Access */ /*@{ */ /*@} */ /**@name Inquiry */ /*@{ */ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "ResidualBasedEliminationBuilderAndSolverSlip"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. 
void PrintData(std::ostream& rOStream) const override { rOStream << Info(); } /*@} */ /**@name Friends */ /*@{ */ /*@} */ protected: /**@name Protected static Member Variables */ /*@{ */ /*@} */ /**@name Protected member Variables */ /*@{ */ /*@} */ /**@name Protected Operators*/ /*@{ */ //************************************************************************** //************************************************************************** //************************************************************************** //************************************************************************** void ParallelConstructGraph(TSystemMatrixType& A) { KRATOS_TRY std::vector< std::vector<std::size_t> > index_list(BaseType::mEquationSystemSize); // KRATOS_WATCH("inside PArallel Construct Graph") int number_of_threads = OpenMPUtils::GetNumThreads(); unsigned int pos_x = (mActiveNodes.begin())->GetDofPosition(mrVar_x); unsigned int pos_y = (mActiveNodes.begin())->GetDofPosition(mrVar_y); unsigned int pos_z = (mActiveNodes.begin())->GetDofPosition(mrVar_z); //constructing the system matrix row by row vector<unsigned int> partition; vector<unsigned int> local_sizes(number_of_threads); #pragma omp parallel for for (int i = 0; i < number_of_threads; i++) local_sizes[i] = 0; CreatePartition(number_of_threads, mActiveNodes.size(), partition); #pragma omp parallel for firstprivate(number_of_threads,pos_x,pos_y,pos_z) schedule(static,1) for (int k = 0; k < number_of_threads; k++) { GlobalPointersVector< Node < 3 > >::iterator it_begin = mActiveNodes.begin() + partition[k]; GlobalPointersVector< Node < 3 > >::iterator it_end = mActiveNodes.begin() + partition[k + 1]; for (GlobalPointersVector< Node < 3 > >::iterator in = it_begin; in != it_end; in++) { Node < 3 > ::DofType& current_dof_x = in->GetDof(mrVar_x, pos_x); Node < 3 > ::DofType& current_dof_y = in->GetDof(mrVar_y, pos_y); Node < 3 > ::DofType& current_dof_z = in->GetDof(mrVar_z, pos_z); unsigned int is_slip = 
in->GetValue(IS_STRUCTURE); //initialize component X if (current_dof_x.IsFixed() == false) { std::size_t index_i = (current_dof_x).EquationId(); GlobalPointersVector< Node < 3 > >& neighb_nodes = in->GetValue(NEIGHBOUR_NODES); std::vector<std::size_t>& indices = index_list[index_i]; indices.reserve(neighb_nodes.size() + 4); if (is_slip == 1.0) { indices.push_back(index_i); indices.push_back((current_dof_y).EquationId()); if (mdim == 3) indices.push_back((current_dof_z).EquationId()); } else { indices.push_back(index_i); } //filling the first neighbours list for (GlobalPointersVector< Node < 3 > >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++) { Node < 3 > ::DofType& neighb_dof = i->GetDof(mrVar_x, pos_x); if (neighb_dof.IsFixed() == false) { std::size_t index_j = (neighb_dof).EquationId(); indices.push_back(index_j); } } //sorting the indices and elminating the duplicates std::sort(indices.begin(), indices.end()); typename std::vector<std::size_t>::iterator new_end = std::unique(indices.begin(), indices.end()); indices.erase(new_end, indices.end()); local_sizes[k] += indices.size(); } //initialize component Y if (current_dof_y.IsFixed() == false) { std::size_t index_i = (current_dof_y).EquationId(); GlobalPointersVector< Node < 3 > >& neighb_nodes = in->GetValue(NEIGHBOUR_NODES); std::vector<std::size_t>& indices = index_list[index_i]; indices.reserve(neighb_nodes.size() + 4); //filling the first neighbours list if (is_slip == 1.0) { indices.push_back((current_dof_x).EquationId()); indices.push_back(index_i); if (mdim == 3) indices.push_back((current_dof_z).EquationId()); } else { indices.push_back(index_i); } for (GlobalPointersVector< Node < 3 > >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++) { Node < 3 > ::DofType& neighb_dof = i->GetDof(mrVar_y, pos_y); if (neighb_dof.IsFixed() == false) { std::size_t index_j = (neighb_dof).EquationId(); indices.push_back(index_j); } } //sorting the indices and elminating the duplicates 
std::sort(indices.begin(), indices.end()); typename std::vector<std::size_t>::iterator new_end = std::unique(indices.begin(), indices.end()); indices.erase(new_end, indices.end()); local_sizes[k] += indices.size(); } //initialize component Z if (mdim == 3) { if (current_dof_z.IsFixed() == false) { std::size_t index_i = (current_dof_z).EquationId(); GlobalPointersVector< Node < 3 > >& neighb_nodes = in->GetValue(NEIGHBOUR_NODES); std::vector<std::size_t>& indices = index_list[index_i]; indices.reserve(neighb_nodes.size() + 4); //filling the first neighbours list if (is_slip == 1.0) { indices.push_back((current_dof_x).EquationId()); indices.push_back((current_dof_y).EquationId()); indices.push_back(index_i); } else { indices.push_back(index_i); } for (GlobalPointersVector< Node < 3 > >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++) { Node < 3 > ::DofType& neighb_dof = i->GetDof(mrVar_z, pos_z); if (neighb_dof.IsFixed() == false) { std::size_t index_j = (neighb_dof).EquationId(); indices.push_back(index_j); } } //sorting the indices and elminating the duplicates std::sort(indices.begin(), indices.end()); typename std::vector<std::size_t>::iterator new_end = std::unique(indices.begin(), indices.end()); indices.erase(new_end, indices.end()); local_sizes[k] += indices.size(); } } } } //calculate the total size of the system int total_size = 0.0; #pragma omp parallel for reduction(+:total_size) for (int i = 0; i < number_of_threads; i++) total_size += local_sizes[i]; A.reserve(total_size, false); #ifndef _OPENMP for (std::size_t i = 0; i < BaseType::mEquationSystemSize; i++) { std::vector<std::size_t>& row_indices = index_list[i]; for (std::vector<std::size_t>::iterator it = row_indices.begin(); it != row_indices.end(); it++) { A.push_back(i, *it, 0.00); } row_indices.clear(); } #else vector<unsigned int> matrix_partition; CreatePartition(number_of_threads, BaseType::mEquationSystemSize, matrix_partition); KRATOS_WATCH(matrix_partition); for (int k = 0; 
k < number_of_threads; k++) { #pragma omp parallel if (omp_get_thread_num() == k) { for (std::size_t i = matrix_partition[k]; i < matrix_partition[k + 1]; i++) { std::vector<std::size_t>& row_indices = index_list[i]; for (std::vector<std::size_t>::iterator it = row_indices.begin(); it != row_indices.end(); it++) { A.push_back(i, *it, 0.00); } row_indices.clear(); } } } #endif // KRATOS_WATCH("finished PArallel Construct Graph") KRATOS_CATCH("") } //************************************************************************** //************************************************************************** #ifdef _OPENMP void ParallelBuildComponent( const TVariableType& rLocalVar, std::vector< omp_lock_t > lock_array, typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& b) #else void ScalarBuildComponent( const TVariableType& rLocalVar, typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& b) #endif { KRATOS_TRY if (!pScheme) KRATOS_THROW_ERROR(std::runtime_error, "No scheme provided!", ""); //getting the elements from the model ElementsArrayType& pElements = r_model_part.Elements(); //getting the array of the conditions ConditionsArrayType& ConditionsArray = r_model_part.Conditions(); int number_of_threads = OpenMPUtils::GetNumThreads(); vector<unsigned int> element_partition; CreatePartition(number_of_threads, pElements.size(), element_partition); double start_prod = OpenMPUtils::GetCurrentTime(); unsigned int pos = (r_model_part.Nodes().begin())->GetDofPosition(rLocalVar); #pragma omp parallel for firstprivate(number_of_threads,pos) schedule(static,1) for (int k = 0; k < number_of_threads; k++) { //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType 
EquationId; const ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); typename ElementsArrayType::ptr_iterator it_begin = pElements.ptr_begin() + element_partition[k]; typename ElementsArrayType::ptr_iterator it_end = pElements.ptr_begin() + element_partition[k + 1]; // assemble all elements for (typename ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it) { //calculate elemental contribution (*it)->InitializeNonLinearIteration(CurrentProcessInfo); (*it)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo); Geometry< Node < 3 > >& geom = (*it)->GetGeometry(); if (EquationId.size() != geom.size()) EquationId.resize(geom.size(), false); for (unsigned int i = 0; i < geom.size(); i++) EquationId[i] = geom[i].GetDof(rLocalVar, pos).EquationId(); //assemble the elemental contribution #ifdef USE_LOCKS_IN_ASSEMBLY this->Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, lock_array); #else this->Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId); #endif } } // KRATOS_WATCH("elements are built") //unsigned int A_size=A.size1(); vector<unsigned int> condition_partition; CreatePartition(number_of_threads, ConditionsArray.size(), condition_partition); // KRATOS_WATCH(condition_partition) #pragma omp parallel for firstprivate(number_of_threads,pos) schedule(static,1) for (int k = 0; k < number_of_threads; k++) { //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); Condition::EquationIdVectorType EquationId; const ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); typename ConditionsArrayType::ptr_iterator it_begin = ConditionsArray.ptr_begin() + condition_partition[k]; typename ConditionsArrayType::ptr_iterator it_end = ConditionsArray.ptr_begin() + condition_partition[k + 1]; // A all elements for (typename ConditionsArrayType::ptr_iterator it = it_begin; it != it_end; ++it) { 
//calculate elemental contribution (*it)->InitializeNonLinearIteration(CurrentProcessInfo); (*it)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo); Geometry< Node < 3 > >& geom = (*it)->GetGeometry(); if (EquationId.size() != geom.size()) EquationId.resize(geom.size(), false); for (unsigned int i = 0; i < geom.size(); i++) { EquationId[i] = geom[i].GetDof(rLocalVar, pos).EquationId(); } //assemble the elemental contribution #ifdef USE_LOCKS_IN_ASSEMBLY this->Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, lock_array); #else this->Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId); #endif } } if (this->GetEchoLevel() > 0) { double stop_prod = OpenMPUtils::GetCurrentTime(); std::cout << "parallel building time: " << stop_prod - start_prod << std::endl; } KRATOS_CATCH("") } /*@} */ /**@name Protected Operations*/ /*@{ */ /*@} */ /**@name Protected Access */ /*@{ */ /*@} */ /**@name Protected Inquiry */ /*@{ */ /*@} */ /**@name Protected LifeCycle */ /*@{ */ /*@} */ private: /**@name Static Member Variables */ /*@{ */ /*@} */ /**@name Member Variables */ /*@{ */ unsigned int mdim; TVariableType const & mrVar_x; TVariableType const & mrVar_y; TVariableType const & mrVar_z; GlobalPointersVector<Node < 3 > > mActiveNodes; /*@} */ /**@name Private Operators*/ /*@{ */ //****************************************************************************************** //****************************************************************************************** inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, vector<unsigned int>& partitions) { partitions.resize(number_of_threads + 1); int partition_size = number_of_rows / number_of_threads; partitions[0] = 0; partitions[number_of_threads] = number_of_rows; for (unsigned int i = 1; i < number_of_threads; i++) partitions[i] = partitions[i - 1] + partition_size; } /*@} */ /**@name Private Operations*/ /*@{ */ /*@} */ /**@name Private Access */ 
/*@{ */ /*@} */ /**@name Private Inquiry */ /*@{ */ /*@} */ /**@name Un accessible methods */ /*@{ */ /*@} */ }; /* Class KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SLIP */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ } /* namespace Kratos.*/ #endif /* KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVERCOMPONENTWISE defined */
fox_floats_timer_caching_omp_fileIO_benchmark.c
/* fox_floats_timer_caching_omp_fileIO_benchmark.c -- uses Fox's algorithm to multiply two square matrices * * Implementation of parallel matrix multiplication: * LaTeX: $C_{i,j} = \sum_{k} A_{i,k}B_{k,j}$ * * Input: * Input Matrix file name: A.dat, B.dat * * Output: * Output Matrix file name: C.dat * Output Sub-matrices file name: SubMatrices.dat * * Notes: * 1. Assumes the number of processes is a perfect square * 2. The array member of the matrices is statically allocated * * See Chap 7, pp. 113 & ff and pp. 125 & ff in PPMPI */ /* Compiler command: * mpiicc -O3 -qopenmp -qopt-report-phase=vec -qopt-report=3 fox_floats_timer_caching_omp_fileIO_benchmark.c * -o fox_floats_timer_caching_omp_fileIO_benchmark * * Run command: * mpirun -n -4 ./fox_floats_timer_caching_omp */ /* Head files */ #include <stdio.h> #include <math.h> #include <stdlib.h> #include <mpi.h> #include <omp.h> // define problem scale, matrix row/col size #define PROBLEM_SCALE 4096 // define whether or not Print Matices in the Command Line #define PRINT_A 0 #define PRINT_B 0 #define PRINT_C 0 #define PRINT_LOCAL_A 0 #define PRINT_LOCAL_B 0 #define PRINT_LOCAL_C 0 // define float precision, 4 byte single-precision float or 8 byte double-precision float #define FLOAT double #define FLOAT_MPI MPI_DOUBLE // Define threads speed-up affnity in the computing #define NUM_THREADS 16 // Define threads affinity "scatter" or "compact" #define AFFINITY "KMP_AFFINITY = compact" /* Type define structure of process grid */ typedef struct { int p; /* Total number of processes */ MPI_Comm comm; /* Communicator for entire grid */ MPI_Comm row_comm; /* Communicator for my row */ MPI_Comm col_comm; /* Communicator for my col */ int q; /* Order of grid */ int my_row; /* My row number */ int my_col; /* My column number */ int my_rank; /* My rank in the grid comm */ } GRID_INFO_T; /* Type define structure of local matrix */ #define MAX 2097152 // Maximum number of elements in the array that store the local matrix (2^21) 
typedef struct { int n_bar; #define Order(A) ((A)->n_bar) // defination with parameters FLOAT entries[MAX]; #define Entry(A,i,j) (*(((A)->entries) + ((A)->n_bar)*(i) + (j))) // defination with parameters, Array dereference } LOCAL_MATRIX_T; /* Function Declarations */ LOCAL_MATRIX_T* Local_matrix_allocate(int n_bar); void Free_local_matrix(LOCAL_MATRIX_T** local_A); void Read_matrix_A(char* prompt, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid, int n); // Read matrix A from a file void Read_matrix_B(char* prompt, LOCAL_MATRIX_T* local_B, // for continuous memory access, local A(i,k)*B(k,j) = A(i,k)*B^{T}(j,k) GRID_INFO_T* grid, int n); // Read matrix B from a file void Print_matrix_A(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid, int n); // Print matrix A in the command line void Print_matrix_B(char* title, LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k) GRID_INFO_T* grid, int n); // Print matrix B in the command line void Print_matrix_C(char* title, LOCAL_MATRIX_T* local_C, GRID_INFO_T* grid, int n); // Print matrix C in the command line void Set_to_zero(LOCAL_MATRIX_T* local_A); void Local_matrix_multiply(LOCAL_MATRIX_T* local_A, LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C); void Build_matrix_type(LOCAL_MATRIX_T* local_A); MPI_Datatype local_matrix_mpi_t; LOCAL_MATRIX_T* temp_mat; // global LOCAL_MATRIX_T* type pointer void Print_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid); void Print_local_matrices_B(char* title, LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k) GRID_INFO_T* grid); void Print_local_matrices_C(char* title, LOCAL_MATRIX_T* local_B, GRID_INFO_T* grid); void Write_matrix_C(char* title, LOCAL_MATRIX_T* local_C, GRID_INFO_T* grid, int n); // Write matrix multiplication to a file void Write_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid); // Write local matrix A to a file void Write_local_matrices_B(char* title, 
LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k) GRID_INFO_T* grid); // Write local matrix B to a file void Write_local_matrices_C(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid); // Write local matrix C to a file /*********************************************************/ main(int argc, char* argv[]) { FILE *fp; int p; int my_rank; GRID_INFO_T grid; LOCAL_MATRIX_T* local_A; LOCAL_MATRIX_T* local_B; LOCAL_MATRIX_T* local_C; int n; int n_bar; double timer_start; double timer_end; int content; int i; int j; void Setup_grid(GRID_INFO_T* grid); void Fox(int n, GRID_INFO_T* grid, LOCAL_MATRIX_T* local_A, LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C); // Matrix Generator fp = fopen("A.dat", "w"); // Generate and print matrix A into a file for (i = 0; i < PROBLEM_SCALE; i++) { for (j = 0; j < PROBLEM_SCALE; j++) if(i == j){ fprintf(fp,"%d ", 1); } else { fprintf(fp,"%d ", 0); } fprintf(fp,"\n"); } fclose(fp); fp = fopen("B.dat", "w"); // Generate and print matrix B into a file for (i = 0; i < PROBLEM_SCALE; i++){ for (j = 0; j < PROBLEM_SCALE; j++) fprintf(fp,"%d ", (i*PROBLEM_SCALE)+j); fprintf(fp, "\n"); } fclose(fp); // SPMD Mode start from here (Processess fork from here) MPI_Init(&argc, &argv); // MPI initializing MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator // Initial OpenMP Environment omp_set_num_threads(NUM_THREADS); kmp_set_defaults(AFFINITY); Setup_grid(&grid); // Set up Processess grid if (my_rank == 0) { fp = fopen("A.dat","r"); n = 0; while((content = fgetc(fp)) != EOF) { //printf("fgetc = %d\n", content); if(content != 0x20 && content != 0x0A) n++; } fclose(fp); n = (int) sqrt((double) n); printf("We read the order of the matrices from A.dat is\n %d\n", n); // while(fgetc(fp) != EOF) n++; // printf("What's the order of the matrices?\n"); // scanf("%d", &n); // Overall Matrix's Order } MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD); // MPI broadcast the overall matrix's order 
n_bar = n/grid.q; // \bar n is the local matrix's order local_A = Local_matrix_allocate(n_bar); // Allocate local matrix A Order(local_A) = n_bar; // Local matrix A's order Read_matrix_A("Read A from A.dat", local_A, &grid, n); // Read local matrices A from process 0 by using stdin, and send them to each process (Procedure) if (PRINT_A == 1) Print_matrix_A("We read A =", local_A, &grid, n);// Print local matrices A from process 0 by using stdout, and send them to each process (Procedure) local_B = Local_matrix_allocate(n_bar); // Allocate local matrix Order(local_B) = n_bar; // Local matrix B's order Read_matrix_B("Read B from B.dat", local_B, &grid, n); // Read local matrix B as it's local transpose from process 0 by using stdin, and send them to each process (Procedure) if (PRINT_B == 1) Print_matrix_B("We read B =", local_B, &grid, n);// Print local matrix B as it's local transpose from process 0 by using stdout, and send them to each process (Procedure) Build_matrix_type(local_A); // Buid local_A's MPI matrix data type temp_mat = Local_matrix_allocate(n_bar); // Allocate temporary matrix of order n $\time$ n local_C = Local_matrix_allocate(n_bar); // Allocate matrix local_C Order(local_C) = n_bar; // Set matrix local_C's order MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier timer_start = MPI_Wtime(); // Get the MPI wall time Fox(n, &grid, local_A, local_B, local_C); // FOX parallel matrix multiplication Algorithm implement function timer_end = MPI_Wtime(); // Get the MPI wall time MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier Write_matrix_C("Write C into the C.dat", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result) if (PRINT_C == 1) Print_matrix_C("The product is", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result) Write_local_matrices_A("Write split of local matrix A into local_A.dat", local_A, &grid); // Write local matrix A into file if (PRINT_LOCAL_A == 1) 
Print_local_matrices_A("Split of local matrix A", local_A, &grid); // Print matrix A split in processess Write_local_matrices_B("Write split of local matrix B into local_B.dat", local_B, &grid); // Write local matrix B into file, special for row-major storage if (PRINT_LOCAL_B == 1) Print_local_matrices_B("Split of local matrix B", local_B, &grid); // Print matrix B split in processess, special for row-major storage Write_local_matrices_C("Write split of local matrix C into local_C.dat", local_C, &grid); // Print matrix C split in processess if (PRINT_LOCAL_C == 1) Print_local_matrices_C("Split of local matrix C", local_C, &grid); // Print matrix C split in processess Free_local_matrix(&local_A); // Free local matrix local_A Free_local_matrix(&local_B); // Free local matrix local_B Free_local_matrix(&local_C); // Free local matrix local_C if(my_rank == 0) printf("Parallel Fox Matrix Multiplication Elapsed time:\n %30.20E seconds\n", timer_end-timer_start); MPI_Finalize(); // MPI finalize, processes join and resource recycle } /* main */ /*********************************************************/ void Setup_grid( GRID_INFO_T* grid /* out */) { int old_rank; int dimensions[2]; int wrap_around[2]; int coordinates[2]; int free_coords[2]; /* Set up Global Grid Information */ MPI_Comm_size(MPI_COMM_WORLD, &(grid->p)); MPI_Comm_rank(MPI_COMM_WORLD, &old_rank); /* We assume p is a perfect square */ // but what if it's not a perfect square grid->q = (int) sqrt((double) grid->p); dimensions[0] = dimensions[1] = grid->q; /* We want a circular shift in second dimension. 
*/ /* Don't care about first */ wrap_around[0] = wrap_around[1] = 1; MPI_Cart_create(MPI_COMM_WORLD, 2, dimensions, wrap_around, 1, &(grid->comm)); MPI_Comm_rank(grid->comm, &(grid->my_rank)); MPI_Cart_coords(grid->comm, grid->my_rank, 2, coordinates); grid->my_row = coordinates[0]; grid->my_col = coordinates[1]; /* Set up row communicators */ free_coords[0] = 0; free_coords[1] = 1; MPI_Cart_sub(grid->comm, free_coords, &(grid->row_comm)); /* Set up column communicators */ free_coords[0] = 1; free_coords[1] = 0; MPI_Cart_sub(grid->comm, free_coords, &(grid->col_comm)); } /* Setup_grid */ /*********************************************************/ void Fox( int n /* in */, GRID_INFO_T* grid /* in */, LOCAL_MATRIX_T* local_A /* in */, LOCAL_MATRIX_T* local_B /* in */, LOCAL_MATRIX_T* local_C /* out */) { LOCAL_MATRIX_T* temp_A; /* Storage for the sub- */ /* matrix of A used during */ /* the current stage */ int stage; int bcast_root; int n_bar; /* n/sqrt(p) */ int source; int dest; MPI_Status status; n_bar = n/grid->q; Set_to_zero(local_C); /* Calculate addresses for row circular shift of B */ source = (grid->my_row + 1) % grid->q; dest = (grid->my_row + grid->q - 1) % grid->q; /* Set aside storage for the broadcast block of A */ temp_A = Local_matrix_allocate(n_bar); for (stage = 0; stage < grid->q; stage++) { bcast_root = (grid->my_row + stage) % grid->q; if (bcast_root == grid->my_col) { // Process P_{ii} broadcast A_{ii} in process gird's row commnunicator MPI_Bcast(local_A, 1, local_matrix_mpi_t, bcast_root, grid->row_comm); Local_matrix_multiply(local_A, local_B, local_C); } else { // temp_A is a buffer for process P_{ij} to store A_{ij} MPI_Bcast(temp_A, 1, local_matrix_mpi_t, bcast_root, grid->row_comm); Local_matrix_multiply(temp_A, local_B, local_C); } MPI_Sendrecv_replace(local_B, 1, local_matrix_mpi_t, // MPI send and receive with single buffer dest, 0, source, 0, grid->col_comm, &status); // Circular shift of process grid B's row, after local 
multiplication operation } /* for */ } /* Fox */ /*********************************************************/ LOCAL_MATRIX_T* Local_matrix_allocate(int local_order) { LOCAL_MATRIX_T* temp; temp = (LOCAL_MATRIX_T*) malloc(sizeof(LOCAL_MATRIX_T)); return temp; } /* Local_matrix_allocate */ /*********************************************************/ void Free_local_matrix( LOCAL_MATRIX_T** local_A_ptr /* in/out */) { free(*local_A_ptr); } /* Free_local_matrix */ /*********************************************************/ /* Read and distribute matrix for matrix A: * foreach global row of the matrix, * foreach grid column * read a block of n_bar floats on process 0 * and send them to the appropriate process. */ void Read_matrix_A( char* prompt /* in */, LOCAL_MATRIX_T* local_A /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { FILE *fp; int mat_row, mat_col; int grid_row, grid_col; int dest; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { // Process 0 read matrix input from stdin and send them to other processess fp = fopen("A.dat","r"); temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT)); printf("%s\n", prompt); fflush(stdout); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_A); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &dest); if (dest == 0) { for (mat_col = 0; mat_col < Order(local_A); mat_col++) fscanf(fp, "%lf", (local_A->entries)+mat_row*Order(local_A)+mat_col); /* scanf("%lf", (local_A->entries)+mat_row*Order(local_A)+mat_col); */ } else { for(mat_col = 0; mat_col < Order(local_A); mat_col++) fscanf(fp,"%lf", temp + mat_col); // scanf("%lf", temp + mat_col); MPI_Send(temp, Order(local_A), FLOAT_MPI, dest, 0, grid->comm); } } } free(temp); fclose(fp); } else { // Other processess receive matrix from process 0 for (mat_row = 0; mat_row < Order(local_A); mat_row++) MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A), 
FLOAT_MPI, 0, 0, grid->comm, &status); } } /* Read_matrix */ /*********************************************************/ /* Read and distribute matrix for local matrix B's transpose: * foreach global row of the matrix, * foreach grid column * read a block of n_bar floats on process 0 * and send them to the appropriate process. */ void Read_matrix_B( char* prompt /* in */, LOCAL_MATRIX_T* local_B /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { FILE *fp; int mat_row, mat_col; int grid_row, grid_col; int dest; int coords[2]; FLOAT *temp; MPI_Status status; if (grid->my_rank == 0) { // Process 0 read matrix input from stdin and send them to other processess fp = fopen("B.dat","r"); temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); printf("%s\n", prompt); fflush(stdout); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_B); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &dest); if (dest == 0) { // process 0 (local) for (mat_col = 0; mat_col < Order(local_B); mat_col++) fscanf(fp, "%lf", (local_B->entries)+mat_col*Order(local_B)+mat_row); // switch rows and colums in local_B, for column major storage /* scanf("%lf", (local_B->entries)+mat_col*Order(local_B)+mat_row); // switch rows and colums in local_B, for column major storage */ /* scanf("%lf", (local_A->entries)+mat_row*Order(local_A)+mat_col); */ } else { for(mat_col = 0; mat_col < Order(local_B); mat_col++) fscanf(fp, "%lf", temp + mat_col); // scanf("%lf", temp + mat_col); MPI_Send(temp, Order(local_B), FLOAT_MPI, dest, 0, grid->comm); } } } free(temp); fclose(fp); } else { // Other processess receive matrix from process 0 temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); // switch rows and colums in local_B, for column major storage for (mat_col = 0; mat_col < Order(local_B); mat_col++) { MPI_Recv(temp, Order(local_B), FLOAT_MPI, 0, 0, grid->comm, &status); // switch rows and colums in 
local_B, for column major storage for(mat_row = 0; mat_row < Order(local_B); mat_row++) Entry(local_B, mat_row, mat_col) = *(temp + mat_row); // switch rows and colums in local_B, for column major storage /* MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A), FLOAT_MPI, 0, 0, grid->comm, &status); */ } free(temp); } } /* Read_matrix_B */ /*********************************************************/ /* Recive and Print Matrix A: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Print_matrix_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_A); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_A); mat_col++) printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_A), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_A); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { for (mat_row = 0; mat_row < Order(local_A); mat_row++) MPI_Send(&Entry(local_A, mat_row, 0), Order(local_A), FLOAT_MPI, 0, 0, grid->comm); } } /* Print_matrix_A */ /*********************************************************/ /* Recive and Print Matrix for local matrix B's transpose: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and 
print them */ void Print_matrix_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_B); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_B); mat_col++) printf("%20.15E ", Entry(local_B, mat_col, mat_row)); // switch rows and colums in local_B, for column major storage // printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_B), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_B); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); for (mat_col = 0; mat_col < Order(local_B); mat_col++) { for(mat_row = 0; mat_row < Order(local_B); mat_row++) *(temp+mat_row) = Entry(local_B, mat_row, mat_col); // switch rows and colums in local_B, for column major storage MPI_Send(temp, Order(local_B), FLOAT_MPI, 0, 0, grid->comm); } free(temp); } } /* Print_matrix_B */ /*********************************************************/ /* Recive and Print Matrix A: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Print_matrix_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) 
malloc(Order(local_C)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_C); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_C); mat_col++) printf("%20.15E ", Entry(local_C, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_C); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { for (mat_row = 0; mat_row < Order(local_C); mat_row++) MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C), FLOAT_MPI, 0, 0, grid->comm); } } /* Print_matrix_C */ /*********************************************************/ /* Recive and Write Matrix C into a file: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Write_matrix_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { FILE *fp; int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { fp = fopen("C.dat", "w+"); temp = (FLOAT*) malloc(Order(local_C)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_C); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_C); mat_col++) fprintf(fp, "%20.15E ", Entry(local_C, mat_row, mat_col)); // printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_C); 
mat_col++) fprintf(fp, "%20.15E ", temp[mat_col]); // printf("%20.15E ", temp[mat_col]); } } fprintf(fp,"\n"); } free(temp); fclose(fp); } else { for (mat_row = 0; mat_row < Order(local_C); mat_row++) MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C), FLOAT_MPI, 0, 0, grid->comm); } } /* Write_matrix_C */ /*********************************************************/ /* * Set local matrix's element to zero */ void Set_to_zero( LOCAL_MATRIX_T* local_A /* out */) { int i, j; for (i = 0; i < Order(local_A); i++) for (j = 0; j < Order(local_A); j++) Entry(local_A,i,j) = 0.0E0; } /* Set_to_zero */ /*********************************************************/ void Build_matrix_type( LOCAL_MATRIX_T* local_A /* in */) { MPI_Datatype temp_mpi_t; int block_lengths[2]; MPI_Aint displacements[2]; MPI_Datatype typelist[2]; MPI_Aint start_address; MPI_Aint address; MPI_Type_contiguous(Order(local_A)*Order(local_A), FLOAT_MPI, &temp_mpi_t); // Creates a contiguous datatype /* Synopsis int MPI_Type_contiguous(int count, MPI_Datatype oldtype, MPI_Datatype *newtype) Input Parameters count replication count (nonnegative integer) oldtype old datatype (handle) */ block_lengths[0] = block_lengths[1] = 1; typelist[0] = MPI_INT; typelist[1] = temp_mpi_t; MPI_Address(local_A, &start_address); // Gets the address of a location in caller's memory MPI_Address(&(local_A->n_bar), &address); /* Synopsis int MPI_Address(const void *location, MPI_Aint *address) Input Parameters location location in caller memory (choice) Output Parameters address address of location (address integer) */ displacements[0] = address - start_address; MPI_Address(local_A->entries, &address); displacements[1] = address - start_address; MPI_Type_struct(2, block_lengths, displacements, typelist, &local_matrix_mpi_t); // Creates a struct datatype /* Synopsis int MPI_Type_struct(int count, const int *array_of_blocklengths, const MPI_Aint *array_of_displacements, const MPI_Datatype *array_of_types, MPI_Datatype *newtype) Input 
Parameters count number of blocks (integer) -- also number of entries in arrays array_of_types , array_of_displacements and array_of_blocklengths array_of_blocklengths number of elements in each block (array) array_of_displacements byte displacement of each block (array) array_of_types type of elements in each block (array of handles to datatype objects) Output Parameters newtype new datatype (handle) */ MPI_Type_commit(&local_matrix_mpi_t); // Commits the datatype /* Synopsis int MPI_Type_commit(MPI_Datatype *datatype) Input Parameters datatype datatype (handle) */ } /* Build_matrix_type */ /*********************************************************/ /* local matrix multiplication function * withing OpenMP Thread Acceleration */ void Local_matrix_multiply( LOCAL_MATRIX_T* local_A /* in */, LOCAL_MATRIX_T* local_B /* in */, LOCAL_MATRIX_T* local_C /* out */) { int i, j, k; // int my_rank; // MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) // Threads acceleration upgrade, parallel task split for (i = 0; i < Order(local_A); i++) { // printf("Current in the Fox Kernel:\n my process id is %d, my thread id is %d\n",my_rank,omp_get_thread_num()); for (j = 0; j < Order(local_A); j++) for (k = 0; k < Order(local_B); k++) Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) /* Entry(local_C,i,j) = Entry(local_C,i,j) + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper */ } } /* Local_matrix_multiply */ /*********************************************************/ /* Recive and Print Local Matrix A: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local 
matrix local_A from other processess */ void Print_local_matrices_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* in */, GRID_INFO_T* grid /* in */) { int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_A); i++) { for (j = 0; j < Order(local_A); j++) printf("%20.15E ", Entry(local_A,i,j)); printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,i,j)); printf("\n"); } } fflush(stdout); } else { MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_A */ /*********************************************************/ /* Recive and Print Local Matrix for local matrix B's transpose: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Print_local_matrices_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* in */, GRID_INFO_T* grid /* in */) { int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_B); i++) { for (j = 0; j < Order(local_B); j++) printf("%20.15E ", Entry(local_B,j,i)); // switch rows and colums in local_B, for column major storage printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, 
&status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,j,i)); // switch rows and colums in local_B, for column major storage printf("\n"); } } fflush(stdout); } else { MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_B */ /*********************************************************/ /* Recive and Print Local Matrix A: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Print_local_matrices_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* in */, GRID_INFO_T* grid /* in */) { int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_C); i++) { for (j = 0; j < Order(local_C); j++) printf("%20.15E ", Entry(local_C,i,j)); printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,i,j)); printf("\n"); } } fflush(stdout); } else { MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_C */ /*********************************************************/ /* Recive and Write Local Matrix A: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void 
Write_local_matrices_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_A.dat","w+"); printf("%s\n", title); fprintf(fp,"Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_A); i++) { for (j = 0; j < Order(local_A); j++) fprintf(fp,"%20.15E ", Entry(local_A,i,j)); fprintf(fp, "\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,i,j)); fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_A */ /*********************************************************/ /* Recive and Write Local Matrix for local matrix B's transpose: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Write_local_matrices_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_B.dat","w+"); printf("%s\n", title); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_B); i++) { for (j = 0; j < Order(local_B); j++) fprintf(fp, "%20.15E ", Entry(local_B,j,i)); // switch rows and colums in local_B, for column major storage fprintf(fp, "\n"); } for (source = 1; 
source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,j,i)); // switch rows and colums in local_B, for column major storage fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_B */ /*********************************************************/ /* Recive and Write Local Matrix C: * Process 0 print local matrix local_C * Other Processess send local matrix local_C to process 0 * And process 0 receive local matrix local_C from other processess */ void Write_local_matrices_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_C.dat","w+"); printf("%s\n", title); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_C); i++) { for (j = 0; j < Order(local_C); j++) fprintf(fp, "%20.15E ", Entry(local_C,i,j)); fprintf(fp, "\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,i,j)); fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_C */
/* ===================== segment.c ===================== */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS EEEEE GGGG M M EEEEE N N TTTTT % % SS E G MM MM E NN N T % % SSS EEE G GGG M M M EEE N N N T % % SS E G G M M E N NN T % % SSSSS EEEEE GGGG M M EEEEE N N T % % % % % % MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means % % % % Software Design % % Cristy % % April 1993 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Segment segments an image by analyzing the histograms of the color % components and identifying units that are homogeneous with the fuzzy % c-means technique. The scale-space filter analyzes the histograms of % the three color components of the image and identifies a set of % classes. The extents of each class is used to coarsely segment the % image with thresholding. The color associated with each class is % determined by the mean color of all pixels within the extents of a % particular class. Finally, any unclassified pixels are assigned to % the closest class with the fuzzy c-means technique. % % The fuzzy c-Means algorithm can be summarized as follows: % % o Build a histogram, one for each color component of the image. 
% % o For each histogram, successively apply the scale-space filter and % build an interval tree of zero crossings in the second derivative % at each scale. Analyze this scale-space ``fingerprint'' to % determine which peaks and valleys in the histogram are most % predominant. % % o The fingerprint defines intervals on the axis of the histogram. % Each interval contains either a minima or a maxima in the original % signal. If each color component lies within the maxima interval, % that pixel is considered ``classified'' and is assigned an unique % class number. % % o Any pixel that fails to be classified in the above thresholding % pass is classified using the fuzzy c-Means technique. It is % assigned to one of the classes discovered in the histogram analysis % phase. % % The fuzzy c-Means technique attempts to cluster a pixel by finding % the local minima of the generalized within group sum of squared error % objective function. A pixel is assigned to the closest class of % which the fuzzy membership has a maximum value. % % Segment is strongly based on software written by Andy Gallo, % University of Delaware. % % The following reference was used in creating this program: % % Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation % Algorithm Based on the Thresholding and the Fuzzy c-Means % Techniques", Pattern Recognition, Volume 23, Number 9, pages % 935-952, 1990. 
%
%
*/

#include "magick/studio.h"
#include "magick/cache.h"
#include "magick/color.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/string_.h"
#include "magick/thread-private.h"

/*
  Define declarations.
*/
#define MaxDimension  3          /* number of color components analyzed */
#define DeltaTau  0.5f           /* step between successive scale values */
#if defined(FastClassify)
#define WeightingExponent  2.0
#define SegmentPower(ratio)  (ratio)
#else
#define WeightingExponent  2.5
#define SegmentPower(ratio)  pow(ratio,(double) (1.0/(weighting_exponent-1.0)));
#endif
#define Tau  5.2f                /* initial scale for the scale-space filter */

/*
  Typedef declarations.
*/
/* Extent of one class interval along a single color axis. */
typedef struct _ExtentPacket
{
  MagickRealType
    center;          /* mean color value within the interval */

  ssize_t
    index,           /* current scan position in the extrema array */
    left,            /* left boundary of the interval */
    right;           /* right boundary of the interval */
} ExtentPacket;

/* One color class: a hexahedron in RGB space, kept in a linked list. */
typedef struct _Cluster
{
  struct _Cluster
    *next;           /* next cluster in the list */

  ExtentPacket
    red,
    green,
    blue;            /* class extents along each color axis */

  ssize_t
    count,           /* number of pixels assigned to this class */
    id;              /* class identifier */
} Cluster;

/* Node of the interval tree of zero crossings across scales. */
typedef struct _IntervalTree
{
  MagickRealType
    tau;             /* scale at which this interval was found */

  ssize_t
    left,
    right;           /* interval boundaries on the histogram axis */

  MagickRealType
    mean_stability,
    stability;       /* stability measures across scales */

  struct _IntervalTree
    *sibling,
    *child;
} IntervalTree;

/* Smoothed histogram and its second-derivative zero crossings at one tau. */
typedef struct _ZeroCrossing
{
  MagickRealType
    tau,
    histogram[256];  /* scale-space filtered histogram */

  short
    crossings[256];  /* sign changes of the second derivative */
} ZeroCrossing;

/*
  Constant declarations.
*/
static const int
  Blue = 2,
  Green = 1,
  Red = 0,
  SafeMargin = 3,
  TreeLength = 600;

/*
  Method prototypes.
*/
static MagickRealType
  OptimalTau(const ssize_t *,const double,const double,const double,
    const double,short *);

static ssize_t
  DefineRegion(const short *,ExtentPacket *);

static void
  FreeNodes(IntervalTree *),
  InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *),
  ScaleSpace(const ssize_t *,const MagickRealType,MagickRealType *),
  ZeroCrossHistogram(MagickRealType *,const MagickRealType,short *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l a s s i f y                                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Classify() defines one or more classes.  Each pixel is thresholded to
%  determine which class it belongs to.  If the class is not identified it is
%  assigned to the closest class based on the fuzzy c-Means technique.
%
%  The format of the Classify method is:
%
%      MagickBooleanType Classify(Image *image,short **extrema,
%        const MagickRealType cluster_threshold,
%        const MagickRealType weighting_exponent,
%        const MagickBooleanType verbose)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o extrema:  Specifies a pointer to an array of integers.  They
%      represent the peaks and valleys of the histogram for each color
%      component.
%
%    o cluster_threshold:  This MagickRealType represents the minimum number of
%      pixels contained in a hexahedra before it can be considered valid
%      (expressed as a percentage).
%
%    o weighting_exponent: Specifies the membership weighting exponent.
%
%    o verbose:  A value greater than zero prints detailed information about
%      the identified classes.
%
*/
static MagickBooleanType Classify(Image *image,short **extrema,
  const MagickRealType cluster_threshold,
  const MagickRealType weighting_exponent,const MagickBooleanType verbose)
{
#define SegmentImageTag "Segment/Image"
/*
  Cleanup-and-throw helper: frees the cluster list and the (possibly
  offset-adjusted) squares table before raising the exception.
  NOTE(review): the macro casts squares to (double *) while the local is
  declared `register MagickRealType *squares` — harmless only while
  MagickRealType is double; confirm against magick-type.h.
*/
#define ThrowClassifyException(severity,tag,label) \
{\
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) \
  { \
    next_cluster=cluster->next; \
    cluster=(Cluster *) RelinquishMagickMemory(cluster); \
  } \
  if (squares != (double *) NULL) \
    { \
      squares-=255; \
      free_squares=squares; \
      free_squares=(double *) RelinquishMagickMemory(free_squares); \
    } \
  ThrowBinaryException(severity,tag,label); \
}

  CacheView
    *image_view;

  Cluster
    *cluster,
    *head,
    *last_cluster,
    *next_cluster;

  ExceptionInfo
    *exception;

  ExtentPacket
    blue,
    green,
    red;

  MagickOffsetType
    progress;

  MagickRealType
    *free_squares;

  MagickStatusType
    status;

  register ssize_t
    i;

  register MagickRealType
    *squares;

  size_t
    number_clusters;

  ssize_t
    count,
    y;

  /*
    Form clusters: one candidate class per (red x green x blue) triple of
    histogram peak regions reported by DefineRegion().
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  squares=(double *) NULL;
  (void) memset(&red,0,sizeof(red));
  (void) memset(&green,0,sizeof(green));
  (void) memset(&blue,0,sizeof(blue));
  exception=(&image->exception);
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireMagickMemory(
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster.  A pixel belongs to the first
    cluster whose (margin-widened) RGB extents contain it.
  */
  status=MagickTrue;
  count=0;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel; centers accumulate sums, normalized below.
            */
            count++;
            cluster->red.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelRed(p));
            cluster->green.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelGreen(p));
            cluster->blue.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelBlue(p));
            cluster->count++;
            break;
          }
      p++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SegmentImageTag,progress,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Remove clusters that do not meet minimum cluster threshold (expressed
    as a percentage of the running pixel count) and renumber survivors.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster: turn accumulated sums into mean centers.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  number_clusters=(size_t) count;
  if (verbose != MagickFalse)
    {
      /*
        Print cluster statistics.
      */
      (void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n");
      (void) FormatLocaleFile(stdout,"===================\n\n");
      (void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double)
        cluster_threshold);
      (void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double)
        weighting_exponent);
      (void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n",
        (double) number_clusters);
      /*
        Print the total number of points per cluster.
      */
      (void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n");
      (void) FormatLocaleFile(stdout,"=============================\n\n");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        (void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double)
          cluster->id,(double) cluster->count);
      /*
        Print the cluster extents.
      */
      (void) FormatLocaleFile(stdout,
        "\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension);
      (void) FormatLocaleFile(stdout,"================");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
          cluster->id);
        (void) FormatLocaleFile(stdout,
          "%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double)
          cluster->red.left,(double) cluster->red.right,(double)
          cluster->green.left,(double) cluster->green.right,(double)
          cluster->blue.left,(double) cluster->blue.right);
      }
      /*
        Print the cluster center values.
      */
      (void) FormatLocaleFile(stdout,
        "\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension);
      (void) FormatLocaleFile(stdout,"=====================");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
          cluster->id);
        (void) FormatLocaleFile(stdout,"%g %g %g\n",(double)
          cluster->red.center,(double) cluster->green.center,(double)
          cluster->blue.center);
      }
      (void) FormatLocaleFile(stdout,"\n");
    }
  if (number_clusters > 256)
    ThrowClassifyException(ImageError,"TooManyClusters",image->filename);
  /*
    Speed up distance calculations: table of i*i for i in [-255,255],
    indexed via the +255 pointer offset (undone before freeing).
  */
  squares=(MagickRealType *) AcquireQuantumMemory(513UL,sizeof(*squares));
  if (squares == (MagickRealType *) NULL)
    ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  squares+=255;
  for (i=(-255); i <= 255; i++)
    squares[i]=(MagickRealType) i*(MagickRealType) i;
  /*
    Allocate image colormap: one entry per surviving cluster center.
  */
  if (AcquireImageColormap(image,number_clusters) == MagickFalse)
    ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  i=0;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
  {
    image->colormap[i].red=ScaleCharToQuantum((unsigned char)
      (cluster->red.center+0.5));
    image->colormap[i].green=ScaleCharToQuantum((unsigned char)
      (cluster->green.center+0.5));
    image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
      (cluster->blue.center+0.5));
    i++;
  }
  /*
    Do coarse grain classes: assign by extent match where possible,
    otherwise fall through to fuzzy c-Means membership below.
  */
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Cluster
      *cluster;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelIndex(indexes+x,0);
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        if (((ssize_t) ScaleQuantumToChar(q->red) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(q->red) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(q->green) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(q->green) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(q->blue) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(q->blue) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Classify this pixel.
            */
            SetPixelIndex(indexes+x,cluster->id);
            break;
          }
      }
      if (cluster == (Cluster *) NULL)
        {
          MagickRealType
            distance_squared,
            local_minima,
            numerator,
            ratio,
            sum;

          register ssize_t
            j,
            k;

          /*
            Compute fuzzy membership: pick the colormap entry j that
            maximizes 1/sum over the membership-ratio terms.
          */
          local_minima=0.0;
          for (j=0; j < (ssize_t) image->colors; j++)
          {
            sum=0.0;
            p=image->colormap+j;
            distance_squared=squares[(ssize_t) ScaleQuantumToChar(q->red)-
              (ssize_t) ScaleQuantumToChar(GetPixelRed(p))]+
              squares[(ssize_t) ScaleQuantumToChar(q->green)-
              (ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]+
              squares[(ssize_t) ScaleQuantumToChar(q->blue)-
              (ssize_t) ScaleQuantumToChar(GetPixelBlue(p))];
            numerator=distance_squared;
            for (k=0; k < (ssize_t) image->colors; k++)
            {
              p=image->colormap+k;
              distance_squared=squares[(ssize_t) ScaleQuantumToChar(q->red)-
                (ssize_t) ScaleQuantumToChar(GetPixelRed(p))]+
                squares[(ssize_t) ScaleQuantumToChar(q->green)-
                (ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]+
                squares[(ssize_t) ScaleQuantumToChar(q->blue)-
                (ssize_t) ScaleQuantumToChar(GetPixelBlue(p))];
              ratio=numerator/distance_squared;
              sum+=SegmentPower(ratio);
            }
            if ((sum != 0.0) && ((1.0/sum) > local_minima))
              {
                /*
                  Classify this pixel.
                */
                local_minima=1.0/sum;
                SetPixelIndex(indexes+x,j);
              }
          }
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SegmentImageTag,progress,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  status&=SyncImage(image);
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  squares-=255;
  free_squares=squares;
  free_squares=(MagickRealType *) RelinquishMagickMemory(free_squares);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n s o l i d a t e   C r o s s i n g s                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConsolidateCrossings() guarantees that an even number of zero crossings
%  always lie between two crossings.
%
%  The format of the ConsolidateCrossings method is:
%
%      ConsolidateCrossings(ZeroCrossing *zero_crossing,
%        const size_t number_crossings)
%
%  A description of each parameter follows.
%
%    o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
%    o number_crossings: This size_t specifies the number of elements
%      in the zero_crossing array.
%
*/
static void ConsolidateCrossings(ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  register ssize_t
    i,
    j,
    k,
    l;

  ssize_t
    center,
    correct,
    count,
    left,
    right;

  /*
    Consolidate zero crossings: walk from the coarsest scale down,
    snapping each crossing at scale i to a compatible crossing at the
    next finer scale i+1.
  */
  for (i=(ssize_t) number_crossings-1; i >= 0; i--)
    for (j=0; j <= 255; j++)
    {
      if (zero_crossing[i].crossings[j] == 0)
        continue;
      /*
        Find the entry that is closest to j and still preserves the
        property that there are an even number of crossings between
        intervals.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      left=MagickMax(k,0);
      center=j;
      for (k=j+1; k < 255; k++)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      right=MagickMin(k,255);
      /*
        K is the zero crossing just left of j.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i].crossings[k] != 0)
          break;
      /* NOTE(review): the loop above stops at k > 0, so k is never
         negative here and this clamp is dead code. */
      if (k < 0)
        k=0;
      /*
        Check center for an even number of crossings between k and j.
      */
      correct=(-1);
      if (zero_crossing[i+1].crossings[j] != 0)
        {
          count=0;
          for (l=k+1; l < center; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (center != k))
            correct=center;
        }
      /*
        Check left for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < left; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (left != k))
            correct=left;
        }
      /*
        Check right for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < right; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (right != k))
            correct=right;
        }
      /*
        Move the crossing at j to the chosen position (or drop it when no
        candidate preserves the even-crossing invariant).
      */
      l=(ssize_t) zero_crossing[i].crossings[j];
      zero_crossing[i].crossings[j]=0;
      if (correct != -1)
        zero_crossing[i].crossings[correct]=(short) l;
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e f i n e   R e g i o n                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DefineRegion() defines the left and right boundaries of a peak region.
%
%  The format of the DefineRegion method is:
%
%      ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
%
%  A description of each parameter follows.
%
%    o extrema:  Specifies a pointer to an array of integers.  They
%      represent the peaks and valleys of the histogram for each color
%      component.
%
%    o extents:  This pointer to an ExtentPacket represents the extents
%      of a particular peak or valley of a color component.
%
*/
static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
{
  /*
    Initialize to default values.  Scanning resumes from extents->index,
    so successive calls enumerate successive regions.
  */
  extents->left=0;
  extents->center=0.0;
  extents->right=255;
  /*
    Find the left side (maxima).
  */
  for ( ; extents->index <= 255; extents->index++)
    if (extrema[extents->index] > 0)
      break;
  if (extents->index > 255)
    return(MagickFalse);  /* no left side - no region exists */
  extents->left=extents->index;
  /*
    Find the right side (minima).
  */
  for ( ; extents->index <= 255; extents->index++)
    if (extrema[extents->index] < 0)
      break;
  /* right edge is the last bin before the minima (or 255 if none found) */
  extents->right=extents->index-1;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e r i v a t i v e   H i s t o g r a m                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DerivativeHistogram() determines the derivative of the histogram using
%  central differencing.
%
%  The format of the DerivativeHistogram method is:
%
%      DerivativeHistogram(const MagickRealType *histogram,
%        MagickRealType *derivative)
%
%  A description of each parameter follows.
%
%    o histogram: Specifies an array of MagickRealTypes representing the number
%      of pixels for each intensity of a particular color component.
%
%    o derivative: This array of MagickRealTypes is initialized by
%      DerivativeHistogram to the derivative of the histogram using central
%      differencing.  Both arrays hold 256 entries.
%
*/
static void DerivativeHistogram(const MagickRealType *histogram,
  MagickRealType *derivative)
{
  register ssize_t
    i,
    n;

  /*
    Compute endpoints using second order polynomial interpolation
    (one-sided three-point differences at bins 0 and 255).
  */
  n=255;
  derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]);
  derivative[n]=(0.5*histogram[n-2]-2.0*histogram[n-1]+1.5*histogram[n]);
  /*
    Compute derivative using central differencing.
  */
  for (i=1; i < n; i++)
    derivative[i]=(histogram[i+1]-histogram[i-1])/2.0;
  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+  G e t I m a g e D y n a m i c T h r e s h o l d                            %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageDynamicThreshold() returns the dynamic threshold for an image.
%
%  The format of the GetImageDynamicThreshold method is:
%
%      MagickBooleanType GetImageDynamicThreshold(const Image *image,
%        const double cluster_threshold,const double smooth_threshold,
%        MagickPixelPacket *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cluster_threshold:  This MagickRealType represents the minimum number of
%      pixels contained in a hexahedra before it can be considered valid
%      (expressed as a percentage).
%
%    o smooth_threshold: the smoothing threshold eliminates noise in the second
%      derivative of the histogram.  As the value is increased, you can expect a
%      smoother second derivative.
%
%    o pixel: return the dynamic threshold here.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image,
  const double cluster_threshold,const double smooth_threshold,
  MagickPixelPacket *pixel,ExceptionInfo *exception)
{
  Cluster
    *background,
    *cluster,
    *object,
    *head,
    *last_cluster,
    *next_cluster;

  ExtentPacket
    blue,
    green,
    red;

  MagickBooleanType
    proceed;

  MagickRealType
    threshold;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  short
    *extrema[MaxDimension];

  ssize_t
    count,
    *histogram[MaxDimension],
    y;

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  GetMagickPixelPacket(image,pixel);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    /* NOTE(review): extrema[] is sized with sizeof(**histogram) (ssize_t),
       not sizeof(**extrema) (short) — over-allocates but is harmless;
       confirm intent before "fixing". */
    extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /* unwind the allocations made so far before bailing out */
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
        return(MagickFalse);
      }
  }
  /*
    Initialize histogram, then derive the peak/valley extrema per channel.
  */
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]);
  /*
    Form clusters (same construction as Classify()).
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) memset(&red,0,sizeof(red));
  (void) memset(&green,0,sizeof(green));
  (void) memset(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireMagickMemory(
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          {
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              image->filename);
            return(MagickFalse);
          }
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
          return(MagickFalse);
        }
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster.
  */
  count=0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel.
            */
            count++;
            cluster->red.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelRed(p));
            cluster->green.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelGreen(p));
            cluster->blue.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelBlue(p));
            cluster->count++;
            break;
          }
      p++;
    }
    proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y,
      2*image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Remove clusters that do not meet minimum cluster threshold.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  /*
    Pick the smallest cluster (after head) as the object and the largest
    as the background; the threshold is the midpoint of their centers.
  */
  object=head;
  background=head;
  if (count > 1)
    {
      object=head->next;
      for (cluster=object; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count < object->count)
          object=cluster;
        cluster=cluster->next;
      }
      background=head->next;
      for (cluster=background; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count > background->count)
          background=cluster;
        cluster=cluster->next;
      }
    }
  if (background != (Cluster *) NULL)
    {
      threshold=(background->red.center+object->red.center)/2.0;
      pixel->red=(MagickRealType) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->green.center+object->green.center)/2.0;
      pixel->green=(MagickRealType) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->blue.center+object->blue.center)/2.0;
      pixel->blue=(MagickRealType) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
    }
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+  I n i t i a l i z e   H i s t o g r a m                                    %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InitializeHistogram() computes the histogram for an image.
%
%  The format of the InitializeHistogram method is:
%
%      InitializeHistogram(const Image *image,ssize_t **histogram)
%
%  A description of each parameter follows.
%
%    o image: Specifies a pointer to an Image structure;  returned from
%      ReadImage.
%
%    o histogram: Specifies an array of integers representing the number
%      of pixels for each intensity of a particular color component.
%
*/
static void InitializeHistogram(const Image *image,ssize_t **histogram,
  ExceptionInfo *exception)
{
  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Initialize histogram.
  */
  for (i=0; i <= 255; i++)
  {
    histogram[Red][i]=0;
    histogram[Green][i]=0;
    histogram[Blue][i]=0;
  }
  /*
    Bin every pixel into 256 levels per channel.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(p))]++;
      histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]++;
      histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(p))]++;
      p++;
    }
  }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   I n i t i a l i z e   I n t e r v a l   T r e e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InitializeIntervalTree() initializes an interval tree from the lists of
%  zero crossings.
%
%  The format of the InitializeIntervalTree method is:
%
%      InitializeIntervalTree(IntervalTree **list,ssize_t *number_nodes,
%        IntervalTree *node)
%
%  A description of each parameter follows.
%
%    o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
%    o number_crossings: This size_t specifies the number of elements
%      in the zero_crossing array.
%
*/

/*
  InitializeList() appends every leaf (childless node) of the tree rooted
  at `node` to `list`, advancing *number_nodes.
*/
static void InitializeList(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->child == (IntervalTree *) NULL)
    list[(*number_nodes)++]=node;
  InitializeList(list,number_nodes,node->sibling);
  InitializeList(list,number_nodes,node->child);
}

/*
  MeanStability() sets each node's mean_stability to the average stability
  of its immediate children (0.0 for leaves), recursing over the tree.
*/
static void MeanStability(IntervalTree *node)
{
  register IntervalTree
    *child;

  if (node == (IntervalTree *) NULL)
    return;
  node->mean_stability=0.0;
  child=node->child;
  if (child != (IntervalTree *) NULL)
    {
      register ssize_t
        count;

      register MagickRealType
        sum;

      sum=0.0;
      count=0;
      for ( ; child != (IntervalTree *) NULL; child=child->sibling)
      {
        sum+=child->stability;
        count++;
      }
      node->mean_stability=sum/(MagickRealType) count;
    }
  MeanStability(node->sibling);
  MeanStability(node->child);
}

/*
  Stability() sets each node's stability to the tau gap between the node
  and its first child (0.0 for leaves), recursing over the tree.
*/
static void Stability(IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->child == (IntervalTree *) NULL)
    node->stability=0.0;
  else
    node->stability=node->tau-(node->child)->tau;
  Stability(node->sibling);
  Stability(node->child);
}

static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  IntervalTree
    *head,
    **list,
    *node,
    *root;

  register ssize_t
    i;

  ssize_t
    j,
    k,
    left,
    number_nodes;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return((IntervalTree *) NULL);
  /*
    The root is the entire histogram.
  */
  root=(IntervalTree *) AcquireCriticalMemory(sizeof(*root));
  root->child=(IntervalTree *) NULL;
  root->sibling=(IntervalTree *) NULL;
  root->tau=0.0;
  root->left=0;
  root->right=255;
  root->mean_stability=0.0;
  root->stability=0.0;
  (void) memset(list,0,TreeLength*sizeof(*list));
  /*
    i runs from -1 so the first pass splits the root itself; each pass
    refines the current leaves with the crossings at scale i+1.
  */
  for (i=(-1); i < (ssize_t) number_crossings; i++)
  {
    /*
      Initialize list with all nodes with no children.
    */
    number_nodes=0;
    InitializeList(list,&number_nodes,root);
    /*
      Split list.
    */
    for (j=0; j < number_nodes; j++)
    {
      head=list[j];
      left=head->left;
      node=head;
      /*
        Each zero crossing inside [head->left,head->right] splits off a
        child interval ending at that crossing.
      */
      for (k=head->left+1; k < head->right; k++)
      {
        if (zero_crossing[i+1].crossings[k] != 0)
          {
            if (node == head)
              {
                node->child=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->child));
                node=node->child;
              }
            else
              {
                node->sibling=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->sibling));
                node=node->sibling;
              }
            if (node == (IntervalTree *) NULL)
              {
                list=(IntervalTree **) RelinquishMagickMemory(list);
                FreeNodes(root);
                return((IntervalTree *) NULL);
              }
            node->tau=zero_crossing[i+1].tau;
            node->child=(IntervalTree *) NULL;
            node->sibling=(IntervalTree *) NULL;
            node->left=left;
            node->right=k;
            left=k;
          }
      }
      /*
        Close the final child interval up to head->right, if any split
        occurred.
      */
      if (left != head->left)
        {
          node->sibling=(IntervalTree *) AcquireMagickMemory(
            sizeof(*node->sibling));
          node=node->sibling;
          if (node == (IntervalTree *) NULL)
            {
              list=(IntervalTree **) RelinquishMagickMemory(list);
              FreeNodes(root);
              return((IntervalTree *) NULL);
            }
          node->tau=zero_crossing[i+1].tau;
          node->child=(IntervalTree *) NULL;
          node->sibling=(IntervalTree *) NULL;
          node->left=left;
          node->right=head->right;
        }
    }
  }
  /*
    Determine the stability: difference between a nodes tau and its child.
  */
  Stability(root->child);
  MeanStability(root->child);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(root);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   O p t i m a l T a u                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OptimalTau() finds the optimal tau for each band of the histogram.
%
%  The format of the OptimalTau method is:
%
%    MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau,
%      const double min_tau,const double delta_tau,
%      const double smooth_threshold,short *extrema)
%
%  A description of each parameter follows.
%
%    o histogram: Specifies an array of integers representing the number
%      of pixels for each intensity of a particular color component.
%
%    o extrema:  Specifies a pointer to an array of integers.  They
%      represent the peaks and valleys of the histogram for each color
%      component.
%
*/

/*
  ActiveNodes() collects the "active" nodes of the interval tree: a node
  whose stability is at least the mean stability of its children is taken
  (and its subtree pruned); otherwise its children are examined instead.
*/
static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->stability >= node->mean_stability)
    {
      list[(*number_nodes)++]=node;
      ActiveNodes(list,number_nodes,node->sibling);
    }
  else
    {
      ActiveNodes(list,number_nodes,node->sibling);
      ActiveNodes(list,number_nodes,node->child);
    }
}

/*
  FreeNodes() recursively releases an interval tree (siblings, children,
  then the node itself).
*/
static void FreeNodes(IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  FreeNodes(node->sibling);
  FreeNodes(node->child);
  node=(IntervalTree *) RelinquishMagickMemory(node);
}

static MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau,
  const double min_tau,const double delta_tau,const double smooth_threshold,
  short *extrema)
{
  IntervalTree
    **list,
    *node,
    *root;

  MagickBooleanType
    peak;

  MagickRealType
    average_tau,
    *derivative,
    *second_derivative,
    tau,
    value;

  register ssize_t
    i,
    x;

  size_t
    count,
    number_crossings;

  ssize_t
    index,
    j,
    k,
    number_nodes;

  ZeroCrossing
    *zero_crossing;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return(0.0);
  /*
    Allocate zero crossing list: one entry per tau step plus one for the
    unsmoothed histogram.
  */
  count=(size_t) ((max_tau-min_tau)/delta_tau)+2;
  zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count,
    sizeof(*zero_crossing));
  if (zero_crossing == (ZeroCrossing *) NULL)
    {
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  for (i=0; i < (ssize_t) count; i++)
    zero_crossing[i].tau=(-1.0);
  /*
    Initialize zero crossing list.
*/ derivative=(MagickRealType *) AcquireCriticalMemory(256*sizeof(*derivative)); second_derivative=(MagickRealType *) AcquireCriticalMemory(256* sizeof(*second_derivative)); i=0; for (tau=max_tau; tau >= min_tau; tau-=delta_tau) { zero_crossing[i].tau=tau; ScaleSpace(histogram,tau,zero_crossing[i].histogram); DerivativeHistogram(zero_crossing[i].histogram,derivative); DerivativeHistogram(derivative,second_derivative); ZeroCrossHistogram(second_derivative,smooth_threshold, zero_crossing[i].crossings); i++; } /* Add an entry for the original histogram. */ zero_crossing[i].tau=0.0; for (j=0; j <= 255; j++) zero_crossing[i].histogram[j]=(MagickRealType) histogram[j]; DerivativeHistogram(zero_crossing[i].histogram,derivative); DerivativeHistogram(derivative,second_derivative); ZeroCrossHistogram(second_derivative,smooth_threshold, zero_crossing[i].crossings); number_crossings=(size_t) i; derivative=(MagickRealType *) RelinquishMagickMemory(derivative); second_derivative=(MagickRealType *) RelinquishMagickMemory(second_derivative); /* Ensure the scale-space fingerprints form lines in scale-space, not loops. */ ConsolidateCrossings(zero_crossing,number_crossings); /* Force endpoints to be included in the interval. */ for (i=0; i <= (ssize_t) number_crossings; i++) { for (j=0; j < 255; j++) if (zero_crossing[i].crossings[j] != 0) break; zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]); for (j=255; j > 0; j--) if (zero_crossing[i].crossings[j] != 0) break; zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]); } /* Initialize interval tree. */ root=InitializeIntervalTree(zero_crossing,number_crossings); if (root == (IntervalTree *) NULL) { zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing); list=(IntervalTree **) RelinquishMagickMemory(list); return(0.0); } /* Find active nodes: stability is greater (or equal) to the mean stability of its children. 
*/ number_nodes=0; ActiveNodes(list,&number_nodes,root->child); /* Initialize extrema. */ for (i=0; i <= 255; i++) extrema[i]=0; for (i=0; i < number_nodes; i++) { /* Find this tau in zero crossings list. */ k=0; node=list[i]; for (j=0; j <= (ssize_t) number_crossings; j++) if (zero_crossing[j].tau == node->tau) k=j; /* Find the value of the peak. */ peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue : MagickFalse; index=node->left; value=zero_crossing[k].histogram[index]; for (x=node->left; x <= node->right; x++) { if (peak != MagickFalse) { if (zero_crossing[k].histogram[x] > value) { value=zero_crossing[k].histogram[x]; index=x; } } else if (zero_crossing[k].histogram[x] < value) { value=zero_crossing[k].histogram[x]; index=x; } } for (x=node->left; x <= node->right; x++) { if (index == 0) index=256; if (peak != MagickFalse) extrema[x]=(short) index; else extrema[x]=(short) (-index); } } /* Determine the average tau. */ average_tau=0.0; for (i=0; i < number_nodes; i++) average_tau+=list[i]->tau; average_tau*=PerceptibleReciprocal((MagickRealType) number_nodes); /* Relinquish resources. */ FreeNodes(root); zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing); list=(IntervalTree **) RelinquishMagickMemory(list); return(average_tau); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S c a l e S p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleSpace() performs a scale-space filter on the 1D histogram. % % The format of the ScaleSpace method is: % % ScaleSpace(const ssize_t *histogram,const MagickRealType tau, % MagickRealType *scale_histogram) % % A description of each parameter follows. % % o histogram: Specifies an array of MagickRealTypes representing the number % of pixels for each intensity of a particular color component. 
%
*/

/*
  ScaleSpace() smooths the 256-bin histogram with a Gaussian of standard
  deviation tau: scale_histogram[b] = alpha * sum_u histogram[u] *
  exp(-(b-u)^2/(2*tau^2)), with alpha the Gaussian normalization.  Kernel
  weights are tabulated once by distance; tabulation stops early (remaining
  entries stay zero) once a weight drops below MagickEpsilon.
*/
static void ScaleSpace(const ssize_t *histogram,const MagickRealType tau,
  MagickRealType *scale_histogram)
{
  double
    height,
    normalize,
    spread,
    *weight;

  ssize_t
    bin,
    delta;

  weight=(double *) AcquireQuantumMemory(256,sizeof(*weight));
  if (weight == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAllocateGammaMap");
  normalize=1.0/(tau*sqrt(2.0*MagickPI));
  spread=(-1.0/(2.0*tau*tau));
  /*
    Tabulate Gaussian weights by |distance|; zero beyond the cutoff.
  */
  for (bin=0; bin <= 255; bin++)
    weight[bin]=0.0;
  for (bin=0; bin <= 255; bin++)
  {
    weight[bin]=exp((double) spread*bin*bin);
    if (weight[bin] < MagickEpsilon)
      break;
  }
  /*
    Convolve the histogram against the tabulated kernel.
  */
  for (bin=0; bin <= 255; bin++)
  {
    height=0.0;
    for (delta=0; delta <= 255; delta++)
      height+=(double) histogram[delta]*weight[MagickAbsoluteValue(bin-delta)];
    scale_histogram[bin]=(MagickRealType) (normalize*height);
  }
  weight=(double *) RelinquishMagickMemory(weight);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  S e g m e n t I m a g e                                                    %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SegmentImage() segment an image by analyzing the histograms of the color
%  components and identifying units that are homogeneous with the fuzzy
%  C-means technique.
%
%  The format of the SegmentImage method is:
%
%      MagickBooleanType SegmentImage(Image *image,
%        const ColorspaceType colorspace,const MagickBooleanType verbose,
%        const double cluster_threshold,const double smooth_threshold)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o colorspace: Indicate the colorspace.
%
%    o verbose:  Set to MagickTrue to print detailed information about the
%      identified classes.
%
%    o cluster_threshold:  This represents the minimum number of pixels
%      contained in a hexahedra before it can be considered valid (expressed
%      as a percentage).
%
%    o smooth_threshold: the smoothing threshold eliminates noise in the second
%      derivative of the histogram.  As the value is increased, you can expect a
%      smoother second derivative.
%
*/

/*
  SegmentImage() segments 'image' by smoothing each channel histogram across
  scale space (OptimalTau) to find stable peaks/valleys, then classifying
  pixels with the fuzzy c-means technique (Classify).  The image is converted
  to 'colorspace' for analysis and restored to its previous colorspace before
  returning.  Returns the status of Classify(), or MagickFalse on allocation
  failure.
*/
MagickExport MagickBooleanType SegmentImage(Image *image,
  const ColorspaceType colorspace,const MagickBooleanType verbose,
  const double cluster_threshold,const double smooth_threshold)
{
  ColorspaceType
    previous_colorspace;

  MagickBooleanType
    status;

  register ssize_t
    i;

  short
    *extrema[MaxDimension];

  ssize_t
    *histogram[MaxDimension];

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /*
          Start the cleanup at the CURRENT index: one of histogram[i]/
          extrema[i] may have been allocated before the other failed, and
          skipping index i (the old `for (i-- ; ...)` form) leaked it.
          RelinquishMagickMemory() is a no-op for NULL pointers.
        */
        for ( ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename)
      }
  }
  /*
    Initialize histogram.
  */
  previous_colorspace=image->colorspace;
  (void) TransformImageColorspace(image,colorspace);
  InitializeHistogram(image,histogram,&image->exception);
  /*
    A zero smooth_threshold is promoted to 1.0 so ZeroCrossHistogram() still
    suppresses numerical noise in the second derivative.
  */
  (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Blue]);
  /*
    Classify using the fuzzy c-Means technique.
  */
  status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose);
  (void) TransformImageColorspace(image,previous_colorspace);
  /*
    Relinquish resources.
  */
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Z e r o C r o s s H i s t o g r a m                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroCrossHistogram() find the zero crossings in a histogram and marks
%  directions as:  1 is negative to positive; 0 is zero crossing; and -1
%  is positive to negative.
%
%  The format of the ZeroCrossHistogram method is:
%
%      ZeroCrossHistogram(MagickRealType *second_derivative,
%        const MagickRealType smooth_threshold,short *crossings)
%
%  A description of each parameter follows.
%
%    o second_derivative: Specifies an array of MagickRealTypes representing the
%      second derivative of the histogram of a particular color component.
%
%    o crossings:  This array of integers is initialized with
%      -1, 0, or 1 representing the slope of the first derivative of the
%      of a particular color component.
%
*/
static void ZeroCrossHistogram(MagickRealType *second_derivative,
  const MagickRealType smooth_threshold,short *crossings)
{
  register ssize_t
    i;

  ssize_t
    parity;

  /*
    Merge low numbers to zero to help prevent noise.
  */
  for (i=0; i <= 255; i++)
    if ((second_derivative[i] < smooth_threshold) &&
        (second_derivative[i] >= -smooth_threshold))
      second_derivative[i]=0.0;
  /*
    Mark zero crossings: 'parity' remembers the sign of the last non-zero
    sample, so a crossing is recorded only when the sign actually flips.
  */
  parity=0;
  for (i=0; i <= 255; i++)
  {
    crossings[i]=0;
    if (second_derivative[i] < 0.0)
      {
        if (parity > 0)
          crossings[i]=(-1);
        parity=1;
      }
    else
      if (second_derivative[i] > 0.0)
        {
          if (parity < 0)
            crossings[i]=1;
          parity=(-1);
        }
  }
}
general_basis_get_vec.h
#ifndef _GENERAL_BASIS_GET_VEC_H
#define _GENERAL_BASIS_GET_VEC_H

#include "general_basis_core.h"
#include "numpy/ndarraytypes.h"
#include "misc.h"
#include "openmp.h"

namespace basis_general {

// Accumulate out[i] += phase * c * in[i] for n_vec columns (complex output,
// integer-like phase P).  Always succeeds.
template<class T,class P>
bool inline update_out_dense(std::complex<double> c, P phase, npy_intp n_vec,const std::complex<T> *in, std::complex<T> *out){
	for(npy_intp i=0;i<n_vec;i++){
		out[i] += T(phase) * std::complex<T>(c) * in[i];
	}
	return true;
}

// Real-output overload: only valid when c is (numerically) real.  Returns
// false without touching 'out' if |Im(c)| exceeds the 1.1e-15 tolerance,
// signalling that a complex-valued vector cannot be stored in a real array.
template<class T,class P>
bool inline update_out_dense(std::complex<double> c, P phase, npy_intp n_vec,const T *in, T *out){
	if(std::abs(c.imag())>1.1e-15){
		return false;
	}
	else{
		T re = c.real();
		for(npy_intp i=0;i<n_vec;i++){
			out[i] += T(phase) * re * in[i];
		}
		return true;
	}
}

// Complex-phase overload (complex output): fold phase into the coefficient.
template<class T>
bool inline update_out_dense(std::complex<double> c, std::complex<double> phase, npy_intp n_vec,const std::complex<T> *in, std::complex<T> *out){
	for(npy_intp i=0;i<n_vec;i++){
		out[i] += std::complex<T>(phase*c) * in[i];
	}
	return true;
}

// Complex-phase overload (real output): fails (returns false) unless the
// combined coefficient phase*c is numerically real.
template<class T>
bool inline update_out_dense(std::complex<double> c, std::complex<double> phase, npy_intp n_vec,const T *in, T *out){
	c *= phase;
	if(std::abs(c.imag())>1.1e-15){
		return false;
	}
	else{
		T re = c.real();
		for(npy_intp i=0;i<n_vec;i++){
			out[i] += re * in[i];
		}
		return true;
	}
}

// Scatter one symmetry-reduced representative state 's' into the full-basis
// output: recurses over the nt symmetry generators (depth = current
// generator), visiting every image of 's' under generator 'depth' (per =
// periodicity) with coefficient c multiplied by exp(-i*2*pi*q/per) per step.
// Full-basis rows are addressed as (Ns_full - s - 1), i.e. states are stored
// in decreasing integer order.  Returns false if a real output array cannot
// represent a complex amplitude (see update_out_dense).
template<class I,class T,class P=signed char>
bool get_vec_rep(general_basis_core<I,P> *B,
				 I s,
				 P &phase,
				 const int nt,
				 const npy_intp n_vec,
				 const npy_intp Ns_full,
				 const T in[],
				 std::complex<double> c,
				 T out[],
				 const int depth)
{
	bool err = true;
	if(nt<=0){
		// no symmetries: single contribution at the row of state s
		const npy_intp full = (Ns_full - s - 1)*n_vec;
		err = update_out_dense(c,phase,n_vec,in,&out[full]);
		return err;
	}
	int per = B->pers[depth];
	// character of this generator: exp(-i*2*pi*q/per)
	double q = (2.0*M_PI*B->qs[depth])/per;
	std::complex<double> cc = std::exp(std::complex<double>(0,-q));
	if(depth < nt-1){
		// interior generator: recurse into the remaining generators for each
		// image of s, then advance s and the accumulated coefficient/phase
		for(int j=0;j<per && err;j++){
			err = get_vec_rep(B,s,phase,nt,n_vec,Ns_full,in,c,out,depth+1);
			c *= cc;
			s = B->map_state(s,depth,phase);
		}
		return err;
	}
	else{
		// last generator: write the contributions directly
		for(int j=0;j<per && err;j++){
			const npy_intp full = (Ns_full - s - 1)*n_vec;
			err = update_out_dense(c,phase,n_vec,in,&out[full]);
			c *= cc;
			s = B->map_state(s,depth,phase);
		}
		return err;
	}
}

// Same recursion as get_vec_rep, but for a particle-conserving (pcon) target
// basis: the destination row of state 's' is located by binary_search in the
// explicit state list 'basis_pcon' instead of by arithmetic on s.
template<class I,class T,class P=signed char>
bool get_vec_rep_pcon(general_basis_core<I,P> *B,
				 I s,
				 P &phase,
				 const int nt,
				 const npy_intp n_vec,
				 const I basis_pcon[],
				 const npy_intp Ns_full,
				 const T in[],
				 std::complex<double> c,
				 T out[],
				 const int depth)
{
	bool err = true;
	if(nt<=0){
		const npy_intp full = binary_search(Ns_full,basis_pcon,s)*n_vec;
		err = update_out_dense(c,phase,n_vec,in,&out[full]);
		return err;
	}
	int per = B->pers[depth];
	double q = (2.0*M_PI*B->qs[depth])/per;
	std::complex<double> cc = std::exp(std::complex<double>(0,-q));
	if(depth < nt-1){
		for(int j=0;j<per && err;j++){
			err = get_vec_rep_pcon(B,s,phase,nt,n_vec,basis_pcon,Ns_full,in,c,out,depth+1);
			c *= cc;
			s = B->map_state(s,depth,phase);
		}
		return err;
	}
	else{
		for(int j=0;j<per && err;j++){
			const npy_intp full = binary_search(Ns_full,basis_pcon,s)*n_vec;
			err = update_out_dense(c,phase,n_vec,in,&out[full]);
			c *= cc;
			s = B->map_state(s,depth,phase);
		}
		return err;
	}
}

// Expand n_vec dense vectors from the symmetry-reduced pcon basis 'basis'
// (Ns states, normalizations n[]) into the full pcon basis (Ns_full states,
// listed in basis_pcon).  Rows are processed in parallel; distinct
// representatives scatter to disjoint full-basis rows, so 'out' needs no
// locking.  Returns false if any amplitude could not be stored (real output
// with a complex coefficient).
template<class I,class J,class T,class P=signed char>
bool get_vec_general_pcon_dense(general_basis_core<I,P> *B,
						  const I basis[],
						  const J n[],
						  const npy_intp n_vec,
						  const npy_intp Ns,
						  const npy_intp Ns_full,
						  const I basis_pcon[],
						  const T in[],
						  T out[])
{
	bool err = true;
	const int nt = B->get_nt();
	const npy_intp chunk = std::max(Ns/(100*omp_get_max_threads()),(npy_intp)1);
	// norm = product of all generator periodicities (orbit-size normalization)
	double norm = 1.0;
	for(int i=0;i<nt;i++){
		norm *= B->pers[i];
	}
	#pragma omp parallel for schedule(dynamic,chunk) firstprivate(norm)
	for(npy_intp k=0;k<Ns;k++){
		// NOTE(review): 'err' is read here without synchronization (written
		// under the critical below).  It only ever transitions true->false,
		// and is used as a best-effort early-out — presumably accepted as
		// benign; verify against the project's OpenMP conventions.
		if(!err){continue;}
		std::complex<double> c = 1.0/std::sqrt(n[k]*norm);
		P phase = 1;
		bool local_err = get_vec_rep_pcon(B,basis[k],phase,nt,n_vec,basis_pcon,Ns_full,&in[k*n_vec],c,out,0);
		if(!local_err){
			#pragma omp critical
			err = local_err;
		}
	}
	return err;
}

// As get_vec_general_pcon_dense, but for a full (non-pcon) target basis where
// the destination row of a state is computed arithmetically (Ns_full - s - 1).
template<class I,class J,class T,class P=signed char>
bool get_vec_general_dense(general_basis_core<I,P> *B,
						  const I basis[],
						  const J n[],
						  const npy_intp n_vec,
						  const npy_intp Ns,
						  const npy_intp Ns_full,
						  const T in[],
						  T out[])
{
	bool err = true;
	const int nt = B->get_nt();
	const npy_intp chunk = std::max(Ns/(100*omp_get_max_threads()),(npy_intp)1);
	double norm = 1.0;
	for(int i=0;i<nt;i++){
		norm *= B->pers[i];
	}
	#pragma omp parallel for schedule(dynamic,chunk) firstprivate(norm)
	for(npy_intp k=0;k<Ns;k++){
		if(!err){continue;}
		std::complex<double> c = 1.0/std::sqrt(n[k]*norm);
		P phase = 1;
		bool local_err = get_vec_rep(B,basis[k],phase,nt,n_vec,Ns_full,&in[k*n_vec],c,out,0);
		if(!local_err){
			#pragma omp critical
			err = local_err;
		}
	}
	return err;
}

}

#endif
GB_unaryop__minv_uint32_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_uint32_fp64
// op(A') function:  GB_tran__minv_uint32_fp64

// C type:   uint32_t
// A type:   double
// cast:     uint32_t cij ; GB_CAST_UNSIGNED(cij,aij,32)
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 32)

// A (input) entry type
#define GB_ATYPE \
    double

// C (output) entry type
#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

// Cx [p] accessor
#define GB_CX(p) Cx [p]

// unary operator: 32-bit unsigned integer multiplicative inverse
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 32) ;

// casting: double -> uint32_t with unsigned saturation/wrap semantics
#define GB_CASTING(z, x) \
    uint32_t z ; GB_CAST_UNSIGNED(z,x,32) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */          \
    GB_GETA (aij, Ax, pA) ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;        \
    GB_OP (GB_CX (pC), x) ;      \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT32 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply cij = GB_IMINV_UNSIGNED (cast (aij), 32) elementwise over anz entries,
// parallelized statically over nthreads.  Returns GrB_NO_VALUE when this
// operator/type combination is compiled out (GB_DISABLE).
GrB_Info GB_unop__minv_uint32_fp64
(
    uint32_t *restrict Cx,
    const double *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is generated by including GB_unaryop_transpose.c
// with the macros above defining the types/operator (phase 2 of 2).
GrB_Info GB_tran__minv_uint32_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
blurtwopart.c
#include <stdlib.h>
#include "blurtwopart.h"

/*
 * blurtwopart: separable 3x3 box-sum of an N-row by M-column image.
 *
 * Two passes over a zero-padded intermediate buffer:
 *   1. Horizontal: tmp[r+1][c] = v[r][c-1] + v[r][c] + v[r][c+1], with the
 *      out-of-range tap dropped at the left/right edges (2-tap sums there).
 *      tmp rows 0 and N+1 remain all-zero (vertical zero padding).
 *   2. Vertical:   output[r][c] = tmp[r][c] + tmp[r+1][c] + tmp[r+2][c].
 *
 * v      : input image, row-major, N rows of M floats (read only)
 * M, N   : columns and rows; results are defined for M >= 1, N >= 1
 *          (the previous generated code read out of bounds when M == 1)
 * output : result, row-major, N rows of M floats
 *
 * For M >= 2 the arithmetic (including floating-point operation order up to
 * commutativity of individual additions) matches the original generated code.
 * If the intermediate buffer cannot be allocated, output is left untouched.
 */
void blurtwopart(float* v, int M, int N, float* output) {
    /* (N+2) x M intermediate; calloc zeroes the two padding rows. */
    float* tmp = (float*) calloc((size_t)(N + 2) * (size_t)M, sizeof(float));
    if (tmp == NULL)
        return;

    /* Pass 1: horizontal 3-tap sums with edge truncation. */
    for (int r = 0; r < N; r++) {
        const float* src = v + (size_t)r * (size_t)M;
        float* dst = tmp + (size_t)(r + 1) * (size_t)M;
        for (int c = 0; c < M; c++) {
            float sum = src[c];
            if (c > 0)
                sum += src[c - 1];
            if (c + 1 < M)
                sum += src[c + 1];
            dst[c] = sum;
        }
    }

    /* Pass 2: vertical 3-tap sums over the zero-padded buffer. */
#pragma omp parallel for
    for (int r = 0; r < N; r++) {
        const float* above = tmp + (size_t)r * (size_t)M;
        const float* mid = above + M;
        const float* below = mid + M;
        float* out_row = output + (size_t)r * (size_t)M;
        for (int c = 0; c < M; c++)
            out_row[c] = above[c] + mid[c] + below[c];
    }

    free(tmp);
}
tsynchtasks.c
#include <stdio.h> #include <stdlib.h> #include <omp.h> /* OpenMP */ long result=0; void foo() { #pragma omp parallel #pragma omp single { int argum = 1; #pragma omp taskgroup { #pragma omp task shared(result) firstprivate(argum) for (long i = 0; i < 10; i++) { #pragma omp atomic result += argum; } argum++; #pragma omp task shared(result) firstprivate(argum) for (long i = 0; i < 10; i++) { #pragma omp atomic result += argum; } #pragma omp taskwait argum = result; for (long i = 0; i < 10; i++) { #pragma omp task shared(result) firstprivate(argum) #pragma omp atomic result += argum; } } #pragma omp task firstprivate(result) firstprivate(argum) printf("Hello from third task, up to now result=%ld and argum = %d\n", result, argum); } } int main(int argc, char *argv[]) { foo(); printf("Back in main ... result = %ld\n", result); }
par_csr_matop_device.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #include "_hypre_utilities.h" #include "_hypre_parcsr_mv.h" #include "_hypre_utilities.hpp" #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_Int hypre_ParcsrGetExternalRowsDeviceInit( hypre_ParCSRMatrix *A, HYPRE_Int indices_len, HYPRE_BigInt *indices, hypre_ParCSRCommPkg *comm_pkg, HYPRE_Int want_data, void **request_ptr) { HYPRE_Int i, j; HYPRE_Int num_sends, num_rows_send, num_nnz_send, num_recvs, num_rows_recv, num_nnz_recv; HYPRE_Int *d_send_i, *send_i, *d_send_map, *d_recv_i, *recv_i; HYPRE_BigInt *d_send_j, *d_recv_j; HYPRE_Int *send_jstarts, *recv_jstarts; HYPRE_Complex *d_send_a = NULL, *d_recv_a = NULL; hypre_ParCSRCommPkg *comm_pkg_j; hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a; /* HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); */ /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* HYPRE_Int local_num_rows = hypre_CSRMatrixNumRows(A_diag); */ /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); /* HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(A); */ /* HYPRE_Int first_row = hypre_ParCSRMatrixFirstRowIndex(A); */ HYPRE_Int first_col = hypre_ParCSRMatrixFirstColDiag(A); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); 
HYPRE_BigInt *d_col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A); MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_Int num_procs; HYPRE_Int my_id; void **vrequest; hypre_CSRMatrix *A_ext; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); /* number of sends (#procs) */ num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); /* number of rows to send */ num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); /* number of recvs (#procs) */ num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); /* number of rows to recv */ num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs); /* must be true if indices contains proper offd indices */ hypre_assert(indices_len == num_rows_recv); /* send_i/recv_i: * the arrays to send and recv: we first send and recv the row lengths */ d_send_i = hypre_TAlloc(HYPRE_Int, num_rows_send + 1, HYPRE_MEMORY_DEVICE); d_send_map = hypre_TAlloc(HYPRE_Int, num_rows_send, HYPRE_MEMORY_DEVICE); send_i = hypre_TAlloc(HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST); recv_i = hypre_TAlloc(HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_HOST); d_recv_i = hypre_TAlloc(HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_DEVICE); /* fill the send array with row lengths */ hypre_TMemcpy(d_send_map, hypre_ParCSRCommPkgSendMapElmts(comm_pkg), HYPRE_Int, num_rows_send, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); hypre_Memset(d_send_i, 0, sizeof(HYPRE_Int), HYPRE_MEMORY_DEVICE); hypreDevice_GetRowNnz(num_rows_send, d_send_map, A_diag_i, A_offd_i, d_send_i + 1); /* send array send_i out: deviceTohost first and MPI (async) * note the shift in recv_i by one */ hypre_TMemcpy(send_i, d_send_i + 1, HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, send_i, recv_i + 1); hypreDevice_IntegerInclusiveScan(num_rows_send + 1, d_send_i); /* total number of nnz to send */ hypre_TMemcpy(&num_nnz_send, d_send_i + num_rows_send, HYPRE_Int, 1, HYPRE_MEMORY_HOST, 
HYPRE_MEMORY_DEVICE); /* prepare data to send out. overlap with the above commmunication */ d_send_j = hypre_TAlloc(HYPRE_BigInt, num_nnz_send, HYPRE_MEMORY_DEVICE); if (want_data) { d_send_a = hypre_TAlloc(HYPRE_Complex, num_nnz_send, HYPRE_MEMORY_DEVICE); } if (d_col_map_offd_A == NULL) { d_col_map_offd_A = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(d_col_map_offd_A, col_map_offd_A, HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDeviceColMapOffd(A) = d_col_map_offd_A; } /* job == 2, d_send_i is input that contains row ptrs (length num_rows_send) */ hypreDevice_CopyParCSRRows(num_rows_send, d_send_map, 2, num_procs > 1, first_col, d_col_map_offd_A, A_diag_i, A_diag_j, A_diag_a, A_offd_i, A_offd_j, A_offd_a, d_send_i, d_send_j, d_send_a); /* pointers to each proc in send_j */ send_jstarts = hypre_TAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST); send_jstarts[0] = 0; for (i = 1; i <= num_sends; i++) { send_jstarts[i] = send_jstarts[i - 1]; for ( j = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i - 1); j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); j++ ) { send_jstarts[i] += send_i[j]; } } hypre_assert(send_jstarts[num_sends] == num_nnz_send); /* finish the above communication: send_i/recv_i */ hypre_ParCSRCommHandleDestroy(comm_handle); /* adjust recv_i to ptrs */ recv_i[0] = 0; for (i = 1; i <= num_rows_recv; i++) { recv_i[i] += recv_i[i - 1]; } num_nnz_recv = recv_i[num_rows_recv]; /* allocate device memory for j and a */ d_recv_j = hypre_TAlloc(HYPRE_BigInt, num_nnz_recv, HYPRE_MEMORY_DEVICE); if (want_data) { d_recv_a = hypre_TAlloc(HYPRE_Complex, num_nnz_recv, HYPRE_MEMORY_DEVICE); } recv_jstarts = hypre_TAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST); recv_jstarts[0] = 0; for (i = 1; i <= num_recvs; i++) { j = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i); recv_jstarts[i] = recv_i[j]; } /* ready to send and recv: create a communication package for data */ comm_pkg_j = 
hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm (comm_pkg_j) = comm; hypre_ParCSRCommPkgNumSends (comm_pkg_j) = num_sends; hypre_ParCSRCommPkgSendProcs (comm_pkg_j) = hypre_ParCSRCommPkgSendProcs(comm_pkg); hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = send_jstarts; hypre_ParCSRCommPkgNumRecvs (comm_pkg_j) = num_recvs; hypre_ParCSRCommPkgRecvProcs (comm_pkg_j) = hypre_ParCSRCommPkgRecvProcs(comm_pkg); hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = recv_jstarts; /* init communication */ /* ja */ comm_handle_j = hypre_ParCSRCommHandleCreate_v2(21, comm_pkg_j, HYPRE_MEMORY_DEVICE, d_send_j, HYPRE_MEMORY_DEVICE, d_recv_j); if (want_data) { /* a */ comm_handle_a = hypre_ParCSRCommHandleCreate_v2(1, comm_pkg_j, HYPRE_MEMORY_DEVICE, d_send_a, HYPRE_MEMORY_DEVICE, d_recv_a); } else { comm_handle_a = NULL; } hypre_TMemcpy(d_recv_i, recv_i, HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); /* create A_ext: on device */ A_ext = hypre_CSRMatrixCreate(num_rows_recv, hypre_ParCSRMatrixGlobalNumCols(A), num_nnz_recv); hypre_CSRMatrixI (A_ext) = d_recv_i; hypre_CSRMatrixBigJ(A_ext) = d_recv_j; hypre_CSRMatrixData(A_ext) = d_recv_a; hypre_CSRMatrixMemoryLocation(A_ext) = HYPRE_MEMORY_DEVICE; /* output */ vrequest = hypre_TAlloc(void *, 3, HYPRE_MEMORY_HOST); vrequest[0] = (void *) comm_handle_j; vrequest[1] = (void *) comm_handle_a; vrequest[2] = (void *) A_ext; *request_ptr = (void *) vrequest; /* free */ hypre_TFree(send_i, HYPRE_MEMORY_HOST); hypre_TFree(recv_i, HYPRE_MEMORY_HOST); hypre_TFree(d_send_i, HYPRE_MEMORY_DEVICE); hypre_TFree(d_send_map, HYPRE_MEMORY_DEVICE); hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST); return hypre_error_flag; } hypre_CSRMatrix* hypre_ParcsrGetExternalRowsDeviceWait(void *vrequest) { void **request = (void **) vrequest; hypre_ParCSRCommHandle 
*comm_handle_j = (hypre_ParCSRCommHandle *) request[0]; hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1]; hypre_CSRMatrix *A_ext = (hypre_CSRMatrix *) request[2]; HYPRE_BigInt *send_j = comm_handle_j ? (HYPRE_BigInt *) hypre_ParCSRCommHandleSendData(comm_handle_j) : NULL; HYPRE_Complex *send_a = comm_handle_a ? (HYPRE_Complex *) hypre_ParCSRCommHandleSendData(comm_handle_a) : NULL; hypre_ParCSRCommHandleDestroy(comm_handle_j); hypre_ParCSRCommHandleDestroy(comm_handle_a); hypre_TFree(send_j, HYPRE_MEMORY_DEVICE); hypre_TFree(send_a, HYPRE_MEMORY_DEVICE); hypre_TFree(request, HYPRE_MEMORY_HOST); return A_ext; } hypre_CSRMatrix* hypre_MergeDiagAndOffdDevice(hypre_ParCSRMatrix *A) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int local_num_rows = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt glbal_num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt first_col = hypre_ParCSRMatrixFirstColDiag(A); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_BigInt *d_col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A); hypre_CSRMatrix *B; HYPRE_Int B_nrows = local_num_rows; HYPRE_BigInt B_ncols = glbal_num_cols; HYPRE_Int *B_i = hypre_TAlloc(HYPRE_Int, B_nrows + 1, HYPRE_MEMORY_DEVICE); HYPRE_BigInt *B_j; HYPRE_Complex *B_a; HYPRE_Int B_nnz; HYPRE_Int num_procs; hypre_MPI_Comm_size(comm, &num_procs); hypre_Memset(B_i, 0, sizeof(HYPRE_Int), HYPRE_MEMORY_DEVICE); hypreDevice_GetRowNnz(B_nrows, NULL, A_diag_i, A_offd_i, B_i + 1); 
hypreDevice_IntegerInclusiveScan(B_nrows + 1, B_i); /* total number of nnz */ hypre_TMemcpy(&B_nnz, B_i + B_nrows, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); B_j = hypre_TAlloc(HYPRE_BigInt, B_nnz, HYPRE_MEMORY_DEVICE); B_a = hypre_TAlloc(HYPRE_Complex, B_nnz, HYPRE_MEMORY_DEVICE); if (d_col_map_offd_A == NULL) { d_col_map_offd_A = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(d_col_map_offd_A, col_map_offd_A, HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDeviceColMapOffd(A) = d_col_map_offd_A; } hypreDevice_CopyParCSRRows(B_nrows, NULL, 2, num_procs > 1, first_col, d_col_map_offd_A, A_diag_i, A_diag_j, A_diag_a, A_offd_i, A_offd_j, A_offd_a, B_i, B_j, B_a); /* output */ B = hypre_CSRMatrixCreate(B_nrows, B_ncols, B_nnz); hypre_CSRMatrixI (B) = B_i; hypre_CSRMatrixBigJ(B) = B_j; hypre_CSRMatrixData(B) = B_a; hypre_CSRMatrixMemoryLocation(B) = HYPRE_MEMORY_DEVICE; hypre_SyncComputeStream(hypre_handle()); return B; } HYPRE_Int hypre_ExchangeExternalRowsDeviceInit( hypre_CSRMatrix *B_ext, hypre_ParCSRCommPkg *comm_pkg_A, HYPRE_Int want_data, void **request_ptr) { MPI_Comm comm = hypre_ParCSRCommPkgComm(comm_pkg_A); HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg_A); HYPRE_Int *recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg_A); HYPRE_Int *recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_A); HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg_A); HYPRE_Int *send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg_A); HYPRE_Int *send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A); HYPRE_Int num_elmts_send = send_map_starts[num_sends]; HYPRE_Int num_elmts_recv = recv_vec_starts[num_recvs]; HYPRE_Int *B_ext_i_d = hypre_CSRMatrixI(B_ext); HYPRE_BigInt *B_ext_j_d = hypre_CSRMatrixBigJ(B_ext); HYPRE_Complex *B_ext_a_d = hypre_CSRMatrixData(B_ext); HYPRE_Int B_ext_ncols = hypre_CSRMatrixNumCols(B_ext); HYPRE_Int B_ext_nrows = 
hypre_CSRMatrixNumRows(B_ext);
   HYPRE_Int        B_ext_nnz   = hypre_CSRMatrixNumNonzeros(B_ext);

   /* row-nnz counts of B_ext: device copy (nrows+1 to hold adjacent_difference
    * output) and a host mirror used to post the sends */
   HYPRE_Int *B_ext_rownnz_d = hypre_TAlloc(HYPRE_Int, B_ext_nrows + 1, HYPRE_MEMORY_DEVICE);
   HYPRE_Int *B_ext_rownnz_h = hypre_TAlloc(HYPRE_Int, B_ext_nrows,     HYPRE_MEMORY_HOST);
   HYPRE_Int *B_ext_i_h      = hypre_TAlloc(HYPRE_Int, B_ext_nrows + 1, HYPRE_MEMORY_HOST);

   hypre_assert(num_elmts_recv == B_ext_nrows);

   /* output matrix: B_int lives on the device and is assembled from the
    * rows received from the other ranks */
   hypre_CSRMatrix *B_int_d;
   HYPRE_Int        B_int_nrows = num_elmts_send;
   HYPRE_Int        B_int_ncols = B_ext_ncols;
   HYPRE_Int       *B_int_i_h   = hypre_TAlloc(HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_HOST);
   HYPRE_Int       *B_int_i_d   = hypre_TAlloc(HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_DEVICE);
   HYPRE_BigInt    *B_int_j_d   = NULL;
   HYPRE_Complex   *B_int_a_d   = NULL;
   HYPRE_Int        B_int_nnz;

   hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a;
   hypre_ParCSRCommPkg    *comm_pkg_j;
   HYPRE_Int              *jdata_recv_vec_starts;
   HYPRE_Int              *jdata_send_map_starts;
   HYPRE_Int               i;
   HYPRE_Int               num_procs, my_id; /* my_id only set by the rank query below */
   void                  **vrequest;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   jdata_send_map_starts = hypre_TAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST);

   /*--------------------------------------------------------------------------
    * B_ext_rownnz contains the number of elements of row j
    * (to be determined through send_map_elmnts on the receiving end)
    *--------------------------------------------------------------------------*/
   /* adjacent_difference writes the (unused) first element unchanged, hence
    * the copy below starts at B_ext_rownnz_d + 1 */
   HYPRE_THRUST_CALL(adjacent_difference, B_ext_i_d, B_ext_i_d + B_ext_nrows + 1, B_ext_rownnz_d);
   hypre_TMemcpy(B_ext_rownnz_h, B_ext_rownnz_d + 1, HYPRE_Int, B_ext_nrows,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);

   /*--------------------------------------------------------------------------
    * initialize communication: send/recv the row nnz
    * (note the use of comm_pkg_A, mode 12, as in transpose matvec)
    *--------------------------------------------------------------------------*/
   comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg_A, B_ext_rownnz_h, B_int_i_h + 1);

   jdata_recv_vec_starts    = hypre_TAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST);
   jdata_recv_vec_starts[0] = 0;

   /* turn the local row-nnz counts into row pointers for B_ext (host) */
   B_ext_i_h[0] = 0;
   hypre_TMemcpy(B_ext_i_h + 1, B_ext_rownnz_h, HYPRE_Int, B_ext_nrows,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
   for (i = 1; i <= B_ext_nrows; i++)
   {
      B_ext_i_h[i] += B_ext_i_h[i - 1];
   }
   hypre_assert(B_ext_i_h[B_ext_nrows] == B_ext_nnz);

   for (i = 1; i <= num_recvs; i++)
   {
      jdata_recv_vec_starts[i] = B_ext_i_h[recv_vec_starts[i]];
   }

   /* comm package for the j/data exchange: send/recv roles are swapped
    * with respect to comm_pkg_A */
   comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm(comm_pkg_j)      = comm;
   hypre_ParCSRCommPkgNumSends(comm_pkg_j)  = num_recvs;
   hypre_ParCSRCommPkgNumRecvs(comm_pkg_j)  = num_sends;
   hypre_ParCSRCommPkgSendProcs(comm_pkg_j) = recv_procs;
   hypre_ParCSRCommPkgRecvProcs(comm_pkg_j) = send_procs;

   /* destroying the handle completes the row-nnz exchange (cf. the Wait
    * routine below, which does the same for the j/data handles) */
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /*--------------------------------------------------------------------------
    * compute B_int: row nnz to row ptrs
    *--------------------------------------------------------------------------*/
   B_int_i_h[0] = 0;
   for (i = 1; i <= B_int_nrows; i++)
   {
      B_int_i_h[i] += B_int_i_h[i - 1];
   }

   B_int_nnz = B_int_i_h[B_int_nrows];

   B_int_j_d = hypre_TAlloc(HYPRE_BigInt, B_int_nnz, HYPRE_MEMORY_DEVICE);
   if (want_data)
   {
      B_int_a_d = hypre_TAlloc(HYPRE_Complex, B_int_nnz, HYPRE_MEMORY_DEVICE);
   }

   for (i = 0; i <= num_sends; i++)
   {
      jdata_send_map_starts[i] = B_int_i_h[send_map_starts[i]];
   }

   /* note the order of send/recv is reversed */
   hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = jdata_send_map_starts;
   hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = jdata_recv_vec_starts;

   /* send/recv CSR rows (device buffers on both ends) */
   if (want_data)
   {
      comm_handle_a = hypre_ParCSRCommHandleCreate_v2( 1, comm_pkg_j,
                                                       HYPRE_MEMORY_DEVICE, B_ext_a_d,
                                                       HYPRE_MEMORY_DEVICE, B_int_a_d );
   }
   else
   {
      comm_handle_a = NULL;
   }
   comm_handle_j = hypre_ParCSRCommHandleCreate_v2(21, comm_pkg_j,
                                                   HYPRE_MEMORY_DEVICE, B_ext_j_d,
                                                   HYPRE_MEMORY_DEVICE, B_int_j_d );

   hypre_TMemcpy(B_int_i_d, B_int_i_h, HYPRE_Int, B_int_nrows + 1,
                 HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);

   /* create CSR: on device */
   B_int_d = hypre_CSRMatrixCreate(B_int_nrows, B_int_ncols, B_int_nnz);
   hypre_CSRMatrixI(B_int_d)              = B_int_i_d;
   hypre_CSRMatrixBigJ(B_int_d)           = B_int_j_d;
   hypre_CSRMatrixData(B_int_d)           = B_int_a_d;
   hypre_CSRMatrixMemoryLocation(B_int_d) = HYPRE_MEMORY_DEVICE;

   /* output: pack the pending handles and the result matrix into the request */
   vrequest = hypre_TAlloc(void *, 3, HYPRE_MEMORY_HOST);
   vrequest[0] = (void *) comm_handle_j;
   vrequest[1] = (void *) comm_handle_a;
   vrequest[2] = (void *) B_int_d;

   *request_ptr = (void *) vrequest;

   /* free
    * NOTE(review): the j/data exchanges are still in flight here; this
    * assumes the comm handles no longer need comm_pkg_j's starts arrays
    * after creation — confirm against hypre_ParCSRCommHandleCreate_v2 */
   hypre_TFree(B_ext_rownnz_d, HYPRE_MEMORY_DEVICE);
   hypre_TFree(B_ext_rownnz_h, HYPRE_MEMORY_HOST);
   hypre_TFree(B_ext_i_h,      HYPRE_MEMORY_HOST);
   hypre_TFree(B_int_i_h,      HYPRE_MEMORY_HOST);

   hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/* Completes the exchange started by the matching *Init routine: waits on the
 * column-index and data communications and returns the received device CSR
 * matrix B_int. The request object allocated by *Init is freed here. */
hypre_CSRMatrix*
hypre_ExchangeExternalRowsDeviceWait(void *vrequest)
{
   void **request = (void **) vrequest;

   hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0];
   hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1];
   hypre_CSRMatrix        *B_int_d       = (hypre_CSRMatrix *)        request[2];

   /* communication done */
   hypre_ParCSRCommHandleDestroy(comm_handle_j);
   hypre_ParCSRCommHandleDestroy(comm_handle_a);

   hypre_TFree(request, HYPRE_MEMORY_HOST);

   return B_int_d;
}

/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

/* Starts the (asynchronous) extraction of the external rows of B needed by
 * this rank according to A's communication pattern; pair with the *Wait
 * routine below to obtain the resulting B_ext matrix. */
HYPRE_Int
hypre_ParCSRMatrixExtractBExtDeviceInit( hypre_ParCSRMatrix *B,
                                         hypre_ParCSRMatrix *A,
                                         HYPRE_Int want_data,
                                         void **request_ptr)
{
   hypre_assert( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(B)) ==
hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(B)) );

   /*
   hypre_assert( hypre_GetActualMemLocation(
            hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(B))) == HYPRE_MEMORY_DEVICE );
   */

   /* the comm package of A drives which rows of B are exchanged */
   if (!hypre_ParCSRMatrixCommPkg(A))
   {
      hypre_MatvecCommPkgCreate(A);
   }

   hypre_ParcsrGetExternalRowsDeviceInit(B,
                                         hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)),
                                         hypre_ParCSRMatrixColMapOffd(A),
                                         hypre_ParCSRMatrixCommPkg(A),
                                         want_data,
                                         request_ptr);
   return hypre_error_flag;
}

/* Completes the extraction started by hypre_ParCSRMatrixExtractBExtDeviceInit
 * and returns the external-rows matrix. */
hypre_CSRMatrix*
hypre_ParCSRMatrixExtractBExtDeviceWait(void *request)
{
   return hypre_ParcsrGetExternalRowsDeviceWait(request);
}

/* Synchronous convenience wrapper: Init immediately followed by Wait. */
hypre_CSRMatrix*
hypre_ParCSRMatrixExtractBExtDevice( hypre_ParCSRMatrix *B,
                                     hypre_ParCSRMatrix *A,
                                     HYPRE_Int want_data )
{
   void *request;

   hypre_ParCSRMatrixExtractBExtDeviceInit(B, A, want_data, &request);
   return hypre_ParCSRMatrixExtractBExtDeviceWait(request);
}

/* return B = [Adiag, Aoffd] */
#if 1
/* One warp per row: copies the diag entries of the row, then appends the
 * offd entries with their columns shifted past the diag columns.
 * cols_offd_map (optional) remaps offd column indices before the shift.
 * d_ib gives the output row pointers of B (precomputed by the caller). */
__global__ void
hypreCUDAKernel_ConcatDiagAndOffd(HYPRE_Int  nrows,     HYPRE_Int  diag_ncol,
                                  HYPRE_Int *d_diag_i,  HYPRE_Int *d_diag_j, HYPRE_Complex *d_diag_a,
                                  HYPRE_Int *d_offd_i,  HYPRE_Int *d_offd_j, HYPRE_Complex *d_offd_a,
                                  HYPRE_Int *cols_offd_map,
                                  HYPRE_Int *d_ib,      HYPRE_Int *d_jb,     HYPRE_Complex *d_ab)
{
   const HYPRE_Int row = hypre_cuda_get_grid_warp_id<1, 1>();

   if (row >= nrows)
   {
      return;
   }

   /* lane id inside the warp */
   const HYPRE_Int lane_id = hypre_cuda_get_lane_id<1>();
   HYPRE_Int i, j, k, p, istart, iend, bstart;

   /* diag part: lanes 0/1 load the row range, lane 0 loads the output start;
    * the values are then broadcast to the whole warp via __shfl_sync */
   if (lane_id < 2)
   {
      j = read_only_load(d_diag_i + row + lane_id);
   }
   if (lane_id == 0)
   {
      k = read_only_load(d_ib + row);
   }
   istart = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0);
   iend   = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1);
   bstart = __shfl_sync(HYPRE_WARP_FULL_MASK, k, 0);

   p = bstart - istart; /* offset from input position to output position */
   for (i = istart + lane_id; i < iend; i += HYPRE_WARP_SIZE)
   {
      d_jb[p + i] = read_only_load(d_diag_j + i);
      d_ab[p + i] = read_only_load(d_diag_a + i);
   }

   /* offd part: output continues right after the diag entries of this row */
   if (lane_id < 2)
   {
      j = read_only_load(d_offd_i + row + lane_id);
   }
   bstart += iend - istart;
   istart  = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0);
   iend    = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1);

   p = bstart - istart;
   for (i = istart + lane_id; i < iend; i += HYPRE_WARP_SIZE)
   {
      const HYPRE_Int t = read_only_load(d_offd_j + i);
      /* remap (if a map is given) and shift offd columns past the diag block */
      d_jb[p + i] = (cols_offd_map ? read_only_load(&cols_offd_map[t]) : t) + diag_ncol;
      d_ab[p + i] = read_only_load(d_offd_a + i);
   }
}

/* Returns B = [Adiag, Aoffd] as a single local device CSR matrix with
 * NumCols(Adiag) + NumCols(Aoffd) columns (offd columns shifted). */
hypre_CSRMatrix*
hypre_ConcatDiagAndOffdDevice(hypre_ParCSRMatrix *A)
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);

   hypre_CSRMatrix *B = hypre_CSRMatrixCreate( hypre_CSRMatrixNumRows(A_diag),
                                               hypre_CSRMatrixNumCols(A_diag) + hypre_CSRMatrixNumCols(A_offd),
                                               hypre_CSRMatrixNumNonzeros(A_diag) + hypre_CSRMatrixNumNonzeros(A_offd) );
   hypre_CSRMatrixInitialize_v2(B, 0, HYPRE_MEMORY_DEVICE);

   /* row nnz of B = row nnz of diag + row nnz of offd; then scan to row ptrs */
   hypreDevice_GetRowNnz(hypre_CSRMatrixNumRows(B), NULL,
                         hypre_CSRMatrixI(A_diag), hypre_CSRMatrixI(A_offd),
                         hypre_CSRMatrixI(B));

   HYPRE_THRUST_CALL( exclusive_scan,
                      hypre_CSRMatrixI(B),
                      hypre_CSRMatrixI(B) + hypre_CSRMatrixNumRows(B) + 1,
                      hypre_CSRMatrixI(B) );

   const dim3 bDim = hypre_GetDefaultDeviceBlockDimension();
   const dim3 gDim = hypre_GetDefaultDeviceGridDimension(hypre_CSRMatrixNumRows(A_diag), "warp", bDim);

   HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ConcatDiagAndOffd, gDim, bDim,
                      hypre_CSRMatrixNumRows(A_diag), hypre_CSRMatrixNumCols(A_diag),
                      hypre_CSRMatrixI(A_diag), hypre_CSRMatrixJ(A_diag), hypre_CSRMatrixData(A_diag),
                      hypre_CSRMatrixI(A_offd), hypre_CSRMatrixJ(A_offd), hypre_CSRMatrixData(A_offd),
                      NULL,
                      hypre_CSRMatrixI(B), hypre_CSRMatrixJ(B), hypre_CSRMatrixData(B) );

   return B;
}
#else
/* Alternative (disabled) implementation: builds B via COO triplets and a
 * stable sort by row index instead of the concat kernel. */
hypre_CSRMatrix*
hypre_ConcatDiagAndOffdDevice(hypre_ParCSRMatrix *A)
{
   hypre_CSRMatrix *A_diag     = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int       *A_diag_i   = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j   = hypre_CSRMatrixJ(A_diag);
   HYPRE_Complex   *A_diag_a   = hypre_CSRMatrixData(A_diag);
   HYPRE_Int        A_diag_nnz = hypre_CSRMatrixNumNonzeros(A_diag);

   hypre_CSRMatrix *A_offd     = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int       *A_offd_i   = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j   = hypre_CSRMatrixJ(A_offd);
   HYPRE_Complex   *A_offd_a   = hypre_CSRMatrixData(A_offd);
   HYPRE_Int        A_offd_nnz = hypre_CSRMatrixNumNonzeros(A_offd);

   hypre_CSRMatrix *B;
   HYPRE_Int        B_nrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int        B_ncols = hypre_CSRMatrixNumCols(A_diag) + hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Int        B_nnz   = A_diag_nnz + A_offd_nnz;

   HYPRE_Int     *B_ii = hypre_TAlloc(HYPRE_Int,     B_nnz, HYPRE_MEMORY_DEVICE);
   HYPRE_Int     *B_j  = hypre_TAlloc(HYPRE_Int,     B_nnz, HYPRE_MEMORY_DEVICE);
   HYPRE_Complex *B_a  = hypre_TAlloc(HYPRE_Complex, B_nnz, HYPRE_MEMORY_DEVICE);

   // Adiag
   HYPRE_Int *A_diag_ii = hypreDevice_CsrRowPtrsToIndices(B_nrows, A_diag_nnz, A_diag_i);
   HYPRE_THRUST_CALL( copy_n,
                      thrust::make_zip_iterator(thrust::make_tuple(A_diag_ii, A_diag_j, A_diag_a)),
                      A_diag_nnz,
                      thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_j, B_a)) );
   hypre_TFree(A_diag_ii, HYPRE_MEMORY_DEVICE);

   // Aoffd
   HYPRE_Int *A_offd_ii = hypreDevice_CsrRowPtrsToIndices(B_nrows, A_offd_nnz, A_offd_i);
   HYPRE_THRUST_CALL( copy_n,
                      thrust::make_zip_iterator(thrust::make_tuple(A_offd_ii, A_offd_a)),
                      A_offd_nnz,
                      thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_a)) + A_diag_nnz );
   hypre_TFree(A_offd_ii, HYPRE_MEMORY_DEVICE);

   /* shift offd columns past the diag block */
   HYPRE_THRUST_CALL( transform,
                      A_offd_j, A_offd_j + A_offd_nnz,
                      thrust::make_constant_iterator(hypre_CSRMatrixNumCols(A_diag)),
                      B_j + A_diag_nnz,
                      thrust::plus<HYPRE_Int>() );

   // B
   HYPRE_THRUST_CALL( stable_sort_by_key,
                      B_ii, B_ii + B_nnz,
                      thrust::make_zip_iterator(thrust::make_tuple(B_j, B_a)) );

   HYPRE_Int *B_i = hypreDevice_CsrRowIndicesToPtrs(B_nrows, B_nnz, B_ii);
   hypre_TFree(B_ii, HYPRE_MEMORY_DEVICE);

   B = hypre_CSRMatrixCreate(B_nrows, B_ncols, B_nnz);
   hypre_CSRMatrixI(B)              = B_i;
   hypre_CSRMatrixJ(B)              = B_j;
   hypre_CSRMatrixData(B)           = B_a;
   hypre_CSRMatrixMemoryLocation(B) = HYPRE_MEMORY_DEVICE;

   return B;
}
#endif

/* return B = [Adiag, Aoffd; E] */
#if 1
HYPRE_Int
hypre_ConcatDiagOffdAndExtDevice(hypre_ParCSRMatrix *A,
hypre_CSRMatrix *E,
                                 hypre_CSRMatrix **B_ptr,
                                 HYPRE_Int *num_cols_offd_ptr,
                                 HYPRE_BigInt **cols_map_offd_ptr)
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   hypre_CSRMatrix *E_diag, *E_offd, *B;
   HYPRE_Int       *cols_offd_map, num_cols_offd;
   HYPRE_BigInt    *cols_map_offd;

   /* split E (global columns) into its diag/offd parts relative to A's
    * column partition; also yields the merged offd column map and the
    * remapping of A's offd columns into it */
   hypre_CSRMatrixSplitDevice(E, hypre_ParCSRMatrixFirstColDiag(A), hypre_ParCSRMatrixLastColDiag(A),
                              hypre_CSRMatrixNumCols(A_offd), hypre_ParCSRMatrixDeviceColMapOffd(A),
                              &cols_offd_map, &num_cols_offd, &cols_map_offd, &E_diag, &E_offd);

   B = hypre_CSRMatrixCreate(hypre_ParCSRMatrixNumRows(A) + hypre_CSRMatrixNumRows(E),
                             hypre_ParCSRMatrixNumCols(A) + num_cols_offd,
                             hypre_CSRMatrixNumNonzeros(A_diag) + hypre_CSRMatrixNumNonzeros(A_offd) +
                             hypre_CSRMatrixNumNonzeros(E));
   hypre_CSRMatrixInitialize_v2(B, 0, HYPRE_MEMORY_DEVICE);

   /* top part of B: rows of [Adiag, Aoffd] — row nnz, scan, concat kernel */
   hypreDevice_GetRowNnz(hypre_ParCSRMatrixNumRows(A), NULL,
                         hypre_CSRMatrixI(A_diag), hypre_CSRMatrixI(A_offd),
                         hypre_CSRMatrixI(B));

   HYPRE_THRUST_CALL( exclusive_scan,
                      hypre_CSRMatrixI(B),
                      hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1,
                      hypre_CSRMatrixI(B) );

   dim3 bDim = hypre_GetDefaultDeviceBlockDimension();
   dim3 gDim = hypre_GetDefaultDeviceGridDimension(hypre_ParCSRMatrixNumRows(A), "warp", bDim);

   HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ConcatDiagAndOffd, gDim, bDim,
                      hypre_CSRMatrixNumRows(A_diag), hypre_CSRMatrixNumCols(A_diag),
                      hypre_CSRMatrixI(A_diag), hypre_CSRMatrixJ(A_diag), hypre_CSRMatrixData(A_diag),
                      hypre_CSRMatrixI(A_offd), hypre_CSRMatrixJ(A_offd), hypre_CSRMatrixData(A_offd),
                      cols_offd_map,
                      hypre_CSRMatrixI(B), hypre_CSRMatrixJ(B), hypre_CSRMatrixData(B) );

   hypre_TFree(cols_offd_map, HYPRE_MEMORY_DEVICE);

   /* bottom part of B: append E's row pointers, shifted by the nnz of the
    * A-rows already placed */
   hypre_TMemcpy(hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1,
                 hypre_CSRMatrixI(E) + 1,
                 HYPRE_Int, hypre_CSRMatrixNumRows(E),
                 HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);

   HYPRE_THRUST_CALL( transform,
                      hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1,
                      hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + hypre_CSRMatrixNumRows(E) + 1,
                      thrust::make_constant_iterator(hypre_CSRMatrixNumNonzeros(A_diag) +
                                                     hypre_CSRMatrixNumNonzeros(A_offd)),
                      hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1,
                      thrust::plus<HYPRE_Int>() );

   gDim = hypre_GetDefaultDeviceGridDimension(hypre_CSRMatrixNumRows(E), "warp", bDim);

   hypre_assert(hypre_CSRMatrixNumCols(E_diag) == hypre_CSRMatrixNumCols(A_diag));

   /* E_offd's columns are already in the merged map, so no remap here */
   HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ConcatDiagAndOffd, gDim, bDim,
                      hypre_CSRMatrixNumRows(E_diag), hypre_CSRMatrixNumCols(E_diag),
                      hypre_CSRMatrixI(E_diag), hypre_CSRMatrixJ(E_diag), hypre_CSRMatrixData(E_diag),
                      hypre_CSRMatrixI(E_offd), hypre_CSRMatrixJ(E_offd), hypre_CSRMatrixData(E_offd),
                      NULL,
                      hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A),
                      hypre_CSRMatrixJ(B), hypre_CSRMatrixData(B) );

   hypre_CSRMatrixDestroy(E_diag);
   hypre_CSRMatrixDestroy(E_offd);

   *B_ptr             = B;
   *num_cols_offd_ptr = num_cols_offd;
   *cols_map_offd_ptr = cols_map_offd;

   return hypre_error_flag;
}
#else
/* Alternative (disabled) implementation: builds B = [Adiag, Aoffd; E] via
 * COO triplets and a stable sort by row index. */
HYPRE_Int
hypre_ConcatDiagOffdAndExtDevice(hypre_ParCSRMatrix *A,
                                 hypre_CSRMatrix *E,
                                 hypre_CSRMatrix **B_ptr,
                                 HYPRE_Int *num_cols_offd_ptr,
                                 HYPRE_BigInt **cols_map_offd_ptr)
{
   hypre_CSRMatrix *A_diag     = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int        A_nrows    = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int        A_ncols    = hypre_CSRMatrixNumCols(A_diag);
   HYPRE_Int       *A_diag_i   = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j   = hypre_CSRMatrixJ(A_diag);
   HYPRE_Complex   *A_diag_a   = hypre_CSRMatrixData(A_diag);
   HYPRE_Int        A_diag_nnz = hypre_CSRMatrixNumNonzeros(A_diag);

   hypre_CSRMatrix *A_offd     = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int       *A_offd_i   = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j   = hypre_CSRMatrixJ(A_offd);
   HYPRE_Complex   *A_offd_a   = hypre_CSRMatrixData(A_offd);
   HYPRE_Int        A_offd_nnz = hypre_CSRMatrixNumNonzeros(A_offd);

   HYPRE_BigInt first_col_A     = hypre_ParCSRMatrixFirstColDiag(A);
   HYPRE_BigInt last_col_A      = hypre_ParCSRMatrixLastColDiag(A);
   HYPRE_Int    num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A);

   HYPRE_Int     *E_i    = hypre_CSRMatrixI(E);
   HYPRE_BigInt  *E_bigj = hypre_CSRMatrixBigJ(E);
   HYPRE_Complex *E_a    = hypre_CSRMatrixData(E);
   HYPRE_Int      E_nrows = hypre_CSRMatrixNumRows(E);
   HYPRE_Int      E_nnz   = hypre_CSRMatrixNumNonzeros(E);
   HYPRE_Int      E_diag_nnz, E_offd_nnz;

   hypre_CSRMatrix *B;
   HYPRE_Int        B_nnz = A_diag_nnz + A_offd_nnz + E_nnz;

   HYPRE_Int     *B_ii = hypre_TAlloc(HYPRE_Int,     B_nnz, HYPRE_MEMORY_DEVICE);
   HYPRE_Int     *B_j  = hypre_TAlloc(HYPRE_Int,     B_nnz, HYPRE_MEMORY_DEVICE);
   HYPRE_Complex *B_a  = hypre_TAlloc(HYPRE_Complex, B_nnz, HYPRE_MEMORY_DEVICE);

   // E
   /* first pass (job 0) only counts diag/offd nnz of E */
   hypre_CSRMatrixSplitDevice_core(0, E_nrows, E_nnz, NULL, E_bigj, NULL, NULL,
                                   first_col_A, last_col_A, num_cols_offd_A,
                                   NULL, NULL, NULL, NULL,
                                   &E_diag_nnz, NULL, NULL, NULL, NULL,
                                   &E_offd_nnz, NULL, NULL, NULL, NULL);

   HYPRE_Int    *cols_offd_map, num_cols_offd;
   HYPRE_BigInt *cols_map_offd;
   HYPRE_Int    *E_ii = hypreDevice_CsrRowPtrsToIndices(E_nrows, E_nnz, E_i);

   /* second pass (job 1) writes E's split triplets directly after A's slots */
   hypre_CSRMatrixSplitDevice_core(1, E_nrows, E_nnz, E_ii, E_bigj, E_a, NULL,
                                   first_col_A, last_col_A, num_cols_offd_A,
                                   col_map_offd_A, &cols_offd_map, &num_cols_offd, &cols_map_offd,
                                   &E_diag_nnz,
                                   B_ii + A_diag_nnz + A_offd_nnz,
                                   B_j  + A_diag_nnz + A_offd_nnz,
                                   B_a  + A_diag_nnz + A_offd_nnz,
                                   NULL,
                                   &E_offd_nnz,
                                   B_ii + A_diag_nnz + A_offd_nnz + E_diag_nnz,
                                   B_j  + A_diag_nnz + A_offd_nnz + E_diag_nnz,
                                   B_a  + A_diag_nnz + A_offd_nnz + E_diag_nnz,
                                   NULL);
   hypre_TFree(E_ii, HYPRE_MEMORY_DEVICE);

   /* E's rows sit below A's rows in B */
   HYPRE_THRUST_CALL( transform,
                      B_ii + A_diag_nnz + A_offd_nnz, B_ii + B_nnz,
                      thrust::make_constant_iterator(A_nrows),
                      B_ii + A_diag_nnz + A_offd_nnz,
                      thrust::plus<HYPRE_Int>() );

   // Adiag
   HYPRE_Int *A_diag_ii = hypreDevice_CsrRowPtrsToIndices(A_nrows, A_diag_nnz, A_diag_i);
   HYPRE_THRUST_CALL( copy_n,
                      thrust::make_zip_iterator(thrust::make_tuple(A_diag_ii, A_diag_j, A_diag_a)),
                      A_diag_nnz,
                      thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_j, B_a)) );
   hypre_TFree(A_diag_ii, HYPRE_MEMORY_DEVICE);

   // Aoffd
   HYPRE_Int *A_offd_ii = hypreDevice_CsrRowPtrsToIndices(A_nrows, A_offd_nnz, A_offd_i);
   HYPRE_THRUST_CALL( copy_n,
                      thrust::make_zip_iterator(thrust::make_tuple(A_offd_ii, A_offd_a)),
                      A_offd_nnz,
                      thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_a)) + A_diag_nnz );
   hypre_TFree(A_offd_ii, HYPRE_MEMORY_DEVICE);

   /* remap A's offd columns into the merged map, then shift past diag */
   HYPRE_THRUST_CALL( gather,
                      A_offd_j, A_offd_j + A_offd_nnz,
                      cols_offd_map,
                      B_j + A_diag_nnz);
   hypre_TFree(cols_offd_map, HYPRE_MEMORY_DEVICE);

   HYPRE_THRUST_CALL( transform,
                      B_j + A_diag_nnz, B_j + A_diag_nnz + A_offd_nnz,
                      thrust::make_constant_iterator(A_ncols),
                      B_j + A_diag_nnz,
                      thrust::plus<HYPRE_Int>() );

   /* E's offd columns are already in the merged map: only shift past diag */
   HYPRE_THRUST_CALL( transform,
                      B_j + A_diag_nnz + A_offd_nnz + E_diag_nnz, B_j + B_nnz,
                      thrust::make_constant_iterator(A_ncols),
                      B_j + A_diag_nnz + A_offd_nnz + E_diag_nnz,
                      thrust::plus<HYPRE_Int>() );

   // B
   HYPRE_THRUST_CALL( stable_sort_by_key,
                      B_ii, B_ii + B_nnz,
                      thrust::make_zip_iterator(thrust::make_tuple(B_j, B_a)) );

   HYPRE_Int *B_i = hypreDevice_CsrRowIndicesToPtrs(A_nrows + E_nrows, B_nnz, B_ii);
   hypre_TFree(B_ii, HYPRE_MEMORY_DEVICE);

   B = hypre_CSRMatrixCreate(A_nrows + E_nrows, A_ncols + num_cols_offd, B_nnz);
   hypre_CSRMatrixI(B)              = B_i;
   hypre_CSRMatrixJ(B)              = B_j;
   hypre_CSRMatrixData(B)           = B_a;
   hypre_CSRMatrixMemoryLocation(B) = HYPRE_MEMORY_DEVICE;

   *B_ptr             = B;
   *num_cols_offd_ptr = num_cols_offd;
   *cols_map_offd_ptr = cols_map_offd;

   return hypre_error_flag;
}
#endif

/* Device version of GetRow: returns nnz (and optionally global column
 * indices / values) of one locally owned row. Marks the matrix "getrow
 * active"; returns -1 if already active or if the row is not local. */
HYPRE_Int
hypre_ParCSRMatrixGetRowDevice( hypre_ParCSRMatrix  *mat,
                                HYPRE_BigInt         row,
                                HYPRE_Int           *size,
                                HYPRE_BigInt       **col_ind,
                                HYPRE_Complex      **values )
{
   HYPRE_Int nrows, local_row;
   HYPRE_BigInt row_start, row_end;
   hypre_CSRMatrix *Aa;
   hypre_CSRMatrix *Ba;

   if (!mat)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   Aa = (hypre_CSRMatrix *) hypre_ParCSRMatrixDiag(mat);
   Ba = (hypre_CSRMatrix *) hypre_ParCSRMatrixOffd(mat);

   if (hypre_ParCSRMatrixGetrowactive(mat))
   {
      return (-1);
   }

   hypre_ParCSRMatrixGetrowactive(mat) = 1;

   row_start = hypre_ParCSRMatrixFirstRowIndex(mat);
   row_end   =
hypre_ParCSRMatrixLastRowIndex(mat) + 1;
   nrows     = row_end - row_start;

   if (row < row_start || row >= row_end)
   {
      /* NOTE(review): returns -1 here without clearing Getrowactive,
       * which was set above — confirm this matches the host GetRow */
      return (-1);
   }

   local_row = row - row_start;

   /* if buffer is not allocated and some information is requested,
      allocate buffer with the max row_nnz */
   if ( !hypre_ParCSRMatrixRowvalues(mat) && (col_ind || values) )
   {
      HYPRE_Int max_row_nnz;
      HYPRE_Int *row_nnz = hypre_TAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_DEVICE);

      /* row nnz over diag+offd for every local row; also fetch this row's */
      hypreDevice_GetRowNnz(nrows, NULL, hypre_CSRMatrixI(Aa), hypre_CSRMatrixI(Ba), row_nnz);

      hypre_TMemcpy(size, row_nnz + local_row, HYPRE_Int, 1,
                    HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);

      max_row_nnz = HYPRE_THRUST_CALL(reduce, row_nnz, row_nnz + nrows, 0, thrust::maximum<HYPRE_Int>());

      /*
      HYPRE_Int *max_row_nnz_d = HYPRE_THRUST_CALL(max_element, row_nnz, row_nnz + nrows);
      hypre_TMemcpy( &max_row_nnz, max_row_nnz_d, HYPRE_Int, 1,
                     HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE );
      */

      hypre_TFree(row_nnz, HYPRE_MEMORY_DEVICE);

      hypre_ParCSRMatrixRowvalues(mat)  =
         (HYPRE_Complex *) hypre_TAlloc(HYPRE_Complex, max_row_nnz, hypre_ParCSRMatrixMemoryLocation(mat));
      hypre_ParCSRMatrixRowindices(mat) =
         (HYPRE_BigInt *)  hypre_TAlloc(HYPRE_BigInt,  max_row_nnz, hypre_ParCSRMatrixMemoryLocation(mat));
   }
   else
   {
      /* buffers already exist (or nothing requested): only compute this
       * row's nnz on the device and copy it back */
      HYPRE_Int *size_d = hypre_TAlloc(HYPRE_Int, 1, HYPRE_MEMORY_DEVICE);
      hypreDevice_GetRowNnz(1, NULL, hypre_CSRMatrixI(Aa) + local_row, hypre_CSRMatrixI(Ba) + local_row, size_d);
      hypre_TMemcpy(size, size_d, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
      hypre_TFree(size_d, HYPRE_MEMORY_DEVICE);
   }

   if (col_ind || values)
   {
      /* the copy kernel needs the offd column map on the device */
      if (hypre_ParCSRMatrixDeviceColMapOffd(mat) == NULL)
      {
         hypre_ParCSRMatrixDeviceColMapOffd(mat) =
            hypre_TAlloc(HYPRE_BigInt, hypre_CSRMatrixNumCols(Ba), HYPRE_MEMORY_DEVICE);

         hypre_TMemcpy( hypre_ParCSRMatrixDeviceColMapOffd(mat),
                        hypre_ParCSRMatrixColMapOffd(mat),
                        HYPRE_BigInt, hypre_CSRMatrixNumCols(Ba),
                        HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST );
      }

      hypreDevice_CopyParCSRRows( 1, NULL, -1, Ba != NULL,
                                  hypre_ParCSRMatrixFirstColDiag(mat),
                                  hypre_ParCSRMatrixDeviceColMapOffd(mat),
                                  hypre_CSRMatrixI(Aa) + local_row,
                                  hypre_CSRMatrixJ(Aa),
                                  hypre_CSRMatrixData(Aa),
                                  hypre_CSRMatrixI(Ba) + local_row,
                                  hypre_CSRMatrixJ(Ba),
                                  hypre_CSRMatrixData(Ba),
                                  NULL,
                                  hypre_ParCSRMatrixRowindices(mat),
                                  hypre_ParCSRMatrixRowvalues(mat) );
   }

   if (col_ind)
   {
      *col_ind = hypre_ParCSRMatrixRowindices(mat);
   }

   if (values)
   {
      *values = hypre_ParCSRMatrixRowvalues(mat);
   }

   hypre_SyncComputeStream(hypre_handle());

   return hypre_error_flag;
}

/* Get element-wise tolerances based on row norms for ParCSRMatrix
 * NOTE: Keep the diagonal, i.e. elmt_tol = 0.0 for diagonals
 * Output vectors have size nnz:
 *    elmt_tols_diag[j] = tol * (norm of row i) for j in [ A_diag_i[i] , A_diag_i[i+1] )
 *    elmt_tols_offd[j] = tol * (norm of row i) for j in [ A_offd_i[i] , A_offd_i[i+1] )
 * type == -1, infinity norm,
 *          1, 1-norm
 *          2, 2-norm
 */
template<HYPRE_Int type>
__global__ void
hypre_ParCSRMatrixDropSmallEntriesDevice_getElmtTols( HYPRE_Int      nrows,
                                                      HYPRE_Real     tol,
                                                      HYPRE_Int     *A_diag_i,
                                                      HYPRE_Int     *A_diag_j,
                                                      HYPRE_Complex *A_diag_a,
                                                      HYPRE_Int     *A_offd_i,
                                                      HYPRE_Complex *A_offd_a,
                                                      HYPRE_Real    *elmt_tols_diag,
                                                      HYPRE_Real    *elmt_tols_offd)
{
   /* one warp per row */
   HYPRE_Int row_i = hypre_cuda_get_grid_warp_id<1, 1>();

   if (row_i >= nrows)
   {
      return;
   }

   HYPRE_Int lane = hypre_cuda_get_lane_id<1>();
   HYPRE_Int p_diag, p_offd, q_diag, q_offd;

   /* sum row norm over diag part */
   if (lane < 2)
   {
      p_diag = read_only_load(A_diag_i + row_i + lane);
   }
   q_diag = __shfl_sync(HYPRE_WARP_FULL_MASK, p_diag, 1);
   p_diag = __shfl_sync(HYPRE_WARP_FULL_MASK, p_diag, 0);

   HYPRE_Real row_norm_i = 0.0;

   for (HYPRE_Int j = p_diag + lane; j < q_diag; j += HYPRE_WARP_SIZE)
   {
      HYPRE_Complex val = A_diag_a[j];

      if (type == -1)
      {
         row_norm_i = hypre_max(row_norm_i, hypre_cabs(val));
      }
      else if (type == 1)
      {
         row_norm_i += hypre_cabs(val);
      }
      else if (type == 2)
      {
         row_norm_i += val * val; /* square-root taken after the allreduce */
      }
   }

   /* sum row norm over offd part */
   if (lane < 2)
   {
      p_offd = read_only_load(A_offd_i + row_i + lane);
   }
   q_offd = __shfl_sync(HYPRE_WARP_FULL_MASK, p_offd, 1);
   p_offd = __shfl_sync(HYPRE_WARP_FULL_MASK, p_offd, 0);

   for (HYPRE_Int j = p_offd + lane; j < q_offd; j += HYPRE_WARP_SIZE)
   {
      HYPRE_Complex val = A_offd_a[j];

      if (type == -1)
      {
         row_norm_i = hypre_max(row_norm_i, hypre_cabs(val));
      }
      else if (type == 1)
      {
         row_norm_i += hypre_cabs(val);
      }
      else if (type == 2)
      {
         row_norm_i += val * val;
      }
   }

   /* allreduce to get the row norm on all threads */
   if (type == -1)
   {
      row_norm_i = warp_allreduce_max(row_norm_i);
   }
   else
   {
      row_norm_i = warp_allreduce_sum(row_norm_i);
   }
   if (type == 2)
   {
      row_norm_i = sqrt(row_norm_i);
   }

   /* set elmt_tols_diag */
   for (HYPRE_Int j = p_diag + lane; j < q_diag; j += HYPRE_WARP_SIZE)
   {
      HYPRE_Int col = A_diag_j[j];

      /* elmt_tol = 0.0 ensures diagonal will be kept */
      if (col == row_i)
      {
         elmt_tols_diag[j] = 0.0;
      }
      else
      {
         elmt_tols_diag[j] = tol * row_norm_i;
      }
   }

   /* set elmt_tols_offd */
   for (HYPRE_Int j = p_offd + lane; j < q_offd; j += HYPRE_WARP_SIZE)
   {
      elmt_tols_offd[j] = tol * row_norm_i;
   }
}

/* drop the entries that are not on the diagonal and smaller than:
 *    type 0: tol
 *    type 1: tol*(1-norm of row)
 *    type 2: tol*(2-norm of row)
 *    type -1: tol*(infinity norm of row) */
HYPRE_Int
hypre_ParCSRMatrixDropSmallEntriesDevice( hypre_ParCSRMatrix *A,
                                          HYPRE_Complex       tol,
                                          HYPRE_Int           type)
{
   hypre_CSRMatrix *A_diag          = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd          = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int        num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt    *h_col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
   HYPRE_BigInt    *col_map_offd_A   = hypre_ParCSRMatrixDeviceColMapOffd(A);
   HYPRE_Real      *elmt_tols_diag   = NULL;
   HYPRE_Real      *elmt_tols_offd   = NULL;

   /* lazily mirror the host offd column map on the device */
   if (col_map_offd_A == NULL)
   {
      col_map_offd_A = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE);
      hypre_TMemcpy(col_map_offd_A, h_col_map_offd_A, HYPRE_BigInt, num_cols_A_offd,
                    HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixDeviceColMapOffd(A) = col_map_offd_A;
   }

   /* get element-wise tolerances if needed */
   if (type != 0)
   {
      elmt_tols_diag = hypre_TAlloc(HYPRE_Real, hypre_CSRMatrixNumNonzeros(A_diag), HYPRE_MEMORY_DEVICE);
      elmt_tols_offd = hypre_TAlloc(HYPRE_Real, hypre_CSRMatrixNumNonzeros(A_offd), HYPRE_MEMORY_DEVICE);
   }

   dim3 bDim = hypre_GetDefaultDeviceBlockDimension();
   dim3 gDim = hypre_GetDefaultDeviceGridDimension(hypre_CSRMatrixNumRows(A_diag), "warp", bDim);

   if (type == -1)
   {
      HYPRE_CUDA_LAUNCH( hypre_ParCSRMatrixDropSmallEntriesDevice_getElmtTols < -1 >, gDim, bDim,
                         hypre_CSRMatrixNumRows(A_diag), tol,
                         hypre_CSRMatrixI(A_diag), hypre_CSRMatrixJ(A_diag), hypre_CSRMatrixData(A_diag),
                         hypre_CSRMatrixI(A_offd), hypre_CSRMatrixData(A_offd),
                         elmt_tols_diag, elmt_tols_offd);
   }

   if (type == 1)
   {
      HYPRE_CUDA_LAUNCH( hypre_ParCSRMatrixDropSmallEntriesDevice_getElmtTols<1>, gDim, bDim,
                         hypre_CSRMatrixNumRows(A_diag), tol,
                         hypre_CSRMatrixI(A_diag), hypre_CSRMatrixJ(A_diag), hypre_CSRMatrixData(A_diag),
                         hypre_CSRMatrixI(A_offd), hypre_CSRMatrixData(A_offd),
                         elmt_tols_diag, elmt_tols_offd);
   }

   if (type == 2)
   {
      HYPRE_CUDA_LAUNCH( hypre_ParCSRMatrixDropSmallEntriesDevice_getElmtTols<2>, gDim, bDim,
                         hypre_CSRMatrixNumRows(A_diag), tol,
                         hypre_CSRMatrixI(A_diag), hypre_CSRMatrixJ(A_diag), hypre_CSRMatrixData(A_diag),
                         hypre_CSRMatrixI(A_offd), hypre_CSRMatrixData(A_offd),
                         elmt_tols_diag, elmt_tols_offd);
   }

   /* drop entries from diag and offd CSR matrices */
   hypre_CSRMatrixDropSmallEntriesDevice(A_diag, tol, elmt_tols_diag);
   hypre_CSRMatrixDropSmallEntriesDevice(A_offd, tol, elmt_tols_offd);

   hypre_ParCSRMatrixSetNumNonzeros(A);
   hypre_ParCSRMatrixDNumNonzeros(A) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(A);

   /* squeeze out zero columns of A_offd */
   HYPRE_Int *tmp_j, *tmp_end, num_cols_A_offd_new;
   tmp_j = hypre_TAlloc(HYPRE_Int, hypre_CSRMatrixNumNonzeros(A_offd), HYPRE_MEMORY_DEVICE);
   hypre_TMemcpy(tmp_j, hypre_CSRMatrixJ(A_offd), HYPRE_Int, hypre_CSRMatrixNumNonzeros(A_offd),
                 HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
   HYPRE_THRUST_CALL( sort, tmp_j, tmp_j +
hypre_CSRMatrixNumNonzeros(A_offd) );
   /* tmp_j now holds the sorted, unique offd columns that still occur */
   tmp_end = HYPRE_THRUST_CALL( unique, tmp_j, tmp_j + hypre_CSRMatrixNumNonzeros(A_offd) );
   num_cols_A_offd_new = tmp_end - tmp_j;

   hypre_assert(num_cols_A_offd_new <= num_cols_A_offd);

   if (num_cols_A_offd_new < num_cols_A_offd)
   {
      hypre_CSRMatrixNumCols(A_offd) = num_cols_A_offd_new;

      /* offd_mark[old col] = new (compressed) col index */
      HYPRE_Int *offd_mark = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_DEVICE);
      HYPRE_BigInt *col_map_offd_A_new = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd_new, HYPRE_MEMORY_DEVICE);

      HYPRE_THRUST_CALL( scatter,
                         thrust::counting_iterator<HYPRE_Int>(0),
                         thrust::counting_iterator<HYPRE_Int>(num_cols_A_offd_new),
                         tmp_j,
                         offd_mark );
      /* renumber the J array of A_offd in place */
      HYPRE_THRUST_CALL( gather,
                         hypre_CSRMatrixJ(A_offd),
                         hypre_CSRMatrixJ(A_offd) + hypre_CSRMatrixNumNonzeros(A_offd),
                         offd_mark,
                         hypre_CSRMatrixJ(A_offd) );
      /* compress the device column map accordingly */
      HYPRE_THRUST_CALL( gather,
                         tmp_j,
                         tmp_j + num_cols_A_offd_new,
                         col_map_offd_A,
                         col_map_offd_A_new );

      hypre_TFree(offd_mark, HYPRE_MEMORY_DEVICE);
      hypre_TFree(col_map_offd_A, HYPRE_MEMORY_DEVICE);
      hypre_TFree(h_col_map_offd_A, HYPRE_MEMORY_HOST);

      hypre_ParCSRMatrixDeviceColMapOffd(A) = col_map_offd_A_new;
      hypre_ParCSRMatrixColMapOffd(A) = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd_new, HYPRE_MEMORY_HOST);
      hypre_TMemcpy(hypre_ParCSRMatrixColMapOffd(A), col_map_offd_A_new, HYPRE_BigInt, num_cols_A_offd_new,
                    HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
   }

   if (type != 0)
   {
      hypre_TFree(elmt_tols_diag, HYPRE_MEMORY_DEVICE);
      hypre_TFree(elmt_tols_offd, HYPRE_MEMORY_DEVICE);
   }

   hypre_TFree(tmp_j, HYPRE_MEMORY_DEVICE);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixTransposeDevice
 *
 * Builds AT = A^T on the device. The offd part of AT is assembled from the
 * transposed offd of A exchanged across ranks (each rank receives the rows
 * of A^T it owns), then compressed to a new offd column map. `data != 0`
 * transposes the values as well as the pattern.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixTransposeDevice( hypre_ParCSRMatrix  *A,
                                   hypre_ParCSRMatrix **AT_ptr,
                                   HYPRE_Int            data )
{
   hypre_CSRMatrix    *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix    *A_offd = hypre_ParCSRMatrixOffd(A);
   hypre_CSRMatrix    *A_diagT;
   hypre_CSRMatrix    *AT_offd;
   HYPRE_Int           num_procs;
   HYPRE_Int           num_cols_offd_AT = 0;
   HYPRE_BigInt       *col_map_offd_AT = NULL;
   hypre_ParCSRMatrix *AT;

   hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &num_procs);

   if (num_procs > 1)
   {
      void *request;
      hypre_CSRMatrix *A_offdT, *Aext;
      HYPRE_Int       *Aext_ii, *Aext_j, Aext_nnz;
      HYPRE_Complex   *Aext_data;
      HYPRE_BigInt    *tmp_bigj;

      hypre_CSRMatrixTranspose(A_offd, &A_offdT, data);
      /* globalize A_offdT's columns (rows of A) before exchanging */
      hypre_CSRMatrixBigJ(A_offdT) = hypre_TAlloc(HYPRE_BigInt, hypre_CSRMatrixNumNonzeros(A_offdT),
                                                  HYPRE_MEMORY_DEVICE);

      HYPRE_THRUST_CALL( transform,
                         hypre_CSRMatrixJ(A_offdT),
                         hypre_CSRMatrixJ(A_offdT) + hypre_CSRMatrixNumNonzeros(A_offdT),
                         thrust::make_constant_iterator(hypre_ParCSRMatrixFirstRowIndex(A)),
                         hypre_CSRMatrixBigJ(A_offdT),
                         thrust::plus<HYPRE_BigInt>() );

      if (!hypre_ParCSRMatrixCommPkg(A))
      {
         hypre_MatvecCommPkgCreate(A);
      }

      /* overlap the diag transpose with the exchange of A_offdT's rows */
      hypre_ExchangeExternalRowsDeviceInit(A_offdT, hypre_ParCSRMatrixCommPkg(A), data, &request);

      hypre_CSRMatrixTranspose(A_diag, &A_diagT, data);

      Aext = hypre_ExchangeExternalRowsDeviceWait(request);

      hypre_CSRMatrixDestroy(A_offdT);

      // Aext contains offd of AT
      Aext_nnz = hypre_CSRMatrixNumNonzeros(Aext);
      Aext_ii  = hypreDevice_CsrRowPtrsToIndices(hypre_CSRMatrixNumRows(Aext), Aext_nnz,
                                                 hypre_CSRMatrixI(Aext));

      /* map received row positions to local rows of AT via the send map */
      hypre_ParCSRCommPkgCopySendMapElmtsToDevice(hypre_ParCSRMatrixCommPkg(A));

      HYPRE_THRUST_CALL( gather,
                         Aext_ii,
                         Aext_ii + Aext_nnz,
                         hypre_ParCSRCommPkgDeviceSendMapElmts(hypre_ParCSRMatrixCommPkg(A)),
                         Aext_ii );

      /* sorted unique global columns of Aext become AT's offd column map */
      tmp_bigj = hypre_TAlloc(HYPRE_BigInt, Aext_nnz, HYPRE_MEMORY_DEVICE);
      hypre_TMemcpy(tmp_bigj, hypre_CSRMatrixBigJ(Aext), HYPRE_BigInt, Aext_nnz,
                    HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);

      HYPRE_THRUST_CALL( sort, tmp_bigj, tmp_bigj + Aext_nnz );

      HYPRE_BigInt *new_end = HYPRE_THRUST_CALL( unique, tmp_bigj, tmp_bigj + Aext_nnz );

      num_cols_offd_AT = new_end - tmp_bigj;
      col_map_offd_AT = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_AT, HYPRE_MEMORY_DEVICE);
      hypre_TMemcpy(col_map_offd_AT, tmp_bigj, HYPRE_BigInt, num_cols_offd_AT,
                    HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);

      hypre_TFree(tmp_bigj, HYPRE_MEMORY_DEVICE);

      /* localize Aext's big columns into [0, num_cols_offd_AT) */
      Aext_j = hypre_TAlloc(HYPRE_Int, Aext_nnz, HYPRE_MEMORY_DEVICE);

      HYPRE_THRUST_CALL( lower_bound,
                         col_map_offd_AT,
                         col_map_offd_AT + num_cols_offd_AT,
                         hypre_CSRMatrixBigJ(Aext),
                         hypre_CSRMatrixBigJ(Aext) + Aext_nnz,
                         Aext_j );

      /* steal the data array before destroying Aext */
      Aext_data = hypre_CSRMatrixData(Aext);
      hypre_CSRMatrixData(Aext) = NULL;
      hypre_CSRMatrixDestroy(Aext);

      if (data)
      {
         hypreDevice_StableSortByTupleKey(Aext_nnz, Aext_ii, Aext_j, Aext_data, 0);
      }
      else
      {
         HYPRE_THRUST_CALL( stable_sort,
                            thrust::make_zip_iterator(thrust::make_tuple(Aext_ii, Aext_j)),
                            thrust::make_zip_iterator(thrust::make_tuple(Aext_ii, Aext_j)) + Aext_nnz );
      }

      AT_offd = hypre_CSRMatrixCreate(hypre_ParCSRMatrixNumCols(A), num_cols_offd_AT, Aext_nnz);
      hypre_CSRMatrixJ(AT_offd)    = Aext_j;
      hypre_CSRMatrixData(AT_offd) = Aext_data;
      hypre_CSRMatrixInitialize_v2(AT_offd, 0, HYPRE_MEMORY_DEVICE);
      hypreDevice_CsrRowIndicesToPtrs_v2(hypre_CSRMatrixNumRows(AT_offd), Aext_nnz, Aext_ii,
                                         hypre_CSRMatrixI(AT_offd));
      hypre_TFree(Aext_ii, HYPRE_MEMORY_DEVICE);
   }
   else
   {
      /* single rank: no offd part to exchange */
      hypre_CSRMatrixTransposeDevice(A_diag, &A_diagT, data);
      AT_offd = hypre_CSRMatrixCreate(hypre_ParCSRMatrixNumCols(A), 0, 0);
      hypre_CSRMatrixInitialize_v2(AT_offd, 0, HYPRE_MEMORY_DEVICE);
   }

   /* note: row/col starts and global sizes of A are swapped for AT */
   AT = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A),
                                 hypre_ParCSRMatrixGlobalNumCols(A),
                                 hypre_ParCSRMatrixGlobalNumRows(A),
                                 hypre_ParCSRMatrixColStarts(A),
                                 hypre_ParCSRMatrixRowStarts(A),
                                 num_cols_offd_AT,
                                 hypre_CSRMatrixNumNonzeros(A_diagT),
                                 hypre_CSRMatrixNumNonzeros(AT_offd));

   hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(AT));
   hypre_ParCSRMatrixDiag(AT) = A_diagT;

   hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(AT));
   hypre_ParCSRMatrixOffd(AT) = AT_offd;

   if (num_cols_offd_AT)
   {
      hypre_ParCSRMatrixDeviceColMapOffd(AT) = col_map_offd_AT;

      hypre_ParCSRMatrixColMapOffd(AT) = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_AT, HYPRE_MEMORY_HOST);
      hypre_TMemcpy(hypre_ParCSRMatrixColMapOffd(AT), col_map_offd_AT,
HYPRE_BigInt, num_cols_offd_AT,
                    HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
   }

   *AT_ptr = AT;

   return hypre_error_flag;
}

/* Computes C = alpha*A + beta*B on the device. A and B must share the same
 * row/column partition; C's offd column map is the sorted union of A's and
 * B's offd maps. */
HYPRE_Int
hypre_ParCSRMatrixAddDevice( HYPRE_Complex        alpha,
                             hypre_ParCSRMatrix  *A,
                             HYPRE_Complex        beta,
                             hypre_ParCSRMatrix  *B,
                             hypre_ParCSRMatrix **C_ptr )
{
   hypre_CSRMatrix *A_diag          = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd          = hypre_ParCSRMatrixOffd(A);
   hypre_CSRMatrix *B_diag          = hypre_ParCSRMatrixDiag(B);
   hypre_CSRMatrix *B_offd          = hypre_ParCSRMatrixOffd(B);
   HYPRE_Int        num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Int        num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd);
   HYPRE_Int        num_cols_offd_C = 0;
   HYPRE_BigInt    *d_col_map_offd_C = NULL;
   HYPRE_Int        num_procs;

   hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &num_procs);

   hypre_CSRMatrix *C_diag = hypre_CSRMatrixAddDevice(alpha, A_diag, beta, B_diag);
   hypre_CSRMatrix *C_offd;

   //if (num_cols_offd_A || num_cols_offd_B)
   if (num_procs > 1)
   {
      hypre_ParCSRMatrixCopyColMapOffdToDevice(A);
      hypre_ParCSRMatrixCopyColMapOffdToDevice(B);

      /* C's offd column map = sorted unique union of A's and B's maps */
      HYPRE_BigInt *tmp = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_A + num_cols_offd_B, HYPRE_MEMORY_DEVICE);
      hypre_TMemcpy(tmp, hypre_ParCSRMatrixDeviceColMapOffd(A), HYPRE_BigInt, num_cols_offd_A,
                    HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
      hypre_TMemcpy(tmp + num_cols_offd_A, hypre_ParCSRMatrixDeviceColMapOffd(B), HYPRE_BigInt, num_cols_offd_B,
                    HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);

      HYPRE_THRUST_CALL( sort, tmp, tmp + num_cols_offd_A + num_cols_offd_B );
      HYPRE_BigInt *new_end = HYPRE_THRUST_CALL( unique, tmp, tmp + num_cols_offd_A + num_cols_offd_B );
      num_cols_offd_C = new_end - tmp;
      d_col_map_offd_C = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_DEVICE);
      hypre_TMemcpy(d_col_map_offd_C, tmp, HYPRE_BigInt, num_cols_offd_C,
                    HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);

      /* reuse memory of tmp */
      HYPRE_Int *offd_A2C = (HYPRE_Int *) tmp;
      HYPRE_Int *offd_B2C = offd_A2C + num_cols_offd_A;

      /* maps from A's/B's offd columns into C's merged offd columns */
      HYPRE_THRUST_CALL( lower_bound,
                         d_col_map_offd_C,
                         d_col_map_offd_C + num_cols_offd_C,
                         hypre_ParCSRMatrixDeviceColMapOffd(A),
                         hypre_ParCSRMatrixDeviceColMapOffd(A) + num_cols_offd_A,
                         offd_A2C );
      HYPRE_THRUST_CALL( lower_bound,
                         d_col_map_offd_C,
                         d_col_map_offd_C + num_cols_offd_C,
                         hypre_ParCSRMatrixDeviceColMapOffd(B),
                         hypre_ParCSRMatrixDeviceColMapOffd(B) + num_cols_offd_B,
                         offd_B2C );

      HYPRE_Int     *C_offd_i, *C_offd_j, nnzC_offd;
      HYPRE_Complex *C_offd_a;

      hypreDevice_CSRSpAdd( hypre_CSRMatrixNumRows(A_offd),
                            hypre_CSRMatrixNumRows(B_offd),
                            num_cols_offd_C,
                            hypre_CSRMatrixNumNonzeros(A_offd),
                            hypre_CSRMatrixNumNonzeros(B_offd),
                            hypre_CSRMatrixI(A_offd),
                            hypre_CSRMatrixJ(A_offd),
                            alpha,
                            hypre_CSRMatrixData(A_offd),
                            offd_A2C,
                            hypre_CSRMatrixI(B_offd),
                            hypre_CSRMatrixJ(B_offd),
                            beta,
                            hypre_CSRMatrixData(B_offd),
                            offd_B2C,
                            NULL,
                            &nnzC_offd,
                            &C_offd_i,
                            &C_offd_j,
                            &C_offd_a );

      hypre_TFree(tmp, HYPRE_MEMORY_DEVICE);

      C_offd = hypre_CSRMatrixCreate(hypre_CSRMatrixNumRows(A_offd), num_cols_offd_C, nnzC_offd);
      hypre_CSRMatrixI(C_offd)              = C_offd_i;
      hypre_CSRMatrixJ(C_offd)              = C_offd_j;
      hypre_CSRMatrixData(C_offd)           = C_offd_a;
      hypre_CSRMatrixMemoryLocation(C_offd) = HYPRE_MEMORY_DEVICE;
   }
   else
   {
      /* single rank: empty offd */
      C_offd = hypre_CSRMatrixCreate(hypre_CSRMatrixNumRows(A_offd), 0, 0);
      hypre_CSRMatrixInitialize_v2(C_offd, 0, HYPRE_MEMORY_DEVICE);
   }

   /* Create ParCSRMatrix C */
   hypre_ParCSRMatrix *C = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A),
                                                    hypre_ParCSRMatrixGlobalNumRows(A),
                                                    hypre_ParCSRMatrixGlobalNumCols(A),
                                                    hypre_ParCSRMatrixRowStarts(A),
                                                    hypre_ParCSRMatrixColStarts(A),
                                                    num_cols_offd_C,
                                                    hypre_CSRMatrixNumNonzeros(C_diag),
                                                    hypre_CSRMatrixNumNonzeros(C_offd));

   /* replace the placeholder diag/offd created above with the computed ones */
   hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(C));
   hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(C));
   hypre_ParCSRMatrixDiag(C) = C_diag;
   hypre_ParCSRMatrixOffd(C) = C_offd;

   if (num_cols_offd_C)
   {
      hypre_ParCSRMatrixDeviceColMapOffd(C) = d_col_map_offd_C;

      hypre_ParCSRMatrixColMapOffd(C) = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST);
      hypre_TMemcpy(hypre_ParCSRMatrixColMapOffd(C), d_col_map_offd_C, HYPRE_BigInt, num_cols_offd_C,
                    HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
   }

   hypre_ParCSRMatrixSetNumNonzeros(C);
   hypre_ParCSRMatrixDNumNonzeros(C) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(C);

   /* create CommPkg of C */
   hypre_MatvecCommPkgCreate(C);

   *C_ptr = C;

   return hypre_error_flag;
}

#endif // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)

/*--------------------------------------------------------------------------
 * hypre_ParCSRDiagScaleVector
 *
 * x = y ./ diag(A), where A_i[i] indexes the first (diagonal) entry of each
 * row of A's diag part. Device build calls the CUDA/HIP kernel; host build
 * uses an (optionally OpenMP-parallel) loop.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRDiagScaleVector( HYPRE_ParCSRMatrix HA,
                             HYPRE_ParVector    Hy,
                             HYPRE_ParVector    Hx )
{
   hypre_ParCSRMatrix *A = (hypre_ParCSRMatrix *) HA;
   hypre_ParVector    *y = (hypre_ParVector *) Hy;
   hypre_ParVector    *x = (hypre_ParVector *) Hx;

   HYPRE_Real *x_data = hypre_VectorData(hypre_ParVectorLocalVector(x));
   HYPRE_Real *y_data = hypre_VectorData(hypre_ParVectorLocalVector(y));
   HYPRE_Real *A_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(A));
   HYPRE_Int  *A_i    = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(A));
   HYPRE_Int   local_size = hypre_VectorSize(hypre_ParVectorLocalVector(x));
   HYPRE_Int   ierr = 0;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypreDevice_DiagScaleVector(local_size, A_i, A_data, y_data, 0.0, x_data);
   //hypre_SyncComputeStream(hypre_handle());
#else /* #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) */
   HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
   #pragma omp target teams distribute parallel for private(i) is_device_ptr(x_data,y_data,A_data,A_i)
#elif defined(HYPRE_USING_OPENMP)
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < local_size; i++)
   {
      /* NOTE(review): assumes the diagonal entry is stored first in each
       * row of A's diag part (A_data[A_i[i]]) — hypre's usual convention */
      x_data[i] = y_data[i] / A_data[A_i[i]];
   }
#endif /* #if defined(HYPRE_USING_CUDA) */

   return ierr;
}
1346.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4096x4096. */ #include "convolution-2d.h" /* Array initialization. */ static void init_array (int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj)) { // printf("Initializing Array\n"); int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) (i + j) / nj); } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]); if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_conv2d(int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; #pragma scop #pragma omp for (i = 1; i < _PB_NI - 1; ++i) { #pragma omp dist_schedule(static, #p11) for (j = 1; j < _PB_NJ - 1; ++j) { B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1] + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1] + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1]; } } #pragma endscop // printf("Kernal computation complete !!\n"); } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj); /* Initialize array(s). 
*/ init_array (ni, nj, POLYBENCH_ARRAY(A)); /* Start timer. */ //polybench_start_instruments; polybench_timer_start(); /* Run kernel. */ kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_timer_stop(); polybench_timer_print(); //polybench_stop_instruments; //polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
GrB_Descriptor_wait.c
//------------------------------------------------------------------------------ // GrB_Descriptor_wait: wait for a user-defined GrB_Descriptor to complete //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // In SuiteSparse:GraphBLAS, a user-defined GrB_Descriptor has no pending // operations to wait for. All this method does is verify that the descriptor // is properly initialized, and then it does an OpenMP flush. Note that unlike // other methods, passing in a NULL pointer, or a pointer to a NULL descriptor // is valid, since a NULL descriptor results in default settings. #include "GB.h" GrB_Info GrB_Descriptor_wait // no work, just check if GrB_Descriptor is valid ( GrB_Descriptor desc, GrB_WaitMode waitmode ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GB_WHERE1 ("GrB_Descriptor_wait (desc, waitmode)") ; if (desc != NULL) GB_RETURN_IF_FAULTY (desc) ; //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- #pragma omp flush return (GrB_SUCCESS) ; }
relu_kernel_arm.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: haitao@openailab.com
 */

#include "relu_kernel_arm.h"

#include <math.h>

#include <arm_neon.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Apply (leaky) ReLU to one slice of `step` floats using NEON, 4 lanes at a
 * time with a scalar tail.
 *   i      - unused here (thread-pool style signature; kept for the callers)
 *   id     - slice index; input/output are advanced by id*step floats
 *   data   - points to an int holding `step` (relu_arm_run passes &chan_size)
 *   slope  - 0 => plain ReLU max(x,0); otherwise leaky: x>0 ? x : slope*x
 * Always returns 0. */
static inline int relu_kernel(const int i, const int id, const void* data, const float* input, float* output,
                              const float slope)
{
    float32x4_t _zero = vdupq_n_f32(0.f);

    int step = (( int* )data)[0];   /* NOTE: casts away const on data */
    const float* cur_input = input + id * step;
    float* cur_output = output + id * step;
    if (slope == 0)
    {
        /* vectorized body: step & -4 == step rounded down to a multiple of 4 */
        for (int l = 0; l < (step & -4); l += 4)
        {
            float32x4_t _p = vld1q_f32(cur_input);
            _p = vmaxq_f32(_p, _zero);
            vst1q_f32(cur_output, _p);

            cur_input += 4;
            cur_output += 4;
        }
        /* scalar tail for the remaining 0..3 elements */
        for (int i = step & ~3; i < step; i++)
        {
            *cur_output++ = MAX(*cur_input++, 0.f);
        }
    }
    else
    {
        float32x4_t _slope = vdupq_n_f32(slope);
        for (int l = 0; l < (step & -4); l += 4)
        {
            float32x4_t _p = vld1q_f32(cur_input);
            // ri = ai <= bi ? 1...1:0...0
            uint32x4_t _lemask = vcleq_f32(_p, _zero);
            float32x4_t _ps = vmulq_f32(_p, _slope);
            // bitwise select
            /* lanes with p <= 0 take slope*p, others keep p (leaky ReLU) */
            _p = vbslq_f32(_lemask, _ps, _p);
            vst1q_f32(cur_output, _p);

            cur_input += 4;
            cur_output += 4;
        }
        /* scalar tail: max(x,0) + slope*min(x,0) == leaky ReLU */
        for (int i = step & ~3; i < step; i++)
        {
            *cur_output++ = MAX(cur_input[0], 0.f) + slope * MIN(cur_input[0], 0.f);
            cur_input++;
        }
    }

    return 0;
}

/* Run (leaky) ReLU over the whole tensor, channel by channel.
 * Assumes a 4-D layout where dims[0]*dims[1] counts channels and
 * dims[2]*dims[3] is the per-channel size — presumably NCHW; verify against
 * callers, as dims length is not checked here.
 * num_thread is currently unused (the omp pragma is commented out).
 * Always returns 0. */
int relu_arm_run(struct tensor* output_tensor, struct tensor* input_tensor, struct relu_param* relu_param,
                 int num_thread)
{
    float* data = ( float* )input_tensor->data;
    float* out_data = ( float* )output_tensor->data;
    float negativeslope = relu_param->negative_slope;

    int chan_num = input_tensor->dims[0] * input_tensor->dims[1];
    int chan_size = input_tensor->dims[2] * input_tensor->dims[3];

    // #pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < chan_num; i++)
    {
        /* id=0 with an explicit offset: each call handles one channel */
        int offset = i * chan_size;
        relu_kernel(0, 0, &chan_size, data + offset, out_data + offset, negativeslope);
    }

    return 0;
}
terrain.c
#include "oilchange.h"

/* Terrain state.  hmap is the raw midpoint-displacement heightmap, hmap2 is
 * the smoothed/post-processed version actually used by gen_chunk.
 * NOTE: all RAND* macros below consume a local `seed` variable declared in
 * scope (see SEED2/SEED4 uses), so the exact order of RAND* calls is part of
 * the world-generation contract — do not reorder them. */
float hmap[TILESW][TILESD];
float hmap2[TILESW][TILESD];
int tscootx, tscootz, tchunk_scootx, tchunk_scootz;

/* Recursive midpoint-displacement fill of hmap over the rectangle
 * [x0..x2] x [z0..z2] (corners inclusive).  A cell value of 0 means
 * "not yet set".  Randomness is keyed off the rectangle coordinates
 * (SEED4), so the same region always generates the same heights. */
void gen_hmap(int x0, int x2, int z0, int z2)
{
        unsigned seed = SEED4(x0, x2, z0, z2);

        // pick corners if they aren't set
        if (hmap[x0][z0] == 0) hmap[x0][z0] = RANDI(64, 127);
        if (hmap[x0][z2] == 0) hmap[x0][z2] = RANDI(64, 127);
        if (hmap[x2][z0] == 0) hmap[x2][z0] = RANDI(64, 127);
        if (hmap[x2][z2] == 0) hmap[x2][z2] = RANDI(64, 127);

        int x1 = (x0 + x2) / 2;
        int z1 = (z0 + z2) / 2;
        int w = (x2 - x0) / 4;
        int d = (z2 - z0) / 4;
        w = w ? w : 1;
        d = d ? d : 1;
        float d2 = d / 2.f;
        /* r gates the random perturbation: small rectangles (w <= 2) get
           plain averaging, larger ones get displaced midpoints */
        float r = w > 2 ? 1.f : 0.f;

        // edges middles
        if (!hmap[x0][z1]) hmap[x0][z1] = (hmap[x0][z0] + hmap[x0][z2]) / 2.f + r * RANDF(-d2, d2);
        if (!hmap[x2][z1]) hmap[x2][z1] = (hmap[x2][z0] + hmap[x2][z2]) / 2.f + r * RANDF(-d2, d2);
        if (!hmap[x1][z0]) hmap[x1][z0] = (hmap[x0][z0] + hmap[x2][z0]) / 2.f + r * RANDF(-d2, d2);
        if (!hmap[x1][z2]) hmap[x1][z2] = (hmap[x0][z2] + hmap[x2][z2]) / 2.f + r * RANDF(-d2, d2);

        // middle middle
        hmap[x1][z1] = (hmap[x0][z1] + hmap[x2][z1] + hmap[x1][z0] + hmap[x1][z2]) / 4.f + r * RANDF(-d, d);

        // recurse if there are any unfilled spots
        if(x1 - x0 > 1 || x2 - x1 > 1 || z1 - z0 > 1 || z2 - z1 > 1)
        {
                gen_hmap(x0, x1, z0, z1);
                gen_hmap(x0, x1, z1, z2);
                gen_hmap(x1, x2, z0, z1);
                gen_hmap(x1, x2, z1, z2);
        }
}

/* Post-process hmap into hmap2: box-blur with a noise-chosen radius, then
 * shape beaches and high plateaus from several noise octaves.  Pure
 * function of hmap + noise(); writes only hmap2. */
void smooth_hmap()
{
        for (int x = 0; x < TILESW; x++) for (int z = 0; z < TILESD; z++)
        {
                float p365 = noise(x, 0, -z, 365);
                /* blur radius 1..3 picked per-column from low-freq noise */
                int radius = p365 < 0.0f ? 3 :
                             p365 < 0.2f ? 2 :
                                           1;
                int x0 = x - radius;
                int x1 = x + radius + 1;
                int z0 = z - radius;
                int z1 = z + radius + 1;
                CLAMP(x0, 0, TILESW-1);
                CLAMP(x1, 0, TILESW-1);
                CLAMP(z0, 0, TILESD-1);
                CLAMP(z1, 0, TILESD-1);

                /* integer box average of hmap over the clamped window */
                int sum = 0, n = 0;
                for (int i = x0; i < x1; i++) for (int j = z0; j < z1; j++)
                {
                        sum += hmap[i][j];
                        n++;
                }
                int res = sum / n;

                float p800 = noise(x, 0, z, 800);
                float p777 = noise(z, 0, x, 777);
                float p301 = noise(x, 0, z, 301);
                float p204 = noise(x, 0, z, 204);
                float p33 = noise(x, 0, z, 32 * (1.1 + p301));
                float swoosh = p33 > 0.3 ? (10 - 30 * (p33 - 0.3)) : 0;
                float times = (p204 * 20.f) + 30.f;
                float plus = (-p204 * 40.f) + 60.f;
                CLAMP(times, 20.f, 40.f);
                CLAMP(plus, 40.f, 80.f);
                /* beach altitude varies 90..100 with noise */
                int beach_ht = (1.f - p777) * times + plus;
                CLAMP(beach_ht, 90, 100);

                if (res > beach_ht) // beaches
                {
                        /* compress terrain just above the beach line;
                           keep tall terrain tall (shifted down 18) */
                        if (res > beach_ht + 21)
                                res -= 18;
                        else
                                res = ((res - beach_ht) / 7) + beach_ht;
                }

                float s = (1 + p204) * 0.2;
                if (p800 > 0.0 + s)
                {
                        /* blend toward a 102-high plateau; swoosh carves a
                           1-block lip (101) at the plateau edge */
                        float t = (p800 - 0.0 - s) * 10;
                        CLAMP(t, 0.f, 1.f);
                        res = lerp(t, res, 102);
                        if (res == 102 && swoosh) res = 101;
                }

                hmap2[x][z] = res < TILESH - 1 ? res : TILESH - 1;
        }
}

/* Build the full heightmap: run midpoint displacement on an 8x8 grid of
 * pieces (so seeds stay local), then smooth the result into hmap2. */
void create_hmap()
{
        // generate in pieces
        for (int i = 0; i < 8; i++) for (int j = 0; j < 8; j++)
        {
                int x0 = (i  ) * TILESW / 8;
                int x1 = (i+1) * TILESW / 8;
                int z0 = (j  ) * TILESD / 8;
                int z1 = (j+1) * TILESD / 8;
                CLAMP(x1, 0, TILESW-1);
                CLAMP(z1, 0, TILESD-1);
                gen_hmap(x0, x1, z0 , z1);
        }

        smooth_hmap();
}

/* Generate one chunk of the voxel world in the tile range
 * [xlo..xhi) x [zlo..zhi): fill columns from hmap2 with strata/caves/
 * plateaus, carve bezier cave systems, plant trees, then compute ground
 * height and initial sunlight.  Columns are generated at most once
 * (column_already_generated).  Several passes are OpenMP-parallel over x. */
void gen_chunk(int xlo, int xhi, int zlo, int zhi)
{
        CLAMP(xlo, 0, TILESW-1);
        CLAMP(xhi, 0, TILESW-1);
        CLAMP(zlo, 0, TILESD-1);
        CLAMP(zhi, 0, TILESD-1);

        static char column_already_generated[TILESW][TILESD];

        int x;
        #pragma omp parallel for
        for (x = xlo; x < xhi; x++) for (int z = zlo; z < zhi; z++)
        {
                if (x == xlo && z == zlo) omp_threads = omp_get_num_threads();

                if (column_already_generated[x][z])
                        continue;

                column_already_generated[x][z] = true;

                float p1080 = noise(x, 0, -z, 1080);
                float p530 = noise(z, 0, x, 530);
                float p630 = noise(-z, 0, x, 629);
                float p200 = noise(x, 0, z, 200);
                float p80 = noise(x, 0, z, 80);
                float p15 = noise(z, 0, -x, 15);
                //float p5 = noise(-x, 0, z, 5);

                if (p200 > 0.2f)
                {
                        /* flatten terrain toward height 100 in these areas */
                        float flatten = (p200 - 0.2f) * 80;
                        CLAMP(flatten, 1, 12);
                        hmap2[x][z] -= 100;
                        hmap2[x][z] /= flatten;
                        hmap2[x][z] += 100;
                }

                int solid_depth = 0;
                int slicey_bit = false;
                int plateau_bit = false;
                int mode = p1080 > 0 ? 1 : 10;

                /* walk the column top-down in y, choosing a tile per cell */
                for (int y = 0; y < TILESH; y++)
                {
                        if (y == TILESH - 1) { TT_(x, y, z) = HARD; continue; } /* bedrock */

                        float p300 = noise(x, y, z, 300);
                        float p32 = noise(x, y*mode, z, 16 + 16 * (1.1 + p300));
                        float plat = p32 > 0.3 ? (10 - 30 * (p32 * p32 * p32 - 0.3)) : 0;
                        float p90 = noise(x, y, z, 90);
                        float p91 = noise(x+1000, y+1000, z+1000, 91);
                        float p42 = noise(x, y*(p300 + 1), z, 42);
                        float p9 = noise(x, y*0.05, z, 9);
                        float p2 = noise(-z, y, x, 2);

                        if (p300 + fabsf(p80) * 0.25 + p15 * 0.125 < -0.5) { plat = -plat; }
                        else if (p300 < 0.5) { plat = 0; }

                        int cave = (p90 < -0.24 || p91 < -0.24) && (p42 > 0.5 && p9 < 0.4);

                        if (y > hmap2[x][z] - ((p80 + 1) * 20)
                                        && p90 > 0.4 && p91 > 0.4
                                        && p42 > 0.01 && p42 < 0.09
                                        && p300 > 0.3)
                                slicey_bit = true;

                        int platted = y < hmap2[x][z] + plat * (mode * 0.125f + 0.875f);

                        if ((cave || platted) && !plateau_bit)
                        {
                                unsigned seed = SEED2(x, z);
                                if (!slicey_bit || RANDP(5))
                                {
                                        int type = (y > 100 && hmap2[x][z] > 99) ? WATR : OPEN; //only allow water below low heightmap
                                        TT_(x, y, z) = type;
                                        solid_depth = 0;
                                        slicey_bit = false;
                                        goto out;
                                }
                        }
                        else
                        {
                                if (mode == 10 && plat && !cave && y < hmap2[x][z])
                                        plateau_bit = true;
                                slicey_bit = false;
                        }

                        solid_depth++;
                        float p16 = noise(x, y, z, 16);
                        int slv = 76 + p530 * 20;   /* stone level */
                        int dlv = 86 + p630 * 20;   /* dirt level  */
                        int ore = p2 > 0.4f ? ORE : OREH;
                        int ston = p42 > 0.4f && p9 < -0.3f ? ore : STON;
                        if (slicey_bit)                     TT_(x, y, z) = p9 > 0.4f ? HARD : SAND;
                        else if (solid_depth > 14 + 5 * p9) TT_(x, y, z) = GRAN;
                        else if (y < slv - 5 * p16)         TT_(x, y, z) = ston;
                        else if (y < dlv - 5 * p16)         TT_(x, y, z) = p80 > (-solid_depth * 0.1f) ? DIRT : OPEN; // erosion
                        else if (y < 100 - 5 * p16)         TT_(x, y, z) = solid_depth == 1 ? GRAS : DIRT;
                        else if (y < 120          )         TT_(x, y, z) = solid_depth < 4 + 5 * p9 ? SAND : ston;
                        else                                TT_(x, y, z) = HARD;

                        out: ;
                }
        }

        // find nearby bezier curvy caves
        #define REGW (CHUNKW*16)
        #define REGD (CHUNKD*16)
        // find region        ,-- have to add 1 bc we're overdrawing chunks
        // lower bound       /
        int rxlo = (int)((xlo+1) / REGW) * REGW;
        int rzlo = (int)((zlo+1) / REGD) * REGD;
        unsigned seed = SEED2(rxlo, rzlo);   /* region-deterministic caves */

        // find region center
        int rxcenter = rxlo + REGW/2;
        int rzcenter = rzlo + REGD/2;

        struct point PC = (struct point){rxcenter, TILESH - RANDI(1, 25), rzcenter};
        struct point P0;
        struct point P1;
        struct point P2;
        struct point P3 = PC;
        int nr_caves = RANDI(0, 100);

        // cave system stretchiness
        int sx = RANDI(10, 60);
        int sy = RANDI(10, 60);
        int sz = RANDI(10, 60);

        #define MAX_CAVE_POINTS 10000
        #define QCAVE(x,y,z,radius_sq) ((struct qcave){x, y, z, radius_sq})
        struct qcave cave_points[MAX_CAVE_POINTS];
        int cave_p_len = 0;

        /* sample points along chained cubic bezier curves; only points that
           fall inside this chunk are kept */
        for (int i = 0; i < nr_caves; i++)
        {
                // random walk from center of region, or end of last curve
                P0 = RANDP(33) ? PC : P3;
                P1 = (struct point){P0.x + RANDI(-sx, sx), P0.y + RANDI(-sy, sy), P0.z + RANDI(-sz, sz)};
                P2 = (struct point){P1.x + RANDI(-sx, sx), P1.y + RANDI(-sy, sy), P1.z + RANDI(-sz, sz)};
                P3 = (struct point){P2.x + RANDI(-sx, sx), P2.y + RANDI(-sy, sy), P2.z + RANDI(-sz, sz)};

                float root_radius = 0.f, delta = 0.f;

                for (float t = 0.f; t <= 1.f; t += 0.001f)
                {
                        if (cave_p_len >= MAX_CAVE_POINTS) break;

                        /* radius drifts slowly, re-rolled rarely */
                        if (root_radius == 0.f || RANDP(0.002f))
                        {
                                root_radius = RAND01;
                                delta = RANDF(-0.001f, 0.001f);
                        }
                        root_radius += delta;
                        float radius_sq = root_radius * root_radius * root_radius * root_radius * 50.f;
                        CLAMP(radius_sq, 1.f, 50.f);

                        /* cubic bezier evaluation at parameter t */
                        float s = 1.f - t;
                        int x = (int)(s*s*s*P0.x + 3.f*t*s*s*P1.x + 3.f*t*t*s*P2.x + t*t*t*P3.x);
                        int y = (int)(s*s*s*P0.y + 3.f*t*s*s*P1.y + 3.f*t*t*s*P2.y + t*t*t*P3.y);
                        int z = (int)(s*s*s*P0.z + 3.f*t*s*s*P1.z + 3.f*t*t*s*P2.z + t*t*t*P3.z);

                        // TODO: don't store duplicate cave points?
                        /* NOTE(review): the y bound uses TILESD - 1; given y is
                           a height it looks like it should be TILESH - 1 — confirm */
                        if (x >= xlo && x <= xhi && y >= 0 && y <= TILESD - 1 && z >= zlo && z <= zhi)
                                cave_points[cave_p_len++] = QCAVE(x, y, z, radius_sq);
                }
        }

        // carve caves
        #pragma omp parallel for
        for (x = xlo; x < xhi; x++) for (int z = zlo; z < zhi; z++) for (int y = 0; y < TILESH-2; y++)
                for (int i = 0; i < cave_p_len; i++)
                {
                        int dist_sq = DIST_SQ(cave_points[i].x - x, cave_points[i].y - y, cave_points[i].z - z);
                        if (dist_sq <= cave_points[i].radius_sq)
                        {
                                TT_(x, y, z) = OPEN;
                                break;
                        }
                }

        // correcting pass over middle, contain floating water
        #pragma omp parallel for
        for (x = xlo+1; x < xhi-1; x++) for (int z = zlo+1; z < zhi-1; z++) for (int y = 100; y < TILESH-2; y++)
        {
                if (TT_(x, y, z) == WATR)
                {
                        /* water adjacent to open air would "float": wall it off */
                        if (TT_(x  , y  , z-1) == OPEN ||
                            TT_(x  , y  , z+1) == OPEN ||
                            TT_(x-1, y  , z  ) == OPEN ||
                            TT_(x+1, y  , z  ) == OPEN ||
                            TT_(x  , y+1, z  ) == OPEN)
                                TT_(x, y, z) = WOOD;
                }
        }

        // trees?
        float p191 = noise(zlo, 0, xlo, 191);
        seed = SEED2(xlo, zlo);
        if (p191 > 0.2f) while (RANDP(95))
        {
                char leaves = RANDBOOL ? RLEF : YLEF;
                float radius = RANDF(1.f, 4.f);
                int x = xlo + CHUNKW/2 + RANDI(-5, 5);
                int z = zlo + CHUNKD/2 + RANDI(-5, 5);
                /* drop down to the first grass/dirt surface, grow a trunk
                   upward from it, then a leaf ball around the trunk top */
                for (int y = 10; y < TILESH-2; y++)
                {
                        if (TT_(x, y, z) == OPEN) continue;
                        if (TT_(x, y, z) != GRAS && TT_(x, y, z) != DIRT) break;

                        int yy = y;
                        for (; yy >= y - RANDI(3, 8); yy--)
                                TT_(x, yy, z) = WOOD;

                        int ymax = yy + RANDI(2, 4);
                        for (int i = x-3; i <= x+3; i++)
                                for (int j = yy-3; j <= ymax; j++)
                                        for (int k = z-3; k <= z+3; k++)
                                        {
                                                float dist = (i-x) * (i-x) + (j-yy) * (j-yy) + (k-z) * (k-z);
                                                if (TT_(i, j, k) == OPEN && dist < radius * radius)
                                                        TT_(i, j, k) = leaves;
                                        }

                        break;
                }
        }

        // cleanup gndheight and set initial lighting
        #pragma omp parallel for
        for (x = xlo+1; x < xhi-1; x++) for (int z = zlo+1; z < zhi-1; z++)
        {
                int above_ground = true;
                int light_level = 15;   /* full sunlight above ground */
                int wet = false;
                for (int y = 0; y < TILESH-1; y++)
                {
                        if (above_ground && IS_OPAQUE(x, y, z))
                        {
                                TGNDH_(x, z) = y;   /* first opaque tile = ground height */
                                above_ground = false;
                                if (y)
                                {
                                        TSUN_(x, y-1, z) = 0;
                                        sun_enqueue(x, y-1, z, 0, light_level);
                                }
                                light_level = 0;
                        }

                        /* propagate water downward into open cells until a
                           solid tile is hit; water dims light by 2 per level */
                        if (wet && TT_(x, y, z) == OPEN)
                                TT_(x, y, z) = WATR;

                        if (wet && IS_SOLID(x, y, z))
                                wet = false;

                        if (TT_(x, y, z) == WATR)
                        {
                                wet = true;
                                if (light_level) light_level--;
                                if (light_level) light_level--;
                        }

                        TSUN_(x, y, z) = light_level;
                }
        }

        recalc_corner_lighting(xlo, xhi, zlo, zhi);
}

// update terrain worker thread(s) copies of scoot vars
void terrain_apply_scoot()
{
        /* copy the shared future_scoot* values into the terrain-thread-local
           globals under a critical section */
        #pragma omp critical
        {
                tscootx = future_scootx * CHUNKW;
                tscootz = future_scootz * CHUNKD;
                tchunk_scootx = future_scootx;
                tchunk_scootz = future_scootz;
        }
}

// on its own thread, loops forever building chunks when needed
void chunk_builder()
{
        for(;;)
        {
                terrain_apply_scoot();

                int best_x = 0, best_z = 0;
                int px = (player[0].pos.x / BS + CHUNKW2) / CHUNKW;
                int pz = (player[0].pos.z / BS + CHUNKD2) / CHUNKD;
                CLAMP(px, 0, VAOW-1);
                CLAMP(pz, 0, VAOD-1);

                // find nearest ungenerated chunk
                int best_dist = 99999999;
                for (int x = 0; x < VAOW; x++) for (int z = 0; z < VAOD; z++)
                {
                        if (TAGEN_(x, z)) continue;
                        int dist_sq = (x - px) * (x - px) + (z - pz) * (z - pz);
                        if (dist_sq < best_dist)
                        {
                                best_dist = dist_sq;
                                best_x = x;
                                best_z = z;
                        }
                }

                if (best_dist == 99999999)
                {
                        /* everything generated: idle briefly and re-check */
                        SDL_Delay(1);
                        continue;
                }

                int xlo = best_x * CHUNKW;
                int zlo = best_z * CHUNKD;
                int xhi = xlo + CHUNKW;
                int zhi = zlo + CHUNKD;

                int ticks_before = SDL_GetTicks();
                /* overdraw by one tile on each side so chunk borders match */
                gen_chunk(xlo-1, xhi+1, zlo-1, zhi+1);
                nr_chunks_generated++;
                chunk_gen_ticks += SDL_GetTicks() - ticks_before;

                TAGEN_(best_x, best_z) = true;

                /* publish the finished chunk to the consumer under a lock */
                #pragma omp critical
                {
                        just_generated[just_gen_len].x = best_x;
                        just_generated[just_gen_len].z = best_z;
                        just_gen_len++;
                }
        }
}
GB_unop__tgamma_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__tgamma_fp64_fp64
// op(A') function:  GB_unop_tran__tgamma_fp64_fp64

// C type:   double
// A type:   double
// cast:     double cij = aij
// unaryop:  cij = tgamma (aij)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = tgamma (x) ;

// casting
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ;       \
    Cx [pC] = tgamma (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_TGAMMA || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx [p] = tgamma (Ax [p]) for all p in 0..anz-1, parallel over nthreads.
// If Ab is non-NULL (bitmap case), entries with Ab [p] == 0 are skipped.
// Returns GrB_NO_VALUE when this operator/type is compiled out (GB_DISABLE).
GrB_Info GB_unop_apply__tgamma_fp64_fp64
(
    double *Cx,         // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a parallel memcpy suffices
        // (dead code here since GB_OP_IS_IDENTITY_WITH_NO_TYPECAST is 0)
        GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = tgamma (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = tgamma (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body is shared across all generated unary ops: it is
// textually included from GB_unop_transpose.c and driven by the GB_* macros
// defined above.
GrB_Info GB_unop_tran__tgamma_fp64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
rfw_random.h
/* Algorithm for Steiner Problem in Graphs Copyright (c) Microsoft Corporation All rights reserved. MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #pragma once /* C++ Wrapper written by Microsoft Corporation on original C code by Nishimura and Matsumoto. Original header follows. */ /* A C-program for MT19937, with initialization improved 2002/1/26. Coded by Takuji Nishimura and Makoto Matsumoto. Before using, initialize the state by using init_genrand(seed) or init_by_array(init_key, key_length). Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura, All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of its contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Any feedback is very welcome. 
   http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html
   email: m-mat @ math.sci.hiroshima-u.ac.jp (remove space)
*/

#ifndef RFW_RANDOM_H
#define RFW_RANDOM_H

#include <cstdlib>
#include <cstdio>

// RFWRandom: a Mersenne Twister (MT19937) PRNG with *global* (static) state.
// All generation goes through the shared mt[]/mti arrays, so it is not
// thread-safe; use RFWLocalRandom (below) for per-object state.
// The static data members are defined in a separate translation unit.
class RFWRandom {
	public:
		static const unsigned long int maxvalue;   // largest value genrand_int32 can return (2^32-1)
		/* Period parameters */
		enum {N=624, M=397};
		static const unsigned long int MATRIX_A;   /* constant vector a */
		static const unsigned long int UPPER_MASK; /* most significant w-r bits */
		static const unsigned long int LOWER_MASK; /* least significant r bits */

	private:
		static unsigned long mt[N]; /* the array for the state vector  */
		static int mti; /* mti==N+1 means mt[N] is not initialized */

		/* initializes mt[N] with a seed */
		static void init_genrand(unsigned long s) {
			mt[0]= s & 0xffffffffUL;
			for (mti=1; mti<N; mti++) {
				mt[mti] = (1812433253UL * (mt[mti-1] ^ (mt[mti-1] >> 30)) + mti);
				/* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */
				/* In the previous versions, MSBs of the seed affect   */
				/* only MSBs of the array mt[].                        */
				/* 2002/01/09 modified by Makoto Matsumoto             */
				mt[mti] &= 0xffffffffUL;
				/* for >32 bit machines */
			}
		}

		/* generates a random number on [0,0xffffffff]-interval */
		static unsigned long genrand_int32(void) {
			unsigned long y;
			static unsigned long mag01[2]={0x0UL, MATRIX_A};
			/* mag01[x] = x * MATRIX_A  for x=0,1 */

			if (mti >= N) { /* generate N words at one time */
				int kk;

				if (mti == N+1)   /* if init_genrand() has not been called, */
					init_genrand(5489UL); /* a default initial seed is used */

				for (kk=0;kk<N-M;kk++) {
					y = (mt[kk]&UPPER_MASK)|(mt[kk+1]&LOWER_MASK);
					mt[kk] = mt[kk+M] ^ (y >> 1) ^ mag01[y & 0x1UL];
				}
				for (;kk<N-1;kk++) {
					y = (mt[kk]&UPPER_MASK)|(mt[kk+1]&LOWER_MASK);
					mt[kk] = mt[kk+(M-N)] ^ (y >> 1) ^ mag01[y & 0x1UL];
				}
				y = (mt[N-1]&UPPER_MASK)|(mt[0]&LOWER_MASK);
				mt[N-1] = mt[M-1] ^ (y >> 1) ^ mag01[y & 0x1UL];

				mti = 0;
			}

			y = mt[mti++];

			/* Tempering */
			y ^= (y >> 11);
			y ^= (y << 7) & 0x9d2c5680UL;
			y ^= (y << 15) & 0xefc60000UL;
			y ^= (y >> 18);

			return y;
		}

	public:
		//constructors
		// NOTE: constructing an instance reseeds the *shared* static state
		// with seed 1.
		RFWRandom () {randomize(1);}

		//randomize procedures
		// seed 0 is remapped to 1 (an all-zero MT state would be degenerate)
		static void randomize (unsigned long s) {
			if (s==0) s = 1;
			init_genrand(s);
		}

		static unsigned long getRand() {return genrand_int32();}

		//pick an integer uniformly at random between inf and sup (both inclusive)
		// Uses rejection sampling to avoid modulo bias.
		static int getInteger (int inf, int sup) {
			if (sup<=inf) return inf;
			unsigned long range, minallowed, u;
			range = (unsigned long)(sup-inf+1); //number of values allowed
			minallowed = (maxvalue % range) + 1; //restrict search space to avoid small numbers
			if (minallowed==range) minallowed = 0;
			do {u = getRand();} //repeat until a good number is found
			while (u < minallowed);
			return (inf + (int)(u % range)); //return a number in the range
		}

		static float getFloat () {return (float)getDouble();} //get a float number in [0;1]
		static double getDouble() {return getDoubleClosed();} //double in the range [0;1]
		static double getDoubleClosed() {return ((double)getRand()/(double)maxvalue);} //double in the range [0;1]
		static double getDoubleOpen() {return ((double)getRand()/((double)(maxvalue)+1.0));} //double in the range [0;1)
		static bool getBool () {return (getRand() & 1);}
};

// RFWLocalRandom: same MT19937 algorithm as RFWRandom but with per-instance
// state (mt/mti are non-static members), so independent instances can be used
// concurrently.  Period parameters and maxvalue are borrowed from RFWRandom.
class RFWLocalRandom {
	private:
		//static const unsigned long int maxvalue;
		/* Period parameters */
		//enum {N=624, M=397};
		//static const unsigned long int MATRIX_A;   /* constant vector a */
		//static const unsigned long int UPPER_MASK; /* most significant w-r bits */
		//static const unsigned long int LOWER_MASK; /* least significant r bits */

		unsigned long mt[RFWRandom::N]; /* the array for the state vector  */
		int mti; /* mti==N+1 means mt[N] is not initialized */

		/* initializes mt[N] with a seed */
		void init_genrand(unsigned long s) {
			mt[0]= s & 0xffffffffUL;
			for (mti=1; mti<RFWRandom::N; mti++) {
				mt[mti] = (1812433253UL * (mt[mti-1] ^ (mt[mti-1] >> 30)) + mti);
				/* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */
				/* In the previous versions, MSBs of the seed affect   */
				/* only MSBs of the array mt[].                        */
				/* 2002/01/09 modified by Makoto Matsumoto             */
				mt[mti] &= 0xffffffffUL;
				/* for >32 bit machines */
			}
		}

		/* generates a random number on [0,0xffffffff]-interval */
		unsigned long genrand_int32(void) {
			unsigned long y;
			unsigned long mag01[2]={0x0UL, RFWRandom::MATRIX_A};
			/* mag01[x] = x * MATRIX_A  for x=0,1 */

			if (mti >= RFWRandom::N) { /* generate N words at one time */
				int kk;

				if (mti == RFWRandom::N+1)   /* if init_genrand() has not been called, */
					init_genrand(5489UL); /* a default initial seed is used */

				for (kk=0;kk<RFWRandom::N-RFWRandom::M;kk++) {
					y = (mt[kk]&RFWRandom::UPPER_MASK)|(mt[kk+1]&RFWRandom::LOWER_MASK);
					mt[kk] = mt[kk+RFWRandom::M] ^ (y >> 1) ^ mag01[y & 0x1UL];
				}
				for (;kk<RFWRandom::N-1;kk++) {
					y = (mt[kk]&RFWRandom::UPPER_MASK)|(mt[kk+1]&RFWRandom::LOWER_MASK);
					mt[kk] = mt[kk+(RFWRandom::M-RFWRandom::N)] ^ (y >> 1) ^ mag01[y & 0x1UL];
				}
				y = (mt[RFWRandom::N-1]&RFWRandom::UPPER_MASK)|(mt[0]&RFWRandom::LOWER_MASK);
				mt[RFWRandom::N-1] = mt[RFWRandom::M-1] ^ (y >> 1) ^ mag01[y & 0x1UL];

				mti = 0;
			}

			y = mt[mti++];

			/* Tempering */
			y ^= (y >> 11);
			y ^= (y << 7) & 0x9d2c5680UL;
			y ^= (y << 15) & 0xefc60000UL;
			y ^= (y >> 18);

			return y;
		}

	public:
		//constructors
		RFWLocalRandom () {Randomize(1);}
		RFWLocalRandom (unsigned long s) {Randomize(s);}

		// seeds this instance from the global RFWRandom stream; the critical
		// section is currently disabled (pragma commented out), so concurrent
		// callers race on the global state — see note on RFWRandom above
		void CriticalRandomize() {
			//#pragma omp critical
			{
				//fprintf (stderr, "cr");
				Randomize((unsigned int)RFWRandom::getInteger(0,2000000000));
			}
		}

		void Randomize() {
			Randomize((unsigned int)RFWRandom::getInteger(0,2000000000));
		}

		//randomize procedures
		void Randomize (unsigned long s) {
			if (s==0) s = 1;
			init_genrand(s);
		}

		unsigned long GetRand() {return genrand_int32();}

		//pick an integer uniformly at random between inf and sup (both inclusive)
		// rejection sampling, as in RFWRandom::getInteger
		int GetInteger (int inf, int sup) {
			if (sup<=inf) return inf;
			unsigned long range, minallowed, u;
			range = (unsigned long)(sup-inf+1); //number of values allowed
			minallowed = (RFWRandom::maxvalue % range) + 1; //restrict search space to avoid small numbers
			if (minallowed==range) minallowed = 0;
			do {u = GetRand();} //repeat until a good number is found
			while (u < minallowed);
			return (inf + (int)(u % range)); //return a number in the range
		}

		float GetFloat () {return (float)GetDouble();} //get a float number in [0;1]
		double GetDouble() { return GetDoubleClosed(); } //double in the range [0;1]
		//double GetDouble() { double r = GetDoubleClosed(); fprintf(stderr, "<<< %.10f : %.10f : %d >>>", r, (double)RFWRandom::maxvalue, sizeof(RFWRandom::maxvalue)); return r; } //double in the range [0;1]
		double GetDoubleClosed() { return ((double)GetRand() / (double)RFWRandom::maxvalue); } //double in the range [0;1]
		double GetDoubleOpen() {return ((double)GetRand()/((double)(RFWRandom::maxvalue)+1.0));} //double in the range [0;1)
		bool GetBool () {return (GetRand() & 1);}
};

#endif
GB_binop__bset_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bset_uint8 // A.*B function (eWiseMult): GB_AemultB__bset_uint8 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__bset_uint8 // C+=b function (dense accum): GB_Cdense_accumb__bset_uint8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bset_uint8 // C=scalar+B GB_bind1st__bset_uint8 // C=scalar+B' GB_bind1st_tran__bset_uint8 // C=A+scalar GB_bind2nd__bset_uint8 // C=A'+scalar GB_bind2nd_tran__bset_uint8 // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = GB_BITSET (aij, bij, uint8_t, 8) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define 
GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = GB_BITSET (x, y, uint8_t, 8) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BSET || GxB_NO_UINT8 || GxB_NO_BSET_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bset_uint8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bset_uint8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bset_uint8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool 
A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__bset_uint8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bset_uint8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT 
TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bset_uint8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t bij = Bx [p] ; Cx [p] = GB_BITSET (x, bij, uint8_t, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bset_uint8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; Cx [p] = GB_BITSET (aij, y, uint8_t, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define 
GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = GB_BITSET (x, aij, uint8_t, 8) ; \ } GrB_Info GB_bind1st_tran__bset_uint8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = GB_BITSET (aij, y, uint8_t, 8) ; \ } GrB_Info GB_bind2nd_tran__bset_uint8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
rose_livenessTest.c
#include "omp.h" typedef double real8; void foo(real8 *y,real8 *d__,real8 *d11,real8 *d12,real8 *d13,real8 *d22,real8 *d23,real8 *d33,real8 *m,int *nell,real8 *p,int t,int flagB,int flagA,int ub) { int l; int nel; int t1 = t - 1; if (flagB == 0) { for (l = 0; l <= ub - 1; l += 1) { int l8 = l * 8; int l36 = l * 36; real8 h12 = m[(l8 + 0) * 4 + 1]; real8 h13 = m[(l8 + 0) * 4 + 2]; real8 h14 = m[(l8 + 0) * 4 + 3]; real8 h22 = m[(l8 + 1) * 4 + 1]; real8 h23 = m[(l8 + 1) * 4 + 2]; real8 h24 = m[(l8 + 1) * 4 + 3]; real8 h32 = m[(l8 + 2) * 4 + 1]; real8 h33 = m[(l8 + 2) * 4 + 2]; real8 h34 = m[(l8 + 2) * 4 + 3]; real8 h42 = m[(l8 + 3) * 4 + 1]; real8 h43 = m[(l8 + 3) * 4 + 2]; real8 h44 = m[(l8 + 3) * 4 + 3]; real8 h52 = m[(l8 + 4) * 4 + 1]; real8 h53 = m[(l8 + 4) * 4 + 2]; real8 h54 = m[(l8 + 4) * 4 + 3]; real8 h62 = m[(l8 + 5) * 4 + 1]; real8 h63 = m[(l8 + 5) * 4 + 2]; real8 h64 = m[(l8 + 5) * 4 + 3]; real8 h72 = m[(l8 + 6) * 4 + 1]; real8 h73 = m[(l8 + 6) * 4 + 2]; real8 h74 = m[(l8 + 6) * 4 + 3]; real8 h82 = m[(l8 + 7) * 4 + 1]; real8 h83 = m[(l8 + 7) * 4 + 2]; real8 h84 = m[(l8 + 7) * 4 + 3]; real8 ddd = d__[l]; y[l36 + 0] += ddd * (h12 * h12 + h13 * h13 + h14 * h14); y[l36 + 1] += ddd * (h12 * h22 + h13 * h23 + h14 * h24); y[l36 + 2] += ddd * (h22 * h22 + h23 * h23 + h24 * h24); y[l36 + 3] += ddd * (h12 * h32 + h13 * h33 + h14 * h34); y[l36 + 4] += ddd * (h22 * h32 + h23 * h33 + h24 * h34); y[l36 + 5] += ddd * (h32 * h32 + h33 * h33 + h34 * h34); y[l36 + 6] += ddd * (h12 * h42 + h13 * h43 + h14 * h44); y[l36 + 7] += ddd * (h22 * h42 + h23 * h43 + h24 * h44); y[l36 + 8] += ddd * (h32 * h42 + h33 * h43 + h34 * h44); y[l36 + 9] += ddd * (h42 * h42 + h43 * h43 + h44 * h44); y[l36 + 10] += ddd * (h12 * h52 + h13 * h53 + h14 * h54); y[l36 + 11] += ddd * (h22 * h52 + h23 * h53 + h24 * h54); y[l36 + 12] += ddd * (h32 * h52 + h33 * h53 + h34 * h54); y[l36 + 13] += ddd * (h42 * h52 + h43 * h53 + h44 * h54); y[l36 + 14] += ddd * (h52 * h52 + h53 * h53 + h54 * h54); y[l36 + 
15] += ddd * (h12 * h62 + h13 * h63 + h14 * h64); y[l36 + 16] += ddd * (h22 * h62 + h23 * h63 + h24 * h64); y[l36 + 17] += ddd * (h32 * h62 + h33 * h63 + h34 * h64); y[l36 + 18] += ddd * (h42 * h62 + h43 * h63 + h44 * h64); y[l36 + 19] += ddd * (h52 * h62 + h53 * h63 + h54 * h64); y[l36 + 20] += ddd * (h62 * h62 + h63 * h63 + h64 * h64); y[l36 + 21] += ddd * (h12 * h72 + h13 * h73 + h14 * h74); y[l36 + 22] += ddd * (h22 * h72 + h23 * h73 + h24 * h74); y[l36 + 23] += ddd * (h32 * h72 + h33 * h73 + h34 * h74); y[l36 + 24] += ddd * (h42 * h72 + h43 * h73 + h44 * h74); y[l36 + 25] += ddd * (h52 * h72 + h53 * h73 + h54 * h74); y[l36 + 26] += ddd * (h62 * h72 + h63 * h73 + h64 * h74); y[l36 + 27] += ddd * (h72 * h72 + h73 * h73 + h74 * h74); y[l36 + 28] += ddd * (h12 * h82 + h13 * h83 + h14 * h84); y[l36 + 29] += ddd * (h22 * h82 + h23 * h83 + h24 * h84); y[l36 + 30] += ddd * (h32 * h82 + h33 * h83 + h34 * h84); y[l36 + 31] += ddd * (h42 * h82 + h43 * h83 + h44 * h84); y[l36 + 32] += ddd * (h52 * h82 + h53 * h83 + h54 * h84); y[l36 + 33] += ddd * (h62 * h82 + h63 * h83 + h64 * h84); y[l36 + 34] += ddd * (h72 * h82 + h73 * h83 + h74 * h84); y[l36 + 35] += ddd * (h82 * h82 + h83 * h83 + h84 * h84); } if (flagA > 0) { #pragma omp parallel for private (nel,l) firstprivate (ub,t1) for (l = 0; l <= ub - 1; l += 1) { int l8 = l * 8; real8 h1 = m[(t1 + l8) * 4 + 1]; real8 h2 = m[(t1 + l8) * 4 + 2]; real8 h3 = m[(t1 + l8) * 4 + 3]; nel = nell[l]; p[nell[l]] += d__[l] * 64. 
* (h1 * h1 + h2 * h2 + h3 * h3); } } } else { for (l = 0; l <= ub - 1; l += 1) { int l8 = l * 8; int l36 = l * 36; real8 d_11 = d11[l]; real8 d_12 = d12[l]; real8 d_13 = d13[l]; real8 d_22 = d22[l]; real8 d_23 = d23[l]; real8 d_33 = d33[l]; real8 h12 = m[(l8 + 0) * 4 + 1]; real8 h13 = m[(l8 + 0) * 4 + 2]; real8 h14 = m[(l8 + 0) * 4 + 3]; real8 h22 = m[(l8 + 1) * 4 + 1]; real8 h23 = m[(l8 + 1) * 4 + 2]; real8 h24 = m[(l8 + 1) * 4 + 3]; real8 h32 = m[(l8 + 2) * 4 + 1]; real8 h33 = m[(l8 + 2) * 4 + 2]; real8 h34 = m[(l8 + 2) * 4 + 3]; real8 h42 = m[(l8 + 3) * 4 + 1]; real8 h43 = m[(l8 + 3) * 4 + 2]; real8 h44 = m[(l8 + 3) * 4 + 3]; real8 h52 = m[(l8 + 4) * 4 + 1]; real8 h53 = m[(l8 + 4) * 4 + 2]; real8 h54 = m[(l8 + 4) * 4 + 3]; real8 h62 = m[(l8 + 5) * 4 + 1]; real8 h63 = m[(l8 + 5) * 4 + 2]; real8 h64 = m[(l8 + 5) * 4 + 3]; real8 h72 = m[(l8 + 6) * 4 + 1]; real8 h73 = m[(l8 + 6) * 4 + 2]; real8 h74 = m[(l8 + 6) * 4 + 3]; real8 h82 = m[(l8 + 7) * 4 + 1]; real8 h83 = m[(l8 + 7) * 4 + 2]; real8 h84 = m[(l8 + 7) * 4 + 3]; y[l36 + 0] = y[l36 + 0] + h12 * (d_11 * h12 + d_12 * h13 + d_13 * h14) + h13 * (d_12 * h12 + d_22 * h13 + d_23 * h14) + h14 * (d_13 * h12 + d_23 * h13 + d_33 * h14); y[l36 + 1] = y[l36 + 1] + h22 * (d_11 * h12 + d_12 * h13 + d_13 * h14) + h23 * (d_12 * h12 + d_22 * h13 + d_23 * h14) + h24 * (d_13 * h12 + d_23 * h13 + d_33 * h14); y[l36 + 2] = y[l36 + 2] + h22 * (d_11 * h22 + d_12 * h23 + d_13 * h24) + h23 * (d_12 * h22 + d_22 * h23 + d_23 * h24) + h24 * (d_13 * h22 + d_23 * h23 + d_33 * h24); y[l36 + 3] = y[l36 + 3] + h32 * (d_11 * h12 + d_12 * h13 + d_13 * h14) + h33 * (d_12 * h12 + d_22 * h13 + d_23 * h14) + h34 * (d_13 * h12 + d_23 * h13 + d_33 * h14); y[l36 + 4] = y[l36 + 4] + h32 * (d_11 * h22 + d_12 * h23 + d_13 * h24) + h33 * (d_12 * h22 + d_22 * h23 + d_23 * h24) + h34 * (d_13 * h22 + d_23 * h23 + d_33 * h24); y[l36 + 5] = y[l36 + 5] + h32 * (d_11 * h32 + d_12 * h33 + d_13 * h34) + h33 * (d_12 * h32 + d_22 * h33 + d_23 * h34) + h34 * (d_13 * 
h32 + d_23 * h33 + d_33 * h34); y[l36 + 6] = y[l36 + 6] + h42 * (d_11 * h12 + d_12 * h13 + d_13 * h14) + h43 * (d_12 * h12 + d_22 * h13 + d_23 * h14) + h44 * (d_13 * h12 + d_23 * h13 + d_33 * h14); y[l36 + 7] = y[l36 + 7] + h42 * (d_11 * h22 + d_12 * h23 + d_13 * h24) + h43 * (d_12 * h22 + d_22 * h23 + d_23 * h24) + h44 * (d_13 * h22 + d_23 * h23 + d_33 * h24); y[l36 + 8] = y[l36 + 8] + h42 * (d_11 * h32 + d_12 * h33 + d_13 * h34) + h43 * (d_12 * h32 + d_22 * h33 + d_23 * h34) + h44 * (d_13 * h32 + d_23 * h33 + d_33 * h34); y[l36 + 9] = y[l36 + 9] + h42 * (d_11 * h42 + d_12 * h43 + d_13 * h44) + h43 * (d_12 * h42 + d_22 * h43 + d_23 * h44) + h44 * (d_13 * h42 + d_23 * h43 + d_33 * h44); y[l36 + 10] = y[l36 + 10] + h52 * (d_11 * h12 + d_12 * h13 + d_13 * h14) + h53 * (d_12 * h12 + d_22 * h13 + d_23 * h14) + h54 * (d_13 * h12 + d_23 * h13 + d_33 * h14); y[l36 + 11] = y[l36 + 11] + h52 * (d_11 * h22 + d_12 * h23 + d_13 * h24) + h53 * (d_12 * h22 + d_22 * h23 + d_23 * h24) + h54 * (d_13 * h22 + d_23 * h23 + d_33 * h24); y[l36 + 12] = y[l36 + 12] + h52 * (d_11 * h32 + d_12 * h33 + d_13 * h34) + h53 * (d_12 * h32 + d_22 * h33 + d_23 * h34) + h54 * (d_13 * h32 + d_23 * h33 + d_33 * h34); y[l36 + 13] = y[l36 + 13] + h52 * (d_11 * h42 + d_12 * h43 + d_13 * h44) + h53 * (d_12 * h42 + d_22 * h43 + d_23 * h44) + h54 * (d_13 * h42 + d_23 * h43 + d_33 * h44); y[l36 + 14] = y[l36 + 14] + h52 * (d_11 * h52 + d_12 * h53 + d_13 * h54) + h53 * (d_12 * h52 + d_22 * h53 + d_23 * h54) + h54 * (d_13 * h52 + d_23 * h53 + d_33 * h54); y[l36 + 15] = y[l36 + 15] + h62 * (d_11 * h12 + d_12 * h13 + d_13 * h14) + h63 * (d_12 * h12 + d_22 * h13 + d_23 * h14) + h64 * (d_13 * h12 + d_23 * h13 + d_33 * h14); y[l36 + 16] = y[l36 + 16] + h62 * (d_11 * h22 + d_12 * h23 + d_13 * h24) + h63 * (d_12 * h22 + d_22 * h23 + d_23 * h24) + h64 * (d_13 * h22 + d_23 * h23 + d_33 * h24); y[l36 + 17] = y[l36 + 17] + h62 * (d_11 * h32 + d_12 * h33 + d_13 * h34) + h63 * (d_12 * h32 + d_22 * h33 + d_23 * h34) + h64 * 
(d_13 * h32 + d_23 * h33 + d_33 * h34); y[l36 + 18] = y[l36 + 18] + h62 * (d_11 * h42 + d_12 * h43 + d_13 * h44) + h63 * (d_12 * h42 + d_22 * h43 + d_23 * h44) + h64 * (d_13 * h42 + d_23 * h43 + d_33 * h44); y[l36 + 19] = y[l36 + 19] + h62 * (d_11 * h52 + d_12 * h53 + d_13 * h54) + h63 * (d_12 * h52 + d_22 * h53 + d_23 * h54) + h64 * (d_13 * h52 + d_23 * h53 + d_33 * h54); y[l36 + 20] = y[l36 + 20] + h62 * (d_11 * h62 + d_12 * h63 + d_13 * h64) + h63 * (d_12 * h62 + d_22 * h63 + d_23 * h64) + h64 * (d_13 * h62 + d_23 * h63 + d_33 * h64); y[l36 + 21] = y[l36 + 21] + h72 * (d_11 * h12 + d_12 * h13 + d_13 * h14) + h73 * (d_12 * h12 + d_22 * h13 + d_23 * h14) + h74 * (d_13 * h12 + d_23 * h13 + d_33 * h14); y[l36 + 22] = y[l36 + 22] + h72 * (d_11 * h22 + d_12 * h23 + d_13 * h24) + h73 * (d_12 * h22 + d_22 * h23 + d_23 * h24) + h74 * (d_13 * h22 + d_23 * h23 + d_33 * h24); y[l36 + 23] = y[l36 + 23] + h72 * (d_11 * h32 + d_12 * h33 + d_13 * h34) + h73 * (d_12 * h32 + d_22 * h33 + d_23 * h34) + h74 * (d_13 * h32 + d_23 * h33 + d_33 * h34); y[l36 + 24] = y[l36 + 24] + h72 * (d_11 * h42 + d_12 * h43 + d_13 * h44) + h73 * (d_12 * h42 + d_22 * h43 + d_23 * h44) + h74 * (d_13 * h42 + d_23 * h43 + d_33 * h44); y[l36 + 25] = y[l36 + 25] + h72 * (d_11 * h52 + d_12 * h53 + d_13 * h54) + h73 * (d_12 * h52 + d_22 * h53 + d_23 * h54) + h74 * (d_13 * h52 + d_23 * h53 + d_33 * h54); y[l36 + 26] = y[l36 + 26] + h72 * (d_11 * h62 + d_12 * h63 + d_13 * h64) + h73 * (d_12 * h62 + d_22 * h63 + d_23 * h64) + h74 * (d_13 * h62 + d_23 * h63 + d_33 * h64); y[l36 + 27] = y[l36 + 27] + h72 * (d_11 * h72 + d_12 * h73 + d_13 * h74) + h73 * (d_12 * h72 + d_22 * h73 + d_23 * h74) + h74 * (d_13 * h72 + d_23 * h73 + d_33 * h74); y[l36 + 28] = y[l36 + 28] + h82 * (d_11 * h12 + d_12 * h13 + d_13 * h14) + h83 * (d_12 * h12 + d_22 * h13 + d_23 * h14) + h84 * (d_13 * h12 + d_23 * h13 + d_33 * h14); y[l36 + 29] = y[l36 + 29] + h82 * (d_11 * h22 + d_12 * h23 + d_13 * h24) + h83 * (d_12 * h22 + d_22 * h23 + 
d_23 * h24) + h84 * (d_13 * h22 + d_23 * h23 + d_33 * h24); y[l36 + 30] = y[l36 + 30] + h82 * (d_11 * h32 + d_12 * h33 + d_13 * h34) + h83 * (d_12 * h32 + d_22 * h33 + d_23 * h34) + h84 * (d_13 * h32 + d_23 * h33 + d_33 * h34); y[l36 + 31] = y[l36 + 31] + h82 * (d_11 * h42 + d_12 * h43 + d_13 * h44) + h83 * (d_12 * h42 + d_22 * h43 + d_23 * h44) + h84 * (d_13 * h42 + d_23 * h43 + d_33 * h44); y[l36 + 32] = y[l36 + 32] + h82 * (d_11 * h52 + d_12 * h53 + d_13 * h54) + h83 * (d_12 * h52 + d_22 * h53 + d_23 * h54) + h84 * (d_13 * h52 + d_23 * h53 + d_33 * h54); y[l36 + 33] = y[l36 + 33] + h82 * (d_11 * h62 + d_12 * h63 + d_13 * h64) + h83 * (d_12 * h62 + d_22 * h63 + d_23 * h64) + h84 * (d_13 * h62 + d_23 * h63 + d_33 * h64); y[l36 + 34] = y[l36 + 34] + h82 * (d_11 * h72 + d_12 * h73 + d_13 * h74) + h83 * (d_12 * h72 + d_22 * h73 + d_23 * h74) + h84 * (d_13 * h72 + d_23 * h73 + d_33 * h74); y[l36 + 35] = y[l36 + 35] + h82 * (d_11 * h82 + d_12 * h83 + d_13 * h84) + h83 * (d_12 * h82 + d_22 * h83 + d_23 * h84) + h84 * (d_13 * h82 + d_23 * h83 + d_33 * h84); } if (flagA > 0) { #pragma omp parallel for private (nel,l) firstprivate (ub,t1) for (l = 0; l <= ub - 1; l += 1) { int l8 = l * 8; real8 h1 = m[(t1 + l8) * 4 + 1]; real8 h2 = m[(t1 + l8) * 4 + 2]; real8 h3 = m[(t1 + l8) * 4 + 3]; nel = nell[l]; p[nell[l]] += (h1 * (d11[l] * h1 + d12[l] * 2. * h2 + d13[l] * 2. * h3) + h2 * (d22[l] * h2 + d23[l] * 2. * h3) + h3 * d33[l] * h3) * 64.; } } } }
domdec_con.c
/* * This file is part of the GROMACS molecular simulation package. * * Copyright (c) 1991-2008 * Copyright (c) 2012,2013, by the GROMACS development team, led by * David van der Spoel, Berk Hess, Erik Lindahl, and including many * others, as listed in the AUTHORS file in the top-level source * directory and at http://www.gromacs.org. * * GROMACS is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * GROMACS is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with GROMACS; if not, see * http://www.gnu.org/licenses, or write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * If you want to redistribute modifications to GROMACS, please * consider that scientific software is very special. Version * control is crucial - bugs must be traceable. We will be happy to * consider code for inclusion in the official distribution, but * derived work must not be called official GROMACS. Details are found * in the README & COPYING files - if they are missing, get the * official version at http://www.gromacs.org. * * To help us fund GROMACS development, we humbly ask that you cite * the research papers on the package. Check out http://www.gromacs.org. 
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <assert.h>

#include "smalloc.h"
#include "vec.h"
#include "constr.h"
#include "domdec.h"
#include "domdec_network.h"
#include "mtop_util.h"
#include "gmx_ga2la.h"
#include "gmx_hash.h"
#include "gmx_omp_nthreads.h"

/* One direction of a special-atom send: the local atom indices to send
 * (a, nsend) and the number of atoms received from the opposite pulse. */
typedef struct {
    int  nsend;
    int *a;
    int  a_nalloc;
    int  nrecv;
} gmx_specatsend_t;

/* Growable list of requested (global) atom indices. */
typedef struct {
    int *ind;
    int  nalloc;
    int  n;
} ind_req_t;

/* Communication state for "special" atoms (constraint/vsite partners)
 * that must be fetched from neighboring domain-decomposition cells. */
typedef struct gmx_domdec_specat_comm {
    /* The number of indices to receive during the setup */
    int              nreq[DIM][2][2];
    /* The atoms to send */
    gmx_specatsend_t spas[DIM][2];
    gmx_bool        *bSendAtom;
    int              bSendAtom_nalloc;
    /* Send buffers */
    int             *ibuf;
    int              ibuf_nalloc;
    rvec            *vbuf;
    int              vbuf_nalloc;
    rvec            *vbuf2;
    int              vbuf2_nalloc;
    /* The range in the local buffer(s) for received atoms */
    int              at_start;
    int              at_end;
    /* The atom indices we need from the surrounding cells.
     * We can gather the indices over nthread threads.
     */
    int              nthread;
    ind_req_t       *ireq;
} gmx_domdec_specat_comm_t;

typedef struct gmx_domdec_constraints {
    int        *molb_con_offset;
    int        *molb_ncon_mol;
    /* The fully local and connected constraints */
    int         ncon;
    /* The global constraint number, only required for clearing gc_req */
    int        *con_gl;
    int        *con_nlocat;
    int         con_nalloc;
    /* Boolean that tells if a global constraint index has been requested */
    char       *gc_req;
    /* Global to local communicated constraint atom only index */
    gmx_hash_t  ga2la;
    /* Multi-threading stuff */
    int         nthread;
    t_ilist    *ils;
} gmx_domdec_constraints_t;

/* Communicate the forces on the special atoms back over the grid
 * (reverse order of dd_move_x_specat), summing each received buffer
 * entry into f[] for the local atom it belongs to.  When the pulse
 * crosses a periodic boundary the contribution is also added to the
 * shift forces fshift[] (if non-NULL); with screw PBC in X the y/z
 * components are sign-flipped ("rotated") instead of plainly summed. */
static void dd_move_f_specat(gmx_domdec_t *dd, gmx_domdec_specat_comm_t *spac,
                             rvec *f, rvec *fshift)
{
    gmx_specatsend_t *spas;
    rvec             *vbuf;
    int               n, n0, n1, d, dim, dir, i;
    ivec              vis;
    int               is;
    gmx_bool          bPBC, bScrew;

    n = spac->at_end;
    for (d = dd->ndim-1; d >= 0; d--)
    {
        dim = dd->dim[d];
        if (dd->nc[dim] > 2)
        {
            /* Pulse the grid forward and backward */
            spas = spac->spas[d];
            n0   = spas[0].nrecv;
            n1   = spas[1].nrecv;
            n   -= n1 + n0;
            vbuf = spac->vbuf;
            /* Send and receive the forces in both directions at once */
            dd_sendrecv2_rvec(dd, d,
                              f+n+n1, n0, vbuf, spas[0].nsend,
                              f+n, n1, vbuf+spas[0].nsend, spas[1].nsend);
            for (dir = 0; dir < 2; dir++)
            {
                /* bPBC: this pulse wraps around a periodic boundary */
                bPBC = ((dir == 0 && dd->ci[dim] == 0) ||
                        (dir == 1 && dd->ci[dim] == dd->nc[dim]-1));
                bScrew = (bPBC && dd->bScrewPBC && dim == XX);

                spas = &spac->spas[d][dir];
                /* Sum the buffer into the required forces */
                if (!bPBC || (!bScrew && fshift == NULL))
                {
                    for (i = 0; i < spas->nsend; i++)
                    {
                        rvec_inc(f[spas->a[i]], *vbuf);
                        vbuf++;
                    }
                }
                else
                {
                    clear_ivec(vis);
                    vis[dim] = (dir == 0 ? 1 : -1);
                    is       = IVEC2IS(vis);
                    if (!bScrew)
                    {
                        /* Sum and add to shift forces */
                        for (i = 0; i < spas->nsend; i++)
                        {
                            rvec_inc(f[spas->a[i]], *vbuf);
                            rvec_inc(fshift[is], *vbuf);
                            vbuf++;
                        }
                    }
                    else
                    {
                        /* Rotate the forces */
                        for (i = 0; i < spas->nsend; i++)
                        {
                            f[spas->a[i]][XX] += (*vbuf)[XX];
                            f[spas->a[i]][YY] -= (*vbuf)[YY];
                            f[spas->a[i]][ZZ] -= (*vbuf)[ZZ];
                            if (fshift)
                            {
                                rvec_inc(fshift[is], *vbuf);
                            }
                            vbuf++;
                        }
                    }
                }
            }
        }
        else
        {
            /* Two cells, so we only need to communicate one way */
            spas = &spac->spas[d][0];
            n   -= spas->nrecv;
            /* Send and receive the forces */
            dd_sendrecv_rvec(dd, d, dddirForward,
                             f+n, spas->nrecv,
                             spac->vbuf, spas->nsend);
            /* Sum the buffer into the required forces */
            if (dd->bScrewPBC && dim == XX &&
                (dd->ci[dim] == 0 ||
                 dd->ci[dim] == dd->nc[dim]-1))
            {
                for (i = 0; i < spas->nsend; i++)
                {
                    /* Rotate the force */
                    f[spas->a[i]][XX] += spac->vbuf[i][XX];
                    f[spas->a[i]][YY] -= spac->vbuf[i][YY];
                    f[spas->a[i]][ZZ] -= spac->vbuf[i][ZZ];
                }
            }
            else
            {
                for (i = 0; i < spas->nsend; i++)
                {
                    rvec_inc(f[spas->a[i]], spac->vbuf[i]);
                }
            }
        }
    }
}

/* Communicate vsite forces back to their home cells (no-op without vsites). */
void dd_move_f_vsites(gmx_domdec_t *dd, rvec *f, rvec *fshift)
{
    if (dd->vsite_comm)
    {
        dd_move_f_specat(dd, dd->vsite_comm, f, fshift);
    }
}

/* Zero the force entries of the communicated vsite atom range. */
void dd_clear_f_vsites(gmx_domdec_t *dd, rvec *f)
{
    int i;

    if (dd->vsite_comm)
    {
        for (i = dd->vsite_comm->at_start; i < dd->vsite_comm->at_end; i++)
        {
            clear_rvec(f[i]);
        }
    }
}

/* Communicate coordinates of the special atoms over the grid; x1 is an
 * optional second coordinate set communicated in the same pass. */
static void dd_move_x_specat(gmx_domdec_t *dd, gmx_domdec_specat_comm_t *spac,
                             matrix box, rvec *x0, rvec *x1)
{
    gmx_specatsend_t *spas;
    rvec             *x, *vbuf, *rbuf;
    int               nvec, v, n, nn, ns0, ns1, nr0, nr1, nr, d, dim, dir, i;
    gmx_bool          bPBC, bScrew = FALSE;
    rvec              shift = {0, 0, 0};

    /* nvec: number of coordinate sets to communicate (x0, optionally x1) */
    nvec = 1;
    if (x1)
    {
        nvec++;
    }

    n = spac->at_start;
    for (d = 0; d < dd->ndim; d++)
    {
        dim = dd->dim[d];
        if (dd->nc[dim] > 2)
        {
            /* Pulse the grid forward and backward */
            vbuf = spac->vbuf;
            for (dir = 0; dir < 2; dir++)
            {
                /* Determine the PBC shift to apply for this pulse */
                if (dir == 0 && dd->ci[dim] == 0)
                {
                    bPBC   = TRUE;
                    bScrew = (dd->bScrewPBC && dim == XX);
                    copy_rvec(box[dim], shift);
                }
                else if (dir == 1 && dd->ci[dim] == dd->nc[dim]-1)
                {
                    bPBC   = TRUE;
                    bScrew = (dd->bScrewPBC && dim == XX);
                    for (i = 0; i < DIM; i++)
                    {
                        shift[i] = -box[dim][i];
                    }
                }
                else
                {
                    bPBC   = FALSE;
                    bScrew = FALSE;
                }
                spas = &spac->spas[d][dir];
                for (v = 0; v < nvec; v++)
                {
                    x = (v == 0 ? x0 : x1);
                    /* Copy the required coordinates to the send buffer */
                    if (!bPBC)
                    {
                        /* Only copy */
                        for (i = 0; i < spas->nsend; i++)
                        {
                            copy_rvec(x[spas->a[i]], *vbuf);
                            vbuf++;
                        }
                    }
                    else if (!bScrew)
                    {
                        /* Shift coordinates */
                        for (i = 0; i < spas->nsend; i++)
                        {
                            rvec_add(x[spas->a[i]], shift, *vbuf);
                            vbuf++;
                        }
                    }
                    else
                    {
                        /* Shift and rotate coordinates */
                        for (i = 0; i < spas->nsend; i++)
                        {
                            (*vbuf)[XX] =               x[spas->a[i]][XX] + shift[XX];
                            (*vbuf)[YY] = box[YY][YY] - x[spas->a[i]][YY] + shift[YY];
                            (*vbuf)[ZZ] = box[ZZ][ZZ] - x[spas->a[i]][ZZ] + shift[ZZ];
                            vbuf++;
                        }
                    }
                }
            }
            /* Send and receive the coordinates */
            spas = spac->spas[d];
            ns0  = spas[0].nsend;
            nr0  = spas[0].nrecv;
            ns1  = spas[1].nsend;
            nr1  = spas[1].nrecv;
            if (nvec == 1)
            {
                dd_sendrecv2_rvec(dd, d,
                                  spac->vbuf+ns0, ns1, x0+n, nr1,
                                  spac->vbuf, ns0, x0+n+nr1, nr0);
            }
            else
            {
                /* Communicate both vectors in one buffer */
                rbuf = spac->vbuf2;
                dd_sendrecv2_rvec(dd, d,
                                  spac->vbuf+2*ns0, 2*ns1, rbuf, 2*nr1,
                                  spac->vbuf, 2*ns0, rbuf+2*nr1, 2*nr0);
                /* Split the buffer into the two vectors */
                nn = n;
                for (dir = 1; dir >= 0; dir--)
                {
                    nr = spas[dir].nrecv;
                    for (v = 0; v < 2; v++)
                    {
                        x = (v == 0 ? x0 : x1);
                        for (i = 0; i < nr; i++)
                        {
                            copy_rvec(*rbuf, x[nn+i]);
                            rbuf++;
                        }
                    }
                    nn += nr;
                }
            }
            n += nr0 + nr1;
        }
        else
        {
            spas = &spac->spas[d][0];
            /* Copy the required coordinates to the send buffer */
            vbuf = spac->vbuf;
            for (v = 0; v < nvec; v++)
            {
                x = (v == 0 ? x0 : x1);
                if (dd->bScrewPBC && dim == XX &&
                    (dd->ci[XX] == 0 || dd->ci[XX] == dd->nc[XX]-1))
                {
                    /* Here we only perform the rotation, the rest of the pbc
                     * is handled in the constraint or viste routines.
                     */
                    for (i = 0; i < spas->nsend; i++)
                    {
                        (*vbuf)[XX] =               x[spas->a[i]][XX];
                        (*vbuf)[YY] = box[YY][YY] - x[spas->a[i]][YY];
                        (*vbuf)[ZZ] = box[ZZ][ZZ] - x[spas->a[i]][ZZ];
                        vbuf++;
                    }
                }
                else
                {
                    for (i = 0; i < spas->nsend; i++)
                    {
                        copy_rvec(x[spas->a[i]], *vbuf);
                        vbuf++;
                    }
                }
            }
            /* Send and receive the coordinates */
            if (nvec == 1)
            {
                dd_sendrecv_rvec(dd, d, dddirBackward,
                                 spac->vbuf, spas->nsend, x0+n, spas->nrecv);
            }
            else
            {
                /* Communicate both vectors in one buffer */
                rbuf = spac->vbuf2;
                dd_sendrecv_rvec(dd, d, dddirBackward,
                                 spac->vbuf, 2*spas->nsend, rbuf, 2*spas->nrecv);
                /* Split the buffer into the two vectors */
                nr = spas[0].nrecv;
                for (v = 0; v < 2; v++)
                {
                    x = (v == 0 ? x0 : x1);
                    for (i = 0; i < nr; i++)
                    {
                        copy_rvec(*rbuf, x[n+i]);
                        rbuf++;
                    }
                }
            }
            n += spas->nrecv;
        }
    }
}

/* Communicate constraint-partner coordinates (no-op without constraints). */
void dd_move_x_constraints(gmx_domdec_t *dd, matrix box, rvec *x0, rvec *x1)
{
    if (dd->constraint_comm)
    {
        dd_move_x_specat(dd, dd->constraint_comm, box, x0, x1);
    }
}

/* Communicate vsite-construction coordinates (no-op without vsites). */
void dd_move_x_vsites(gmx_domdec_t *dd, matrix box, rvec *x)
{
    if (dd->vsite_comm)
    {
        dd_move_x_specat(dd, dd->vsite_comm, box, x, NULL);
    }
}

/* Return the per-constraint local-atom counts, or NULL without constraints. */
int *dd_constraints_nlocalatoms(gmx_domdec_t *dd)
{
    if (dd->constraints)
    {
        return dd->constraints->con_nlocat;
    }
    else
    {
        return NULL;
    }
}

/* Reset the per-step constraint bookkeeping: clear the request flags of
 * the constraints assigned last step and the global->local atom hash. */
void dd_clear_local_constraint_indices(gmx_domdec_t *dd)
{
    gmx_domdec_constraints_t *dc;
    int i;

    dc = dd->constraints;

    for (i = 0; i < dc->ncon; i++)
    {
        dc->gc_req[dc->con_gl[i]] = 0;
    }

    if (dd->constraint_comm)
    {
        gmx_hash_clear_and_optimize(dc->ga2la);
    }
}

/* Reset the global->local vsite atom hash (i is unused here). */
void dd_clear_local_vsite_indices(gmx_domdec_t *dd)
{
    int i;

    if (dd->vsite_comm)
    {
        gmx_hash_clear_and_optimize(dd->ga2la_vsite);
    }
}

/* Set up the communication of special atoms: propagate the requested
 * global indices over the grid, locate each requested atom, and fill
 * spac with the resulting send lists.  Returns are handled further
 * down (function continues beyond this chunk). */
static int setup_specat_communication(gmx_domdec_t             *dd,
                                      ind_req_t                *ireq,
                                      gmx_domdec_specat_comm_t *spac,
                                      gmx_hash_t                ga2la_specat,
                                      int                       at_start,
                                      int                       vbuf_fac,
                                      const char               *specat_type,
                                      const char               *add_err)
{
    int               nsend[2], nlast, nsend_zero[2] = {0, 0}, *nsend_ptr;
    int               d, dim, ndir, dir, nr, ns, i, nrecv_local, n0, start, indr, ind, buf[2];
    int               nat_tot_specat, nat_tot_prev, nalloc_old;
    gmx_bool          bPBC, bFirst;
    gmx_specatsend_t *spas;

    if (debug)
    {
        fprintf(debug, "Begin setup_specat_communication for %s\n", specat_type);
    }

    /* nsend[0]: the number of atoms requested by this node only,
     *           we communicate this for more efficient checks
     * nsend[1]: the total number of requested atoms
     */
    nsend[0] = ireq->n;
    nsend[1] = nsend[0];
    nlast    = nsend[1];
    for (d = dd->ndim-1; d >= 0; d--)
    {
        /* Pulse the grid forward and backward */
        dim  = dd->dim[d];
        bPBC = (dim < dd->npbcdim);
        if (dd->nc[dim] == 2)
        {
            /* Only 2 cells, so we only need to communicate once */
            ndir = 1;
        }
        else
        {
            ndir = 2;
        }
        for (dir = 0; dir < ndir; dir++)
        {
            if (!bPBC &&
                dd->nc[dim] > 2 &&
                ((dir == 0 &&
dd->ci[dim] == dd->nc[dim] - 1) || (dir == 1 && dd->ci[dim] == 0))) { /* No pbc: the fist/last cell should not request atoms */ nsend_ptr = nsend_zero; } else { nsend_ptr = nsend; } /* Communicate the number of indices */ dd_sendrecv_int(dd, d, dir == 0 ? dddirForward : dddirBackward, nsend_ptr, 2, spac->nreq[d][dir], 2); nr = spac->nreq[d][dir][1]; if (nlast+nr > ireq->nalloc) { ireq->nalloc = over_alloc_dd(nlast+nr); srenew(ireq->ind, ireq->nalloc); } /* Communicate the indices */ dd_sendrecv_int(dd, d, dir == 0 ? dddirForward : dddirBackward, ireq->ind, nsend_ptr[1], ireq->ind+nlast, nr); nlast += nr; } nsend[1] = nlast; } if (debug) { fprintf(debug, "Communicated the counts\n"); } /* Search for the requested atoms and communicate the indices we have */ nat_tot_specat = at_start; nrecv_local = 0; for (d = 0; d < dd->ndim; d++) { bFirst = (d == 0); /* Pulse the grid forward and backward */ if (dd->dim[d] >= dd->npbcdim || dd->nc[dd->dim[d]] > 2) { ndir = 2; } else { ndir = 1; } nat_tot_prev = nat_tot_specat; for (dir = ndir-1; dir >= 0; dir--) { if (nat_tot_specat > spac->bSendAtom_nalloc) { nalloc_old = spac->bSendAtom_nalloc; spac->bSendAtom_nalloc = over_alloc_dd(nat_tot_specat); srenew(spac->bSendAtom, spac->bSendAtom_nalloc); for (i = nalloc_old; i < spac->bSendAtom_nalloc; i++) { spac->bSendAtom[i] = FALSE; } } spas = &spac->spas[d][dir]; n0 = spac->nreq[d][dir][0]; nr = spac->nreq[d][dir][1]; if (debug) { fprintf(debug, "dim=%d, dir=%d, searching for %d atoms\n", d, dir, nr); } start = nlast - nr; spas->nsend = 0; nsend[0] = 0; for (i = 0; i < nr; i++) { indr = ireq->ind[start+i]; ind = -1; /* Check if this is a home atom and if so ind will be set */ if (!ga2la_get_home(dd->ga2la, indr, &ind)) { /* Search in the communicated atoms */ ind = gmx_hash_get_minone(ga2la_specat, indr); } if (ind >= 0) { if (i < n0 || !spac->bSendAtom[ind]) { if (spas->nsend+1 > spas->a_nalloc) { spas->a_nalloc = over_alloc_large(spas->nsend+1); srenew(spas->a, spas->a_nalloc); } 
/* Store the local index so we know which coordinates * to send out later. */ spas->a[spas->nsend] = ind; spac->bSendAtom[ind] = TRUE; if (spas->nsend+1 > spac->ibuf_nalloc) { spac->ibuf_nalloc = over_alloc_large(spas->nsend+1); srenew(spac->ibuf, spac->ibuf_nalloc); } /* Store the global index so we can send it now */ spac->ibuf[spas->nsend] = indr; if (i < n0) { nsend[0]++; } spas->nsend++; } } } nlast = start; /* Clear the local flags */ for (i = 0; i < spas->nsend; i++) { spac->bSendAtom[spas->a[i]] = FALSE; } /* Send and receive the number of indices to communicate */ nsend[1] = spas->nsend; dd_sendrecv_int(dd, d, dir == 0 ? dddirBackward : dddirForward, nsend, 2, buf, 2); if (debug) { fprintf(debug, "Send to node %d, %d (%d) indices, " "receive from node %d, %d (%d) indices\n", dd->neighbor[d][1-dir], nsend[1], nsend[0], dd->neighbor[d][dir], buf[1], buf[0]); if (gmx_debug_at) { for (i = 0; i < spas->nsend; i++) { fprintf(debug, " %d", spac->ibuf[i]+1); } fprintf(debug, "\n"); } } nrecv_local += buf[0]; spas->nrecv = buf[1]; if (nat_tot_specat + spas->nrecv > dd->gatindex_nalloc) { dd->gatindex_nalloc = over_alloc_dd(nat_tot_specat + spas->nrecv); srenew(dd->gatindex, dd->gatindex_nalloc); } /* Send and receive the indices */ dd_sendrecv_int(dd, d, dir == 0 ? 
dddirBackward : dddirForward, spac->ibuf, spas->nsend, dd->gatindex+nat_tot_specat, spas->nrecv); nat_tot_specat += spas->nrecv; } /* Allocate the x/f communication buffers */ ns = spac->spas[d][0].nsend; nr = spac->spas[d][0].nrecv; if (ndir == 2) { ns += spac->spas[d][1].nsend; nr += spac->spas[d][1].nrecv; } if (vbuf_fac*ns > spac->vbuf_nalloc) { spac->vbuf_nalloc = over_alloc_dd(vbuf_fac*ns); srenew(spac->vbuf, spac->vbuf_nalloc); } if (vbuf_fac == 2 && vbuf_fac*nr > spac->vbuf2_nalloc) { spac->vbuf2_nalloc = over_alloc_dd(vbuf_fac*nr); srenew(spac->vbuf2, spac->vbuf2_nalloc); } /* Make a global to local index for the communication atoms */ for (i = nat_tot_prev; i < nat_tot_specat; i++) { gmx_hash_change_or_set(ga2la_specat, dd->gatindex[i], i); } } /* Check that in the end we got the number of atoms we asked for */ if (nrecv_local != ireq->n) { if (debug) { fprintf(debug, "Requested %d, received %d (tot recv %d)\n", ireq->n, nrecv_local, nat_tot_specat-at_start); if (gmx_debug_at) { for (i = 0; i < ireq->n; i++) { ind = gmx_hash_get_minone(ga2la_specat, ireq->ind[i]); fprintf(debug, " %s%d", (ind >= 0) ? "" : "!", ireq->ind[i]+1); } fprintf(debug, "\n"); } } fprintf(stderr, "\nDD cell %d %d %d: Neighboring cells do not have atoms:", dd->ci[XX], dd->ci[YY], dd->ci[ZZ]); for (i = 0; i < ireq->n; i++) { if (gmx_hash_get_minone(ga2la_specat, ireq->ind[i]) < 0) { fprintf(stderr, " %d", ireq->ind[i]+1); } } fprintf(stderr, "\n"); gmx_fatal(FARGS, "DD cell %d %d %d could only obtain %d of the %d atoms that are connected via %ss from the neighboring cells. This probably means your %s lengths are too long compared to the domain decomposition cell size. Decrease the number of domain decomposition grid cells%s%s.", dd->ci[XX], dd->ci[YY], dd->ci[ZZ], nrecv_local, ireq->n, specat_type, specat_type, add_err, dd->bGridJump ? 
" or use the -rcon option of mdrun" : ""); } spac->at_start = at_start; spac->at_end = nat_tot_specat; if (debug) { fprintf(debug, "Done setup_specat_communication\n"); } return nat_tot_specat; } static void walk_out(int con, int con_offset, int a, int offset, int nrec, int ncon1, const t_iatom *ia1, const t_iatom *ia2, const t_blocka *at2con, const gmx_ga2la_t ga2la, gmx_bool bHomeConnect, gmx_domdec_constraints_t *dc, gmx_domdec_specat_comm_t *dcc, t_ilist *il_local, ind_req_t *ireq) { int a1_gl, a2_gl, a_loc, i, coni, b; const t_iatom *iap; if (dc->gc_req[con_offset+con] == 0) { /* Add this non-home constraint to the list */ if (dc->ncon+1 > dc->con_nalloc) { dc->con_nalloc = over_alloc_large(dc->ncon+1); srenew(dc->con_gl, dc->con_nalloc); srenew(dc->con_nlocat, dc->con_nalloc); } dc->con_gl[dc->ncon] = con_offset + con; dc->con_nlocat[dc->ncon] = (bHomeConnect ? 1 : 0); dc->gc_req[con_offset+con] = 1; if (il_local->nr + 3 > il_local->nalloc) { il_local->nalloc = over_alloc_dd(il_local->nr+3); srenew(il_local->iatoms, il_local->nalloc); } iap = constr_iatomptr(ncon1, ia1, ia2, con); il_local->iatoms[il_local->nr++] = iap[0]; a1_gl = offset + iap[1]; a2_gl = offset + iap[2]; /* The following indexing code can probably be optizimed */ if (ga2la_get_home(ga2la, a1_gl, &a_loc)) { il_local->iatoms[il_local->nr++] = a_loc; } else { /* We set this index later */ il_local->iatoms[il_local->nr++] = -a1_gl - 1; } if (ga2la_get_home(ga2la, a2_gl, &a_loc)) { il_local->iatoms[il_local->nr++] = a_loc; } else { /* We set this index later */ il_local->iatoms[il_local->nr++] = -a2_gl - 1; } dc->ncon++; } /* Check to not ask for the same atom more than once */ if (gmx_hash_get_minone(dc->ga2la, offset+a) == -1) { assert(dcc); /* Add this non-home atom to the list */ if (ireq->n+1 > ireq->nalloc) { ireq->nalloc = over_alloc_large(ireq->n+1); srenew(ireq->ind, ireq->nalloc); } ireq->ind[ireq->n++] = offset + a; /* Temporarily mark with -2, we get the index later */ 
gmx_hash_set(dc->ga2la, offset+a, -2); } if (nrec > 0) { for (i = at2con->index[a]; i < at2con->index[a+1]; i++) { coni = at2con->a[i]; if (coni != con) { /* Walk further */ iap = constr_iatomptr(ncon1, ia1, ia2, coni); if (a == iap[1]) { b = iap[2]; } else { b = iap[1]; } if (!ga2la_get_home(ga2la, offset+b, &a_loc)) { walk_out(coni, con_offset, b, offset, nrec-1, ncon1, ia1, ia2, at2con, ga2la, FALSE, dc, dcc, il_local, ireq); } } } } } static void atoms_to_settles(gmx_domdec_t *dd, const gmx_mtop_t *mtop, const int *cginfo, const int **at2settle_mt, int cg_start, int cg_end, t_ilist *ils_local, ind_req_t *ireq) { gmx_ga2la_t ga2la; gmx_mtop_atomlookup_t alook; int settle; int nral, sa; int cg, a, a_gl, a_glsa, a_gls[3], a_locs[3]; int mb, molnr, a_mol, offset; const gmx_molblock_t *molb; const t_iatom *ia1; gmx_bool a_home[3]; int nlocal; gmx_bool bAssign; ga2la = dd->ga2la; alook = gmx_mtop_atomlookup_settle_init(mtop); nral = NRAL(F_SETTLE); for (cg = cg_start; cg < cg_end; cg++) { if (GET_CGINFO_SETTLE(cginfo[cg])) { for (a = dd->cgindex[cg]; a < dd->cgindex[cg+1]; a++) { a_gl = dd->gatindex[a]; gmx_mtop_atomnr_to_molblock_ind(alook, a_gl, &mb, &molnr, &a_mol); molb = &mtop->molblock[mb]; settle = at2settle_mt[molb->type][a_mol]; if (settle >= 0) { offset = a_gl - a_mol; ia1 = mtop->moltype[molb->type].ilist[F_SETTLE].iatoms; bAssign = FALSE; nlocal = 0; for (sa = 0; sa < nral; sa++) { a_glsa = offset + ia1[settle*(1+nral)+1+sa]; a_gls[sa] = a_glsa; a_home[sa] = ga2la_get_home(ga2la, a_glsa, &a_locs[sa]); if (a_home[sa]) { if (nlocal == 0 && a_gl == a_glsa) { bAssign = TRUE; } nlocal++; } } if (bAssign) { if (ils_local->nr+1+nral > ils_local->nalloc) { ils_local->nalloc = over_alloc_dd(ils_local->nr+1+nral); srenew(ils_local->iatoms, ils_local->nalloc); } ils_local->iatoms[ils_local->nr++] = ia1[settle*4]; for (sa = 0; sa < nral; sa++) { if (ga2la_get_home(ga2la, a_gls[sa], &a_locs[sa])) { ils_local->iatoms[ils_local->nr++] = a_locs[sa]; } else { 
ils_local->iatoms[ils_local->nr++] = -a_gls[sa] - 1; /* Add this non-home atom to the list */ if (ireq->n+1 > ireq->nalloc) { ireq->nalloc = over_alloc_large(ireq->n+1); srenew(ireq->ind, ireq->nalloc); } ireq->ind[ireq->n++] = a_gls[sa]; /* A check on double atom requests is * not required for settle. */ } } } } } } } gmx_mtop_atomlookup_destroy(alook); } static void atoms_to_constraints(gmx_domdec_t *dd, const gmx_mtop_t *mtop, const int *cginfo, const t_blocka *at2con_mt, int nrec, t_ilist *ilc_local, ind_req_t *ireq) { const t_blocka *at2con; gmx_ga2la_t ga2la; gmx_mtop_atomlookup_t alook; int ncon1; gmx_molblock_t *molb; t_iatom *ia1, *ia2, *iap; int nhome, cg, a, a_gl, a_mol, a_loc, b_lo, offset, mb, molnr, b_mol, i, con, con_offset; gmx_domdec_constraints_t *dc; gmx_domdec_specat_comm_t *dcc; dc = dd->constraints; dcc = dd->constraint_comm; ga2la = dd->ga2la; alook = gmx_mtop_atomlookup_init(mtop); nhome = 0; for (cg = 0; cg < dd->ncg_home; cg++) { if (GET_CGINFO_CONSTR(cginfo[cg])) { for (a = dd->cgindex[cg]; a < dd->cgindex[cg+1]; a++) { a_gl = dd->gatindex[a]; gmx_mtop_atomnr_to_molblock_ind(alook, a_gl, &mb, &molnr, &a_mol); molb = &mtop->molblock[mb]; ncon1 = mtop->moltype[molb->type].ilist[F_CONSTR].nr/NRAL(F_SETTLE); ia1 = mtop->moltype[molb->type].ilist[F_CONSTR].iatoms; ia2 = mtop->moltype[molb->type].ilist[F_CONSTRNC].iatoms; /* Calculate the global constraint number offset for the molecule. * This is only required for the global index to make sure * that we use each constraint only once. 
*/ con_offset = dc->molb_con_offset[mb] + molnr*dc->molb_ncon_mol[mb]; /* The global atom number offset for this molecule */ offset = a_gl - a_mol; at2con = &at2con_mt[molb->type]; for (i = at2con->index[a_mol]; i < at2con->index[a_mol+1]; i++) { con = at2con->a[i]; iap = constr_iatomptr(ncon1, ia1, ia2, con); if (a_mol == iap[1]) { b_mol = iap[2]; } else { b_mol = iap[1]; } if (ga2la_get_home(ga2la, offset+b_mol, &a_loc)) { /* Add this fully home constraint at the first atom */ if (a_mol < b_mol) { if (dc->ncon+1 > dc->con_nalloc) { dc->con_nalloc = over_alloc_large(dc->ncon+1); srenew(dc->con_gl, dc->con_nalloc); srenew(dc->con_nlocat, dc->con_nalloc); } dc->con_gl[dc->ncon] = con_offset + con; dc->con_nlocat[dc->ncon] = 2; if (ilc_local->nr + 3 > ilc_local->nalloc) { ilc_local->nalloc = over_alloc_dd(ilc_local->nr + 3); srenew(ilc_local->iatoms, ilc_local->nalloc); } b_lo = a_loc; ilc_local->iatoms[ilc_local->nr++] = iap[0]; ilc_local->iatoms[ilc_local->nr++] = (a_gl == iap[1] ? a : b_lo); ilc_local->iatoms[ilc_local->nr++] = (a_gl == iap[1] ? b_lo : a ); dc->ncon++; nhome++; } } else { /* We need the nrec constraints coupled to this constraint, * so we need to walk out of the home cell by nrec+1 atoms, * since already atom bg is not locally present. * Therefore we call walk_out with nrec recursions to go * after this first call. */ walk_out(con, con_offset, b_mol, offset, nrec, ncon1, ia1, ia2, at2con, dd->ga2la, TRUE, dc, dcc, ilc_local, ireq); } } } } } gmx_mtop_atomlookup_destroy(alook); if (debug) { fprintf(debug, "Constraints: home %3d border %3d atoms: %3d\n", nhome, dc->ncon-nhome, dd->constraint_comm ? 
ireq->n : 0); } } int dd_make_local_constraints(gmx_domdec_t *dd, int at_start, const gmx_mtop_t *mtop, const int *cginfo, gmx_constr_t constr, int nrec, t_ilist *il_local) { gmx_domdec_constraints_t *dc; t_ilist *ilc_local, *ils_local; ind_req_t *ireq; const t_blocka *at2con_mt; const int **at2settle_mt; gmx_hash_t ga2la_specat; int at_end, i, j; t_iatom *iap; dc = dd->constraints; ilc_local = &il_local[F_CONSTR]; ils_local = &il_local[F_SETTLE]; dc->ncon = 0; ilc_local->nr = 0; if (dd->constraint_comm) { at2con_mt = atom2constraints_moltype(constr); ireq = &dd->constraint_comm->ireq[0]; ireq->n = 0; } else { at2con_mt = NULL; ireq = NULL; } if (dd->bInterCGsettles) { at2settle_mt = atom2settle_moltype(constr); ils_local->nr = 0; } else { /* Settle works inside charge groups, we assigned them already */ at2settle_mt = NULL; } if (at2settle_mt == NULL) { atoms_to_constraints(dd, mtop, cginfo, at2con_mt, nrec, ilc_local, ireq); } else { int t0_set; int thread; /* Do the constraints, if present, on the first thread. * Do the settles on all other threads. */ t0_set = ((at2con_mt != NULL && dc->nthread > 1) ? 1 : 0); #pragma omp parallel for num_threads(dc->nthread) schedule(static) for (thread = 0; thread < dc->nthread; thread++) { if (at2con_mt && thread == 0) { atoms_to_constraints(dd, mtop, cginfo, at2con_mt, nrec, ilc_local, ireq); } if (thread >= t0_set) { int cg0, cg1; t_ilist *ilst; ind_req_t *ireqt; /* Distribute the settle check+assignments over * dc->nthread or dc->nthread-1 threads. 
*/ cg0 = (dd->ncg_home*(thread-t0_set ))/(dc->nthread-t0_set); cg1 = (dd->ncg_home*(thread-t0_set+1))/(dc->nthread-t0_set); if (thread == t0_set) { ilst = ils_local; } else { ilst = &dc->ils[thread]; } ilst->nr = 0; ireqt = &dd->constraint_comm->ireq[thread]; if (thread > 0) { ireqt->n = 0; } atoms_to_settles(dd, mtop, cginfo, at2settle_mt, cg0, cg1, ilst, ireqt); } } /* Combine the generate settles and requested indices */ for (thread = 1; thread < dc->nthread; thread++) { t_ilist *ilst; ind_req_t *ireqt; int ia; if (thread > t0_set) { ilst = &dc->ils[thread]; if (ils_local->nr + ilst->nr > ils_local->nalloc) { ils_local->nalloc = over_alloc_large(ils_local->nr + ilst->nr); srenew(ils_local->iatoms, ils_local->nalloc); } for (ia = 0; ia < ilst->nr; ia++) { ils_local->iatoms[ils_local->nr+ia] = ilst->iatoms[ia]; } ils_local->nr += ilst->nr; } ireqt = &dd->constraint_comm->ireq[thread]; if (ireq->n+ireqt->n > ireq->nalloc) { ireq->nalloc = over_alloc_large(ireq->n+ireqt->n); srenew(ireq->ind, ireq->nalloc); } for (ia = 0; ia < ireqt->n; ia++) { ireq->ind[ireq->n+ia] = ireqt->ind[ia]; } ireq->n += ireqt->n; } if (debug) { fprintf(debug, "Settles: total %3d\n", ils_local->nr/4); } } if (dd->constraint_comm) { int nral1; at_end = setup_specat_communication(dd, ireq, dd->constraint_comm, dd->constraints->ga2la, at_start, 2, "constraint", " or lincs-order"); /* Fill in the missing indices */ ga2la_specat = dd->constraints->ga2la; nral1 = 1 + NRAL(F_CONSTR); for (i = 0; i < ilc_local->nr; i += nral1) { iap = ilc_local->iatoms + i; for (j = 1; j < nral1; j++) { if (iap[j] < 0) { iap[j] = gmx_hash_get_minone(ga2la_specat, -iap[j]-1); } } } nral1 = 1 + NRAL(F_SETTLE); for (i = 0; i < ils_local->nr; i += nral1) { iap = ils_local->iatoms + i; for (j = 1; j < nral1; j++) { if (iap[j] < 0) { iap[j] = gmx_hash_get_minone(ga2la_specat, -iap[j]-1); } } } } else { at_end = at_start; } return at_end; } int dd_make_local_vsites(gmx_domdec_t *dd, int at_start, t_ilist *lil) { 
gmx_domdec_specat_comm_t *spac; ind_req_t *ireq; gmx_hash_t ga2la_specat; int ftype, nral, i, j, gat, a; t_ilist *lilf; t_iatom *iatoms; int at_end; spac = dd->vsite_comm; ireq = &spac->ireq[0]; ga2la_specat = dd->ga2la_vsite; ireq->n = 0; /* Loop over all the home vsites */ for (ftype = 0; ftype < F_NRE; ftype++) { if (interaction_function[ftype].flags & IF_VSITE) { nral = NRAL(ftype); lilf = &lil[ftype]; for (i = 0; i < lilf->nr; i += 1+nral) { iatoms = lilf->iatoms + i; /* Check if we have the other atoms */ for (j = 1; j < 1+nral; j++) { if (iatoms[j] < 0) { /* This is not a home atom, * we need to ask our neighbors. */ a = -iatoms[j] - 1; /* Check to not ask for the same atom more than once */ if (gmx_hash_get_minone(dd->ga2la_vsite, a) == -1) { /* Add this non-home atom to the list */ if (ireq->n+1 > ireq->nalloc) { ireq->nalloc = over_alloc_large(ireq->n+1); srenew(ireq->ind, ireq->nalloc); } ireq->ind[ireq->n++] = a; /* Temporarily mark with -2, * we get the index later. */ gmx_hash_set(ga2la_specat, a, -2); } } } } } } at_end = setup_specat_communication(dd, ireq, dd->vsite_comm, ga2la_specat, at_start, 1, "vsite", ""); /* Fill in the missing indices */ for (ftype = 0; ftype < F_NRE; ftype++) { if (interaction_function[ftype].flags & IF_VSITE) { nral = NRAL(ftype); lilf = &lil[ftype]; for (i = 0; i < lilf->nr; i += 1+nral) { iatoms = lilf->iatoms + i; for (j = 1; j < 1+nral; j++) { if (iatoms[j] < 0) { iatoms[j] = gmx_hash_get_minone(ga2la_specat, -iatoms[j]-1); } } } } } return at_end; } static gmx_domdec_specat_comm_t *specat_comm_init(int nthread) { gmx_domdec_specat_comm_t *spac; snew(spac, 1); spac->nthread = nthread; snew(spac->ireq, spac->nthread); return spac; } void init_domdec_constraints(gmx_domdec_t *dd, gmx_mtop_t *mtop, gmx_constr_t constr) { gmx_domdec_constraints_t *dc; gmx_molblock_t *molb; int mb, ncon, c, a; if (debug) { fprintf(debug, "Begin init_domdec_constraints\n"); } snew(dd->constraints, 1); dc = dd->constraints; 
snew(dc->molb_con_offset, mtop->nmolblock); snew(dc->molb_ncon_mol, mtop->nmolblock); ncon = 0; for (mb = 0; mb < mtop->nmolblock; mb++) { molb = &mtop->molblock[mb]; dc->molb_con_offset[mb] = ncon; dc->molb_ncon_mol[mb] = mtop->moltype[molb->type].ilist[F_CONSTR].nr/3 + mtop->moltype[molb->type].ilist[F_CONSTRNC].nr/3; ncon += molb->nmol*dc->molb_ncon_mol[mb]; } if (ncon > 0) { snew(dc->gc_req, ncon); for (c = 0; c < ncon; c++) { dc->gc_req[c] = 0; } } /* Use a hash table for the global to local index. * The number of keys is a rough estimate, it will be optimized later. */ dc->ga2la = gmx_hash_init(min(mtop->natoms/20, mtop->natoms/(2*dd->nnodes))); dc->nthread = gmx_omp_nthreads_get(emntDomdec); snew(dc->ils, dc->nthread); dd->constraint_comm = specat_comm_init(dc->nthread); } void init_domdec_vsites(gmx_domdec_t *dd, int n_intercg_vsite) { int i; gmx_domdec_constraints_t *dc; if (debug) { fprintf(debug, "Begin init_domdec_vsites\n"); } /* Use a hash table for the global to local index. * The number of keys is a rough estimate, it will be optimized later. */ dd->ga2la_vsite = gmx_hash_init(min(n_intercg_vsite/20, n_intercg_vsite/(2*dd->nnodes))); dd->vsite_comm = specat_comm_init(1); }
/* ===== chunk boundary: the following content is from OpenMPClause.h ===== */
//===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// \file /// \brief This file defines OpenMP AST classes for clauses. /// There are clauses for executable directives, clauses for declarative /// directives and clauses which can be used in both kinds of directives. /// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H #define LLVM_CLANG_AST_OPENMPCLAUSE_H #include "clang/AST/Expr.h" #include "clang/AST/Stmt.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" namespace clang { //===----------------------------------------------------------------------===// // AST classes for clauses. //===----------------------------------------------------------------------===// /// \brief This is a basic class for representing single OpenMP clause. /// class OMPClause { /// \brief Starting location of the clause (the clause keyword). SourceLocation StartLoc; /// \brief Ending location of the clause. SourceLocation EndLoc; /// \brief Kind of the clause. OpenMPClauseKind Kind; protected: OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc) : StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {} public: /// \brief Returns the starting location of the clause. SourceLocation getLocStart() const { return StartLoc; } /// \brief Returns the ending location of the clause. SourceLocation getLocEnd() const { return EndLoc; } /// \brief Sets the starting location of the clause. void setLocStart(SourceLocation Loc) { StartLoc = Loc; } /// \brief Sets the ending location of the clause. void setLocEnd(SourceLocation Loc) { EndLoc = Loc; } /// \brief Returns kind of OpenMP clause (private, shared, reduction, etc.). 
OpenMPClauseKind getClauseKind() const { return Kind; } bool isImplicit() const { return StartLoc.isInvalid(); } typedef StmtIterator child_iterator; typedef ConstStmtIterator const_child_iterator; typedef llvm::iterator_range<child_iterator> child_range; typedef llvm::iterator_range<const_child_iterator> const_child_range; child_range children(); const_child_range children() const { auto Children = const_cast<OMPClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *) { return true; } }; /// Class that handles pre-initialization statement for some clauses, like /// 'shedule', 'firstprivate' etc. class OMPClauseWithPreInit { friend class OMPClauseReader; /// Pre-initialization statement for the clause. Stmt *PreInit; protected: /// Set pre-initialization statement for the clause. void setPreInitStmt(Stmt *S) { PreInit = S; } OMPClauseWithPreInit(const OMPClause *This) : PreInit(nullptr) { assert(get(This) && "get is not tuned for pre-init."); } public: /// Get pre-initialization statement for the clause. const Stmt *getPreInitStmt() const { return PreInit; } /// Get pre-initialization statement for the clause. Stmt *getPreInitStmt() { return PreInit; } static OMPClauseWithPreInit *get(OMPClause *C); static const OMPClauseWithPreInit *get(const OMPClause *C); }; /// Class that handles post-update expression for some clauses, like /// 'lastprivate', 'reduction' etc. class OMPClauseWithPostUpdate : public OMPClauseWithPreInit { friend class OMPClauseReader; /// Post-update expression for the clause. Expr *PostUpdate; protected: /// Set pre-initialization statement for the clause. void setPostUpdateExpr(Expr *S) { PostUpdate = S; } OMPClauseWithPostUpdate(const OMPClause *This) : OMPClauseWithPreInit(This), PostUpdate(nullptr) { assert(get(This) && "get is not tuned for post-update."); } public: /// Get post-update expression for the clause. 
const Expr *getPostUpdateExpr() const { return PostUpdate; } /// Get post-update expression for the clause. Expr *getPostUpdateExpr() { return PostUpdate; } static OMPClauseWithPostUpdate *get(OMPClause *C); static const OMPClauseWithPostUpdate *get(const OMPClause *C); }; /// \brief This represents clauses with the list of variables like 'private', /// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the /// '#pragma omp ...' directives. template <class T> class OMPVarListClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Number of variables in the list. unsigned NumVars; protected: /// \brief Fetches list of variables associated with this clause. MutableArrayRef<Expr *> getVarRefs() { return MutableArrayRef<Expr *>( static_cast<T *>(this)->template getTrailingObjects<Expr *>(), NumVars); } /// \brief Sets the list of variables for this clause. void setVarRefs(ArrayRef<Expr *> VL) { assert(VL.size() == NumVars && "Number of variables is not the same as the preallocated buffer"); std::copy(VL.begin(), VL.end(), static_cast<T *>(this)->template getTrailingObjects<Expr *>()); } /// \brief Build a clause with \a N variables /// /// \param K Kind of the clause. /// \param StartLoc Starting location of the clause (the clause keyword). /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. 
/// OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {} public: typedef MutableArrayRef<Expr *>::iterator varlist_iterator; typedef ArrayRef<const Expr *>::iterator varlist_const_iterator; typedef llvm::iterator_range<varlist_iterator> varlist_range; typedef llvm::iterator_range<varlist_const_iterator> varlist_const_range; unsigned varlist_size() const { return NumVars; } bool varlist_empty() const { return NumVars == 0; } varlist_range varlists() { return varlist_range(varlist_begin(), varlist_end()); } varlist_const_range varlists() const { return varlist_const_range(varlist_begin(), varlist_end()); } varlist_iterator varlist_begin() { return getVarRefs().begin(); } varlist_iterator varlist_end() { return getVarRefs().end(); } varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); } varlist_const_iterator varlist_end() const { return getVarRefs().end(); } /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Fetches list of all variables in the clause. ArrayRef<const Expr *> getVarRefs() const { return llvm::makeArrayRef( static_cast<const T *>(this)->template getTrailingObjects<Expr *>(), NumVars); } }; /// \brief This represents 'if' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp parallel if(parallel:a > 5) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'if' clause with /// condition 'a > 5' and directive name modifier 'parallel'. /// class OMPIfClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Condition of the 'if' clause. Stmt *Condition; /// \brief Location of ':' (if any). 
SourceLocation ColonLoc; /// \brief Directive name modifier for the clause. OpenMPDirectiveKind NameModifier; /// \brief Name modifier location. SourceLocation NameModifierLoc; /// \brief Set condition. /// void setCondition(Expr *Cond) { Condition = Cond; } /// \brief Set directive name modifier for the clause. /// void setNameModifier(OpenMPDirectiveKind NM) { NameModifier = NM; } /// \brief Set location of directive name modifier for the clause. /// void setNameModifierLoc(SourceLocation Loc) { NameModifierLoc = Loc; } /// \brief Set location of ':'. /// void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } public: /// \brief Build 'if' clause with condition \a Cond. /// /// \param NameModifier [OpenMP 4.1] Directive name modifier of clause. /// \param Cond Condition of the clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param NameModifierLoc Location of directive name modifier. /// \param ColonLoc [OpenMP 4.1] Location of ':'. /// \param EndLoc Ending location of the clause. /// OMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Cond, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc) : OMPClause(OMPC_if, StartLoc, EndLoc), LParenLoc(LParenLoc), Condition(Cond), ColonLoc(ColonLoc), NameModifier(NameModifier), NameModifierLoc(NameModifierLoc) {} /// \brief Build an empty clause. /// OMPIfClause() : OMPClause(OMPC_if, SourceLocation(), SourceLocation()), LParenLoc(), Condition(nullptr), ColonLoc(), NameModifier(OMPD_unknown), NameModifierLoc() {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Return the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// \brief Returns condition. 
Expr *getCondition() const { return cast_or_null<Expr>(Condition); } /// \brief Return directive name modifier associated with the clause. OpenMPDirectiveKind getNameModifier() const { return NameModifier; } /// \brief Return the location of directive name modifier. SourceLocation getNameModifierLoc() const { return NameModifierLoc; } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_if; } child_range children() { return child_range(&Condition, &Condition + 1); } }; /// \brief This represents 'final' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp task final(a > 5) /// \endcode /// In this example directive '#pragma omp task' has simple 'final' /// clause with condition 'a > 5'. /// class OMPFinalClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Condition of the 'if' clause. Stmt *Condition; /// \brief Set condition. /// void setCondition(Expr *Cond) { Condition = Cond; } public: /// \brief Build 'final' clause with condition \a Cond. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Cond Condition of the clause. /// \param EndLoc Ending location of the clause. /// OMPFinalClause(Expr *Cond, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_final, StartLoc, EndLoc), LParenLoc(LParenLoc), Condition(Cond) {} /// \brief Build an empty clause. /// OMPFinalClause() : OMPClause(OMPC_final, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Condition(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Returns condition. 
Expr *getCondition() const { return cast_or_null<Expr>(Condition); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_final; } child_range children() { return child_range(&Condition, &Condition + 1); } }; /// \brief This represents 'num_threads' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp parallel num_threads(6) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'num_threads' /// clause with number of threads '6'. /// class OMPNumThreadsClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Condition of the 'num_threads' clause. Stmt *NumThreads; /// \brief Set condition. /// void setNumThreads(Expr *NThreads) { NumThreads = NThreads; } public: /// \brief Build 'num_threads' clause with condition \a NumThreads. /// /// \param NumThreads Number of threads for the construct. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// OMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_num_threads, StartLoc, EndLoc), LParenLoc(LParenLoc), NumThreads(NumThreads) {} /// \brief Build an empty clause. /// OMPNumThreadsClause() : OMPClause(OMPC_num_threads, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), NumThreads(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Returns number of threads. 
Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_num_threads; } child_range children() { return child_range(&NumThreads, &NumThreads + 1); } }; /// \brief This represents 'safelen' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp simd safelen(4) /// \endcode /// In this example directive '#pragma omp simd' has clause 'safelen' /// with single expression '4'. /// If the safelen clause is used then no two iterations executed /// concurrently with SIMD instructions can have a greater distance /// in the logical iteration space than its value. The parameter of /// the safelen clause must be a constant positive integer expression. /// class OMPSafelenClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Safe iteration space distance. Stmt *Safelen; /// \brief Set safelen. void setSafelen(Expr *Len) { Safelen = Len; } public: /// \brief Build 'safelen' clause. /// /// \param Len Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPSafelenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_safelen, StartLoc, EndLoc), LParenLoc(LParenLoc), Safelen(Len) {} /// \brief Build an empty clause. /// explicit OMPSafelenClause() : OMPClause(OMPC_safelen, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Safelen(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Return safe iteration space distance. 
  Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_safelen;
  }

  child_range children() { return child_range(&Safelen, &Safelen + 1); }
};

/// \brief This represents 'simdlen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd simdlen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'simdlen'
/// with single expression '4'.
/// If the 'simdlen' clause is used then it specifies the preferred number of
/// iterations to be executed concurrently. The parameter of the 'simdlen'
/// clause must be a constant positive integer expression.
///
class OMPSimdlenClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Preferred number of iterations to be executed concurrently.
  Stmt *Simdlen;
  /// \brief Set simdlen.
  void setSimdlen(Expr *Len) { Simdlen = Len; }

public:
  /// \brief Build 'simdlen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPSimdlenClause(Expr *Len, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_simdlen, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Simdlen(Len) {}

  /// \brief Build an empty clause.
  ///
  explicit OMPSimdlenClause()
      : OMPClause(OMPC_simdlen, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), Simdlen(nullptr) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Return the preferred number of iterations to be executed
  /// concurrently.
Expr *getSimdlen() const { return cast_or_null<Expr>(Simdlen); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_simdlen; } child_range children() { return child_range(&Simdlen, &Simdlen + 1); } }; /// \brief This represents 'collapse' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp simd collapse(3) /// \endcode /// In this example directive '#pragma omp simd' has clause 'collapse' /// with single expression '3'. /// The parameter must be a constant positive integer expression, it specifies /// the number of nested loops that should be collapsed into a single iteration /// space. /// class OMPCollapseClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Number of for-loops. Stmt *NumForLoops; /// \brief Set the number of associated for-loops. void setNumForLoops(Expr *Num) { NumForLoops = Num; } public: /// \brief Build 'collapse' clause. /// /// \param Num Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// OMPCollapseClause(Expr *Num, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_collapse, StartLoc, EndLoc), LParenLoc(LParenLoc), NumForLoops(Num) {} /// \brief Build an empty clause. /// explicit OMPCollapseClause() : OMPClause(OMPC_collapse, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), NumForLoops(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Return the number of associated for-loops. 
Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_collapse; } child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); } }; /// \brief This represents 'default' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp parallel default(shared) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'default' /// clause with kind 'shared'. /// class OMPDefaultClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief A kind of the 'default' clause. OpenMPDefaultClauseKind Kind; /// \brief Start location of the kind in source code. SourceLocation KindKwLoc; /// \brief Set kind of the clauses. /// /// \param K Argument of clause. /// void setDefaultKind(OpenMPDefaultClauseKind K) { Kind = K; } /// \brief Set argument location. /// /// \param KLoc Argument location. /// void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// \brief Build 'default' clause with argument \a A ('none' or 'shared'). /// /// \param A Argument of the clause ('none' or 'shared'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// OMPDefaultClause(OpenMPDefaultClauseKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_default, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// \brief Build an empty clause. /// OMPDefaultClause() : OMPClause(OMPC_default, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Kind(OMPC_DEFAULT_unknown), KindKwLoc(SourceLocation()) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. 
SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Returns kind of the clause. OpenMPDefaultClauseKind getDefaultKind() const { return Kind; } /// \brief Returns location of clause kind. SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_default; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents 'proc_bind' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp parallel proc_bind(master) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'proc_bind' /// clause with kind 'master'. /// class OMPProcBindClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief A kind of the 'proc_bind' clause. OpenMPProcBindClauseKind Kind; /// \brief Start location of the kind in source code. SourceLocation KindKwLoc; /// \brief Set kind of the clause. /// /// \param K Kind of clause. /// void setProcBindKind(OpenMPProcBindClauseKind K) { Kind = K; } /// \brief Set clause kind location. /// /// \param KLoc Kind location. /// void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// \brief Build 'proc_bind' clause with argument \a A ('master', 'close' or /// 'spread'). /// /// \param A Argument of the clause ('master', 'close' or 'spread'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// OMPProcBindClause(OpenMPProcBindClauseKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_proc_bind, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// \brief Build an empty clause. 
/// OMPProcBindClause() : OMPClause(OMPC_proc_bind, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Kind(OMPC_PROC_BIND_unknown), KindKwLoc(SourceLocation()) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Returns kind of the clause. OpenMPProcBindClauseKind getProcBindKind() const { return Kind; } /// \brief Returns location of clause kind. SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_proc_bind; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents 'schedule' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for schedule(static, 3) /// \endcode /// In this example directive '#pragma omp for' has 'schedule' clause with /// arguments 'static' and '3'. /// class OMPScheduleClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief A kind of the 'schedule' clause. OpenMPScheduleClauseKind Kind; /// \brief Modifiers for 'schedule' clause. enum {FIRST, SECOND, NUM_MODIFIERS}; OpenMPScheduleClauseModifier Modifiers[NUM_MODIFIERS]; /// \brief Locations of modifiers. SourceLocation ModifiersLoc[NUM_MODIFIERS]; /// \brief Start location of the schedule ind in source code. SourceLocation KindLoc; /// \brief Location of ',' (if any). SourceLocation CommaLoc; /// \brief Chunk size. Expr *ChunkSize; /// \brief Set schedule kind. /// /// \param K Schedule kind. /// void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; } /// \brief Set the first schedule modifier. /// /// \param M Schedule modifier. 
/// void setFirstScheduleModifier(OpenMPScheduleClauseModifier M) { Modifiers[FIRST] = M; } /// \brief Set the second schedule modifier. /// /// \param M Schedule modifier. /// void setSecondScheduleModifier(OpenMPScheduleClauseModifier M) { Modifiers[SECOND] = M; } /// \brief Set location of the first schedule modifier. /// void setFirstScheduleModifierLoc(SourceLocation Loc) { ModifiersLoc[FIRST] = Loc; } /// \brief Set location of the second schedule modifier. /// void setSecondScheduleModifierLoc(SourceLocation Loc) { ModifiersLoc[SECOND] = Loc; } /// \brief Set schedule modifier location. /// /// \param M Schedule modifier location. /// void setScheduleModifer(OpenMPScheduleClauseModifier M) { if (Modifiers[FIRST] == OMPC_SCHEDULE_MODIFIER_unknown) Modifiers[FIRST] = M; else { assert(Modifiers[SECOND] == OMPC_SCHEDULE_MODIFIER_unknown); Modifiers[SECOND] = M; } } /// \brief Sets the location of '('. /// /// \param Loc Location of '('. /// void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Set schedule kind start location. /// /// \param KLoc Schedule kind location. /// void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; } /// \brief Set location of ','. /// /// \param Loc Location of ','. /// void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; } /// \brief Set chunk size. /// /// \param E Chunk size. /// void setChunkSize(Expr *E) { ChunkSize = E; } public: /// \brief Build 'schedule' clause with schedule kind \a Kind and chunk size /// expression \a ChunkSize. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param KLoc Starting location of the argument. /// \param CommaLoc Location of ','. /// \param EndLoc Ending location of the clause. /// \param Kind Schedule kind. /// \param ChunkSize Chunk size. /// \param HelperChunkSize Helper chunk size for combined directives. /// \param M1 The first modifier applied to 'schedule' clause. 
/// \param M1Loc Location of the first modifier /// \param M2 The second modifier applied to 'schedule' clause. /// \param M2Loc Location of the second modifier /// OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KLoc, SourceLocation CommaLoc, SourceLocation EndLoc, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, Stmt *HelperChunkSize, OpenMPScheduleClauseModifier M1, SourceLocation M1Loc, OpenMPScheduleClauseModifier M2, SourceLocation M2Loc) : OMPClause(OMPC_schedule, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) { setPreInitStmt(HelperChunkSize); Modifiers[FIRST] = M1; Modifiers[SECOND] = M2; ModifiersLoc[FIRST] = M1Loc; ModifiersLoc[SECOND] = M2Loc; } /// \brief Build an empty clause. /// explicit OMPScheduleClause() : OMPClause(OMPC_schedule, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this), Kind(OMPC_SCHEDULE_unknown), ChunkSize(nullptr) { Modifiers[FIRST] = OMPC_SCHEDULE_MODIFIER_unknown; Modifiers[SECOND] = OMPC_SCHEDULE_MODIFIER_unknown; } /// \brief Get kind of the clause. /// OpenMPScheduleClauseKind getScheduleKind() const { return Kind; } /// \brief Get the first modifier of the clause. /// OpenMPScheduleClauseModifier getFirstScheduleModifier() const { return Modifiers[FIRST]; } /// \brief Get the second modifier of the clause. /// OpenMPScheduleClauseModifier getSecondScheduleModifier() const { return Modifiers[SECOND]; } /// \brief Get location of '('. /// SourceLocation getLParenLoc() { return LParenLoc; } /// \brief Get kind location. /// SourceLocation getScheduleKindLoc() { return KindLoc; } /// \brief Get the first modifier location. /// SourceLocation getFirstScheduleModifierLoc() const { return ModifiersLoc[FIRST]; } /// \brief Get the second modifier location. /// SourceLocation getSecondScheduleModifierLoc() const { return ModifiersLoc[SECOND]; } /// \brief Get location of ','. 
/// SourceLocation getCommaLoc() { return CommaLoc; } /// \brief Get chunk size. /// Expr *getChunkSize() { return ChunkSize; } /// \brief Get chunk size. /// const Expr *getChunkSize() const { return ChunkSize; } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_schedule; } child_range children() { return child_range(reinterpret_cast<Stmt **>(&ChunkSize), reinterpret_cast<Stmt **>(&ChunkSize) + 1); } }; /// \brief This represents 'ordered' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for ordered (2) /// \endcode /// In this example directive '#pragma omp for' has 'ordered' clause with /// parameter 2. /// class OMPOrderedClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Number of for-loops. Stmt *NumForLoops; /// \brief Set the number of associated for-loops. void setNumForLoops(Expr *Num) { NumForLoops = Num; } public: /// \brief Build 'ordered' clause. /// /// \param Num Expression, possibly associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// OMPOrderedClause(Expr *Num, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_ordered, StartLoc, EndLoc), LParenLoc(LParenLoc), NumForLoops(Num) {} /// \brief Build an empty clause. /// explicit OMPOrderedClause() : OMPClause(OMPC_ordered, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), NumForLoops(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Return the number of associated for-loops. 
Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_ordered; } child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); } }; /// \brief This represents 'nowait' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for nowait /// \endcode /// In this example directive '#pragma omp for' has 'nowait' clause. /// class OMPNowaitClause : public OMPClause { public: /// \brief Build 'nowait' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_nowait, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPNowaitClause() : OMPClause(OMPC_nowait, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_nowait; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents 'untied' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp task untied /// \endcode /// In this example directive '#pragma omp task' has 'untied' clause. /// class OMPUntiedClause : public OMPClause { public: /// \brief Build 'untied' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_untied, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPUntiedClause() : OMPClause(OMPC_untied, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_untied; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents 'mergeable' clause in the '#pragma omp ...' /// directive. 
/// /// \code /// #pragma omp task mergeable /// \endcode /// In this example directive '#pragma omp task' has 'mergeable' clause. /// class OMPMergeableClause : public OMPClause { public: /// \brief Build 'mergeable' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_mergeable, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPMergeableClause() : OMPClause(OMPC_mergeable, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_mergeable; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents 'read' clause in the '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic read /// \endcode /// In this example directive '#pragma omp atomic' has 'read' clause. /// class OMPReadClause : public OMPClause { public: /// \brief Build 'read' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_read, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPReadClause() : OMPClause(OMPC_read, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_read; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents 'write' clause in the '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic write /// \endcode /// In this example directive '#pragma omp atomic' has 'write' clause. /// class OMPWriteClause : public OMPClause { public: /// \brief Build 'write' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. 
/// OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_write, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPWriteClause() : OMPClause(OMPC_write, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_write; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents 'update' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic update /// \endcode /// In this example directive '#pragma omp atomic' has 'update' clause. /// class OMPUpdateClause : public OMPClause { public: /// \brief Build 'update' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_update, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPUpdateClause() : OMPClause(OMPC_update, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_update; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents 'capture' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic capture /// \endcode /// In this example directive '#pragma omp atomic' has 'capture' clause. /// class OMPCaptureClause : public OMPClause { public: /// \brief Build 'capture' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_capture, StartLoc, EndLoc) {} /// \brief Build an empty clause. 
/// OMPCaptureClause() : OMPClause(OMPC_capture, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_capture; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents 'seq_cst' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic seq_cst /// \endcode /// In this example directive '#pragma omp atomic' has 'seq_cst' clause. /// class OMPSeqCstClause : public OMPClause { public: /// \brief Build 'seq_cst' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_seq_cst, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPSeqCstClause() : OMPClause(OMPC_seq_cst, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_seq_cst; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents clause 'private' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel private(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'private' /// with the variables 'a' and 'b'. /// class OMPPrivateClause final : public OMPVarListClause<OMPPrivateClause>, private llvm::TrailingObjects<OMPPrivateClause, Expr *> { friend TrailingObjects; friend OMPVarListClause; friend class OMPClauseReader; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. 
/// OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPPrivateClause>(OMPC_private, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPPrivateClause(unsigned N) : OMPVarListClause<OMPPrivateClause>(OMPC_private, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// \brief Sets the list of references to private copies with initializers for /// new private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// \brief Gets the list of references to private copies with initializers for /// new private variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param PrivateVL List of references to private copies with initializers. /// static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL); /// \brief Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
/// static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N); typedef MutableArrayRef<Expr *>::iterator private_copies_iterator; typedef ArrayRef<const Expr *>::iterator private_copies_const_iterator; typedef llvm::iterator_range<private_copies_iterator> private_copies_range; typedef llvm::iterator_range<private_copies_const_iterator> private_copies_const_range; private_copies_range private_copies() { return private_copies_range(getPrivateCopies().begin(), getPrivateCopies().end()); } private_copies_const_range private_copies() const { return private_copies_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_private; } }; /// \brief This represents clause 'firstprivate' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp parallel firstprivate(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'firstprivate' /// with the variables 'a' and 'b'. /// class OMPFirstprivateClause final : public OMPVarListClause<OMPFirstprivateClause>, public OMPClauseWithPreInit, private llvm::TrailingObjects<OMPFirstprivateClause, Expr *> { friend TrailingObjects; friend OMPVarListClause; friend class OMPClauseReader; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPFirstprivateClause>(OMPC_firstprivate, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPreInit(this) {} /// \brief Build an empty clause. /// /// \param N Number of variables. 
/// explicit OMPFirstprivateClause(unsigned N) : OMPVarListClause<OMPFirstprivateClause>( OMPC_firstprivate, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPreInit(this) {} /// \brief Sets the list of references to private copies with initializers for /// new private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// \brief Gets the list of references to private copies with initializers for /// new private variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// \brief Sets the list of references to initializer variables for new /// private variables. /// \param VL List of references. void setInits(ArrayRef<Expr *> VL); /// \brief Gets the list of references to initializer variables for new /// private variables. MutableArrayRef<Expr *> getInits() { return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size()); } ArrayRef<const Expr *> getInits() const { return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size()); } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the original variables. /// \param PrivateVL List of references to private copies with initializers. /// \param InitVL List of references to auto generated variables used for /// initialization of a single array element. Used if firstprivate variable is /// of array type. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. 
/// static OMPFirstprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL, ArrayRef<Expr *> InitVL, Stmt *PreInit); /// \brief Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N); typedef MutableArrayRef<Expr *>::iterator private_copies_iterator; typedef ArrayRef<const Expr *>::iterator private_copies_const_iterator; typedef llvm::iterator_range<private_copies_iterator> private_copies_range; typedef llvm::iterator_range<private_copies_const_iterator> private_copies_const_range; private_copies_range private_copies() { return private_copies_range(getPrivateCopies().begin(), getPrivateCopies().end()); } private_copies_const_range private_copies() const { return private_copies_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } typedef MutableArrayRef<Expr *>::iterator inits_iterator; typedef ArrayRef<const Expr *>::iterator inits_const_iterator; typedef llvm::iterator_range<inits_iterator> inits_range; typedef llvm::iterator_range<inits_const_iterator> inits_const_range; inits_range inits() { return inits_range(getInits().begin(), getInits().end()); } inits_const_range inits() const { return inits_const_range(getInits().begin(), getInits().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_firstprivate; } }; /// \brief This represents clause 'lastprivate' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp simd lastprivate(a,b) /// \endcode /// In this example directive '#pragma omp simd' has clause 'lastprivate' /// with the variables 'a' and 'b'. 
class OMPLastprivateClause final : public OMPVarListClause<OMPLastprivateClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPLastprivateClause, Expr *> { // There are 4 additional tail-allocated arrays at the end of the class: // 1. Contains list of pseudo variables with the default initialization for // each non-firstprivate variables. Used in codegen for initialization of // lastprivate copies. // 2. List of helper expressions for proper generation of assignment operation // required for lastprivate clause. This list represents private variables // (for arrays, single array element). // 3. List of helper expressions for proper generation of assignment operation // required for lastprivate clause. This list represents original variables // (for arrays, single array element). // 4. List of helper expressions that represents assignment operation: // \code // DstExprs = SrcExprs; // \endcode // Required for proper codegen of final assignment performed by the // lastprivate clause. // friend TrailingObjects; friend OMPVarListClause; friend class OMPClauseReader; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPLastprivateClause>(OMPC_lastprivate, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPostUpdate(this) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPLastprivateClause(unsigned N) : OMPVarListClause<OMPLastprivateClause>( OMPC_lastprivate, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPostUpdate(this) {} /// \brief Get the list of helper expressions for initialization of private /// copies for lastprivate variables. 
  /// \brief Get the list of private copies. These are stored in the trailing
  /// storage directly after the variable list itself.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }
  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private variables (for arrays, single
  /// array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);
  /// \brief Get the list of helper source expressions. Stored immediately
  /// after the private copies in the trailing storage.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }
  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent original variables (for arrays, single
  /// array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);
  /// \brief Get the list of helper destination expressions. Stored immediately
  /// after the source expressions in the trailing storage.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }
  /// \brief Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign private copy of the variable to original variable.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);
  /// \brief Get the list of helper assignment expressions. Stored immediately
  /// after the destination expressions in the trailing storage.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for lastprivate clause. This list represents
  /// private variables (for arrays, single array element).
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for lastprivate clause. This list represents
  /// original variables (for arrays, single array element).
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// lastprivate clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  ///
  static OMPLastprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps,
         Stmt *PreInit, Expr *PostUpdate);
  /// \brief Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  ///
  static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator;
  typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator;
  typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range;
  typedef llvm::iterator_range<helper_expr_const_iterator>
      helper_expr_const_range;

  /// \brief Set list of helper expressions, required for generation of private
  /// copies of original lastprivate variables.
  void setPrivateCopies(ArrayRef<Expr *> PrivateCopies);

  helper_expr_const_range private_copies() const {
    return helper_expr_const_range(getPrivateCopies().begin(),
                                   getPrivateCopies().end());
  }
  helper_expr_range private_copies() {
    return helper_expr_range(getPrivateCopies().begin(),
                             getPrivateCopies().end());
  }
  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }
  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }
  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }
  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }
  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }
  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_lastprivate;
  }
};

/// \brief This represents clause 'shared' in the '#pragma omp ...' directives.
/// /// \code /// #pragma omp parallel shared(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'shared' /// with the variables 'a' and 'b'. /// class OMPSharedClause final : public OMPVarListClause<OMPSharedClause>, private llvm::TrailingObjects<OMPSharedClause, Expr *> { friend TrailingObjects; friend OMPVarListClause; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPSharedClause>(OMPC_shared, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPSharedClause(unsigned N) : OMPVarListClause<OMPSharedClause>(OMPC_shared, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// \brief Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_shared; } }; /// \brief This represents clause 'reduction' in the '#pragma omp ...' /// directives. 
///
/// \code
/// #pragma omp parallel reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'reduction'
/// with operator '+' and the variables 'a' and 'b'.
///
class OMPReductionClause final
    : public OMPVarListClause<OMPReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPReductionClause, Expr *> {
  // Trailing storage layout, as established by the accessor chain below
  // (each list holds varlist_size() expressions):
  // { Vars[] /* in OMPVarListClause */; Privates[]; LHSExprs[]; RHSExprs[];
  //   ReductionOps[]; }
  friend TrailingObjects;
  friend OMPVarListClause;
  friend class OMPClauseReader;
  /// \brief Location of ':'.
  SourceLocation ColonLoc;
  /// \brief Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;
  /// \brief Name of custom operator.
  DeclarationNameInfo NameInfo;
  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  ///
  OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N,
                     NestedNameSpecifierLoc QualifierLoc,
                     const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPReductionClause>(OMPC_reduction, StartLoc,
                                             LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  ///
  explicit OMPReductionClause(unsigned N)
      : OMPVarListClause<OMPReductionClause>(OMPC_reduction, SourceLocation(),
                                             SourceLocation(), SourceLocation(),
                                             N),
        OMPClauseWithPostUpdate(this), ColonLoc(), QualifierLoc(),
        NameInfo() {}

  /// \brief Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }
  /// \brief Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }
  /// \brief Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }
  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private copy of the reduction
  /// variable.
  void setPrivates(ArrayRef<Expr *> Privates);
  /// \brief Get the list of helper privates. First tail-allocated list after
  /// the variable list.
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }
  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent LHS expression in the final
  /// reduction expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);
  /// \brief Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }
  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent RHS expression in the final
  /// reduction expression performed by the reduction clause.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);
  /// \brief Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }
  /// \brief Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);
  /// \brief Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  ///
  static OMPReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);
  /// \brief Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  ///
  static OMPReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// \brief Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }
  /// \brief Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }
  /// \brief Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator;
  typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator;
  typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range;
  typedef llvm::iterator_range<helper_expr_const_iterator>
      helper_expr_const_range;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }
  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_reduction;
  }
};

/// \brief This represents clause 'linear' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd linear(a,b : 2)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'linear'
/// with variables 'a', 'b' and linear step '2'.
///
class OMPLinearClause final
    : public OMPVarListClause<OMPLinearClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPLinearClause, Expr *> {
  friend TrailingObjects;
  friend OMPVarListClause;
  friend class OMPClauseReader;
  /// \brief Modifier of 'linear' clause.
  OpenMPLinearClauseKind Modifier;
  /// \brief Location of linear modifier if any.
  SourceLocation ModifierLoc;
  /// \brief Location of ':'.
  SourceLocation ColonLoc;

  /// \brief Sets the linear step for clause. The step occupies the first of
  /// the two helper slots placed right after the Finals[] list (see the
  /// layout comment at getPrivates()).
  void setStep(Expr *Step) { *(getFinals().end()) = Step; }

  /// \brief Sets the expression to calculate linear step for clause. Stored
  /// in the second helper slot after the Finals[] list.
  void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; }

  /// \brief Build 'linear' clause with given number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of variables.
  ///
  OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
                  SourceLocation ColonLoc, SourceLocation EndLoc,
                  unsigned NumVars)
      : OMPVarListClause<OMPLinearClause>(OMPC_linear, StartLoc, LParenLoc,
                                          EndLoc, NumVars),
        OMPClauseWithPostUpdate(this), Modifier(Modifier),
        ModifierLoc(ModifierLoc), ColonLoc(ColonLoc) {}

  /// \brief Build an empty clause.
  ///
  /// \param NumVars Number of variables.
  ///
  explicit OMPLinearClause(unsigned NumVars)
      : OMPVarListClause<OMPLinearClause>(OMPC_linear, SourceLocation(),
                                          SourceLocation(), SourceLocation(),
                                          NumVars),
        OMPClauseWithPostUpdate(this), Modifier(OMPC_LINEAR_val), ModifierLoc(),
        ColonLoc() {}

  /// \brief Gets the list of initial values for linear variables.
  ///
  /// There are NumVars expressions with initial values allocated after the
  /// varlist, they are followed by NumVars update expressions (used to update
  /// the linear variable's value on current iteration) and they are followed by
  /// NumVars final expressions (used to calculate the linear variable's
  /// value after the loop body). After these lists, there are 2 helper
  /// expressions - linear step and a helper to calculate it before the
  /// loop body (used when the linear step is not constant):
  ///
  /// { Vars[] /* in OMPVarListClause */; Privates[]; Inits[]; Updates[];
  /// Finals[]; Step; CalcStep; }
  ///
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// \brief Sets the list of update expressions for linear variables.
  MutableArrayRef<Expr *> getUpdates() {
    return MutableArrayRef<Expr *>(getInits().end(), varlist_size());
  }
  ArrayRef<const Expr *> getUpdates() const {
    return llvm::makeArrayRef(getInits().end(), varlist_size());
  }

  /// \brief Sets the list of final update expressions for linear variables.
  MutableArrayRef<Expr *> getFinals() {
    return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getFinals() const {
    return llvm::makeArrayRef(getUpdates().end(), varlist_size());
  }

  /// \brief Sets the list of the copies of original linear variables.
  /// \param PL List of expressions.
  void setPrivates(ArrayRef<Expr *> PL);

  /// \brief Sets the list of the initial values for linear variables.
  /// \param IL List of expressions.
  void setInits(ArrayRef<Expr *> IL);

public:
  /// \brief Creates clause with a list of variables \a VL and a linear step
  /// \a Step.
  ///
  /// \param C AST Context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Modifier Modifier of 'linear' clause.
  /// \param ModifierLoc Modifier location.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param PL List of private copies of original variables.
  /// \param IL List of initial values for the variables.
  /// \param Step Linear step.
  /// \param CalcStep Calculation of the linear step.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPLinearClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep,
         Stmt *PreInit, Expr *PostUpdate);

  /// \brief Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of variables.
  ///
  static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars);

  /// \brief Set modifier.
  void setModifier(OpenMPLinearClauseKind Kind) { Modifier = Kind; }
  /// \brief Return modifier.
  OpenMPLinearClauseKind getModifier() const { return Modifier; }

  /// \brief Set modifier location.
  void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }
  /// \brief Return modifier location.
  SourceLocation getModifierLoc() const { return ModifierLoc; }

  /// \brief Sets the location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
  /// \brief Returns the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// \brief Returns linear step.
  Expr *getStep() { return *(getFinals().end()); }
  /// \brief Returns linear step.
  const Expr *getStep() const { return *(getFinals().end()); }
  /// \brief Returns expression to calculate linear step.
  Expr *getCalcStep() { return *(getFinals().end() + 1); }
  /// \brief Returns expression to calculate linear step.
  const Expr *getCalcStep() const { return *(getFinals().end() + 1); }

  /// \brief Sets the list of update expressions for linear variables.
  /// \param UL List of expressions.
  void setUpdates(ArrayRef<Expr *> UL);

  /// \brief Sets the list of final update expressions for linear variables.
  /// \param FL List of expressions.
  void setFinals(ArrayRef<Expr *> FL);

  typedef MutableArrayRef<Expr *>::iterator privates_iterator;
  typedef ArrayRef<const Expr *>::iterator privates_const_iterator;
  typedef llvm::iterator_range<privates_iterator> privates_range;
  typedef llvm::iterator_range<privates_const_iterator> privates_const_range;

  privates_range privates() {
    return privates_range(getPrivates().begin(), getPrivates().end());
  }
  privates_const_range privates() const {
    return privates_const_range(getPrivates().begin(), getPrivates().end());
  }

  typedef MutableArrayRef<Expr *>::iterator inits_iterator;
  typedef ArrayRef<const Expr *>::iterator inits_const_iterator;
  typedef llvm::iterator_range<inits_iterator> inits_range;
  typedef llvm::iterator_range<inits_const_iterator> inits_const_range;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }
  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  typedef MutableArrayRef<Expr *>::iterator updates_iterator;
  typedef ArrayRef<const Expr *>::iterator updates_const_iterator;
  typedef llvm::iterator_range<updates_iterator> updates_range;
  typedef llvm::iterator_range<updates_const_iterator> updates_const_range;

  updates_range updates() {
    return updates_range(getUpdates().begin(), getUpdates().end());
  }
  updates_const_range updates() const {
    return updates_const_range(getUpdates().begin(), getUpdates().end());
  }

  typedef MutableArrayRef<Expr *>::iterator finals_iterator;
  typedef ArrayRef<const Expr *>::iterator finals_const_iterator;
  typedef llvm::iterator_range<finals_iterator> finals_range;
  typedef llvm::iterator_range<finals_const_iterator> finals_const_range;

  finals_range finals() {
    return finals_range(getFinals().begin(), getFinals().end());
  }
  finals_const_range finals() const {
    return finals_const_range(getFinals().begin(), getFinals().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_linear;
  }
};

/// \brief This represents clause 'aligned' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd aligned(a,b : 8)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'aligned'
/// with variables 'a', 'b' and alignment '8'.
///
class OMPAlignedClause final
    : public OMPVarListClause<OMPAlignedClause>,
      private llvm::TrailingObjects<OMPAlignedClause, Expr *> {
  friend TrailingObjects;
  friend OMPVarListClause;
  friend class OMPClauseReader;
  /// \brief Location of ':'.
  SourceLocation ColonLoc;

  /// \brief Sets the alignment for clause. The single alignment expression is
  /// stored in the one trailing slot right after the variable list.
  void setAlignment(Expr *A) { *varlist_end() = A; }

  /// \brief Build 'aligned' clause with given number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of variables.
  ///
  OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation ColonLoc, SourceLocation EndLoc,
                   unsigned NumVars)
      : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, StartLoc, LParenLoc,
                                           EndLoc, NumVars),
        ColonLoc(ColonLoc) {}

  /// \brief Build an empty clause.
  ///
  /// \param NumVars Number of variables.
  ///
  explicit OMPAlignedClause(unsigned NumVars)
      : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, SourceLocation(),
                                           SourceLocation(), SourceLocation(),
                                           NumVars),
        ColonLoc(SourceLocation()) {}

public:
  /// \brief Creates clause with a list of variables \a VL and alignment \a A.
  ///
  /// \param C AST Context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param A Alignment.
  static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation ColonLoc,
                                  SourceLocation EndLoc, ArrayRef<Expr *> VL,
                                  Expr *A);

  /// \brief Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of variables.
  ///
  static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars);

  /// \brief Sets the location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
  /// \brief Returns the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// \brief Returns alignment.
  Expr *getAlignment() { return *varlist_end(); }
  /// \brief Returns alignment.
  const Expr *getAlignment() const { return *varlist_end(); }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_aligned;
  }
};

/// \brief This represents clause 'copyin' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel copyin(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'copyin'
/// with the variables 'a' and 'b'.
///
class OMPCopyinClause final
    : public OMPVarListClause<OMPCopyinClause>,
      private llvm::TrailingObjects<OMPCopyinClause, Expr *> {
  // Class has 3 additional tail allocated arrays:
  // 1. List of helper expressions for proper generation of assignment operation
  // required for copyin clause. This list represents sources.
  // 2. List of helper expressions for proper generation of assignment operation
  // required for copyin clause. This list represents destinations.
  // 3. List of helper expressions that represents assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of propagation of master's thread values of
  // threadprivate variables to local instances of that variables in other
  // implicit threads.
  friend TrailingObjects;
  friend OMPVarListClause;
  friend class OMPClauseReader;
  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  ///
  OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, StartLoc, LParenLoc,
                                          EndLoc, N) {}

  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  ///
  explicit OMPCopyinClause(unsigned N)
      : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, SourceLocation(),
                                          SourceLocation(), SourceLocation(),
                                          N) {}

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyin clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// \brief Get the list of helper source expressions.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyin clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// \brief Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// \brief Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// \brief Get the list of helper assignment expressions.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyin clause. This list represents
  /// sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyin clause. This list represents
  /// destinations.
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of propagation of master's thread values of
  /// threadprivate variables to local instances of that variables in other
  /// implicit threads.
  ///
  static OMPCopyinClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);
  /// \brief Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  ///
  static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N);

  typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator;
  typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator;
  typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range;
  typedef llvm::iterator_range<helper_expr_const_iterator>
      helper_expr_const_range;

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }
  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }
  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }
  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }
  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }
  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_copyin;
  }
};

/// \brief This represents clause 'copyprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp single copyprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp single' has clause 'copyprivate'
/// with the variables 'a' and 'b'.
///
class OMPCopyprivateClause final
    : public OMPVarListClause<OMPCopyprivateClause>,
      private llvm::TrailingObjects<OMPCopyprivateClause, Expr *> {
  // Like OMPCopyinClause, this class has 3 additional tail allocated arrays
  // (sources, destinations and assignment operations), as established by the
  // accessor chain below.
  friend TrailingObjects;
  friend OMPVarListClause;
  friend class OMPClauseReader;
  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  ///
  OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(OMPC_copyprivate, StartLoc,
                                               LParenLoc, EndLoc, N) {}

  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  ///
  explicit OMPCopyprivateClause(unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(
            OMPC_copyprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N) {}

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// \brief Get the list of helper source expressions.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// \brief Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// \brief Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// \brief Get the list of helper assignment expressions.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// destinations.
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// copyprivate clause.
/// static OMPCopyprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs, ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps); /// \brief Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N); typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator; typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator; typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range; typedef llvm::iterator_range<helper_expr_const_iterator> helper_expr_const_range; helper_expr_const_range source_exprs() const { return helper_expr_const_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_range source_exprs() { return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_const_range destination_exprs() const { return helper_expr_const_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_range destination_exprs() { return helper_expr_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_const_range assignment_ops() const { return helper_expr_const_range(getAssignmentOps().begin(), getAssignmentOps().end()); } helper_expr_range assignment_ops() { return helper_expr_range(getAssignmentOps().begin(), getAssignmentOps().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_copyprivate; } }; /// \brief This represents implicit clause 'flush' for the '#pragma omp flush' /// directive. /// This clause does not exist by itself, it can be only as a part of 'omp /// flush' directive. 
/// This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has implicit clause 'flush'
/// with the variables 'a' and 'b'.
///
class OMPFlushClause final
    : public OMPVarListClause<OMPFlushClause>,
      private llvm::TrailingObjects<OMPFlushClause, Expr *> {
  friend TrailingObjects;
  friend OMPVarListClause;
  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  ///
  OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                 SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPFlushClause>(OMPC_flush, StartLoc, LParenLoc,
                                         EndLoc, N) {}

  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  ///
  explicit OMPFlushClause(unsigned N)
      : OMPVarListClause<OMPFlushClause>(OMPC_flush, SourceLocation(),
                                         SourceLocation(), SourceLocation(),
                                         N) {}

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  ///
  static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                SourceLocation LParenLoc,
                                SourceLocation EndLoc, ArrayRef<Expr *> VL);
  /// \brief Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  ///
  static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_flush;
  }
};

/// \brief This represents implicit clause 'depend' for the '#pragma omp task'
/// directive.
///
/// \code
/// #pragma omp task depend(in:a,b)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'depend' with the
/// variables 'a' and 'b' with dependency 'in'.
///
class OMPDependClause final
    : public OMPVarListClause<OMPDependClause>,
      private llvm::TrailingObjects<OMPDependClause, Expr *> {
  friend TrailingObjects;
  friend OMPVarListClause;
  friend class OMPClauseReader;
  /// \brief Dependency type (one of in, out, inout).
  OpenMPDependClauseKind DepKind;
  /// \brief Dependency type location.
  SourceLocation DepLoc;
  /// \brief Colon location.
  SourceLocation ColonLoc;
  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  ///
  OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPDependClause>(OMPC_depend, StartLoc, LParenLoc,
                                          EndLoc, N),
        DepKind(OMPC_DEPEND_unknown) {}

  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  ///
  explicit OMPDependClause(unsigned N)
      : OMPVarListClause<OMPDependClause>(OMPC_depend, SourceLocation(),
                                          SourceLocation(), SourceLocation(),
                                          N),
        DepKind(OMPC_DEPEND_unknown) {}
  /// \brief Set dependency kind.
  void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; }
  /// \brief Set dependency kind and its location.
  void setDependencyLoc(SourceLocation Loc) { DepLoc = Loc; }
  /// \brief Set colon location.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param DepKind Dependency type.
  /// \param DepLoc Location of the dependency type.
  /// \param ColonLoc Colon location.
  /// \param VL List of references to the variables.
  ///
  static OMPDependClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, OpenMPDependClauseKind DepKind,
         SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VL);
  /// \brief Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  ///
  static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N);
  /// \brief Get dependency type.
  OpenMPDependClauseKind getDependencyKind() const { return DepKind; }
  /// \brief Get dependency type location.
  SourceLocation getDependencyLoc() const { return DepLoc; }
  /// \brief Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_depend;
  }
};

/// \brief This represents 'device' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp target device(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'device'
/// with single expression 'a'.
///
class OMPDeviceClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Device number.
  Stmt *Device;
  /// \brief Set the device number.
  ///
  /// \param E Device number.
  ///
  void setDevice(Expr *E) { Device = E; }

public:
  /// \brief Build 'device' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPDeviceClause(Expr *E, SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc)
      : OMPClause(OMPC_device, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Device(E) {}
  /// \brief Build an empty clause.
  ///
  OMPDeviceClause()
      : OMPClause(OMPC_device, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), Device(nullptr) {}
  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// \brief Return device number.
  Expr *getDevice() { return cast<Expr>(Device); }
  /// \brief Return device number.
  // NOTE(review): this const overload returns a non-const Expr*, exposing
  // mutable AST from a const clause — consider returning const Expr* here.
  Expr *getDevice() const { return cast<Expr>(Device); }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_device;
  }
  child_range children() { return child_range(&Device, &Device + 1); }
};

/// \brief This represents 'threads' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered threads
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'threads'
/// clause.
///
class OMPThreadsClause : public OMPClause {
public:
  /// \brief Build 'threads' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_threads, StartLoc, EndLoc) {}
  /// \brief Build an empty clause.
  ///
  OMPThreadsClause()
      : OMPClause(OMPC_threads, SourceLocation(), SourceLocation()) {}
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_threads;
  }
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

/// \brief This represents 'simd' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered simd
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'simd' clause.
///
class OMPSIMDClause : public OMPClause {
public:
  /// \brief Build 'simd' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_simd, StartLoc, EndLoc) {}
  /// \brief Build an empty clause.
  ///
  OMPSIMDClause() : OMPClause(OMPC_simd, SourceLocation(), SourceLocation()) {}
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_simd;
  }
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

/// \brief Struct that defines common infrastructure to handle mappable
/// expressions used in OpenMP clauses.
class OMPClauseMappableExprCommon {
public:
  // \brief Class that represents a component of a mappable expression. E.g.
  // for an expression S.a, the first component is a declaration reference
  // expression associated with 'S' and the second is a member expression
  // associated with the field declaration 'a'. If the expression is an array
  // subscript it may not have any associated declaration. In that case the
  // associated declaration is set to nullptr.
  class MappableComponent {
    // \brief Expression associated with the component.
    Expr *AssociatedExpression = nullptr;
    // \brief Declaration associated with the component. If the component does
    // not have a declaration (e.g. array subscripts or section), this is set
    // to nullptr.
    ValueDecl *AssociatedDeclaration = nullptr;

  public:
    explicit MappableComponent() {}
    // Canonicalize the declaration on construction so that components for the
    // same entity compare equal regardless of which redeclaration was seen.
    explicit MappableComponent(Expr *AssociatedExpression,
                               ValueDecl *AssociatedDeclaration)
        : AssociatedExpression(AssociatedExpression),
          AssociatedDeclaration(
              AssociatedDeclaration
                  ? cast<ValueDecl>(AssociatedDeclaration->getCanonicalDecl())
                  : nullptr) {}

    Expr *getAssociatedExpression() const { return AssociatedExpression; }
    ValueDecl *getAssociatedDeclaration() const {
      return AssociatedDeclaration;
    }
  };

  // \brief List of components of an expression. This first one is the whole
  // expression and the last one is the base expression.
  typedef SmallVector<MappableComponent, 8> MappableExprComponentList;
  typedef ArrayRef<MappableComponent> MappableExprComponentListRef;

  // \brief List of all component lists associated to the same base
  // declaration. E.g. if both 'S.a' and 'S.b' are a mappable expressions, each
  // will have their component list but the same base declaration 'S'.
  typedef SmallVector<MappableExprComponentList, 8> MappableExprComponentLists;
  typedef ArrayRef<MappableExprComponentList> MappableExprComponentListsRef;

protected:
  // \brief Return the total number of elements in a list of component lists.
  static unsigned
  getComponentsTotalNumber(MappableExprComponentListsRef ComponentLists);

  // \brief Return the total number of elements in a list of declarations. All
  // declarations are expected to be canonical.
  static unsigned
  getUniqueDeclarationsTotalNumber(ArrayRef<ValueDecl *> Declarations);
};

/// \brief This represents clauses with a list of expressions that are
/// mappable. Examples of these clauses are 'map' in
/// '#pragma omp target [enter|exit] [data]...' directives, and 'to' and 'from'
/// in '#pragma omp target update...' directives.
template <class T>
class OMPMappableExprListClause : public OMPVarListClause<T>,
                                  public OMPClauseMappableExprCommon {
  friend class OMPClauseReader;

  /// \brief Number of unique declarations in this clause.
  unsigned NumUniqueDeclarations;

  /// \brief Number of component lists in this clause.
  unsigned NumComponentLists;

  /// \brief Total number of components in this clause.
  unsigned NumComponents;

protected:
  /// \brief Get the unique declarations that are in the trailing objects of
  /// the class.
MutableArrayRef<ValueDecl *> getUniqueDeclsRef() {
    return MutableArrayRef<ValueDecl *>(
        static_cast<T *>(this)->template getTrailingObjects<ValueDecl *>(),
        NumUniqueDeclarations);
  }

  /// \brief Get the unique declarations that are in the trailing objects of
  /// the class.
  ArrayRef<ValueDecl *> getUniqueDeclsRef() const {
    return ArrayRef<ValueDecl *>(
        static_cast<const T *>(this)
            ->template getTrailingObjects<ValueDecl *>(),
        NumUniqueDeclarations);
  }

  /// \brief Set the unique declarations that are in the trailing objects of
  /// the class.
  void setUniqueDecls(ArrayRef<ValueDecl *> UDs) {
    assert(UDs.size() == NumUniqueDeclarations &&
           "Unexpected amount of unique declarations.");
    std::copy(UDs.begin(), UDs.end(), getUniqueDeclsRef().begin());
  }

  /// \brief Get the number of lists per declaration that are in the trailing
  /// objects of the class.
  // The trailing `unsigned` storage holds two consecutive arrays: first
  // NumUniqueDeclarations entries (lists per declaration), then
  // NumComponentLists entries (cumulative component-list sizes).
  MutableArrayRef<unsigned> getDeclNumListsRef() {
    return MutableArrayRef<unsigned>(
        static_cast<T *>(this)->template getTrailingObjects<unsigned>(),
        NumUniqueDeclarations);
  }

  /// \brief Get the number of lists per declaration that are in the trailing
  /// objects of the class.
  ArrayRef<unsigned> getDeclNumListsRef() const {
    return ArrayRef<unsigned>(
        static_cast<const T *>(this)->template getTrailingObjects<unsigned>(),
        NumUniqueDeclarations);
  }

  /// \brief Set the number of lists per declaration that are in the trailing
  /// objects of the class.
  void setDeclNumLists(ArrayRef<unsigned> DNLs) {
    assert(DNLs.size() == NumUniqueDeclarations &&
           "Unexpected amount of list numbers.");
    std::copy(DNLs.begin(), DNLs.end(), getDeclNumListsRef().begin());
  }

  /// \brief Get the cumulative component lists sizes that are in the trailing
  /// objects of the class. They are appended after the number of lists.
  MutableArrayRef<unsigned> getComponentListSizesRef() {
    return MutableArrayRef<unsigned>(
        static_cast<T *>(this)->template getTrailingObjects<unsigned>() +
            NumUniqueDeclarations,
        NumComponentLists);
  }

  /// \brief Get the cumulative component lists sizes that are in the trailing
  /// objects of the class. They are appended after the number of lists.
  ArrayRef<unsigned> getComponentListSizesRef() const {
    return ArrayRef<unsigned>(
        static_cast<const T *>(this)->template getTrailingObjects<unsigned>() +
            NumUniqueDeclarations,
        NumComponentLists);
  }

  /// \brief Set the cumulative component lists sizes that are in the trailing
  /// objects of the class.
  void setComponentListSizes(ArrayRef<unsigned> CLSs) {
    assert(CLSs.size() == NumComponentLists &&
           "Unexpected amount of component lists.");
    std::copy(CLSs.begin(), CLSs.end(), getComponentListSizesRef().begin());
  }

  /// \brief Get the components that are in the trailing objects of the class.
  MutableArrayRef<MappableComponent> getComponentsRef() {
    return MutableArrayRef<MappableComponent>(
        static_cast<T *>(this)
            ->template getTrailingObjects<MappableComponent>(),
        NumComponents);
  }

  /// \brief Get the components that are in the trailing objects of the class.
  ArrayRef<MappableComponent> getComponentsRef() const {
    return ArrayRef<MappableComponent>(
        static_cast<const T *>(this)
            ->template getTrailingObjects<MappableComponent>(),
        NumComponents);
  }

  /// \brief Set the components that are in the trailing objects of the class.
  /// This requires the list sizes so that it can also fill the original
  /// expressions, which are the first component of each list.
void setComponents(ArrayRef<MappableComponent> Components,
                     ArrayRef<unsigned> CLSs) {
    assert(Components.size() == NumComponents &&
           "Unexpected amount of component lists.");
    assert(CLSs.size() == NumComponentLists &&
           "Unexpected amount of list sizes.");
    std::copy(Components.begin(), Components.end(), getComponentsRef().begin());
  }

  /// \brief Fill the clause information from the list of declarations and
  /// associated component lists.
  void setClauseInfo(ArrayRef<ValueDecl *> Declarations,
                     MappableExprComponentListsRef ComponentLists) {
    // Perform some checks to make sure the data sizes are consistent with the
    // information available when the clause was created.
    assert(getUniqueDeclarationsTotalNumber(Declarations) ==
               NumUniqueDeclarations &&
           "Unexpected number of mappable expression info entries!");
    assert(getComponentsTotalNumber(ComponentLists) == NumComponents &&
           "Unexpected total number of components!");
    assert(Declarations.size() == ComponentLists.size() &&
           "Declaration and component lists size is not consistent!");
    assert(Declarations.size() == NumComponentLists &&
           "Unexpected declaration and component lists size!");

    // Organize the components by declaration and retrieve the original
    // expression. Original expressions are always the first component of the
    // mappable component list.
    llvm::DenseMap<ValueDecl *, SmallVector<MappableExprComponentListRef, 8>>
        ComponentListMap;
    {
      auto CI = ComponentLists.begin();
      for (auto DI = Declarations.begin(), DE = Declarations.end(); DI != DE;
           ++DI, ++CI) {
        assert(!CI->empty() && "Invalid component list!");
        ComponentListMap[*DI].push_back(*CI);
      }
    }

    // Iterators of the target storage.
    auto UniqueDeclarations = getUniqueDeclsRef();
    auto UDI = UniqueDeclarations.begin();

    auto DeclNumLists = getDeclNumListsRef();
    auto DNLI = DeclNumLists.begin();

    auto ComponentListSizes = getComponentListSizesRef();
    auto CLSI = ComponentListSizes.begin();

    auto Components = getComponentsRef();
    auto CI = Components.begin();

    // Variable to compute the accumulation of the number of components.
    unsigned PrevSize = 0u;

    // Scan all the declarations and associated component lists.
    for (auto &M : ComponentListMap) {
      // The declaration.
      auto *D = M.first;
      // The component lists.
      auto CL = M.second;

      // Initialize the entry.
      *UDI = D;
      ++UDI;

      *DNLI = CL.size();
      ++DNLI;

      // Obtain the cumulative sizes and concatenate all the components in the
      // reserved storage.
      for (auto C : CL) {
        // Accumulate with the previous size.
        PrevSize += C.size();

        // Save the size.
        *CLSI = PrevSize;
        ++CLSI;

        // Append components after the current components iterator.
        CI = std::copy(C.begin(), C.end(), CI);
      }
    }
  }

  /// \brief Build a clause for \a NumUniqueDeclarations declarations, \a
  /// NumComponentLists total component lists, and \a NumComponents total
  /// components.
  ///
  /// \param K Kind of the clause.
  /// \param StartLoc Starting location of the clause (the clause keyword).
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of expressions listed in the clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause - one
  /// list for each expression in the clause.
  /// \param NumComponents Total number of expression components in the clause.
///
  OMPMappableExprListClause(OpenMPClauseKind K, SourceLocation StartLoc,
                            SourceLocation LParenLoc, SourceLocation EndLoc,
                            unsigned NumVars, unsigned NumUniqueDeclarations,
                            unsigned NumComponentLists, unsigned NumComponents)
      : OMPVarListClause<T>(K, StartLoc, LParenLoc, EndLoc, NumVars),
        NumUniqueDeclarations(NumUniqueDeclarations),
        NumComponentLists(NumComponentLists), NumComponents(NumComponents) {}

public:
  /// \brief Return the number of unique base declarations in this clause.
  unsigned getUniqueDeclarationsNum() const { return NumUniqueDeclarations; }
  /// \brief Return the number of lists derived from the clause expressions.
  unsigned getTotalComponentListNum() const { return NumComponentLists; }
  /// \brief Return the total number of components in all lists derived from
  /// the clause.
  unsigned getTotalComponentsNum() const { return NumComponents; }

  /// \brief Iterator that browse the components by lists. It also allows
  /// browsing components of a single declaration.
  class const_component_lists_iterator
      : public llvm::iterator_adaptor_base<
            const_component_lists_iterator,
            MappableExprComponentListRef::const_iterator,
            std::forward_iterator_tag, MappableComponent, ptrdiff_t,
            MappableComponent, MappableComponent> {
    // The declaration the iterator currently refers to.
    ArrayRef<ValueDecl *>::iterator DeclCur;

    // The list number associated with the current declaration.
    ArrayRef<unsigned>::iterator NumListsCur;

    // Remaining lists for the current declaration.
    unsigned RemainingLists;

    // The cumulative size of the previous list, or zero if there is no
    // previous list.
    unsigned PrevListSize;

    // The cumulative sizes of the current list - it will delimit the remaining
    // range of interest.
    ArrayRef<unsigned>::const_iterator ListSizeCur;
    ArrayRef<unsigned>::const_iterator ListSizeEnd;

    // Iterator to the end of the components storage.
    MappableExprComponentListRef::const_iterator End;

  public:
    /// \brief Construct an iterator that scans all lists.
    explicit const_component_lists_iterator(
        ArrayRef<ValueDecl *> UniqueDecls, ArrayRef<unsigned> DeclsListNum,
        ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components)
        : const_component_lists_iterator::iterator_adaptor_base(
              Components.begin()),
          DeclCur(UniqueDecls.begin()), NumListsCur(DeclsListNum.begin()),
          RemainingLists(0u), PrevListSize(0u),
          ListSizeCur(CumulativeListSizes.begin()),
          ListSizeEnd(CumulativeListSizes.end()), End(Components.end()) {
      assert(UniqueDecls.size() == DeclsListNum.size() &&
             "Inconsistent number of declarations and list sizes!");
      if (!DeclsListNum.empty())
        RemainingLists = *NumListsCur;
    }

    /// \brief Construct an iterator that scan lists for a given declaration
    /// \a Declaration.
    explicit const_component_lists_iterator(
        const ValueDecl *Declaration, ArrayRef<ValueDecl *> UniqueDecls,
        ArrayRef<unsigned> DeclsListNum, ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components)
        : const_component_lists_iterator(UniqueDecls, DeclsListNum,
                                         CumulativeListSizes, Components) {
      // Look for the desired declaration. While we are looking for it, we
      // update the state so that we know the component where a given list
      // starts.
      for (; DeclCur != UniqueDecls.end(); ++DeclCur, ++NumListsCur) {
        if (*DeclCur == Declaration)
          break;

        assert(*NumListsCur > 0 && "No lists associated with declaration??");

        // Skip the lists associated with the current declaration, but save the
        // last list size that was skipped.
        std::advance(ListSizeCur, *NumListsCur - 1);
        PrevListSize = *ListSizeCur;
        ++ListSizeCur;
      }

      // If we didn't find any declaration, advance the iterator to after the
      // last component and set remaining lists to zero.
      if (ListSizeCur == CumulativeListSizes.end()) {
        this->I = End;
        RemainingLists = 0u;
        return;
      }

      // Set the remaining lists with the total number of lists of the current
      // declaration.
      RemainingLists = *NumListsCur;

      // Adjust the list size end iterator to the end of the relevant range.
      ListSizeEnd = ListSizeCur;
      std::advance(ListSizeEnd, RemainingLists);

      // Given that the list sizes are cumulative, the index of the component
      // that start the list is the size of the previous list.
      std::advance(this->I, PrevListSize);
    }

    // Return the array with the current list. The sizes are cumulative, so the
    // array size is the difference between the current size and previous one.
    std::pair<const ValueDecl *, MappableExprComponentListRef>
    operator*() const {
      assert(ListSizeCur != ListSizeEnd && "Invalid iterator!");
      return std::make_pair(
          *DeclCur,
          MappableExprComponentListRef(&*this->I, *ListSizeCur - PrevListSize));
    }
    std::pair<const ValueDecl *, MappableExprComponentListRef>
    operator->() const {
      return **this;
    }

    // Skip the components of the current list.
    const_component_lists_iterator &operator++() {
      assert(ListSizeCur != ListSizeEnd && RemainingLists &&
             "Invalid iterator!");

      // If we don't have more lists just skip all the components. Otherwise,
      // advance the iterator by the number of components in the current list.
      if (std::next(ListSizeCur) == ListSizeEnd) {
        this->I = End;
        RemainingLists = 0;
      } else {
        std::advance(this->I, *ListSizeCur - PrevListSize);
        PrevListSize = *ListSizeCur;

        // We are done with a declaration, move to the next one.
        if (!(--RemainingLists)) {
          ++DeclCur;
          ++NumListsCur;
          RemainingLists = *NumListsCur;
          assert(RemainingLists && "No lists in the following declaration??");
        }
      }
      ++ListSizeCur;
      return *this;
    }
  };

  typedef llvm::iterator_range<const_component_lists_iterator>
      const_component_lists_range;

  /// \brief Iterators for all component lists.
const_component_lists_iterator component_lists_begin() const {
    return const_component_lists_iterator(
        getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(),
        getComponentsRef());
  }
  const_component_lists_iterator component_lists_end() const {
    return const_component_lists_iterator(
        ArrayRef<ValueDecl *>(), ArrayRef<unsigned>(), ArrayRef<unsigned>(),
        MappableExprComponentListRef(getComponentsRef().end(),
                                     getComponentsRef().end()));
  }
  const_component_lists_range component_lists() const {
    return {component_lists_begin(), component_lists_end()};
  }

  /// \brief Iterators for component lists associated with the provided
  /// declaration.
  const_component_lists_iterator
  decl_component_lists_begin(const ValueDecl *VD) const {
    return const_component_lists_iterator(
        VD, getUniqueDeclsRef(), getDeclNumListsRef(),
        getComponentListSizesRef(), getComponentsRef());
  }
  const_component_lists_iterator decl_component_lists_end() const {
    return component_lists_end();
  }
  const_component_lists_range decl_component_lists(const ValueDecl *VD) const {
    return {decl_component_lists_begin(VD), decl_component_lists_end()};
  }

  /// Iterators to access all the declarations, number of lists, list sizes,
  /// and components.
  typedef ArrayRef<ValueDecl *>::iterator const_all_decls_iterator;
  typedef llvm::iterator_range<const_all_decls_iterator> const_all_decls_range;
  const_all_decls_range all_decls() const {
    auto A = getUniqueDeclsRef();
    return const_all_decls_range(A.begin(), A.end());
  }
  typedef ArrayRef<unsigned>::iterator const_all_num_lists_iterator;
  typedef llvm::iterator_range<const_all_num_lists_iterator>
      const_all_num_lists_range;
  const_all_num_lists_range all_num_lists() const {
    auto A = getDeclNumListsRef();
    return const_all_num_lists_range(A.begin(), A.end());
  }
  typedef ArrayRef<unsigned>::iterator const_all_lists_sizes_iterator;
  typedef llvm::iterator_range<const_all_lists_sizes_iterator>
      const_all_lists_sizes_range;
  const_all_lists_sizes_range all_lists_sizes() const {
    auto A = getComponentListSizesRef();
    return const_all_lists_sizes_range(A.begin(), A.end());
  }
  typedef ArrayRef<MappableComponent>::iterator const_all_components_iterator;
  typedef llvm::iterator_range<const_all_components_iterator>
      const_all_components_range;
  const_all_components_range all_components() const {
    auto A = getComponentsRef();
    return const_all_components_range(A.begin(), A.end());
  }
};

/// \brief This represents clause 'map' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target map(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'map'
/// with the variables 'a' and 'b'.
///
class OMPMapClause final : public OMPMappableExprListClause<OMPMapClause>,
                           private llvm::TrailingObjects<
                               OMPMapClause, Expr *, ValueDecl *, unsigned,
                               OMPClauseMappableExprCommon::MappableComponent> {
  friend TrailingObjects;
  friend OMPVarListClause;
  friend OMPMappableExprListClause;
  friend class OMPClauseReader;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
    return varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    // The unsigned storage holds both the per-declaration list counts and the
    // cumulative component-list sizes.
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

  /// \brief Map type modifier for the 'map' clause.
  OpenMPMapClauseKind MapTypeModifier;
  /// \brief Map type for the 'map' clause.
  OpenMPMapClauseKind MapType;
  /// \brief Is this an implicit map type or not.
  bool MapTypeIsImplicit;
  /// \brief Location of the map type.
  SourceLocation MapLoc;
  /// \brief Colon location.
  SourceLocation ColonLoc;

  /// \brief Set type modifier for the clause.
  ///
  /// \param T Type Modifier for the clause.
  ///
  void setMapTypeModifier(OpenMPMapClauseKind T) { MapTypeModifier = T; }

  /// \brief Set type for the clause.
  ///
  /// \param T Type for the clause.
  ///
  void setMapType(OpenMPMapClauseKind T) { MapType = T; }

  /// \brief Set type location.
  ///
  /// \param TLoc Type location.
  ///
  void setMapLoc(SourceLocation TLoc) { MapLoc = TLoc; }

  /// \brief Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// \brief Build a clause for \a NumVars listed expressions, \a
  /// NumUniqueDeclarations declarations, \a NumComponentLists total component
  /// lists, and \a NumComponents total expression components.
  ///
  /// \param MapTypeModifier Map type modifier.
  /// \param MapType Map type.
  /// \param MapTypeIsImplicit Map type is inferred implicitly.
  /// \param MapLoc Location of the map type.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  ///
  explicit OMPMapClause(OpenMPMapClauseKind MapTypeModifier,
                        OpenMPMapClauseKind MapType, bool MapTypeIsImplicit,
                        SourceLocation MapLoc, SourceLocation StartLoc,
                        SourceLocation LParenLoc, SourceLocation EndLoc,
                        unsigned NumVars, unsigned NumUniqueDeclarations,
                        unsigned NumComponentLists, unsigned NumComponents)
      : OMPMappableExprListClause(OMPC_map, StartLoc, LParenLoc, EndLoc,
                                  NumVars, NumUniqueDeclarations,
                                  NumComponentLists, NumComponents),
        MapTypeModifier(MapTypeModifier), MapType(MapType),
        MapTypeIsImplicit(MapTypeIsImplicit), MapLoc(MapLoc) {}

  /// \brief Build an empty clause.
  ///
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  ///
  explicit OMPMapClause(unsigned NumVars, unsigned NumUniqueDeclarations,
                        unsigned NumComponentLists, unsigned NumComponents)
      : OMPMappableExprListClause(
            OMPC_map, SourceLocation(), SourceLocation(), SourceLocation(),
            NumVars, NumUniqueDeclarations, NumComponentLists, NumComponents),
        MapTypeModifier(OMPC_MAP_unknown), MapType(OMPC_MAP_unknown),
        MapTypeIsImplicit(false), MapLoc() {}

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param TypeModifier Map type modifier.
  /// \param Type Map type.
  /// \param TypeIsImplicit Map type is inferred implicitly.
  /// \param TypeLoc Location of the map type.
/// static OMPMapClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists, OpenMPMapClauseKind TypeModifier, OpenMPMapClauseKind Type, bool TypeIsImplicit, SourceLocation TypeLoc); /// \brief Creates an empty clause with the place for for \a NumVars original /// expressions, \a NumUniqueDeclarations declarations, \NumComponentLists /// lists, and \a NumComponents expression components. /// /// \param C AST context. /// \param NumVars Number of expressions listed in the clause. /// \param NumUniqueDeclarations Number of unique base declarations in this /// clause. /// \param NumComponentLists Number of unique base declarations in this /// clause. /// \param NumComponents Total number of expression components in the clause. /// static OMPMapClause *CreateEmpty(const ASTContext &C, unsigned NumVars, unsigned NumUniqueDeclarations, unsigned NumComponentLists, unsigned NumComponents); /// \brief Fetches mapping kind for the clause. OpenMPMapClauseKind getMapType() const LLVM_READONLY { return MapType; } /// \brief Is this an implicit map type? /// We have to capture 'IsMapTypeImplicit' from the parser for more /// informative error messages. It helps distinguish map(r) from /// map(tofrom: r), which is important to print more helpful error /// messages for some target directives. bool isImplicitMapType() const LLVM_READONLY { return MapTypeIsImplicit; } /// \brief Fetches the map type modifier for the clause. OpenMPMapClauseKind getMapTypeModifier() const LLVM_READONLY { return MapTypeModifier; } /// \brief Fetches location of clause mapping kind. SourceLocation getMapLoc() const LLVM_READONLY { return MapLoc; } /// \brief Get colon location. 
SourceLocation getColonLoc() const { return ColonLoc; }

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == OMPC_map;
}

// The children of a 'map' clause are the listed expressions.
child_range children() {
  return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                     reinterpret_cast<Stmt **>(varlist_end()));
}
};

/// \brief This represents 'num_teams' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams num_teams(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'num_teams'
/// with single expression 'n'.
///
class OMPNumTeamsClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief NumTeams number.
  Stmt *NumTeams;
  /// \brief Set the NumTeams number.
  ///
  /// \param E NumTeams number.
  ///
  void setNumTeams(Expr *E) { NumTeams = E; }

public:
  /// \brief Build 'num_teams' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPNumTeamsClause(Expr *E, SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_num_teams, StartLoc, EndLoc), LParenLoc(LParenLoc),
        NumTeams(E) {}

  /// \brief Build an empty clause.
  ///
  OMPNumTeamsClause()
      : OMPClause(OMPC_num_teams, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), NumTeams(nullptr) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// \brief Return NumTeams number.
  Expr *getNumTeams() { return cast<Expr>(NumTeams); }
  /// \brief Return NumTeams number.
Expr *getNumTeams() const { return cast<Expr>(NumTeams); }

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == OMPC_num_teams;
}

child_range children() { return child_range(&NumTeams, &NumTeams + 1); }
};

/// \brief This represents 'thread_limit' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams thread_limit(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'thread_limit'
/// with single expression 'n'.
///
class OMPThreadLimitClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief ThreadLimit number.
  Stmt *ThreadLimit;
  /// \brief Set the ThreadLimit number.
  ///
  /// \param E ThreadLimit number.
  ///
  void setThreadLimit(Expr *E) { ThreadLimit = E; }

public:
  /// \brief Build 'thread_limit' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPThreadLimitClause(Expr *E, SourceLocation StartLoc,
                       SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_thread_limit, StartLoc, EndLoc), LParenLoc(LParenLoc),
        ThreadLimit(E) {}

  /// \brief Build an empty clause.
  ///
  OMPThreadLimitClause()
      : OMPClause(OMPC_thread_limit, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), ThreadLimit(nullptr) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// \brief Return ThreadLimit number.
  Expr *getThreadLimit() { return cast<Expr>(ThreadLimit); }
  /// \brief Return ThreadLimit number.
Expr *getThreadLimit() const { return cast<Expr>(ThreadLimit); }

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == OMPC_thread_limit;
}

child_range children() { return child_range(&ThreadLimit, &ThreadLimit + 1); }
};

/// \brief This represents 'priority' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task priority(n)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'priority' with
/// single expression 'n'.
///
class OMPPriorityClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Priority number.
  Stmt *Priority;
  /// \brief Set the Priority number.
  ///
  /// \param E Priority number.
  ///
  void setPriority(Expr *E) { Priority = E; }

public:
  /// \brief Build 'priority' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPPriorityClause(Expr *E, SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_priority, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Priority(E) {}

  /// \brief Build an empty clause.
  ///
  OMPPriorityClause()
      : OMPClause(OMPC_priority, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), Priority(nullptr) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// \brief Return Priority number.
  Expr *getPriority() { return cast<Expr>(Priority); }
  /// \brief Return Priority number.
  Expr *getPriority() const { return cast<Expr>(Priority); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_priority;
  }

  child_range children() { return child_range(&Priority, &Priority + 1); }
};

/// \brief This represents 'grainsize' clause in the '#pragma omp ...'
/// directive. /// /// \code /// #pragma omp taskloop grainsize(4) /// \endcode /// In this example directive '#pragma omp taskloop' has clause 'grainsize' /// with single expression '4'. /// class OMPGrainsizeClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Safe iteration space distance. Stmt *Grainsize; /// \brief Set safelen. void setGrainsize(Expr *Size) { Grainsize = Size; } public: /// \brief Build 'grainsize' clause. /// /// \param Size Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_grainsize, StartLoc, EndLoc), LParenLoc(LParenLoc), Grainsize(Size) {} /// \brief Build an empty clause. /// explicit OMPGrainsizeClause() : OMPClause(OMPC_grainsize, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Grainsize(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Return safe iteration space distance. Expr *getGrainsize() const { return cast_or_null<Expr>(Grainsize); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_grainsize; } child_range children() { return child_range(&Grainsize, &Grainsize + 1); } }; /// \brief This represents 'nogroup' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp taskloop nogroup /// \endcode /// In this example directive '#pragma omp taskloop' has 'nogroup' clause. /// class OMPNogroupClause : public OMPClause { public: /// \brief Build 'nogroup' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. 
/// OMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_nogroup, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPNogroupClause() : OMPClause(OMPC_nogroup, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_nogroup; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents 'num_tasks' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp taskloop num_tasks(4) /// \endcode /// In this example directive '#pragma omp taskloop' has clause 'num_tasks' /// with single expression '4'. /// class OMPNumTasksClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Safe iteration space distance. Stmt *NumTasks; /// \brief Set safelen. void setNumTasks(Expr *Size) { NumTasks = Size; } public: /// \brief Build 'num_tasks' clause. /// /// \param Size Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPNumTasksClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_num_tasks, StartLoc, EndLoc), LParenLoc(LParenLoc), NumTasks(Size) {} /// \brief Build an empty clause. /// explicit OMPNumTasksClause() : OMPClause(OMPC_num_tasks, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), NumTasks(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Return safe iteration space distance. 
Expr *getNumTasks() const { return cast_or_null<Expr>(NumTasks); }

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == OMPC_num_tasks;
}

child_range children() { return child_range(&NumTasks, &NumTasks + 1); }
};

/// \brief This represents 'hint' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp critical (name) hint(6)
/// \endcode
/// In this example directive '#pragma omp critical' has name 'name' and clause
/// 'hint' with argument '6'.
///
class OMPHintClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Hint expression of the 'hint' clause.
  Stmt *Hint;
  /// \brief Set hint expression.
  ///
  void setHint(Expr *H) { Hint = H; }

public:
  /// \brief Build 'hint' clause with expression \a Hint.
  ///
  /// \param Hint Hint expression.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc,
                SourceLocation EndLoc)
      : OMPClause(OMPC_hint, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Hint(Hint) {}

  /// \brief Build an empty clause.
  ///
  OMPHintClause()
      : OMPClause(OMPC_hint, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), Hint(nullptr) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// \brief Returns the hint expression of the clause.
  Expr *getHint() const { return cast_or_null<Expr>(Hint); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_hint;
  }

  child_range children() { return child_range(&Hint, &Hint + 1); }
};

/// \brief This represents 'dist_schedule' clause in the '#pragma omp ...'
/// directive.
/// /// \code /// #pragma omp distribute dist_schedule(static, 3) /// \endcode /// In this example directive '#pragma omp distribute' has 'dist_schedule' /// clause with arguments 'static' and '3'. /// class OMPDistScheduleClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief A kind of the 'schedule' clause. OpenMPDistScheduleClauseKind Kind; /// \brief Start location of the schedule kind in source code. SourceLocation KindLoc; /// \brief Location of ',' (if any). SourceLocation CommaLoc; /// \brief Chunk size. Expr *ChunkSize; /// \brief Set schedule kind. /// /// \param K Schedule kind. /// void setDistScheduleKind(OpenMPDistScheduleClauseKind K) { Kind = K; } /// \brief Sets the location of '('. /// /// \param Loc Location of '('. /// void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Set schedule kind start location. /// /// \param KLoc Schedule kind location. /// void setDistScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; } /// \brief Set location of ','. /// /// \param Loc Location of ','. /// void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; } /// \brief Set chunk size. /// /// \param E Chunk size. /// void setChunkSize(Expr *E) { ChunkSize = E; } public: /// \brief Build 'dist_schedule' clause with schedule kind \a Kind and chunk /// size expression \a ChunkSize. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param KLoc Starting location of the argument. /// \param CommaLoc Location of ','. /// \param EndLoc Ending location of the clause. /// \param Kind DistSchedule kind. /// \param ChunkSize Chunk size. /// \param HelperChunkSize Helper chunk size for combined directives. 
/// OMPDistScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KLoc, SourceLocation CommaLoc, SourceLocation EndLoc, OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, Stmt *HelperChunkSize) : OMPClause(OMPC_dist_schedule, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) { setPreInitStmt(HelperChunkSize); } /// \brief Build an empty clause. /// explicit OMPDistScheduleClause() : OMPClause(OMPC_dist_schedule, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this), Kind(OMPC_DIST_SCHEDULE_unknown), ChunkSize(nullptr) {} /// \brief Get kind of the clause. /// OpenMPDistScheduleClauseKind getDistScheduleKind() const { return Kind; } /// \brief Get location of '('. /// SourceLocation getLParenLoc() { return LParenLoc; } /// \brief Get kind location. /// SourceLocation getDistScheduleKindLoc() { return KindLoc; } /// \brief Get location of ','. /// SourceLocation getCommaLoc() { return CommaLoc; } /// \brief Get chunk size. /// Expr *getChunkSize() { return ChunkSize; } /// \brief Get chunk size. /// const Expr *getChunkSize() const { return ChunkSize; } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_dist_schedule; } child_range children() { return child_range(reinterpret_cast<Stmt **>(&ChunkSize), reinterpret_cast<Stmt **>(&ChunkSize) + 1); } }; /// \brief This represents 'defaultmap' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp target defaultmap(tofrom: scalar) /// \endcode /// In this example directive '#pragma omp target' has 'defaultmap' clause of kind /// 'scalar' with modifier 'tofrom'. /// class OMPDefaultmapClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Modifiers for 'defaultmap' clause. OpenMPDefaultmapClauseModifier Modifier; /// \brief Locations of modifiers. 
SourceLocation ModifierLoc;
/// \brief A kind of the 'defaultmap' clause.
OpenMPDefaultmapClauseKind Kind;
/// \brief Start location of the defaultmap kind in source code.
SourceLocation KindLoc;

/// \brief Set defaultmap kind.
///
/// \param K Defaultmap kind.
///
void setDefaultmapKind(OpenMPDefaultmapClauseKind K) { Kind = K; }
/// \brief Set the defaultmap modifier.
///
/// \param M Defaultmap modifier.
///
void setDefaultmapModifier(OpenMPDefaultmapClauseModifier M) {
  Modifier = M;
}
/// \brief Set location of the defaultmap modifier.
///
void setDefaultmapModifierLoc(SourceLocation Loc) {
  ModifierLoc = Loc;
}
/// \brief Sets the location of '('.
///
/// \param Loc Location of '('.
///
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Set defaultmap kind start location.
///
/// \param KLoc Defaultmap kind location.
///
void setDefaultmapKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

public:
/// \brief Build 'defaultmap' clause with defaultmap kind \a Kind
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param MLoc Location of the modifier.
/// \param KLoc Starting location of the argument.
/// \param EndLoc Ending location of the clause.
/// \param Kind Defaultmap kind.
/// \param M The modifier applied to 'defaultmap' clause.
///
OMPDefaultmapClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation MLoc, SourceLocation KLoc,
                    SourceLocation EndLoc, OpenMPDefaultmapClauseKind Kind,
                    OpenMPDefaultmapClauseModifier M)
    : OMPClause(OMPC_defaultmap, StartLoc, EndLoc), LParenLoc(LParenLoc),
      Modifier(M), ModifierLoc(MLoc), Kind(Kind), KindLoc(KLoc) {}

/// \brief Build an empty clause.
///
explicit OMPDefaultmapClause()
    : OMPClause(OMPC_defaultmap, SourceLocation(), SourceLocation()),
      Modifier(OMPC_DEFAULTMAP_MODIFIER_unknown),
      Kind(OMPC_DEFAULTMAP_unknown) {}

/// \brief Get kind of the clause.
/// OpenMPDefaultmapClauseKind getDefaultmapKind() const { return Kind; } /// \brief Get the modifier of the clause. /// OpenMPDefaultmapClauseModifier getDefaultmapModifier() const { return Modifier; } /// \brief Get location of '('. /// SourceLocation getLParenLoc() { return LParenLoc; } /// \brief Get kind location. /// SourceLocation getDefaultmapKindLoc() { return KindLoc; } /// \brief Get the modifier location. /// SourceLocation getDefaultmapModifierLoc() const { return ModifierLoc; } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_defaultmap; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; } // end namespace clang #endif // LLVM_CLANG_AST_OPENMPCLAUSE_H
// ===== Begin concatenated file: network_simplex_simple_omp.h =====
/* -*- mode: C++; indent-tabs-mode: nil; -*- * * * This file has been adapted by Nicolas Bonneel (2013), * from network_simplex.h from LEMON, a generic C++ optimization library, * to implement a lightweight network simplex for mass transport, more * memory efficient than the original file. A previous version of this file * is used as part of the Displacement Interpolation project, * Web: http://www.cs.ubc.ca/labs/imager/tr/2011/DisplacementInterpolation/ * * Revisions: * March 2015: added OpenMP parallelization * March 2017: included Antoine Rolet's trick to make it more robust * April 2018: IMPORTANT bug fix + uses 64bit integers (slightly slower but less risks of overflows), updated to a newer version of the algo by LEMON, sparse flow by default + minor edits. * * **** Original file Copyright Notice : * * Copyright (C) 2003-2010 * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport * (Egervary Research Group on Combinatorial Optimization, EGRES). * * Permission to use, modify and distribute this software is granted * provided that this copyright notice appears in all copies. For * precise terms see the accompanying LICENSE file. * * This software is provided "AS IS" with no warranty of any kind, * express or implied, and with no claim as to its suitability for any * purpose. * */ #pragma once #undef DEBUG_LVL #define DEBUG_LVL 0 #if DEBUG_LVL>0 #include <iomanip> #endif #undef EPSILON #undef _EPSILON #undef MAX_DEBUG_ITER #define EPSILON std::numeric_limits<Cost>::epsilon() #define _EPSILON 1e-14 #define MAX_DEBUG_ITER 100000 /// \ingroup min_cost_flow_algs /// /// \file /// \brief Network Simplex algorithm for finding a minimum cost flow. // if your compiler has troubles with unorderedmaps, just comment the following line to use a slower std::map instead #define HASHMAP // now handled with unorderedmaps instead of stdext::hash_map. Should be better supported. 
#define SPARSE_FLOW // a sparse flow vector will be 10-15% slower for small problems but uses less memory and becomes faster for large problems (40k total nodes) #include <vector> #include <limits> #include <algorithm> #include <iostream> #ifdef HASHMAP #include <unordered_map> #else #include <map> #endif //#include "core.h" //#include "lmath.h" #ifdef OMP #include <omp.h> #endif #include <cmath> //#include "sparse_array_n.h" #include "full_bipartitegraph_omp.h" #undef INVALIDNODE #undef INVALID #define INVALIDNODE -1 #define INVALID (-1) namespace lemon_omp { int64_t max_threads = -1; template <typename T> class ProxyObject; template<typename T> class SparseValueVector { public: SparseValueVector(size_t n = 0) // parameter n for compatibility with standard vectors { } void resize(size_t n = 0) {}; T operator[](const size_t id) const { #ifdef HASHMAP typename std::unordered_map<size_t, T>::const_iterator it = data.find(id); #else typename std::map<size_t, T>::const_iterator it = data.find(id); #endif if (it == data.end()) return 0; else return it->second; } ProxyObject<T> operator[](const size_t id) { return ProxyObject<T>(this, id); } //private: #ifdef HASHMAP std::unordered_map<size_t, T> data; #else std::map<size_t, T> data; #endif }; template <typename T> class ProxyObject { public: ProxyObject(SparseValueVector<T> *v, size_t idx) { _v = v; _idx = idx; }; ProxyObject<T> & operator=(const T &v) { // If we get here, we know that operator[] was called to perform a write access, // so we can insert an item in the vector if needed if (v != 0) _v->data[_idx] = v; return *this; } operator T() { // If we get here, we know that operator[] was called to perform a read access, // so we can simply return the existing object #ifdef HASHMAP typename std::unordered_map<size_t, T>::iterator it = _v->data.find(_idx); #else typename std::map<size_t, T>::iterator it = _v->data.find(_idx); #endif if (it == _v->data.end()) return 0; else return it->second; } void operator+=(T val) 
{ if (val == 0) return; #ifdef HASHMAP typename std::unordered_map<size_t, T>::iterator it = _v->data.find(_idx); #else typename std::map<size_t, T>::iterator it = _v->data.find(_idx); #endif if (it == _v->data.end()) _v->data[_idx] = val; else { T sum = it->second + val; if (sum == 0) _v->data.erase(it); else it->second = sum; } } void operator-=(T val) { if (val == 0) return; #ifdef HASHMAP typename std::unordered_map<size_t, T>::iterator it = _v->data.find(_idx); #else typename std::map<size_t, T>::iterator it = _v->data.find(_idx); #endif if (it == _v->data.end()) _v->data[_idx] = -val; else { T sum = it->second - val; if (sum == 0) _v->data.erase(it); else it->second = sum; } } SparseValueVector<T> *_v; size_t _idx; }; /// \addtogroup min_cost_flow_algs /// @{ /// \brief Implementation of the primal Network Simplex algorithm /// for finding a \ref min_cost_flow "minimum cost flow". /// /// \ref NetworkSimplexSimple implements the primal Network Simplex algorithm /// for finding a \ref min_cost_flow "minimum cost flow" /// \ref amo93networkflows, \ref dantzig63linearprog, /// \ref kellyoneill91netsimplex. /// This algorithm is a highly efficient specialized version of the /// linear programming simplex method directly for the minimum cost /// flow problem. /// /// In general, %NetworkSimplexSimple is the fastest implementation available /// in LEMON for this problem. /// Moreover, it supports both directions of the supply/demand inequality /// constraints. For more information, see \ref SupplyType. /// /// Most of the parameters of the problem (except for the digraph) /// can be given using separate functions, and the algorithm can be /// executed using the \ref run() function. If some parameters are not /// specified, then default values will be used. /// /// \tparam GR The digraph type the algorithm runs on. /// \tparam V The number type used for flow amounts, capacity bounds /// and supply values in the algorithm. By default, it is \c int. 
/// \tparam C The number type used for costs and potentials in the /// algorithm. By default, it is the same as \c V. /// /// \warning Both number types must be signed and all input data must /// be integer. /// /// \note %NetworkSimplexSimple provides five different pivot rule /// implementations, from which the most efficient one is used /// by default. For more information, see \ref PivotRule. template <typename GR, typename V = int, typename C = V, typename ArcsType = int64_t> class NetworkSimplexSimple { public: /// \brief Constructor. /// /// The constructor of the class. /// /// \param graph The digraph the algorithm runs on. /// \param arc_mixing Indicate if the arcs have to be stored in a /// mixed order in the internal data structure. /// In special cases, it could lead to better overall performance, /// but it is usually slower. Therefore it is disabled by default. NetworkSimplexSimple(const GR& graph, bool arc_mixing, int nbnodes, ArcsType nb_arcs, uint64_t maxiters = 0, int numThreads=-1) : _graph(graph), //_arc_id(graph), _arc_mixing(arc_mixing), _init_nb_nodes(nbnodes), _init_nb_arcs(nb_arcs), MAX(std::numeric_limits<Value>::max()), INF(std::numeric_limits<Value>::has_infinity ? std::numeric_limits<Value>::infinity() : MAX) { // Reset data structures reset(); max_iter = maxiters; #ifdef OMP if (max_threads < 0) { max_threads = omp_get_max_threads(); } if (numThreads > 0 && numThreads<=max_threads){ num_threads = numThreads; } else if (numThreads == -1 || numThreads>max_threads) { num_threads = max_threads; } else { num_threads = 1; } omp_set_num_threads(num_threads); #else num_threads = 1; #endif } /// The type of the flow amounts, capacity bounds and supply values typedef V Value; /// The type of the arc costs typedef C Cost; public: /// \brief Problem type constants for the \c run() function. /// /// Enum type containing the problem type constants that can be /// returned by the \ref run() function of the algorithm. 
    enum ProblemType {
        /// The problem has no feasible solution (flow).
        INFEASIBLE,
        /// The problem has optimal solution (i.e. it is feasible and
        /// bounded), and the algorithm has found optimal flow and node
        /// potentials (primal and dual solutions).
        OPTIMAL,
        /// The objective function of the problem is unbounded, i.e.
        /// there is a directed cycle having negative total cost and
        /// infinite upper bound.
        UNBOUNDED,
        // The maximum number of iteration has been reached
        MAX_ITER_REACHED
    };

    /// \brief Constants for selecting the type of the supply constraints.
    ///
    /// Enum type containing constants for selecting the supply type,
    /// i.e. the direction of the inequalities in the supply/demand
    /// constraints of the \ref min_cost_flow "minimum cost flow problem".
    ///
    /// The default supply type is \c GEQ, the \c LEQ type can be
    /// selected using \ref supplyType().
    /// The equality form is a special case of both supply types.
    enum SupplyType {
        /// This option means that there are <em>"greater or equal"</em>
        /// supply/demand constraints in the definition of the problem.
        GEQ,
        /// This option means that there are <em>"less or equal"</em>
        /// supply/demand constraints in the definition of the problem.
        LEQ
    };

private:
    uint64_t max_iter;   // iteration cap for start(); 0 disables the cap
    int num_threads;     // OpenMP thread count chosen in the constructor
    TEMPLATE_DIGRAPH_TYPEDEFS(GR);

    typedef std::vector<int> IntVector;
    typedef std::vector<ArcsType> ArcVector;
    typedef std::vector<Value> ValueVector;
    typedef std::vector<Cost> CostVector;
    //	typedef SparseValueVector<Cost> CostVector;
    typedef std::vector<char> BoolVector;
    // Note: vector<char> is used instead of vector<bool> for efficiency reasons

    // State constants for arcs
    enum ArcState {
        STATE_UPPER = -1,
        STATE_TREE = 0,
        STATE_LOWER = 1
    };

    typedef std::vector<signed char> StateVector;
    // Note: vector<signed char> is used instead of vector<ArcState> for
    // efficiency reasons

private:
    // Data related to the underlying digraph
    const GR &_graph;
    int _node_num;            // number of problem nodes (excludes artificial root)
    ArcsType _arc_num;        // number of problem arcs
    ArcsType _all_arc_num;    // problem arcs + artificial arcs
    ArcsType _search_arc_num; // number of arcs scanned by the pivot rule

    // Parameters of the problem
    SupplyType _stype;
    Value _sum_supply;

    // Maps a graph node id to the internal (reversed) index used by all
    // node-indexed vectors below.
    inline int _node_id(int n) const { return _node_num - n - 1; };

    //IntArcMap _arc_id;
    IntVector _source;  // keep nodes as integers
    IntVector _target;
    bool _arc_mixing;   // whether arcs are stored in the mixed order (see sequence())

    // Node and arc data
    CostVector _cost;
    ValueVector _supply;
#ifdef SPARSE_FLOW
    SparseValueVector<Value> _flow;
#else
    ValueVector _flow;
#endif
    CostVector _pi;     // node potentials (dual solution)

    // Data for storing the spanning tree structure.
    // NOTE(review): these follow the usual LEMON thread-index layout —
    // _thread/_rev_thread appear to encode a preorder traversal of the tree,
    // _succ_num/_last_succ subtree sizes and last preorder successors; see
    // updateTreeStructure() for how they are maintained.
    IntVector _parent;
    ArcVector _pred;     // arc connecting each node to its parent
    IntVector _thread;
    IntVector _rev_thread;
    IntVector _succ_num;
    IntVector _last_succ;
    IntVector _dirty_revs;
    BoolVector _forward; // direction of _pred[u] (true: u is its source side)
    StateVector _state;  // per-arc state: STATE_LOWER / STATE_TREE / STATE_UPPER
    ArcsType _root;      // index of the artificial root node (== _node_num)

    // Temporary data used in the current pivot iteration
    ArcsType in_arc, join, u_in, v_in, u_out, v_out;
    ArcsType first, second, right, last;
    ArcsType stem, par_stem, new_stem;
    Value delta;         // amount of flow change along the pivot cycle

    const Value MAX;

    ArcsType mixingCoeff;

public:
    /// \brief Constant for infinite upper bounds (capacities).
    ///
    /// Constant for infinite upper bounds (capacities).
    /// It is \c std::numeric_limits<Value>::infinity() if available,
    /// \c std::numeric_limits<Value>::max() otherwise.
    const Value INF;

private:
    // thank you to DVK and MizardX from StackOverflow for this function!
    // Maps position k in the mixed arc order back to a (offset, subsequence)
    // pair; used when _arc_mixing is enabled. The first
    // num_total_big_subsequence_numbers positions belong to the "big"
    // (length subsequence_length) subsequences, the rest to the small ones.
    inline ArcsType sequence(ArcsType k) const {
        ArcsType smallv = (k > num_total_big_subsequence_numbers) & 1;

        k -= num_total_big_subsequence_numbers * smallv;
        ArcsType subsequence_length2 = subsequence_length - smallv;
        ArcsType subsequence_num = (k / subsequence_length2) + num_big_subsequences * smallv;
        ArcsType subsequence_offset = (k % subsequence_length2) * mixingCoeff;

        return subsequence_offset + subsequence_num;
    }

    // Parameters of the mixed arc order, computed in reset().
    ArcsType subsequence_length;
    ArcsType num_big_subsequences;
    ArcsType num_total_big_subsequence_numbers;

    // Returns the internal arc index for a graph arc, accounting for the
    // optional mixed storage order.
    inline ArcsType getArcID(const Arc &arc) const {
        //int n = _arc_num-arc._id-1;
        ArcsType n = _arc_num - GR::id(arc) - 1;

        //ArcsType a = mixingCoeff*(n%mixingCoeff) + n/mixingCoeff;
        //ArcsType b = _arc_id[arc];
        if (_arc_mixing)
            return sequence(n);
        else
            return n;
    }

    // finally unused because too slow
    inline ArcsType getSource(const ArcsType arc) const {
        //ArcsType a = _source[arc];
        //return a;

        ArcsType n = _arc_num - arc - 1;
        if (_arc_mixing)
            n = mixingCoeff*(n%mixingCoeff) + n / mixingCoeff;

        ArcsType b;
        if (n >= 0)
            b = _node_id(_graph.source(GR::arcFromId(n)));
        else {
            n = arc + 1 - _arc_num;
            if (n <= _node_num)
                b = _node_num;
            else if (n >= _graph._n1)
                b = _graph._n1;
            else
                b = _graph._n1 - n;
        }

        return b;
    }

    // Implementation of the Block Search pivot rule
    class BlockSearchPivotRule
    {
    private:
        // References to the NetworkSimplexSimple class
        const IntVector &_source;
        const IntVector &_target;
        const CostVector &_cost;
        const StateVector &_state;
        const CostVector &_pi;
        ArcsType &_in_arc;
        ArcsType _search_arc_num;

        // Pivot rule data
        ArcsType _block_size;
        ArcsType _next_arc;
        NetworkSimplexSimple &_ns;

    public:
        // Constructor
        BlockSearchPivotRule(NetworkSimplexSimple &ns) :
            _source(ns._source), _target(ns._target),
            _cost(ns._cost), _state(ns._state), _pi(ns._pi),
            _in_arc(ns.in_arc), _search_arc_num(ns._search_arc_num),
            _next_arc(0), _ns(ns)
        {
            // The main parameters of the pivot rule
            const double BLOCK_SIZE_FACTOR = 1;
            const ArcsType MIN_BLOCK_SIZE = 10;

            // Block size scales with sqrt(#arcs), but never below MIN_BLOCK_SIZE.
            _block_size = std::max(ArcsType(BLOCK_SIZE_FACTOR * std::sqrt(double(_search_arc_num))), MIN_BLOCK_SIZE);
        }

        // Find next entering arc: scan blocks of arcs (in parallel when
        // compiled with OMP), tracking the most negative reduced cost per
        // thread, and stop at the first block containing a sufficiently
        // negative arc. Returns false when no entering arc exists (optimal).
        bool findEnteringArc() {
            Cost min_val = 0;

            ArcsType N = _ns.num_threads;

            std::vector<Cost> minArray(N, 0);
            std::vector<ArcsType> arcId(N);
            ArcsType bs = (ArcsType)ceil(_block_size / (double)N);

            for (ArcsType i = 0; i < _search_arc_num; i += _block_size) {

                ArcsType e;
                int j;
                // NOTE(review): `e` is lastprivate so that after the parallel
                // loop it holds the last arc index examined; the inner `j`
                // shadows the outer declaration inside the omp-for.
                #pragma omp parallel
                {
#ifdef OMP
                    int t = omp_get_thread_num();
#else
                    int t = 0;
#endif

                    #pragma omp for schedule(static, bs) lastprivate(e)
                    for (j = 0; j < std::min(i + _block_size, _search_arc_num) - i; j++) {
                        e = (_next_arc + i + j); if (e >= _search_arc_num) e -= _search_arc_num;
                        // Reduced cost of arc e, signed by its state.
                        Cost c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]);
                        if (c < minArray[t]) {
                            minArray[t] = c;
                            arcId[t] = e;
                        }
                    }
                }
                // Reduce per-thread minima.
                for (int j = 0; j < N; j++) {
                    if (minArray[j] < min_val) {
                        min_val = minArray[j];
                        _in_arc = arcId[j];
                    }
                }

                // Scale the acceptance threshold by the magnitudes involved to
                // avoid selecting arcs whose negativity is mere rounding noise.
                Cost a = std::abs(_pi[_source[_in_arc]]) > std::abs(_pi[_target[_in_arc]]) ? std::abs(_pi[_source[_in_arc]]) : std::abs(_pi[_target[_in_arc]]);
                a = a > std::abs(_cost[_in_arc]) ? a : std::abs(_cost[_in_arc]);
                if (min_val < -EPSILON*a) {
                    _next_arc = e;
                    return true;
                }
            }

            // Full scan finished: apply the same relative tolerance once more.
            Cost a = fabs(_pi[_source[_in_arc]]) > fabs(_pi[_target[_in_arc]]) ? fabs(_pi[_source[_in_arc]]) : fabs(_pi[_target[_in_arc]]);
            a = a > fabs(_cost[_in_arc]) ? a : fabs(_cost[_in_arc]);
            if (min_val >= -EPSILON*a)
                return false;

            return true;
        }


        // Find next entering arc
        /*bool findEnteringArc() {
            Cost min_val = 0;
            int N = omp_get_max_threads();
            std::vector<Cost> minArray(N);
            std::vector<ArcsType> arcId(N);

            ArcsType bs = (ArcsType)ceil(_block_size / (double)N);

            for (ArcsType i = 0; i < _search_arc_num; i += _block_size) {

                ArcsType maxJ = std::min(i + _block_size, _search_arc_num) - i;
                ArcsType j;
                #pragma omp parallel
                {
                    int t = omp_get_thread_num();
                    Cost minV = 0;
                    ArcsType arcStart = _next_arc + i;
                    ArcsType arc = -1;
                    #pragma omp for schedule(static, bs)
                    for (j = 0; j < maxJ; j++) {
                        ArcsType e = arcStart + j; if (e >= _search_arc_num) e -= _search_arc_num;
                        Cost c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]);
                        if (c < minV) {
                            minV = c;
                            arc = e;
                        }
                    }

                    minArray[t] = minV;
                    arcId[t] = arc;
                }
                for (int j = 0; j < N; j++) {
                    if (minArray[j] < min_val) {
                        min_val = minArray[j];
                        _in_arc = arcId[j];
                    }
                }

                //FIX by Antoine Rolet to avoid precision issues
                Cost a = std::max(std::abs(_cost[_in_arc]), std::max(std::abs(_pi[_source[_in_arc]]), std::abs(_pi[_target[_in_arc]])));
                if (min_val <-std::numeric_limits<Cost>::epsilon()*a) {
                    _next_arc = _next_arc + i + maxJ - 1;
                    if (_next_arc >= _search_arc_num) _next_arc -= _search_arc_num;
                    return true;
                }
            }

            if (min_val >= 0) {
                return false;
            }

            return true;
        }*/


        /*bool findEnteringArc() {
            Cost c, min = 0;
            int cnt = _block_size;
            int e, min_arc = _next_arc;
            for (e = _next_arc; e < _search_arc_num; ++e) {
                c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]);
                if (c < min) {
                    min = c;
                    min_arc = e;

                }
                if (--cnt == 0) {
                    if (min < 0) break;
                    cnt = _block_size;

                }

            }
            if (min == 0 || cnt > 0) {
                for (e = 0; e < _next_arc; ++e) {
                    c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]);
                    if (c < min) {
                        min = c;
                        min_arc = e;

                    }
                    if (--cnt == 0) {
                        if (min < 0) break;
                        cnt = _block_size;

                    }

                }

            }
            if (min >= 0) return false;
            _in_arc = min_arc;
            _next_arc = e;
            return true;
        }*/



    }; //class BlockSearchPivotRule
public:

    int _init_nb_nodes;
    ArcsType _init_nb_arcs;

    /// \name Parameters
    /// The parameters of the algorithm can be specified using these
    /// functions.

    /// @{

    /// \brief Set the costs of the arcs.
    ///
    /// This function sets the costs of the arcs.
    /// If it is not used before calling \ref run(), the costs
    /// will be set to \c 1 on all arcs.
    ///
    /// \param map An arc map storing the costs.
    /// Its \c Value type must be convertible to the \c Cost type
    /// of the algorithm.
    ///
    /// \return <tt>(*this)</tt>
    template<typename CostMap>
    NetworkSimplexSimple& costMap(const CostMap& map) {
        Arc a;
        _graph.first(a);
        for (; a != INVALID; _graph.next(a)) {
            _cost[getArcID(a)] = map[a];
        }
        return *this;
    }

    /// \brief Set the cost of one arc.
    ///
    /// This function sets the cost of a single arc
    /// (provided so callers need not materialize a full cost map).
    ///
    /// \param arc The arc to modify.
    /// \param cost The new cost value.
    ///
    /// \return <tt>(*this)</tt>
    // NOTE(review): the template parameter deliberately shadows the class-level
    // `Value` typedef; the cost is converted to `Cost` on assignment.
    template<typename Value>
    NetworkSimplexSimple& setCost(const Arc& arc, const Value cost) {
        _cost[getArcID(arc)] = cost;
        return *this;
    }

    /// \brief Set the supply values of the nodes.
    ///
    /// This function sets the supply values of the nodes.
    /// If neither this function nor \ref stSupply() is used before
    /// calling \ref run(), the supply of each node will be set to zero.
    ///
    /// \param map A node map storing the supply values.
    /// Its \c Value type must be convertible to the \c Value type
    /// of the algorithm.
    ///
    /// \return <tt>(*this)</tt>
    template<typename SupplyMap>
    NetworkSimplexSimple& supplyMap(const SupplyMap& map) {
        Node n;
        _graph.first(n);
        for (; n != INVALIDNODE; _graph.next(n)) {
            _supply[_node_id(n)] = map[n];
        }
        return *this;
    }

    /// \brief Set the supply values from two consecutive arrays.
    ///
    /// Nodes with id below \c n1 read their supply from \c map1,
    /// the remaining nodes from \c map2 (indexed from 0).
    template<typename SupplyMap>
    NetworkSimplexSimple& supplyMap(const SupplyMap* map1, int n1, const SupplyMap* map2, int n2) {
        Node n;
        _graph.first(n);
        for (; n != INVALIDNODE; _graph.next(n)) {
            if (n<n1)
                _supply[_node_id(n)] = map1[n];
            else
                _supply[_node_id(n)] = map2[n - n1];
        }
        return *this;
    }

    /// \brief Set a constant supply value on two node ranges.
    ///
    /// Nodes with id below \c n1 get \c val1, the rest get \c val2.
    template<typename SupplyMap>
    NetworkSimplexSimple& supplyMapAll(SupplyMap val1, int n1, SupplyMap val2, int n2) {
        Node n;
        _graph.first(n);
        for (; n != INVALIDNODE; _graph.next(n)) {
            if (n<n1)
                _supply[_node_id(n)] = val1;
            else
                _supply[_node_id(n)] = val2;
        }
        return *this;
    }

    /// \brief Set single source and target nodes and a supply value.
    ///
    /// This function sets a single source node and a single target node
    /// and the required flow value.
    /// If neither this function nor \ref supplyMap() is used before
    /// calling \ref run(), the supply of each node will be set to zero.
    ///
    /// Using this function has the same effect as using \ref supplyMap()
    /// with such a map in which \c k is assigned to \c s, \c -k is
    /// assigned to \c t and all other nodes have zero supply value.
    ///
    /// \param s The source node.
    /// \param t The target node.
    /// \param k The required amount of flow from node \c s to node \c t
    /// (i.e. the supply of \c s and the demand of \c t).
    ///
    /// \return <tt>(*this)</tt>
    NetworkSimplexSimple& stSupply(const Node& s, const Node& t, Value k) {
        for (int i = 0; i != _node_num; ++i) {
            _supply[i] = 0;
        }
        _supply[_node_id(s)] = k;
        _supply[_node_id(t)] = -k;
        return *this;
    }

    /// \brief Set the type of the supply constraints.
    ///
    /// This function sets the type of the supply/demand constraints.
    /// If it is not used before calling \ref run(), the \ref GEQ supply
    /// type will be used.
    ///
    /// For more information, see \ref SupplyType.
    ///
    /// \return <tt>(*this)</tt>
    NetworkSimplexSimple& supplyType(SupplyType supply_type) {
        _stype = supply_type;
        return *this;
    }

    /// @}

    /// \name Execution Control
    /// The algorithm can be executed using \ref run().

    /// @{

    /// \brief Run the algorithm.
    ///
    /// This function runs the algorithm.
    /// The paramters can be specified using functions \ref lowerMap(),
    /// \ref upperMap(), \ref costMap(), \ref supplyMap(), \ref stSupply(),
    /// \ref supplyType().
    /// For example,
    /// \code
    ///   NetworkSimplexSimple<ListDigraph> ns(graph);
    ///   ns.lowerMap(lower).upperMap(upper).costMap(cost)
    ///     .supplyMap(sup).run();
    /// \endcode
    ///
    /// This function can be called more than once. All the given parameters
    /// are kept for the next call, unless \ref resetParams() or \ref reset()
    /// is used, thus only the modified parameters have to be set again.
    /// If the underlying digraph was also modified after the construction
    /// of the class (or the last \ref reset() call), then the \ref reset()
    /// function must be called.
    ///
    /// \param pivot_rule The pivot rule that will be used during the
    /// algorithm. For more information, see \ref PivotRule.
    ///
    /// \return \c INFEASIBLE if no feasible flow exists,
    /// \n \c OPTIMAL if the problem has optimal solution
    /// (i.e. it is feasible and bounded), and the algorithm has found
    /// optimal flow and node potentials (primal and dual solutions),
    /// \n \c UNBOUNDED if the objective function of the problem is
    /// unbounded, i.e. there is a directed cycle having negative total
    /// cost and infinite upper bound.
/// /// \see ProblemType, PivotRule /// \see resetParams(), reset() ProblemType run() { #if DEBUG_LVL>0 std::cout << "OPTIMAL = " << OPTIMAL << "\nINFEASIBLE = " << INFEASIBLE << "\nUNBOUNDED = " << UNBOUNDED << "\nMAX_ITER_REACHED" << MAX_ITER_REACHED << "\n" ; #endif if (!init()) return INFEASIBLE; #if DEBUG_LVL>0 std::cout << "Init done, starting iterations\n"; #endif return start(); } /// \brief Reset all the parameters that have been given before. /// /// This function resets all the paramaters that have been given /// before using functions \ref lowerMap(), \ref upperMap(), /// \ref costMap(), \ref supplyMap(), \ref stSupply(), \ref supplyType(). /// /// It is useful for multiple \ref run() calls. Basically, all the given /// parameters are kept for the next \ref run() call, unless /// \ref resetParams() or \ref reset() is used. /// If the underlying digraph was also modified after the construction /// of the class or the last \ref reset() call, then the \ref reset() /// function must be used, otherwise \ref resetParams() is sufficient. /// /// For example, /// \code /// NetworkSimplexSimple<ListDigraph> ns(graph); /// /// // First run /// ns.lowerMap(lower).upperMap(upper).costMap(cost) /// .supplyMap(sup).run(); /// /// // Run again with modified cost map (resetParams() is not called, /// // so only the cost map have to be set again) /// cost[e] += 100; /// ns.costMap(cost).run(); /// /// // Run again from scratch using resetParams() /// // (the lower bounds will be set to zero on all arcs) /// ns.resetParams(); /// ns.upperMap(capacity).costMap(cost) /// .supplyMap(sup).run(); /// \endcode /// /// \return <tt>(*this)</tt> /// /// \see reset(), run() NetworkSimplexSimple& resetParams() { for (int i = 0; i != _node_num; ++i) { _supply[i] = 0; } for (ArcsType i = 0; i != _arc_num; ++i) { _cost[i] = 1; } _stype = GEQ; return *this; } /// \brief Reset the internal data structures and all the parameters /// that have been given before. 
    ///
    /// This function resets the internal data structures and all the
    /// parameters that have been given before using functions \ref lowerMap(),
    /// \ref upperMap(), \ref costMap(), \ref supplyMap(), \ref stSupply(),
    /// \ref supplyType().
    ///
    /// It is useful for multiple \ref run() calls. Basically, all the given
    /// parameters are kept for the next \ref run() call, unless
    /// \ref resetParams() or \ref reset() is used.
    /// If the underlying digraph was also modified after the construction
    /// of the class or the last \ref reset() call, then the \ref reset()
    /// function must be used, otherwise \ref resetParams() is sufficient.
    ///
    /// See \ref resetParams() for examples.
    ///
    /// \return <tt>(*this)</tt>
    ///
    /// \see resetParams(), run()
    NetworkSimplexSimple& reset() {
        // Resize vectors: one extra node slot for the artificial root, and
        // up to 2*_node_num extra arc slots for the artificial arcs.
        _node_num = _init_nb_nodes;
        _arc_num = _init_nb_arcs;
        int all_node_num = _node_num + 1;
        ArcsType max_arc_num = _arc_num + 2 * _node_num;

        _source.resize(max_arc_num);
        _target.resize(max_arc_num);

        _cost.resize(max_arc_num);
        _supply.resize(all_node_num);
        _flow.resize(max_arc_num);
        _pi.resize(all_node_num);

        _parent.resize(all_node_num);
        _pred.resize(all_node_num);
        _forward.resize(all_node_num);
        _thread.resize(all_node_num);
        _rev_thread.resize(all_node_num);
        _succ_num.resize(all_node_num);
        _last_succ.resize(all_node_num);
        _state.resize(max_arc_num);

        //_arc_mixing=false;
        if (_arc_mixing && _node_num > 1) {
            // Store the arcs in a mixed order
            //ArcsType k = std::max(ArcsType(std::sqrt(double(_arc_num))), ArcsType(10));
            const ArcsType k = std::max(ArcsType(_arc_num / _node_num), ArcsType(3));
            mixingCoeff = k;
            subsequence_length = _arc_num / mixingCoeff + 1;
            num_big_subsequences = _arc_num % mixingCoeff;
            num_total_big_subsequence_numbers = subsequence_length * num_big_subsequences;

#pragma omp parallel for schedule(static)
            for (Arc a = 0; a <= _graph.maxArcId(); a++) {  // --a <=> _graph.next(a)  , -1 == INVALID
                ArcsType i = sequence(_graph.maxArcId()-a);
                _source[i] = _node_id(_graph.source(a));
                _target[i] = _node_id(_graph.target(a));
            }
        } else {
            // Store the arcs in the original order
            ArcsType i = 0;
            Arc a;
            _graph.first(a);
            for (; a != INVALID; _graph.next(a), ++i) {
                _source[i] = _node_id(_graph.source(a));
                _target[i] = _node_id(_graph.target(a));
                //_arc_id[a] = i;
            }
        }

        // Reset parameters
        resetParams();
        return *this;
    }

    /// @}

    /// \name Query Functions
    /// The results of the algorithm can be obtained using these
    /// functions.\n
    /// The \ref run() function must be called before using them.

    /// @{

    /// \brief Return the total cost of the found flow.
    ///
    /// This function returns the total cost of the found flow.
    /// Its complexity is O(e).
    ///
    /// \note The return type of the function can be specified as a
    /// template parameter. For example,
    /// \code
    ///   ns.totalCost<double>();
    /// \endcode
    /// It is useful if the total cost cannot be stored in the \c Cost
    /// type of the algorithm, which is the default return type of the
    /// function.
    ///
    /// \pre \ref run() must be called before using this function.
    /*template <typename Number>
    Number totalCost() const {
        Number c = 0;
        for (ArcIt a(_graph); a != INVALID; ++a) {
            int i = getArcID(a);
            c += Number(_flow[i]) * Number(_cost[i]);
        }
        return c;
    }*/

    template <typename Number>
    Number totalCost() const {
        Number c = 0;

        // With SPARSE_FLOW only non-zero flow entries are stored, so we
        // iterate the underlying map; otherwise sum over the dense vector.
#ifdef SPARSE_FLOW
#ifdef HASHMAP
        typename std::unordered_map<size_t, Value>::const_iterator it;
#else
        typename std::map<size_t, Value>::const_iterator it;
#endif
        for (it = _flow.data.begin(); it!=_flow.data.end(); ++it)
            c += Number(it->second) * Number(_cost[it->first]);
        return c;
#else
        for (ArcsType i = 0; i<_flow.size(); i++)
            c += _flow[i] * Number(_cost[i]);
        return c;
#endif
    }

#ifndef DOXYGEN
    Cost totalCost() const {
        return totalCost<Cost>();
    }
#endif

    /// \brief Return the flow on the given arc.
    ///
    /// This function returns the flow on the given arc.
    ///
    /// \pre \ref run() must be called before using this function.
    Value flow(const Arc& a) const {
        return _flow[getArcID(a)];
    }

    /// \brief Return the flow map (the primal solution).
    ///
    /// This function copies the flow value on each arc into the given
    /// map. The \c Value type of the algorithm must be convertible to
    /// the \c Value type of the map.
    ///
    /// \pre \ref run() must be called before using this function.
    template <typename FlowMap>
    void flowMap(FlowMap &map) const {
        Arc a;
        _graph.first(a);
        for (; a != INVALID; _graph.next(a)) {
            map.set(a, _flow[getArcID(a)]);
        }
    }

    /// \brief Return the potential (dual value) of the given node.
    ///
    /// This function returns the potential (dual value) of the
    /// given node.
    ///
    /// \pre \ref run() must be called before using this function.
    Cost potential(const Node& n) const {
        return _pi[_node_id(n)];
    }

    /// \brief Return the potential map (the dual solution).
    ///
    /// This function copies the potential (dual value) of each node
    /// into the given map.
    /// The \c Cost type of the algorithm must be convertible to the
    /// \c Value type of the map.
    ///
    /// \pre \ref run() must be called before using this function.
    template <typename PotentialMap>
    void potentialMap(PotentialMap &map) const {
        Node n;
        _graph.first(n);
        for (; n != INVALID; _graph.next(n)) {
            map.set(n, _pi[_node_id(n)]);
        }
    }

    /// @}

private:

    // Initialize internal data structures: computes the supply sum, the
    // artificial arc cost, and builds the initial spanning tree rooted at
    // the artificial node. Returns false only for an empty node set.
    bool init() {
        if (_node_num == 0) return false;

        // Check the sum of supply values
        _sum_supply = 0;
        for (int i = 0; i != _node_num; ++i) {
            _sum_supply += _supply[i];
        }
        /*if (!((_stype == GEQ && _sum_supply <= 0) ||
        (_stype == LEQ && _sum_supply >= 0))) return false;*/

        // Initialize artifical cost: large enough to dominate any path of
        // real arc costs so artificial arcs leave the basis when possible.
        Cost ART_COST;
        if (std::numeric_limits<Cost>::is_exact) {
            ART_COST = std::numeric_limits<Cost>::max() / 2 + 1;
        } else {
            ART_COST = 0;
            for (ArcsType i = 0; i != _arc_num; ++i) {
                if (_cost[i] > ART_COST) ART_COST = _cost[i];
            }
            ART_COST = (ART_COST + 1) * _node_num;
        }

        // Initialize arc maps
        for (ArcsType i = 0; i != _arc_num; ++i) {
#ifndef SPARSE_FLOW
            _flow[i] = 0; //by default, the sparse matrix is empty
#endif
            _state[i] = STATE_LOWER;
        }
#ifdef SPARSE_FLOW
        _flow = SparseValueVector<Value>();
#endif

        // Set data for the artificial root node
        _root = _node_num;
        _parent[_root] = -1;
        _pred[_root] = -1;
        _thread[_root] = 0;
        _rev_thread[0] = _root;
        _succ_num[_root] = _node_num + 1;
        _last_succ[_root] = _root - 1;
        _supply[_root] = -_sum_supply;
        _pi[_root] = 0;

        // Add artificial arcs and initialize the spanning tree data structure:
        // every node is connected to the root by one artificial basic arc,
        // oriented so its initial flow is non-negative.
        if (_sum_supply == 0) {
            // EQ supply constraints
            _search_arc_num = _arc_num;
            _all_arc_num = _arc_num + _node_num;
            for (ArcsType u = 0, e = _arc_num; u != _node_num; ++u, ++e) {
                _parent[u] = _root;
                _pred[u] = e;
                _thread[u] = u + 1;
                _rev_thread[u + 1] = u;
                _succ_num[u] = 1;
                _last_succ[u] = u;
                _state[e] = STATE_TREE;
                if (_supply[u] >= 0) {
                    _forward[u] = true;
                    _pi[u] = 0;
                    _source[e] = u;
                    _target[e] = _root;
                    _flow[e] = _supply[u];
                    _cost[e] = 0;
                } else {
                    _forward[u] = false;
                    _pi[u] = ART_COST;
                    _source[e] = _root;
                    _target[e] = u;
                    _flow[e] = -_supply[u];
                    _cost[e] = ART_COST;
                }
            }
        } else if (_sum_supply > 0) {
            // LEQ supply constraints: demand nodes additionally get a
            // penalized artificial arc from the root.
            _search_arc_num = _arc_num + _node_num;
            ArcsType f = _arc_num + _node_num;
            for (ArcsType u = 0, e = _arc_num; u != _node_num; ++u, ++e) {
                _parent[u] = _root;
                _thread[u] = u + 1;
                _rev_thread[u + 1] = u;
                _succ_num[u] = 1;
                _last_succ[u] = u;
                if (_supply[u] >= 0) {
                    _forward[u] = true;
                    _pi[u] = 0;
                    _pred[u] = e;
                    _source[e] = u;
                    _target[e] = _root;
                    _flow[e] = _supply[u];
                    _cost[e] = 0;
                    _state[e] = STATE_TREE;
                } else {
                    _forward[u] = false;
                    _pi[u] = ART_COST;
                    _pred[u] = f;
                    _source[f] = _root;
                    _target[f] = u;
                    _flow[f] = -_supply[u];
                    _cost[f] = ART_COST;
                    _state[f] = STATE_TREE;
                    _source[e] = u;
                    _target[e] = _root;
                    //_flow[e] = 0;  //by default, the sparse matrix is empty
                    _cost[e] = 0;
                    _state[e] = STATE_LOWER;
                    ++f;
                }
            }
            _all_arc_num = f;
        } else {
            // GEQ supply constraints: symmetric to the LEQ case, with supply
            // nodes getting the penalized artificial arc towards the root.
            _search_arc_num = _arc_num + _node_num;
            ArcsType f = _arc_num + _node_num;
            for (ArcsType u = 0, e = _arc_num; u != _node_num; ++u, ++e) {
                _parent[u] = _root;
                _thread[u] = u + 1;
                _rev_thread[u + 1] = u;
                _succ_num[u] = 1;
                _last_succ[u] = u;
                if (_supply[u] <= 0) {
                    _forward[u] = false;
                    _pi[u] = 0;
                    _pred[u] = e;
                    _source[e] = _root;
                    _target[e] = u;
                    _flow[e] = -_supply[u];
                    _cost[e] = 0;
                    _state[e] = STATE_TREE;
                } else {
                    _forward[u] = true;
                    _pi[u] = -ART_COST;
                    _pred[u] = f;
                    _source[f] = u;
                    _target[f] = _root;
                    _flow[f] = _supply[u];
                    _state[f] = STATE_TREE;
                    _cost[f] = ART_COST;
                    _source[e] = _root;
                    _target[e] = u;
                    //_flow[e] = 0; //by default, the sparse matrix is empty
                    _cost[e] = 0;
                    _state[e] = STATE_LOWER;
                    ++f;
                }
            }
            _all_arc_num = f;
        }

        return true;
    }

    // Find the join node: the lowest common ancestor of the entering arc's
    // endpoints, climbing from the smaller subtree at each step.
    void findJoinNode() {
        int u = _source[in_arc];
        int v = _target[in_arc];
        while (u != v) {
            if (_succ_num[u] < _succ_num[v]) {
                u = _parent[u];
            } else {
                v = _parent[v];
            }
        }
        join = u;
    }

    // Find the leaving arc of the cycle and returns true if the
    // leaving arc is not the same as the entering arc
    bool findLeavingArc() {
        // Initialize first and second nodes according to the direction
        // of the cycle
        if (_state[in_arc] == STATE_LOWER) {
            first = _source[in_arc];
            second = _target[in_arc];
        } else {
            first = _target[in_arc];
            second = _source[in_arc];
        }
        delta = INF;
        char result = 0;
        Value d;
        ArcsType e;

        // Search the cycle along the path form the first node to the root.
        // Only flow on forward tree arcs limits delta on this side.
        for (int u = first; u != join; u = _parent[u]) {
            e = _pred[u];
            d = _forward[u] ? _flow[e] : INF;
            if (d < delta) {
                delta = d;
                u_out = u;
                result = 1;
            }
        }
        // Search the cycle along the path form the second node to the root.
        // Uses <= so that ties prefer the second path (result == 2).
        for (int u = second; u != join; u = _parent[u]) {
            e = _pred[u];
            d = _forward[u] ? INF : _flow[e];
            if (d <= delta) {
                delta = d;
                u_out = u;
                result = 2;
            }
        }

        if (result == 1) {
            u_in = first;
            v_in = second;
        } else {
            u_in = second;
            v_in = first;
        }
        return result != 0;
    }

    // Change _flow and _state vectors
    void changeFlow(bool change) {
        // Augment along the cycle: push delta around the cycle formed by the
        // entering arc and the two tree paths to the join node.
        if (delta > 0) {
            Value val = _state[in_arc] * delta;
            _flow[in_arc] += val;
            for (int u = _source[in_arc]; u != join; u = _parent[u]) {
                _flow[_pred[u]] += _forward[u] ? -val : val;
            }
            for (int u = _target[in_arc]; u != join; u = _parent[u]) {
                _flow[_pred[u]] += _forward[u] ? val : -val;
            }
        }
        // Update the state of the entering and leaving arcs
        if (change) {
            _state[in_arc] = STATE_TREE;
            _state[_pred[u_out]] =
                (_flow[_pred[u_out]] == 0) ? STATE_LOWER : STATE_UPPER;
        } else {
            // Degenerate pivot: the entering arc simply flips bound.
            _state[in_arc] = -_state[in_arc];
        }
    }

    // Update the tree structure
    void updateTreeStructure() {
        int old_rev_thread = _rev_thread[u_out];
        int old_succ_num = _succ_num[u_out];
        int old_last_succ = _last_succ[u_out];
        v_out = _parent[u_out];

        // Check if u_in and u_out coincide
        if (u_in == u_out) {
            // Update _parent, _pred, _pred_dir
            _parent[u_in] = v_in;
            _pred[u_in] = in_arc;
            _forward[u_in] = (u_in == _source[in_arc]);

            // Update _thread and _rev_thread
            if (_thread[v_in] != u_out) {
                ArcsType after = _thread[old_last_succ];
                _thread[old_rev_thread] = after;
                _rev_thread[after] = old_rev_thread;
                after = _thread[v_in];
                _thread[v_in] = u_out;
                _rev_thread[u_out] = v_in;
                _thread[old_last_succ] = after;
                _rev_thread[after] = old_last_succ;
            }
        } else {
            // Handle the case when old_rev_thread equals to v_in
            // (it also means that join and v_out coincide)
            int thread_continue = old_rev_thread == v_in ?
                _thread[old_last_succ] : _thread[v_in];

            // Update _thread and _parent along the stem nodes (i.e. the nodes
            // between u_in and u_out, whose parent have to be changed)
            int stem = u_in;             // the current stem node
            int par_stem = v_in;         // the new parent of stem
            int next_stem;               // the next stem node
            int last = _last_succ[u_in]; // the last successor of stem
            int before, after = _thread[last];
            _thread[v_in] = u_in;
            _dirty_revs.clear();
            _dirty_revs.push_back(v_in);
            while (stem != u_out) {
                // Insert the next stem node into the thread list
                next_stem = _parent[stem];
                _thread[last] = next_stem;
                _dirty_revs.push_back(last);

                // Remove the subtree of stem from the thread list
                before = _rev_thread[stem];
                _thread[before] = after;
                _rev_thread[after] = before;

                // Change the parent node and shift stem nodes
                _parent[stem] = par_stem;
                par_stem = stem;
                stem = next_stem;

                // Update last and after
                last = _last_succ[stem] == _last_succ[par_stem] ?
                    _rev_thread[par_stem] : _last_succ[stem];
                after = _thread[last];
            }
            _parent[u_out] = par_stem;
            _thread[last] = thread_continue;
            _rev_thread[thread_continue] = last;
            _last_succ[u_out] = last;

            // Remove the subtree of u_out from the thread list except for
            // the case when old_rev_thread equals to v_in
            if (old_rev_thread != v_in) {
                _thread[old_rev_thread] = after;
                _rev_thread[after] = old_rev_thread;
            }

            // Update _rev_thread using the new _thread values
            for (int i = 0; i != int(_dirty_revs.size()); ++i) {
                int u = _dirty_revs[i];
                _rev_thread[_thread[u]] = u;
            }

            // Update _pred, _pred_dir, _last_succ and _succ_num for the
            // stem nodes from u_out to u_in
            int tmp_sc = 0, tmp_ls = _last_succ[u_out];
            for (int u = u_out, p = _parent[u]; u != u_in; u = p, p = _parent[u]) {
                _pred[u] = _pred[p];
                _forward[u] = !_forward[p];
                tmp_sc += _succ_num[u] - _succ_num[p];
                _succ_num[u] = tmp_sc;
                _last_succ[p] = tmp_ls;
            }
            _pred[u_in] = in_arc;
            _forward[u_in] = (u_in == _source[in_arc]);
            _succ_num[u_in] = old_succ_num;
        }

        // Update _last_succ from v_in towards the root
        int up_limit_out = _last_succ[join] == v_in ? join : -1;
        int last_succ_out = _last_succ[u_out];
        for (int u = v_in; u != -1 && _last_succ[u] == v_in; u = _parent[u]) {
            _last_succ[u] = last_succ_out;
        }

        // Update _last_succ from v_out towards the root
        if (join != old_rev_thread && v_in != old_rev_thread) {
            for (int u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ;
                 u = _parent[u]) {
                _last_succ[u] = old_rev_thread;
            }
        } else if (last_succ_out != old_last_succ) {
            for (int u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ;
                 u = _parent[u]) {
                _last_succ[u] = last_succ_out;
            }
        }

        // Update _succ_num from v_in to join
        for (int u = v_in; u != join; u = _parent[u]) {
            _succ_num[u] += old_succ_num;
        }
        // Update _succ_num from v_out to join
        for (int u = v_out; u != join; u = _parent[u]) {
            _succ_num[u] -= old_succ_num;
        }
    }

    // Update potentials: add the reduced cost of the entering arc to every
    // node in the subtree that was re-hung (traversed via the thread list).
    void updatePotential() {
        Cost sigma = _pi[v_in] - _pi[u_in] -
            ((_forward[u_in])?_cost[in_arc]:(-_cost[in_arc]));
        int end = _thread[_last_succ[u_in]];
        for (int u = u_in; u != end; u = _thread[u]) {
            _pi[u] += sigma;
        }
    }

    // Heuristic initial pivots
    bool initialPivots() {
        Value curr, total = 0;
        std::vector<Node> supply_nodes, demand_nodes;
        Node u;
        _graph.first(u);
        for (; u != INVALIDNODE; _graph.next(u)) {
            curr = _supply[_node_id(u)];
            if (curr > 0) {
                total += curr;
                supply_nodes.push_back(u);
            } else if (curr < 0) {
                demand_nodes.push_back(u);
            }
        }
        if (_sum_supply > 0) total -= _sum_supply;
        if (total <= 0) return true;

        ArcVector arc_vector;
        if (_sum_supply >= 0) {
            if (supply_nodes.size() == 1 && demand_nodes.size() == 1) {
                // Perform a reverse graph search from the sink to the source
                //typename GR::template NodeMap<bool> reached(_graph, false);
                BoolVector reached(_node_num, false);
                Node s = supply_nodes[0], t = demand_nodes[0];
                std::vector<Node> stack;
                reached[t] = true;
                stack.push_back(t);
                while (!stack.empty()) {
                    Node u, v = stack.back();
                    stack.pop_back();
                    if (v == s) break;
                    Arc a;
                    _graph.firstIn(a, v);
                    for (; a != INVALID; _graph.nextIn(a)) {
                        if (reached[u = _graph.source(a)]) continue;
ArcsType j = getArcID(a); arc_vector.push_back(j); reached[u] = true; stack.push_back(u); } } } else { arc_vector.resize(demand_nodes.size()); // Find the min. cost incomming arc for each demand node #pragma omp parallel for for (int i = 0; i < demand_nodes.size(); ++i) { Node v = demand_nodes[i]; Cost min_cost = std::numeric_limits<Cost>::max(); Arc min_arc = INVALID; Arc a; _graph.firstIn(a, v); for (; a != INVALID; _graph.nextIn(a)) { Cost c = _cost[getArcID(a)]; if (c < min_cost) { min_cost = c; min_arc = a; } } arc_vector[i] = getArcID(min_arc); } arc_vector.erase(std::remove(arc_vector.begin(), arc_vector.end(), INVALID), arc_vector.end()); } } else { arc_vector.resize(supply_nodes.size()); // Find the min. cost outgoing arc for each supply node #pragma omp parallel for for (int i = 0; i < int(supply_nodes.size()); ++i) { Node u = supply_nodes[i]; Cost min_cost = std::numeric_limits<Cost>::max(); Arc min_arc = INVALID; Arc a; _graph.firstOut(a, u); for (; a != INVALID; _graph.nextOut(a)) { Cost c = _cost[getArcID(a)]; if (c < min_cost) { min_cost = c; min_arc = a; } } arc_vector[i] = getArcID(min_arc); } arc_vector.erase(std::remove(arc_vector.begin(), arc_vector.end(), INVALID), arc_vector.end()); } // Perform heuristic initial pivots for (ArcsType i = 0; i != ArcsType(arc_vector.size()); ++i) { in_arc = arc_vector[i]; if (_state[in_arc] * (_cost[in_arc] + _pi[_source[in_arc]] - _pi[_target[in_arc]]) >= 0) continue; findJoinNode(); bool change = findLeavingArc(); if (delta >= MAX) return false; changeFlow(change); if (change) { updateTreeStructure(); updatePotential(); } } return true; } // Execute the algorithm ProblemType start() { return start<BlockSearchPivotRule>(); } template <typename PivotRuleImpl> ProblemType start() { PivotRuleImpl pivot(*this); ProblemType retVal = OPTIMAL; // Perform heuristic initial pivots if (!initialPivots()) return UNBOUNDED; uint64_t iter_number = 0; // Execute the Network Simplex algorithm while (pivot.findEnteringArc()) { 
if ((++iter_number <= max_iter&&max_iter > 0) || max_iter<=0) { #if DEBUG_LVL>0 if(iter_number>MAX_DEBUG_ITER) break; if(iter_number%1000==0||iter_number%1000==1){ Cost curCost=totalCost(); Value sumFlow=0; Cost a; a= (fabs(_pi[_source[in_arc]])>=fabs(_pi[_target[in_arc]])) ? fabs(_pi[_source[in_arc]]) : fabs(_pi[_target[in_arc]]); a=a>=fabs(_cost[in_arc])?a:fabs(_cost[in_arc]); for (int i=0; i<_flow.size(); i++) { sumFlow+=_state[i]*_flow[i]; } std::cout << "Sum of the flow " << std::setprecision(20) << sumFlow << "\n" << iter_number << " iterations, current cost=" << curCost << "\nReduced cost=" << _state[in_arc] * (_cost[in_arc] + _pi[_source[in_arc]] -_pi[_target[in_arc]]) << "\nPrecision = "<< -EPSILON*(a) << "\n"; std::cout << "Arc in = (" << _node_id(_source[in_arc]) << ", " << _node_id(_target[in_arc]) <<")\n"; std::cout << "Supplies = (" << _supply[_source[in_arc]] << ", " << _supply[_target[in_arc]] << ")\n"; std::cout << _cost[in_arc] << "\n"; std::cout << _pi[_source[in_arc]] << "\n"; std::cout << _pi[_target[in_arc]] << "\n"; std::cout << a << "\n"; } #endif findJoinNode(); bool change = findLeavingArc(); if (delta >= MAX) return UNBOUNDED; changeFlow(change); if (change) { updateTreeStructure(); updatePotential(); } #if DEBUG_LVL>0 else{ std::cout << "No change\n"; } #endif #if DEBUG_LVL>1 std::cout << "Arc in = (" << _source[in_arc] << ", " << _target[in_arc] << ")\n"; #endif } else { // max iters retVal = MAX_ITER_REACHED; break; } } #if DEBUG_LVL>0 Cost curCost=totalCost(); Value sumFlow=0; Cost a; a= (fabs(_pi[_source[in_arc]])>=fabs(_pi[_target[in_arc]])) ? 
fabs(_pi[_source[in_arc]]) : fabs(_pi[_target[in_arc]]); a=a>=fabs(_cost[in_arc])?a:fabs(_cost[in_arc]); for (int i=0; i<_flow.size(); i++) { sumFlow+=_state[i]*_flow[i]; } std::cout << "Sum of the flow " << std::setprecision(20) << sumFlow << "\n" << niter << " iterations, current cost=" << curCost << "\nReduced cost=" << _state[in_arc] * (_cost[in_arc] + _pi[_source[in_arc]] -_pi[_target[in_arc]]) << "\nPrecision = "<< -EPSILON*(a) << "\n"; std::cout << "Arc in = (" << _node_id(_source[in_arc]) << ", " << _node_id(_target[in_arc]) <<")\n"; std::cout << "Supplies = (" << _supply[_source[in_arc]] << ", " << _supply[_target[in_arc]] << ")\n"; #endif #if DEBUG_LVL>1 sumFlow=0; for (int i=0; i<_flow.size(); i++) { sumFlow+=_state[i]*_flow[i]; if (_state[i]==STATE_TREE) { std::cout << "Non zero value at (" << _node_num+1-_source[i] << ", " << _node_num+1-_target[i] << ")\n"; } } std::cout << "Sum of the flow " << sumFlow << "\n"<< niter <<" iterations, current cost=" << totalCost() << "\n"; #endif //Check feasibility if(retVal == OPTIMAL){ for (ArcsType e = _search_arc_num; e != _all_arc_num; ++e) { if (_flow[e] != 0){ if (fabs(_flow[e]) > _EPSILON) // change of the original code following issue #126 return INFEASIBLE; else _flow[e]=0; } } } // Shift potentials to meet the requirements of the GEQ/LEQ type // optimality conditions if (_sum_supply == 0) { if (_stype == GEQ) { Cost max_pot = -std::numeric_limits<Cost>::max(); for (ArcsType i = 0; i != _node_num; ++i) { if (_pi[i] > max_pot) max_pot = _pi[i]; } if (max_pot > 0) { for (ArcsType i = 0; i != _node_num; ++i) _pi[i] -= max_pot; } } else { Cost min_pot = std::numeric_limits<Cost>::max(); for (ArcsType i = 0; i != _node_num; ++i) { if (_pi[i] < min_pot) min_pot = _pi[i]; } if (min_pot < 0) { for (ArcsType i = 0; i != _node_num; ++i) _pi[i] -= min_pot; } } } return retVal; } }; //class NetworkSimplexSimple ///@} } //namespace lemon_omp
jintailwe.c
/******************************************************************************************** * A simple provably secure key exchange based on the learning with errors problem * * * Based on the paper: * Jintai Ding, Xiang Xie and Xiaodong Ling - 2012 * * Copyright (c) Jintai Ding, Xiang Xie and Xiaodong Ling for the theoretical key exchange * Afraz Arif Khan for implementing the key exchange in C and TLS * * Released under the MIT License; see LICENSE.txt for details. ********************************************************************************************/ /** \file jintailwe.c * Key exchange between Alice and Bob */ #include <stdio.h> #include <stdlib.h> #include <time.h> #include <stdbool.h> #include <math.h> #include <string.h> #include "jintailwe.h" #include "dgs.h" int main(int argc, char **argv){ D = dgs_disc_gauss_dp_init(LATTICE_DIMENSION,0,6,DGS_DISC_GAUSS_UNIFORM_TABLE); /************ Allocate Temporary Memory on the Fly **************************/ uint16_t i, j; //Alice Memory Allocation Alice_params.secret_matrix = (int**)malloc(LATTICE_DIMENSION*sizeof(int*)); for(i = 0; i < LATTICE_DIMENSION; i++){ Alice_params.secret_matrix[i] = (int*)malloc(LATTICE_DIMENSION*sizeof(int)); } Alice_params.public_matrix = (int**)malloc(LATTICE_DIMENSION*sizeof(int*)); for(i = 0; i < LATTICE_DIMENSION; i++){ Alice_params.public_matrix[i] = (int*)malloc(LATTICE_DIMENSION*sizeof(int)); } EA = (int**)malloc(LATTICE_DIMENSION*sizeof(int*)); for(i = 0;i < LATTICE_DIMENSION;i++){ EA[i] = (int*)malloc(LATTICE_DIMENSION*sizeof(int)); } edashA = (int*)malloc(sizeof(int)*LATTICE_DIMENSION); KA = (int*)malloc(sizeof(int)*LATTICE_DIMENSION); SKA = (int*)malloc(sizeof(int)*LATTICE_DIMENSION); //Resampling for Alice: Alice1_params.secret_vector = (int*)malloc(sizeof(int)*LATTICE_DIMENSION); Alice1_params.public_vector = (int*)malloc(sizeof(int)*LATTICE_DIMENSION); //Bob Memory Allocation Bob_params.secret_vector = (int*)malloc(sizeof(int)*LATTICE_DIMENSION); eB = 
(int*)malloc(sizeof(int)*LATTICE_DIMENSION); Bob_params.public_vector = (int*)malloc(sizeof(int)*LATTICE_DIMENSION); edashB = (int*)malloc(sizeof(int)*LATTICE_DIMENSION); KB = (int*)malloc(sizeof(int)*LATTICE_DIMENSION); SKB = (int*)malloc(sizeof(int)*LATTICE_DIMENSION); //Signal Memory Allocation sig = (int*)malloc(sizeof(int)*LATTICE_DIMENSION); time_t t = clock(); if(argc >= 2){ if(strcmp(argv[1],"-help")!=0){ run_key_exchange(argc,argv); } } else{ run_key_exchange(argc,argv); } t = clock() - t; double time_taken = ((double)t)/CLOCKS_PER_SEC; if(argc >= 2){ if(strcmp(argv[1],"--results")==0){ printf("The total time taken for the key exchange is: %fms\n",time_taken*1000 ); printf("\n"); printf("==================Memory Complexity Benchmark=====================\n" ); printf("\n"); memory_consumed(); printf("==============Communicational Complexity Benchmark================\n" ); printf("\n"); communication_complexity(); } if(strcmp(argv[1],"--time")==0 || strcmp(argv[1],"--time-params")==0){ printf("The total time taken for the key exchange is: %fms\n",time_taken*1000 ); } if(strcmp(argv[1],"--mem")==0){ memory_consumed(); } if(strcmp(argv[1],"-help")==0){ printf("COPYRIGHT: Afraz Arif Khan 2018, This software is available under the MIT 2.0 License\n"); printf("=====================================================================================\n"); printf("This is a Lattice Cryptography LWE Post-Quantum Key Exchange\n"); printf("\n\n"); printf("To view all the results including time and memory complexity type:\n"); printf("./jintailwe --results\n"); printf("\n"); printf("To view all the total time taken for the key exchange:\n"); printf("./jintailwe --time\n"); printf("\n"); printf("To view the total time taken for the key exchange and individual processes:\n"); printf("./jintailwe --time-params\n"); printf("\n"); printf("To view all the memory consumed by M, Alice0, Bob and Alice1 for the key exchange:\n"); printf("./jintailwe --mem\n"); printf("\n"); 
printf("To view Alice and Bobs Shared Keys:\n"); printf("./jintailwe --print-keys\n"); } } if(argc < 2){ printf("Type './jintailwe -help' for further instructions\n"); } return 0; } void run_key_exchange(int argc, char **argv){ double time_taken_M; double time_taken_Alice0; double time_taken_Bob; double time_taken_Alice1; double time_taken_temp; srand(time(NULL)); time_t t = clock(); generate_M(); t = clock() - t; time_taken_M = ((double)t)/CLOCKS_PER_SEC; int i, j; // loop index //------- Generate Alices parameters -------- t = clock(); generate_gaussian_matrix(Alice_params.secret_matrix); generate_gaussian_matrix(EA); /* Implement the following Algorithm: PA = (M.SA + 2*EA) mod q */ //Generate Public Parameter for(i = 0; i < LATTICE_DIMENSION; i++){ for(j = 0; j < LATTICE_DIMENSION; j++){ Alice_params.public_matrix[i][j] = Alice_params.public_matrix[i][j] + (M[i][j]*Alice_params.secret_matrix[i][j] + 2*EA[i][j]); Alice_params.public_matrix[i][j] = (Alice_params.public_matrix[i][j] < 0) ? Alice_params.public_matrix[i][j] % MODULO_Q + MODULO_Q : Alice_params.public_matrix[i][j] % MODULO_Q; } } generate_gaussian_vector(edashA); t = clock() - t; time_taken_Alice0 = ((double)t)/CLOCKS_PER_SEC; //------- Generate Bobs parameters ---------- t = clock(); generate_gaussian_vector(Bob_params.secret_vector); generate_gaussian_vector(eB); generate_gaussian_vector(edashB); //Generate Public Parameter for(i = 0; i < LATTICE_DIMENSION;i++){ for(j = 0; j < LATTICE_DIMENSION; j++){ Bob_params.public_vector[i] = Bob_params.public_vector[i] + (M_TRANSPOSE[i][j]*Bob_params.secret_vector[j] + 2*eB[j]); } Bob_params.public_vector[i] = (Bob_params.public_vector[i] < 0) ? Bob_params.public_vector[i] % MODULO_Q + MODULO_Q : Bob_params.public_vector[i] % MODULO_Q; } //Find Bobs Key for(i = 0; i < LATTICE_DIMENSION; i++){ for(j = 0; j < LATTICE_DIMENSION; j++){ KB[i] = KB[i] + (Alice_params.public_matrix[j][i]*Bob_params.secret_vector[j] + 2*edashB[j])%MODULO_Q; } KB[i] = (KB[i] < 0) ? 
KB[i] % MODULO_Q + MODULO_Q : KB[i] % MODULO_Q; } t = clock() - t; time_taken_Bob = ((double)t)/CLOCKS_PER_SEC; t = clock(); //Find Alices Key for(i = 0; i < LATTICE_DIMENSION; i++){ for(j = 0; j < LATTICE_DIMENSION; j++){ KA[i] = KA[i] + (Alice_params.secret_matrix[j][i]*Bob_params.public_vector[j] + 2*edashA[j])%MODULO_Q; } KA[i] = (KA[i] < 0) ? KA[i] % MODULO_Q + MODULO_Q : KA[i] % MODULO_Q; } t = clock() - t; time_taken_Alice1 = ((double)t)/CLOCKS_PER_SEC; //-- Check the robust extractor condition till correct params generated ---- i = 0; bool Alice_gen = true; bool Bob_gen = false; double delta = MODULO_Q/4 - 2; while(i < LATTICE_DIMENSION){ if(!check_robust_extractor(KA[i], KB[i])){ //Redo single parameters long offset = discrete_normal_distribution(); if((KA[i] - KB[i])%2 != 0){ KA[i] = KA[i] - 1; //Make it even } if(abs(KA[i] - KB[i]) > delta){ if(KA[i] > KB[i]){ //reduce KA a bit KB[i] = KA[i] + offset; } else{ //reduce KB a bit KB[i] = KA[i] + offset; } } } else{ i = i+1; } } t = clock(); //Shared Keys for(i = 0; i < LATTICE_DIMENSION; i++){ sig[i] = signal_function(KB[i], rand()%2); SKA[i] = robust_extractor(KA[i], sig[i]); SKB[i] = robust_extractor(KB[i], sig[i]); } t = clock()-t; time_taken_temp = ((double)t)/CLOCKS_PER_SEC; time_taken_Bob = time_taken_Bob + (2/3)*time_taken_temp; time_taken_Alice1 = time_taken_Alice1 + (1/3)*time_taken_temp; /******* RESULTS **********/ bool kex_success = true; //--- Check if the keys are the same --- for(i = 0;i < LATTICE_DIMENSION; i++){ if(SKA[i] != SKB[i]){ kex_success = false; } } if(kex_success){ printf("Key Exchange worked, Alice and Bob Share the same key!\n"); } if(argc >= 2){ if(strcmp(argv[1],"--print-keys")==0){ printf("Alice's Key is:\n"); pretty_print_vector(SKA); printf("\n"); printf("Bob's key is:\n"); pretty_print_vector(SKB); printf("\n"); } if(strcmp(argv[1],"--time-params")==0 || strcmp(argv[1],"--results")==0){ printf("============= Time taken for individual parameters ==============\n" ); 
printf("\n"); printf(" --------- | -------------\n" ); printf("|parameter | Time(ms) \n" ); printf(" -------- | -------------\n" ); printf("| M | %f\n", time_taken_M*1000); printf("| Alice0 | %f\n", time_taken_Alice0*1000); printf("| Bob | %f\n", time_taken_Bob*1000); printf("| Alice1 | %f\n", time_taken_Alice1*1000); printf(" -------- | -------------\n" ); } } } //Generating the public matrix M once and for all void generate_M(){ int i, j; for(i = 0; i < LATTICE_DIMENSION; i++){ for(j = 0; j < LATTICE_DIMENSION; j++){ M[i][j] = rand()%MODULO_Q; M_TRANSPOSE[j][i] = M[i][j]; } } } //This function is broken for the globals void generate_gaussian_matrix(int **gauss_matrix){ //#pragma omp parallel for collapse(2) for(int i = 0; i < LATTICE_DIMENSION; i++){ for(int j = 0; j < LATTICE_DIMENSION; j++){ gauss_matrix[i][j] = discrete_normal_distribution(); } } } void generate_gaussian_vector(int gauss_vec[LATTICE_DIMENSION]){ int i; //Loop index for(i = 0; i < LATTICE_DIMENSION; i++){ gauss_vec[i] = discrete_normal_distribution(); } } int generate_gaussian_scalar(){ return discrete_normal_distribution(); } int robust_extractor(int x, int sigma){ return ((((int)x)%MODULO_Q + (int64_t)(sigma * (MODULO_Q - 1)/2)%MODULO_Q)%2); } bool check_robust_extractor(int x, int y){ double delta = MODULO_Q/4 - 2; return ((x-y)%2 == 0 && abs(x-y) <= delta); } int signal_function(int y, int b){ return !(y >= floor(-MODULO_Q/4) + b && y <= floor(MODULO_Q/4) + b); } void pretty_print_matrix(int **matrix){ int i, j; for(i = 0; i < LATTICE_DIMENSION; i++){ for(j = 0; j < LATTICE_DIMENSION; j++){ printf("Matrix[%i][%i] = %i\n", i, j, matrix[i][j]); } printf("\n"); } } void pretty_print_vector(int vec[LATTICE_DIMENSION]){ int i; for(i = 0; i < LATTICE_DIMENSION; i++){ printf("%i", vec[i]); } } /*------------------- Generate Gaussian numbers in C -------------------------*/ //Makes use of the dgs library long discrete_normal_distribution(){ long val = D->call(D); return val; } 
/*---------------------------- Test Results ----------------------------------*/

/* Prints a small table of the heap bytes attributable to each protocol stage.
 * matrix_mem / vector_mem and the *_mem_* element counts come from
 * jintailwe.h -- presumably bytes per matrix / per vector and the number of
 * such objects each party holds; TODO confirm against the header. */
void memory_consumed(){
    printf(" --------- | -------------\n" );
    printf("|parameter | bytes \n" );
    printf(" -------- | -------------\n" );
    printf("| M | %i \n", matrix_mem);
    /* Alice0 is the only stage charged for matrices as well as vectors. */
    printf("| Alice0 | %i \n", Alice0_mem_vector*vector_mem + Alice0_mem_matrix*matrix_mem);
    printf("| Bob | %i \n", Bob_mem_vector*vector_mem);
    printf("| Alice1 | %i \n", Alice1_mem_vector*vector_mem);
    printf(" --------- | -------------\n" );
}

/* Prints the bytes exchanged in each direction: one matrix from Alice to Bob,
 * and twice a vector's worth from Bob to Alice (presumably Bob's public
 * vector plus the signal vector -- TODO confirm against the protocol flow). */
void communication_complexity(){
    printf(" --------- | -------------\n" );
    printf("| Communication(bytes) \n" );
    printf(" --------- | -------------\n" );
    printf("| A -> B | %i \n", matrix_mem );
    printf("| B -> A | %i \n", 2*vector_mem );
    printf(" --------- | -------------\n" );
}
cython_dL_update_omp.c
/* cython_dL_update_hmc.c
 *
 * Rutger van Haasteren, December 12 2015, Pasadena
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/* The aggregated algorithm for use in the Hamiltonian Sampler */
void dL_update_hmc2(const double *pdL, const double *pdLi, const double *pdp,
                    double *pdM, double *pdtj, const int N) {
    /* Formal derivative of rank-one update of Cholesky decomposition,
       adjusted to perform all rank-one updates at once for the derivative

       L'L'^{T} = LL^{T} + diag(B)

       dL' = L Phi(L^{-1} dB L^{-T})

       With Phi the utril function

       B = B(x)

       We need: dot(d_L_d_x, p), and trace(L^{-1} d_L_d_x)

       Assuming we know dB/dx, we can get d_L_d_x from the chain-rule, using
       d_L_d_B. The output of this function lets us do that:
       dot(d_L_d_x, p) = dot(M, d_B_d_x)
       trace(L^{-1} d_L_d_x) = dot(tj, d_B_d_x)

       Re-parameterized: also works in the limit where a->0

       :param pdL:   Current updated Cholesky decomposition (L-prime),
                     row-major N x N
       :param pdLi:  Inverse of Cholesky decomposition (L^{-1})
       :param pdp:   Vector we'll need to multiply dL with
       :param pdM:   The return matrix M (output, overwritten)
       :param pdtj:  The return vector tj (output, overwritten)
       :param N:     Size of all the objects
    */
    double *pdLdot, *pdU, *pdLtrans;
    double r, drdot, dcdot, ds, temp;
    int i, j, k, index;
    //const int maxthreads = omp_get_max_threads();

    /* Allocate memory for dL transpose.
       NOTE(review): this malloc (and the callocs below) are unchecked --
       a failed allocation would crash on first use. */
    pdLtrans = malloc(N*N*sizeof(double));

    /* Set the output matrices to zero (is quick), and transpose L so the
       inner loops below walk memory contiguously. */
    for(i=0; i<N; ++i) {
        for(j=0; j<N; ++j) {
            pdM[j+N*i] = 0.0;
            pdLtrans[j+N*i] = pdL[i+N*j];
        } /* for j */
        pdtj[i] = 0.0;
    } /* for i */

    /* NOTE(review): default(none) is used but N does not appear in any
       clause -- presumably accepted because N is const-qualified on the
       compilers this targeted; confirm before changing compilers. */
    #pragma omp parallel private(i, j, k, index, pdLdot, pdU, r, drdot, dcdot, ds, temp) shared(pdL, pdLtrans, pdLi, pdp, pdM, pdtj) default(none)
    {
        //const int nthreads = omp_get_num_threads();
        //const int ithread = omp_get_thread_num();

        /* Per-thread scratch: one column of M plus a tj accumulator, flushed
           into the shared outputs inside the critical section below. */
        double *pdMlocal, dtjlocal;
        pdMlocal = calloc(N, sizeof(double));
        //printf("In thread %i of %i\n", ithread, nthreads);

        /* The index i represents the basis vector we are working with */
        #pragma omp for nowait // schedule(dynamic)
        for(i=0; i<N; ++i) {
            /* Allocate memory inside the parallel region.  calloc matters:
               the algorithm relies on the untouched tails being zero. */
            pdLdot = calloc(N, sizeof(double)); /* columns of Ldot are stored only */
            pdU = calloc(N, sizeof(double));    /* basis vector we are updating */

            /* Initialize all our quantities */
            pdU[i] = 1.0;
            temp = 0.0;
            dtjlocal = 0.0;

            /* The index k represents the row of Ldot we are working with */
            for(k=0; k<N; ++k) {
                r = pdL[k+N*k];

                /* Initialize the scalar quantities for this row */
                drdot = 0.5*pdU[k]*pdU[k] / r;
                dcdot = drdot/pdL[k+N*k];
                ds = pdU[k] / pdL[k+N*k];

                /* Clear Ldot data: only entry k-1 can hold a stale value
                   from the previous row; everything above is never read. */
                if(k > 0) {
                    pdLdot[k-1] = 0.0;
                } /* if k */
                pdLdot[k] = drdot;

                /* Update Ldot */
                /* The index j represents the column of Ldot we are working with */
                for(j=k+1; j<N; ++j) {
                    /* Using the transpose of pdL is faster */
                    //pdLdot[j] = ds*pdU[j] - dcdot * pdL[k+N*j];
                    pdLdot[j] = ds*pdU[j] - dcdot * pdLtrans[j+N*k];
                } /* for j */

                /* Update U */
                for(j=k+1; j<N; ++j) {
                    /* Using the transpose of pdL is faster */
                    //pdU[j] = pdU[j] - ds*pdL[k+N*j];
                    pdU[j] = pdU[j] - ds*pdLtrans[j+N*k];
                } /* for j */

                /* Update M: accumulate dot(Ldot-column, p) into the local copy */
                temp = 0;
                for(j=k; j<N; ++j) {
                    temp += pdLdot[j]*pdp[j];
                } /* for j */
                //pdM[i+N*k] += temp;
                pdMlocal[k] = temp;

                /* Update tj: accumulate dot(row k of L^{-1}, Ldot) */
                temp = 0;
                for(j=0; j<N; ++j) {
                    temp += pdLi[j+N*k]*pdLdot[j];
                } /* for j */
                //pdtj[i] += temp;
                dtjlocal += temp;
            } /* for k */

            /* How do I update pdM and pdtj FAST????? */
            /* Depends on the compiler flags!! */
            /* Flush the per-thread results into the shared outputs; the
               critical section serialises these (strided) writes. */
            #pragma omp critical
            {
                for(k=0; k<N; ++k) {
                    index = i+N*k;
                    /* Doing this is FAST */
                    /* pdM[index] = 1.337; */
                    /* But instead this, is SLOW */
                    pdM[index] = pdMlocal[k];
                    //pdM[index] = 1.337;
                } /* for k */

                /* Doing this is FAST */
                /* pdtj[i] += 1.445; */
                /* But instead this, is SLOW */
                pdtj[i] += dtjlocal;
                //pdtj[i] += 1.445;
            }

            /* Free memory of parallel regions */
            free(pdLdot);
            free(pdU);
        } /* for i */

        free(pdMlocal);
    } /* pragma omp parallel */

    free(pdLtrans);

    return;
} /* dL_update_hmc */
ops.h
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ #pragma once #ifndef OPS_H_ #define OPS_H_ #include <op_boilerplate.h> #include <array/DataTypeUtils.h> #include <helpers/shape.h> #include <vector> #include <Environment.h> #include <loops/summarystatsreduce.h> #include <loops/ReduceType.h> #define MIN_V 1e-12 #define MAX_FLOAT 1e37 #define MIN_FLOAT 1e-37 #define MAX_INT 2147483647 #define MIN_CUTFOFF -3.79297773665f #define FLOAT_MIN_NORMAL 1.17549435e-38 #define EPS 1e-5 #define AFFINITY close #define DOUBLE_PI_T T(2.0 * 3.14159265358979323846) #define DOUBLE_PI_X X(2.0 * 3.14159265358979323846) #define no_op_exec_special_any static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_bool static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_same static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, X *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong 
*tadOffsets) {} #define no_op_exec_special static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, Z *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_accumulation static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, Z *extraParams, Z *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){} #define no_op_exec_special_accumulation_long static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, X *extraParams, Z *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){} #define no_op_exec_special_accumulation_same static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, X *extraParams, X *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){} #ifdef __CUDACC__ #define no_op_exec_special_any_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_bool_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_same_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, X *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, X *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_cuda static __device__ void execSpecialCuda(X *dx, 
Nd4jLong *xShapeBuffer,Z *result, Nd4jLong *resultShapeBuffer,Z *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_accumulation_same_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, X *extraParams, X *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, X *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_accumulation_long_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, X *extraParams, Z *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Z *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_accumulation_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, Z *extraParams, Z *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Z *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {} #else // hacky fix for isnan/being being out of scope //#ifdef IOS //#define isinf(x) 0 // this isn't right. 
But std::isinf fails //#define isnan(x) 0 //#else //#define isnan std::isnan //#define isinf std::isinf //#endif #define no_op_exec_special_cuda #define no_op_exec_special_accumulation_cuda #define no_op_exec_special_accumulation_same_cuda #define no_op_exec_special_accumulation_long_cuda #define no_op_exec_special_any_cuda #define no_op_exec_special_bool_cuda #define no_op_exec_special_same_cuda #define no_op_exec_special_accumulation_same_cuda #endif #define SELU_ALPHA 1.6732632423543772848170429916717 #define SELU_LAMBDA 1.0507009873554804934193349852946 #ifdef _OPENMP #pragma omp declare reduction(maxTF : float,double,float16,bfloat16 : \ omp_out = nd4j::math::nd4j_max(omp_in, omp_out) )\ initializer (omp_priv=-MAX_FLOAT) #pragma omp declare reduction(minTF : float,double,float16,bfloat16 : \ omp_out = nd4j::math::nd4j_min(omp_in, omp_out) )\ initializer (omp_priv=MAX_FLOAT) #pragma omp declare reduction(maxT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \ omp_out = nd4j::math::nd4j_max(omp_in, omp_out) )\ initializer (omp_priv=0) #pragma omp declare reduction(minT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \ omp_out = nd4j::math::nd4j_min(omp_in, omp_out) )\ initializer (omp_priv=0) #pragma omp declare reduction(amaxT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \ omp_out = nd4j::math::nd4j_max(nd4j::math::nd4j_abs(omp_in), nd4j::math::nd4j_abs(omp_out)) ) #pragma omp declare reduction(aminT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \ omp_out = nd4j::math::nd4j_min(nd4j::math::nd4j_abs(omp_in), nd4j::math::nd4j_abs(omp_out)) ) #pragma omp declare reduction(asumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \ omp_out = nd4j::math::nd4j_abs(omp_in) + 
nd4j::math::nd4j_abs(omp_out))\
                initializer (omp_priv=0)

// OpenMP user-defined reductions (sum / product) over every numeric type supported here.
#pragma omp declare reduction(sumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
                omp_out = omp_in + omp_out)\
                initializer (omp_priv=0)

#pragma omp declare reduction(prodT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
                omp_out = omp_in * omp_out)\
                initializer (omp_priv=1)
#endif


namespace functions {
    namespace indexreduce {

        // (value, index) pair carried through index-reductions (argmax/argmin and friends).
        template <typename T>
        struct IndexValue {
            T value;
            Nd4jLong index;
            _CUDA_HD IndexValue() = default;
            _CUDA_HD IndexValue(const T val, const Nd4jLong ind): index(ind), value(val) {}
        };
    }

    namespace summarystats {
        // Forward declaration only; defined in the summary-statistics module.
        template <typename T>
        class SummaryStatsData;
    }
}

namespace simdOps {

    // Pairwise addition. The overload set is the standard op-class surface:
    // (x, y), (x, y, params), unary passthrough, and the MetaOps form that reads
    // the second operand from params[0].
    template <typename X, typename Y, typename Z>
    class Add {
    public:
        op_def static Z op(X d1, Y d2) { return static_cast<Z>(d1 + d2); }
        op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d1 + d2); }
        op_def static Z op(X d1) { return static_cast<Z>(d1); }
        // op for MetaOps
        op_def static Z op(X d1, Y *params) { return static_cast<Z>(d1 + params[0]); }
        // additive identity, used when this op seeds a reduction
        op_def static X startingValue() { return static_cast<X>(0.f); }
    };

    // Addition whose result stays in the X domain (no separate Z parameter).
    template <typename X, typename Y>
    class NewAdd {
    public:
        op_def static X op(X d1, Y d2, X *params) { return d1 + d2; }
    };

    // Pairwise subtraction: d1 - d2.
    template <typename X, typename Y, typename Z>
    class Subtract {
    public:
        op_def static Z op(X d1, Y d2) { return static_cast<Z>(d1 - d2); }
        op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d1 - d2); }
        op_def static Z op(X d1) { return static_cast<Z>(d1); }
        // op for MetaOps
        op_def static Z op(X d1, Y *params) { return static_cast<Z>(d1 - params[0]); }
    };

    // (d1 - d2)^2, squared in the Z domain.
    template <typename X, typename Y, typename Z>
    class SquaredSubtract {
    public:
        op_def static Z op(X d1, Y d2) {
            auto d = static_cast<Z>(d1 - d2);
            return d * d;
        }

        op_def static Z op(X d1, Y d2, Z *params) {
            auto d = static_cast<Z>(d1 - d2);
            return d * d;
        }

        op_def static Z op(X d1) { return d1; }

        // op for MetaOps
        op_def static Z op(X d1, Y *params) {
            auto d = static_cast<Z>(d1 - params[0]);
            return d * d;
        }
    };

    // (d2 - d1)^2 — operands swapped relative to SquaredSubtract.
    template <typename X, typename Y, typename Z>
    class SquaredReverseSubtract {
    public:
        op_def static Z op(X d1, Y d2) {
            auto d = static_cast<Z>(d2 - d1);
            return d * d;
        }

        op_def static Z op(X d1, Y d2, Z *params) {
            auto d = static_cast<Z>(d2 - d1);
            return d * d;
        }

        op_def static Z op(X d1) { return d1; }

        // op for MetaOps
        op_def static Z op(X d1, Y *params) {
            auto d = static_cast<Z>(params[0] - d1);
            return d * d;
        }
    };

    // d2 - d1.
    template <typename X, typename Y, typename Z>
    class ReverseSubtract {
    public:
        op_def static Z op(X d1, Y d2) { return static_cast<Z>(d2 - d1); }
        op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d2 - d1); }
        op_def static Z op(X d1) { return d1; }
        // op for MetaOps
        op_def static Z op(X d1, Y *params) { return static_cast<Z>(params[0] - d1); }
    };

    // Full log-Poisson loss: exp(c) - z*c + (z*log(z) - z + 0.5*log(2*pi*z)),
    // i.e. including the Stirling approximation of log(z!).
    template <typename X, typename Y, typename Z>
    class LogPoissonLossFull {
    public:
        op_def static Z op(X z, Y c) {
            auto zz = static_cast<Z>(z);
            auto zc = static_cast<Z>(c);
            return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc + (zz * nd4j::math::nd4j_log<X, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz)));
        }

        op_def static Z op(X z, Y c, Z *params) {
            auto zz = static_cast<Z>(z);
            auto zc = static_cast<Z>(c);
            return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc + (zz * nd4j::math::nd4j_log<X, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz)));
        }

        // unary form: only the Stirling term
        op_def static Z op(X z) {
            auto zz = static_cast<Z>(z);
            return (zz * nd4j::math::nd4j_log<Y, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz));
        }

        // op for MetaOps
        op_def static X op(X z, Y *params) {
            return (nd4j::math::nd4j_exp<X, X>(params[0]) - z * params[0] + (z * nd4j::math::nd4j_log<X, Z>(z) - z + static_cast<X>(0.5f) * nd4j::math::nd4j_log<X, Z>(DOUBLE_PI_X * z)));
        }
    };

    template <typename X, typename Y,
typename Z>
    class LogPoissonLoss {
    public:
        // log-Poisson loss without the Stirling term: exp(c) - z*c
        op_def static Z op(X z, Y c) {
            auto zz = static_cast<Z>(z);
            auto zc = static_cast<Z>(c);
            return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc);
        }

        op_def static Z op(X z, Y c, Z *params) {
            auto zz = static_cast<Z>(z);
            auto zc = static_cast<Z>(c);
            return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc);
        }

        op_def static Z op(X z) { return static_cast<Z>(z); }

        // op for MetaOps
        op_def static Z op(X z, Y *params) {
            return (nd4j::math::nd4j_exp<Y, Z>(params[0]) - static_cast<Z>(z) * static_cast<Z>(params[0]));
        }
    };

    // Pairwise multiplication; startingValue() is the multiplicative identity.
    template <typename X, typename Y, typename Z>
    class Multiply {
    public:
        op_def static Z op(X d1, Y d2) { return static_cast<Z>(d1 * d2); }
        op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d1 * d2); }
        op_def static Z op(X d1) { return static_cast<Z>(d1); }
        // op for MetaOps
        op_def static Z op(X d1, Y *params) { return static_cast<Z>(d1 * params[0]); }
        op_def static X startingValue() { return static_cast<X>(1.f); }
    };

    // Pairwise division d1 / d2. No zero check here — see SafeDivide below.
    template <typename X, typename Y, typename Z>
    class Divide {
    public:
        op_def static Z op(X d1, Y d2) { return static_cast<Z>(d1 / d2); }
        op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d1 / d2); }
        op_def static Z op(X d1) { return static_cast<Z>(d1); }
        // op for MetaOps
        op_def static Z op(X d1, Y *params) { return static_cast<Z>(d1 / params[0]); }
        op_def static X startingValue() { return static_cast<X>(1); }
    };

    // Division that yields 0 whenever the divisor is exactly 0.
    template <typename X, typename Y, typename Z>
    class SafeDivide {
    public:
        op_def static Z op(X d1, Y d2) {
            if(d2 == static_cast<Y>(0))
                return static_cast<Z>(0);
            return static_cast<Z>(d1 / d2);
        }

        op_def static Z op(X d1, Y d2, Z *params) {
            if(d2 == static_cast<Y>(0))
                return static_cast<Z>(0);
            return static_cast<Z>(d1 / d2);
        }

        op_def static Z op(X d1) { return static_cast<Z>(d1); }

        // op for MetaOps
        op_def static Z op(X d1, Y *params) {
            if(params[0] == static_cast<Y>(0))
                return static_cast<Z>(0);
            return static_cast<Z>(d1 / params[0]);
        }
    };

    // floor(d1 / d2). NOTE(review): the division happens first and is floored
    // afterwards in the Z domain — for integral operand types the inner division
    // already truncates toward zero, which differs from a true floor for negative
    // quotients; confirm that is intended.
    template <typename X, typename Y, typename Z>
    class FloorDiv {
    public:
        op_def static Z op(X d1, Y d2) { return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / d2)); }
        op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / d2)); }
        op_def static Z op(X d1) { return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1)); }
        // op for MetaOps
        op_def static Z op(X d1, Y *params) { return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / params[0])); }
    };

    // Truncating division: both operands are cast to int first.
    // NOTE(review): the int cast narrows 64-bit inputs and divides by zero when
    // d2 rounds to 0 — verify callers guarantee the range.
    template <typename X, typename Y, typename Z>
    class TruncateDiv {
    public:
        op_def static Z op(X d1, Y d2) {
            auto i1 = static_cast<int>(d1);
            auto i2 = static_cast<int>(d2);
            return static_cast<Z>(i1 / i2);
        }

        op_def static Z op(X d1, Y d2, Z *params) {
            auto i1 = static_cast<int>(d1);
            auto i2 = static_cast<int>(d2);
            return static_cast<Z>(i1 / i2);
        }

        op_def static Z op(X d1) { return d1; }

        // op for MetaOps
        op_def static Z op(X d1, Y *params) {
            auto i1 = static_cast<int>(d1);
            auto i2 = static_cast<int>(params[0]);
            return static_cast<Z>(i1 / i2);
        }
    };

    // Truncating modulo on int-cast operands; same narrowing caveat as TruncateDiv.
    template <typename X, typename Y, typename Z>
    class TruncateMod {
    public:
        op_def static Z op(X d1, Y d2) {
            auto i1 = static_cast<int>(d1);
            auto i2 = static_cast<int>(d2);
            return static_cast<Z>(i1 % i2);
        }

        op_def static Z op(X d1, Y d2, Z *params) {
            auto i1 = static_cast<int>(d1);
            auto i2 = static_cast<int>(d2);
            return static_cast<Z>(i1 % i2);
        }

        op_def static Z op(X d1) { return static_cast<Z>(d1); }

        // op for MetaOps
        op_def static Z op(X d1, Y *params) {
            auto i1 = static_cast<int>(d1);
            auto i2 = static_cast<int>(params[0]);
            return static_cast<Z>(i1 % i2);
        }
    };

    // Remainder as implemented by nd4j_remainder (IEEE-style for floats).
    template<typename X, typename Y, typename Z>
    class Remainder {
    public:
        op_def static Z op(X d1, Y d2) { return nd4j::math::nd4j_remainder<X, Y, Z>(d1, d2); }
        op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_remainder<X, Y, Z>(d1, d2); }
        op_def static Z op(X d1) { return d1; }
        // op for MetaOps
        op_def static Z op(X d1, Y *params) { return nd4j::math::nd4j_remainder<X, Y, Z>(d1, params[0]); }
    };

    template <typename
X, typename Y, typename Z>
    class FMod {
    public:
        // fmod-style remainder via the nd4j math wrapper.
        op_def static Z op(X d1, Y d2) { return nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2); }
        op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2); }
        op_def static Z op(X d1) { return d1; }
        // op for MetaOps
        op_def static Z op(X d1, Y *params) { return nd4j::math::nd4j_fmod<X, Y, Z>(d1, params[0]); }
    };

    // Floored modulo: when operand signs differ, the fmod result is shifted by
    // the divisor so the result takes the divisor's sign.
    template <typename X, typename Y, typename Z>
    class FloorMod {
    public:
        op_def static Z op(X d1, Y d2) {
            auto m = nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2);
            return (d1 < static_cast<X>(0)) == (d2 < static_cast<Y>(0)) ? m : nd4j::math::nd4j_fmod<Z, Y, Z>(m + static_cast<Z>(d2), d2);
        }

        op_def static Z op(X d1, Y d2, Z *params) {
            auto m = nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2);
            return (d1 < static_cast<X>(0.0f)) == (d2 < static_cast<Y>(0)) ? m : nd4j::math::nd4j_fmod<Z, Y, Z>(m + static_cast<Z>(d2), d2);
        }

        op_def static Z op(X d1) { return d1; }

        // op for MetaOps
        op_def static Z op(X d1, Y *params) { return op(d1, params[0]); }
    };

    // d2 / d1.
    template <typename X, typename Y, typename Z>
    class ReverseDivide {
    public:
        op_def static Z op(X d1, Y d2) { return static_cast<Z>(d2 / d1); }
        op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d2 / d1); }
        op_def static Z op(X d1) { return static_cast<Z>(d1); }
        // op for MetaOps
        op_def static Z op(X d1, Y *params) { return static_cast<Z>(params[0] / d1); }
    };

    // Pairwise copy: the second operand wins.
    template <typename X, typename Y, typename Z>
    class CopyPws {
    public:
        op_def static Z op(X d1, Y d2) { return static_cast<Z>(d2); }
        op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d2); }
        op_def static Z op(X d1) { return static_cast<Z>(d1); }
        op_def static Z op(X d1, Y *params) { return static_cast<Z>(d1); }
    };

    // Identity transform.
    template <typename X>
    class Copy {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) { return d1; }
    };

    // Same behaviour as CopyPws, kept as a distinct op type.
    template <typename X, typename Y, typename Z>
    class Copy2 {
    public:
        op_def static Z op(X d1, Y d2) { return static_cast<Z>(d2); }
        op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d2); }
        op_def static Z op(X d1) { return static_cast<Z>(d1); }
        op_def static Z op(X d1, Y *params) { return static_cast<Z>(d1); }
    };

    // BLAS-style axpy: alpha * d1 + d2, alpha taken from params[0].
    template <typename X, typename Y, typename Z>
    class Axpy {
    public:
        op_def static Z op(X d1, Y d2) { return static_cast<Z>(d2 + d1); }

        op_def static Z op(X d1, Y d2, Z *params) {
            auto alpha = params[0];
            return alpha * static_cast<Z>(d1) + static_cast<Z>(d2);
        }

        op_def static Z op(X d1) { return static_cast<Z>(d1); }
    };

    // Typed assignment X -> Z.
    template <typename X, typename Z>
    class Assign {
    public:
        no_op_exec_special_any
        no_op_exec_special_any_cuda

        op_def static Z op(X d1, X *params) { return static_cast<Z>(d1); }
    };

    // Logical AND. With params, values are compared against params[0] as the
    // "false" sentinel; otherwise plain boolean AND. The unary and MetaOps forms
    // are placeholders (119 is a debug marker, not a meaningful result).
    template <typename X, typename Z>
    class And {
    public:
        no_op_exec_special_bool
        no_op_exec_special_bool_cuda

        op_def static Z op(X d1, X d2) { return d2 + d1; }

        op_def static Z op(X d1, X d2, X *params) {
            if (params != nullptr) {
                auto comp = params[0];
                return d1 != comp && d2 != comp ? static_cast<Z>(1) : static_cast<Z>(0);
            } else {
                auto b1 = static_cast<bool>(d1);
                auto b2 = static_cast<bool>(d2);
                return (b1 && b2) ? static_cast<Z>(1) : static_cast<Z>(0);
            }
        }

        op_def static Z op(X d1) { return d1; }

        // op for MetaOps
        op_def static Z op(X d1, X *params) { return static_cast<Z>(119); }
    };

    // Bitwise OR / AND / XOR over integral types.
    template <typename X>
    class IntOr {
    public:
        op_def static X op(X d1, X d2) { return d2 | d1; }
        op_def static X op(X d1, X d2, X *params) { return op(d1, d2); }
    };

    template <typename X>
    class IntAnd {
    public:
        op_def static X op(X d1, X d2) { return d2 & d1; }
        op_def static X op(X d1, X d2, X *params) { return op(d1, d2); }
    };

    template <typename X>
    class IntXor {
    public:
        op_def static X op(X d1, X d2) { return d2 ^ d1; }
        op_def static X op(X d1, X d2, X *params) { return op(d1, d2); }
    };

    // Plain bit shifts. NOTE(review): shifting by d2 >= bit-width of X is
    // undefined behaviour in C++; callers are assumed to keep d2 in range.
    template <typename X>
    class ShiftLeft {
    public:
        op_def static X op(X d1, X d2) { return d1 << d2; }
        op_def static X op(X d1, X d2, X *params) { return op(d1, d2); }
    };

    template <typename X>
    class ShiftRight {
    public:
        op_def static X op(X d1, X d2) { return d1 >> d2; }
        op_def static X op(X d1, X d2, X *params) { return op(d1, d2); }
    };

    // Bit rotations. NOTE(review): for d2 == 0 the complementary shift is by the
    // full bit-width, which is undefined behaviour — confirm callers never pass 0
    // (a masked rotate would avoid this).
    template <typename X>
    class CyclicShiftLeft {
    public:
        op_def static X op(X d1, X d2) { return d1 << d2 | d1 >> ((sizeof(X) * 8) - d2); }
        op_def static X op(X d1, X d2, X *params) { return op(d1, d2); }
    };

    template <typename X>
    class CyclicShiftRight {
    public:
        op_def static X op(X d1, X d2) { return d1 >> d2 | d1 << ((sizeof(X) * 8) - d2); }
        op_def static X op(X d1, X d2, X *params) { return op(d1, d2); }
    };

    // Logical OR, mirroring And above (same sentinel convention, same markers).
    template <typename X, typename Z>
    class Or {
    public:
        no_op_exec_special_bool
        no_op_exec_special_bool_cuda

        op_def static Z op(X d1, X d2) { return d2 + d1; }

        op_def static Z op(X d1, X d2, X *params) {
            if (params != nullptr) {
                auto comp = params[0];
                return d1 != comp || d2 != comp ? static_cast<Z>(1) : static_cast<Z>(0);
            } else {
                auto b1 = static_cast<bool>(d1);
                auto b2 = static_cast<bool>(d2);
                return b1 || b2 ?
static_cast<Z>(1) : static_cast<Z>(0);
            }
        }

        op_def static Z op(X d1) { return d1; }

        // op for MetaOps
        op_def static Z op(X d1, X *params) { return static_cast<Z>(119); }
    };

    // Logical XOR, same conventions as And/Or above.
    template <typename X, typename Z>
    class Xor {
    public:
        no_op_exec_special_bool
        no_op_exec_special_bool_cuda

        op_def static Z op(X d1, X d2) { return d2 + d1; }

        op_def static Z op(X d1, X d2, X *params) {
            if (params != nullptr) {
                auto comp = params[0];
                return ((d1 == comp && d2 != comp) || (d1 != comp && d2 == comp)) ? static_cast<Z>(1) : static_cast<Z>(0);
            } else {
                auto b1 = static_cast<bool>(d1);
                auto b2 = static_cast<bool>(d2);
                return (!b1 && b2 )||(b1 && !b2) ? static_cast<Z>(1) : static_cast<Z>(0);
            }
        }

        op_def static Z op(X d1) { return d1; }
    };

    // Boolean negation; the pairwise forms are inequality-style helpers.
    template <typename X, typename Z>
    class Not {
    public:
        no_op_exec_special_bool
        no_op_exec_special_bool_cuda

        op_def static Z op(X d1, X d2) { return static_cast<Z>(0); }

        op_def static Z op(X d1, X d2, X *params) { return d1 != d2 ? static_cast<Z>(1) : static_cast<Z>(0); }

        // this transform op should run only on boolean input
        op_def static Z op(X d1, X *params) {
            auto b1 = static_cast<bool>(d1);
            return !b1;
        }
    };

    // NAND on int-cast operands.
    template <typename X, typename Y, typename Z>
    class LogicalNot {
    public:
        op_def static Z op(X d1, Y d2) { return !((int) d1 && (int) d2); }
        op_def static Z op(X d1, Y d2, Z *params) { return static_cast<X>(!(static_cast<int>(d1) && static_cast<int>(d2))); }
        op_def static Z op(X d1) { return d1; }
        // op for MetaOps
        op_def static Z op(X d1, Y *params) { return static_cast<X>(119); }
    };

    // XOR via bit arithmetic on int-cast operands: (a|b) & ~(a&b).
    template <typename X, typename Y, typename Z>
    class LogicalXor {
    public:
        op_def static Z op(X d1, Y d2) {
            auto i1 = static_cast<int>(d1);
            auto i2 = static_cast<int>(d2);
            return (i1 | i2) &~ (i1 & i2);
        }

        op_def static Z op(X d1, Y d2, Z *params) { return op(d1, d2); }
        op_def static Z op(X d1) { return d1; }
        // op for MetaOps
        op_def static Z op(X d1, Y *params) { return static_cast<Z>(119); }
    };

    // NOTE(review): LogicalAnd/LogicalOr use bitwise &/| on the int casts, so
    // e.g. 2 AND 1 yields 0 — fine for strict 0/1 inputs, surprising for
    // arbitrary non-zero values; confirm inputs are normalized upstream.
    template <typename X, typename Y, typename Z>
    class LogicalAnd {
    public:
        op_def static Z op(X d1, Y d2) { return static_cast<int>(d1) & static_cast<int>(d2); }
        op_def static Z op(X d1, Y d2, Z *params) { return op(d1, d2); }
        op_def static Z op(Y d1) { return d1; }
        // op for MetaOps
        op_def static Z op(X d1, Y *params) { return static_cast<Z>(119); }
    };

    template <typename X, typename Y, typename Z>
    class LogicalOr {
    public:
        op_def static Z op(X d1, Y d2) { return static_cast<int>(d1) | static_cast<int>(d2); }
        op_def static Z op(X d1, Y d2, Z *params) { return op(d1, d2); }
        op_def static Z op(X d1) { return d1; }
        // op for MetaOps
        op_def static Z op(X d1, Y *params) { return static_cast<X>(119); }
    };

    // Integer modulo on int-cast operands.
    template <typename X, typename Y, typename Z>
    class Mod {
    public:
        /*
        // just an optional note, feel free to remove later
        op_def static half op(half d1, half d2, half *params) {
            return __float2half(simdOps::Mod<float>::op(__half2float(d1), __half2float(d2), nullptr));
        }
        */

        op_def static Z op(X d1, Y d2) { return static_cast<int>(d1) % static_cast<int>(d2); }
        op_def static Z op(X d1, Y d2, Z *params) { return op(d1, d2); }

        // op for MetaOp
        op_def static Z op(X d1, Y *params) { return op(d1, params[0]); }
    };

    // d2 mod d1 (operands reversed).
    template <typename X, typename Y, typename Z>
    class ReverseMod {
    public:
        op_def static Z op(X d1, Y d2) { return static_cast<int>(d2) % static_cast<int>(d1); }
        op_def static Z op(X d1, Y d2, Z *params) { return op(d1, d2); }

        // op for MetaOp
        op_def static Z op(X d1, Y *params) { return op(d1, params[0]); }
    };

    /**
     * Whether 2 elements in an array
     * are epsilon equal (absolute tolerance MIN_V)
     */
    template <typename X, typename Z>
    class Epsilon {
    public:
        op_def static Z op(X d1, X d2) {
            X diff = d1 - d2;
            X absDiff = nd4j::math::nd4j_abs<X>(diff);
            if (absDiff <= static_cast<X>(MIN_V))
                return static_cast<Z>(1);
            return static_cast<Z>(0);
        }

        op_def static Z op(X d1, X d2, X *params) { return op(d1, d2); }

        op_def static Z op(X d1, X *params) { return d1; }
    };

    // Element-wise comparison predicates; each returns 1/0 in the Z domain.
    template <typename X, typename Z>
    class EqualTo {
    public:
        op_def static Z op(X d1, X d2) { return d1 == d2; }
        op_def static Z op(X d1, X d2, X *params) { return op(d1, d2); }
        op_def static Z op(X d1, X *params) { return d1; }
    };

    template <typename X, typename Z>
    class NotEqualTo {
    public:
        op_def static Z op(X d1, X d2) { return d1 != d2; }
        op_def static Z op(X d1, X d2, X *params) { return op(d1, d2); }
        op_def static Z op(X d1, X *params) { return d1; }
    };

    template <typename X, typename Z>
    class GreaterThanOrEqual {
    public:
        op_def static Z op(X d1, X d2) { return d1 >= d2; }
        op_def static Z op(X d1, X d2, X *params) { return op(d1, d2); }

        // FIXME: this signature clashes with MetaOp stuff
        op_def static Z op(X d1, X *params) { return d1; }
    };

    template <typename X, typename Z>
    class GreaterThan {
    public:
        op_def static Z op(X d1, X d2) { return d1 > d2; }
        op_def static Z op(X d1, X d2, X *params) { return op(d1, d2); }

        // FIXME: this signature clashes with MetaOp stuff
        op_def static Z op(X d1, X *params) { return d1; }
    };

    template <typename X, typename Z>
    class LessThan {
    public:
        op_def static Z op(X d1, X d2) { return d1 < d2; }
        op_def static Z op(X d1, X d2, X *params) { return op(d1, d2); }
        op_def static Z op(X d1, X *params) { return d1; }
    };

    template <typename X, typename Z>
    class LessThanOrEqual {
    public:
        op_def static Z op(X d1, X d2) { return d1 <= d2; }
        op_def static Z op(X d1, X d2, X *params) { return op(d1, d2); }
        op_def static Z op(X d1, X *params) { return d1; }
    };

    // Unary transforms backed by the nd4j math wrappers.
    template <typename X>
    class Abs {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) { return nd4j::math::nd4j_abs<X>(d1); }
    };

    template <typename X>
    class Ceiling {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) { return nd4j::math::nd4j_ceil<X,X>(d1); }
    };

    template <typename X>
    class Cosine {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) { return nd4j::math::nd4j_cos<X,X>(d1); }
    };

    template <typename X>
    class Exp {
    public:
        no_op_exec_special_same
no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) { return nd4j::math::nd4j_exp<X, X>(d1); }
    };

    // 1 inside [-1, 1], else 0 — derivative of HardTanh.
    template <typename X>
    class HardTanhDerivative {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return ((d1 >= static_cast<X>(-1.f) && d1 <= static_cast<X>(1.f)) ? static_cast<X>(1.f) : static_cast<X>(0.f));
        }
    };

    // Clamp to [-1, 1].
    template <typename X>
    class HardTanh {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            if (d1 < static_cast<X>(-1))
                return static_cast<X>(-1);
            else if (d1 > static_cast<X>(1))
                return static_cast<X>(1);
            else return d1;
        }
    };

    template <typename X>
    class Floor {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) { return nd4j::math::nd4j_floor<X,X>(d1); }
    };

    template <typename X>
    class Log {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) { return nd4j::math::nd4j_log<X, X>(d1); }
    };

    // log(1 + x).
    template <typename X>
    class Log1p {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) { return nd4j::math::nd4j_log<X, X>(1 + d1); }
    };

    // Logarithm of d1 in base d2: log(d1) / log(d2).
    template <typename X, typename Y, typename Z>
    class LogX {
    public:
        op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_log<X, Z>(d1) / nd4j::math::nd4j_log<Y, Z>(d2) ; }
    };

    // Clamp non-positive values up to the smallest positive float16 (keeps
    // subsequent log/div FP16-safe).
    template <typename X>
    class StabilizeFP16 {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            if (d1 <= static_cast<X>(0))
                return static_cast<X>(nd4j::DataTypeUtils::min<float16>());
            else return d1;
        }
    };

    // Same idea, clamped to the minimum of X itself.
    template <typename X>
    class StabilizeX {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            if (d1 <= static_cast<X>(0))
                return nd4j::DataTypeUtils::min<X>();
            else return d1;
        }
    };

    // d1 * (1 - d1): sigmoid derivative expressed in terms of the activation value.
    template <typename X>
    class SpecialDerivative {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) { return d1 * (static_cast<X>(1.f) - d1); }
    };

    template <typename X>
    class Neg {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) { return -d1; }
    };

    template <typename X>
    class Erf {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) { return nd4j::math::nd4j_erf<X,X>(d1); }
    };

    template <typename X>
    class Erfc {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) { return nd4j::math::nd4j_erfc<X,X>(d1); }
    };

    // 1 / x.
    template <typename X>
    class Reciprocal {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda
//		op_def static T op(T d1) {
//			return (T(1.0f) / d1);
//		}
        // op for MetaOps
        op_def static X op(X d1, X *params) { return (static_cast<X>(1) / d1); }
    };

    // x^2 via the pow wrapper.
    template <typename X, typename Z>
    class Sqr {
    public:
        no_op_exec_special
        no_op_exec_special_cuda

        op_def static Z op(X d1, Z *params) { return nd4j::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2)); }

        op_def static Z op(X d1) { return nd4j::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2)); }
    };

    // Relative error of d1 vs d2 as implemented by nd4j_re.
    template <typename X, typename Y, typename Z>
    class RelativeError {
    public:
        no_op_exec_special
        no_op_exec_special_cuda

        op_def static Z op(X d1, Y d2) { return nd4j::math::nd4j_re<X>(d1, d2); }
        op_def static Z op(X d1, Y d2, Z *params) { return op(d1, d2); }

        op_def static Z op(X d1) { return static_cast<Z>(0); }
    };

    // 1 when relative error exceeds params[0], else 0.
    template <typename X, typename Y, typename Z>
    class BinaryRelativeError {
    public:
        no_op_exec_special
        no_op_exec_special_cuda

        op_def static Z op(X d1, Y d2, Z *params) {
            X threshold = params[0];
            return nd4j::math::nd4j_re<X>(d1, d2) > threshold ? static_cast<Z>(1) : static_cast<Z>(0);
        }

        op_def static Z op(X d1) { return static_cast<Z>(0); }
    };

    // Combined relative + absolute tolerance test; params carry the thresholds
    // (and, in the unary form, the comparison value itself as params[0]).
    template <typename X, typename Y, typename Z>
    class BinaryMinimumAbsoluteRelativeError {
    public:
        no_op_exec_special
        no_op_exec_special_cuda

        op_def static Z op(X d1, X *params) {
            X d2 = params[0];
            X thresholdRelative = params[1];
            X thresholdAbsolute = params[2];
            return nd4j::math::nd4j_re<X>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<X>(d1 - static_cast<X>(d2)) < thresholdAbsolute ? static_cast<Z>(0) : static_cast<Z>(1)) : static_cast<Z>(0);
        }

        op_def static Z op(X d1, Y d2, Z *params) {
            X thresholdRelative = params[0];
            X thresholdAbsolute = params[1];
            return nd4j::math::nd4j_re<X>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<X>(d1 - static_cast<X>(d2)) < thresholdAbsolute ? static_cast<Z>(0) : static_cast<Z>(1)) : static_cast<Z>(0);
        }

        op_def static Z op(X d1) { return static_cast<Z>(0); }
    };

    // d2 ^ d1 (base and exponent swapped relative to Pow).
    template <typename X, typename Y, typename Z>
    class ReversePow {
    public:
        no_op_exec_special
        no_op_exec_special_cuda

        op_def static Z op(X d1, Z *params) { return nd4j::math::nd4j_pow<X, X, Z>(params[0], d1); }

        op_def static Z op(X d1, Y d2) { return nd4j::math::nd4j_pow<X, Y, Z>(d2, d1); }

        op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_pow<X, Y, Z>(d2, d1); }

        op_def static Z op(X d1) { return d1; }
    };

    // d1 ^ d2.
    template <typename X, typename Y, typename Z>
    class Pow {
    public:
        no_op_exec_special
        no_op_exec_special_cuda

        op_def static Z op(X d1, Z *params) { return nd4j::math::nd4j_pow<X, X, Z>(d1, params[0]); }

        op_def static Z op(X d1, Y d2) { return nd4j::math::nd4j_pow<X, Y, Z>(d1, d2); }

        op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_pow<X, Y, Z>(d1, d2); }

        op_def static Z op(X d1) { return d1; }
    };

    // d/dx (x^n) = n * x^(n-1).
    template <typename X, typename Y, typename Z>
    class PowDerivative {
    public:
        no_op_exec_special
        no_op_exec_special_cuda

        op_def static Z op(X d1, Z *params) {
            return params[0] * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(params[0]) -
static_cast<Z>(1.f));
        }

        op_def static Z op(X d1, Y d2) { return static_cast<Z>(d2) * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(d2) - static_cast<Z>(1.f)); }

        op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d2) * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(d2) - static_cast<Z>(1.f)); }

        op_def static Z op(X d1) { return d1; }
    };

    template <typename X>
    class Round {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) { return nd4j::math::nd4j_round<X,X>(d1); }
    };

    // Predicate ops double as reductions: op() maps each element to 0/1 and
    // merge/update accumulate; postProcess finalizes the reduction.
    template <typename X, typename Z>
    class IsNan {
    public:
        no_op_exec_special_bool
        no_op_exec_special_bool_cuda
        no_op_exec_special_accumulation
        no_op_exec_special_accumulation_cuda

        op_def static Z op(X d1, X *params) { return nd4j::math::nd4j_isnan(d1) ? static_cast<X>(1) : static_cast<X>(0); }

        op_def static X startingValue(const X *input) { return static_cast<X>(0); }

        op_def static Z merge(X old, X opOutput, X *extraParams) { return opOutput + old; }

        op_def static Z update(X old, X opOutput, X *extraParams) { return opOutput + old; }

        op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) { return reduction; }
    };

    // exp(x) - 1.
    template <typename X>
    class Expm1 {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) { return nd4j::math::nd4j_exp<X, X>(d1) - static_cast<X>(1); }
    };

    template <typename X, typename Z>
    class IsPositive {
    public:
        no_op_exec_special_bool
        no_op_exec_special_bool_cuda
        no_op_exec_special_accumulation
        no_op_exec_special_accumulation_cuda

        op_def static Z op(X d1, X *params) { return d1 > (X)0.f; }

        op_def static X startingValue(const X *input) { return static_cast<X>(0); }

        op_def static Z merge(X old, X opOutput, X *extraParams) { return opOutput + old; }

        op_def static Z update(X old, X opOutput, X *extraParams) { return opOutput + old; }

        op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) { return reduction; }
    };

    template <typename X, typename Z>
    class IsInf {
    public:
        no_op_exec_special_bool
        no_op_exec_special_bool_cuda
        no_op_exec_special_accumulation
        no_op_exec_special_accumulation_cuda

        op_def static Z op(X d1, X *params) { return nd4j::math::nd4j_isinf<X>(d1) ? static_cast<Z>(1) : static_cast<Z>(0); }

        op_def static X startingValue(const X *input) { return static_cast<X>(0); }

        op_def static Z merge(X old, X opOutput, X *extraParams) { return opOutput + old; }

        op_def static Z update(X old, X opOutput, X *extraParams) { return opOutput + old; }

        op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) { return reduction; }
    };

    // OR-style reduction: any non-finite element flips the result to 1.
    template <typename X, typename Z>
    class IsInfOrNan{
    public:
        no_op_exec_special_bool
        no_op_exec_special_bool_cuda
        no_op_exec_special_accumulation
        no_op_exec_special_accumulation_cuda

        op_def static Z op(X d1, X *params) { return nd4j::math::nd4j_isfin<X>(d1) ? static_cast<Z>(0) : static_cast<Z>(1); }

        op_def static X startingValue(const X *input) { return static_cast<X>(0); }

        op_def static Z merge(X old, X opOutput, X *extraParams) { return opOutput == static_cast<X>(0) && old == static_cast<X>(0) ? static_cast<Z>(0) : static_cast<Z>(1); }

        op_def static Z update(X old, X opOutput, X *extraParams) { return opOutput == static_cast<X>(0) && old == static_cast<X>(0) ? static_cast<Z>(0) : static_cast<Z>(1); }

        op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) { return reduction != static_cast<X>(0); }
    };

    // AND-style reduction: result stays 1 only while every element is finite.
    template <typename X, typename Z>
    class IsFinite {
    public:
        no_op_exec_special_bool
        no_op_exec_special_bool_cuda
        no_op_exec_special_accumulation
        no_op_exec_special_accumulation_cuda

        op_def static Z op(X d1, X *params) { return nd4j::math::nd4j_isfin<X>(d1) ? static_cast<Z>(1) : static_cast<Z>(0); }

        op_def static X startingValue(const X *input) { return static_cast<X>(1); }

        op_def static Z merge(X old, X opOutput, X *extraParams) { return opOutput == static_cast<X>(0) || old == static_cast<X>(0) ? static_cast<Z>(0) : static_cast<Z>(1); }

        op_def static Z update(X old, X opOutput, X *extraParams) { return opOutput == static_cast<X>(0) || old == static_cast<X>(0) ? static_cast<Z>(0) : static_cast<Z>(1); }

        op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) { return reduction != static_cast<X>(0); }
    };

    // Clamp into [params[0], params[1]].
    template <typename X>
    class ClipByValue {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            if (d1 > params[1])
                return params[1];
            if (d1 < params[0])
                return params[0];
            return d1;
        }
    };

    // Symmetric clip to [-d2, d2], used for LSTM cell clipping.
    template <typename X, typename Y, typename Z>
    class LstmClip {
    public:
        no_op_exec_special
        no_op_exec_special_cuda

        op_def static Z op(X d1, Y d2, Z *params) {
            X _v = (X) d2;
            if (d1 > _v)
                return _v;
            else if (d1 < -_v)
                return -_v;
            else return d1;
        }
    };

    // x * sigmoid(x).
    template <typename X>
    class Swish {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) { return d1 * nd4j::math::nd4j_sigmoid<X,X>(d1); }
    };

    // Sigmoid approximation of GELU: x * sigmoid(1.702 * x).
    template <typename X>
    class GELU {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) { return d1 * nd4j::math::nd4j_sigmoid<X,X>(static_cast<X>(1.702f) * d1); }
    };

    // Tanh-based GELU: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).
    template <typename X>
    class PreciseGELU {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            auto sp = nd4j::math::nd4j_sqrt<X, X>(static_cast<X>(2) / static_cast<X>(M_PI));
            auto xp = d1 + nd4j::math::nd4j_pow<X, X, X>(static_cast<X>(0.044715) * d1, static_cast<X>(3));
            return (d1 / static_cast<X>(2)) * (static_cast<X>(1) + nd4j::math::nd4j_tanh<X, X>(sp * xp));
        }
    };

    // Derivative of the sigmoid-approximated GELU above.
    template <typename X>
    class GELUDerivative {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            auto x17 = static_cast<X>(1.702f) * d1;
            auto ep = nd4j::math::nd4j_pow<X,X,X>(static_cast<X>(M_E), x17);
            // (E^(1.702 x) (1. + E^(1.702 x) + 1.702 x))/(1. + E^(1.702 x))^2
            return (ep * (static_cast<X>(1.f) + ep + x17)) / nd4j::math::nd4j_pow<X, int, X>((static_cast<X>(1.f) + ep), 2);
        }
    };

    // Derivative of the tanh-based GELU, via precomputed constants.
    template <typename X>
    class PreciseGELUDerivative {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            auto x79 = static_cast<X>(0.797885) * d1;
            auto x03 = nd4j::math::nd4j_pow<X, int, X>(static_cast<X>(0.0356774) * d1, 3);
            auto x39 = static_cast<X>(0.398942) * d1;
            auto x05 = nd4j::math::nd4j_pow<X, int, X>(static_cast<X>(0.0535161) * d1, 3);
            auto scz = nd4j::math::nd4j_sech<X, X>(x79 + x03);

            // 0.5 + (0.398942 x + 0.0535161 x^3) Sech[0.797885 x + 0.0356774 x^3]^2 + 0.5 Tanh[0.797885 x + 0.0356774 x^3]
            return static_cast<X>(0.5) + (x39 + x05) * (scz * scz) + static_cast<X>(0.5) * nd4j::math::nd4j_tanh<X, X>(x79 + x03);
        }
    };

    // d/dx (x * sigmoid(x)) expressed via e^x.
    template <typename X>
    class SwishDerivative {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            X ex = nd4j::math::nd4j_pow<X, X, X>(static_cast<X>(M_E), d1);
            return (ex * (d1 + ex + static_cast<X>(1.f))) / nd4j::math::nd4j_pow<X, X, X>((ex + static_cast<X>(1.f)) , static_cast<X>(2.f));
        }
    };

    // log(sigmoid(x)).
    template <typename X>
    class LogSigmoid {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) { return nd4j::math::nd4j_log<X, X>(nd4j::math::nd4j_sigmoid<X, X>(d1)); }
    };

    // NOTE(review): computes 1/(e^x + 1) = sigmoid(-x), which equals
    // d/dx log(sigmoid(x)) — verify that convention is what callers expect.
    template <typename X>
    class LogSigmoidDerivative {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            X ex = nd4j::math::nd4j_pow<X, X, X>(M_E, d1);
            return static_cast<X>(1.f) / (ex + static_cast<X>(1.f));
        }
    };

    template <typename X>
    class Sigmoid {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) { return nd4j::math::nd4j_sigmoid<X, X>(d1); }
    };

    template <typename X>
    class SigmoidDerivative {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return
nd4j::math::nd4j_sigmoidderivative<X, X>(d1);
        }
    };

    // min(1, max(0, 0.2*x + 0.5)).
    template <typename X>
    class HardSigmoid {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return nd4j::math::nd4j_min<X>(static_cast<X>(1), nd4j::math::nd4j_max<X>(static_cast<X>(0), (static_cast<X>(0.2f)) * d1 + static_cast<X>(0.5f)));
        }
    };

    // Slope 0.2 inside (-2.5, 2.5), 0 outside.
    template <typename X>
    class HardSigmoidDerivative {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return d1 < static_cast<X>(-2.5f) || d1 > static_cast<X>(2.5f) ? static_cast<X>(0.f) : static_cast<X>(0.2f);
        }
    };

    /**
     * Scale to be between a min and max
     * (values already inside [min, max] pass through unchanged; the [0, 1]
     * target range additionally squashes the input through a sigmoid first)
     */
    template <typename X>
    class SetRange {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            auto min = params[0];
            auto max = params[1];
            if (static_cast<X>(d1) >= min && static_cast<X>(d1) <= max)
                return d1;
            if (min == static_cast<X>(0) && max == static_cast<X>(1)) {
                auto val = static_cast<X>(1) / (static_cast<X>(1) + nd4j::math::nd4j_exp<X, X>(-d1));
                return (nd4j::math::nd4j_floor<X,X>(val * (max - min)) + min);
            }

            return (nd4j::math::nd4j_floor<X,X>(d1 * (max - min)) + min);
        }
    };

    template <typename X>
    class Sin {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) { return nd4j::math::nd4j_sin<X,X>(d1); }
    };

    // x^2 by direct multiplication (contrast with Sqr, which goes through pow).
    template <typename X>
    class Square {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) { return d1 * d1; }
    };

    template <typename X, typename Z>
    class Sqrt {
    public:
        no_op_exec_special
        no_op_exec_special_cuda

        op_def static Z op(X d1, Z *params) { return nd4j::math::nd4j_sqrt<X, Z>(d1); }
    };

    // 1 / sqrt(x).
    template <typename X, typename Z>
    class RSqrt {
    public:
        no_op_exec_special
        no_op_exec_special_cuda

        op_def static Z op(X d1, Z *params) { return static_cast<Z>(1) / nd4j::math::nd4j_sqrt<X, Z>(d1); }
    };

    template <typename X>
    class Rint {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) { return nd4j::math::nd4j_rint<X,X>(d1); }
    };

    // softplus: log(1 + e^x), via the shared math wrapper.
    template <typename X>
    class SoftPlus {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) { return nd4j::math::softplus<X, X>(d1); }
    };

    // -1, 0 or +1 depending on sign (branch-free formulation).
    template <typename X>
    class Sign {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) { return (d1 > static_cast<X>(0)) - (d1 < static_cast<X>(0)); }
    };

    // x * (1 - x).
    template <typename X>
    class TimesOneMinus {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) { return d1 * (static_cast<X>(1) - d1); }
    };

    // LeCun-style rational tanh approximation: 1.7159 * f(2x/3).
    template <typename X>
    class RationalTanh {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            // keep 2/3 as runtime variable, to match precision
            auto dis = (static_cast<X>(2) / static_cast<X>(3)) * d1;

            auto tanh = nd4j::math::nd4j_sgn<X,X>(dis) * (static_cast<X>(1) - (static_cast<X>(1) / (static_cast<X>(1) + static_cast<X>(nd4j::math::nd4j_abs<X>(dis)) + nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(2)) + static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(4)) )));
            return static_cast<X>(1.7159f) * tanh;
        }
    };

    template <typename X>
    class RationalTanhDerivative {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            auto dis = (static_cast<X>(2.f) / static_cast<X>(3.f)) * d1;

            auto a = static_cast<X>(1.f) + nd4j::math::nd4j_abs<X>(dis) + nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(2.f)) + static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(4));

            auto tDeriv = (static_cast<X>(1.f) + nd4j::math::nd4j_sign<X,X>(dis) * (static_cast<X>(2.f) * dis + static_cast<X>(4.f) * static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(3)))) / (a * a);

            return static_cast<X>(1.7159f) * (static_cast<X>(2.f) / static_cast<X>(3.f)) * tDeriv;
        }
    };

    template <typename X>
    class Tanh {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    // Body of class Tanh (its header precedes this chunk line).
    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_tanh<X, X>(d1);
    }
};

// max(0, tanh(x)).
template <typename X>
class RectifiedTanh {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_max<X>(static_cast<X>(0), nd4j::math::nd4j_tanh<X,X>(d1));
    }
};

// tanh'(x) for positive inputs, 0 elsewhere (matches the rectification above).
template <typename X>
class RectifiedTanhDerivative {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return d1 > static_cast<X>(0.f) ? nd4j::math::nd4j_tanhderivative<X,X>(d1) : static_cast<X>(0.f);
    }
};

template <typename X>
class ATanh {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_atanh<X,X>(d1);
    }
};

template <typename X>
class TanhDerivative {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_tanhderivative<X,X>(d1);
    }
};

// x^3.
template <typename X>
class Cube {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return d1 * d1 * d1;
    }
};

// d/dx x^3 = 3x^2.
template <typename X>
class CubeDerivative {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return static_cast<X>(3) * d1 * d1;
    }
};

template <typename X>
class ACos {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_acos<X, X>(d1);
    }
};

template <typename X>
class ASinh {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_asinh<X, X>(d1);
    }
};

// d/dx asinh(x) = 1/sqrt(x^2 + 1).
template <typename X>
class ASinhDerivative {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return static_cast<X>(1.f) / (nd4j::math::nd4j_sqrt<X, X>(nd4j::math::nd4j_pow<X, X, X>(d1, static_cast<X>(2.f)) + static_cast<X>(1.f)));
    }
};

template <typename X>
class ACosh {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_acosh<X, X>(d1);
    }
};

// d/dx acosh(x) = 1/(sqrt(x-1)*sqrt(x+1)).
template <typename X>
class ACoshDerivative {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return static_cast<X>(1.f) / (nd4j::math::nd4j_sqrt<X, X>(d1 - static_cast<X>(1.f)) * nd4j::math::nd4j_sqrt<X, X>(d1 + static_cast<X>(1.f)));
    }
};

// Constant 1, regardless of input.
template <typename X>
class Ones {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return static_cast<X>(1.0f);
    }
};

template <typename X>
class SoftSign {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_softsign<X, X>(d1);
    }
};

template <typename X>
class SoftSignDerivative {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_softsignderivative<X,X>(d1);
    }
};

// Boolean comparison of each element against extraParams[0] with tolerance
// extraParams[1]; extraParams[2] selects the comparison mode (switch cases below).
template <typename X, typename Z>
class MatchConditionBool {
public:
    no_op_exec_special_bool no_op_exec_special_bool_cuda

    // this op return 1.0 if condition met, 0.0 otherwise
    op_def static Z op(X d1, X *extraParams) {
        X compare = extraParams[0];
        X eps = extraParams[1];

        auto mode = static_cast<int>(extraParams[2]);
        //nd4j_printf("value: %f; comp: %f; eps: %f; mode: %i;\n", d1, compare, eps, mode);

        switch (mode) {
            case 0: // equals
                return nd4j::math::nd4j_abs<X>(d1 - compare) <= eps ? true : false;
            case 1: // not equals
                return nd4j::math::nd4j_abs<X>(d1 - compare) > eps ? true : false;
            case 2: // less_than
                return d1 < compare ? true : false;
            case 3: // greater_than
                return d1 > compare ? true : false;
            case 4: // less_or_equals_than
                return d1 <= compare ? true : false;
            case 5: // greater_or_equals_than
                return d1 >= compare ? true : false;
            case 6: // abs_less_than
                return nd4j::math::nd4j_abs<X>(d1) < compare ?
                    true : false;
            case 7: // abs_greater_than
                return nd4j::math::nd4j_abs<X>(d1) > compare ? true : false;
            case 8: // is inf
                return nd4j::math::nd4j_isinf(d1) ? true : false;
            case 9: // is nan
                return nd4j::math::nd4j_isnan(d1) ? true : false;
            case 10:
                return (d1 == compare) ? true : false;
            case 11:
                return (d1 != compare) ? true : false;
            case 12: // abs_greater_or_equals_than
                return nd4j::math::nd4j_abs<X>(d1) >= compare ? true : false;
            case 13: // abs_less_or_equals_than
                return nd4j::math::nd4j_abs<X>(d1) <= compare ? true : false;
            case 14: // isFinite
                return !(nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1));
            case 15: // isInfinite
                return nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1);
            default:
                printf("Undefined match condition: [%i]\n", mode);
        }

        // unknown mode: echo the input value back
        return d1;
    }
};

// Reduction flavour of the comparison above: accumulates a count of matches.
template <typename X, typename Z>
class MatchCondition {
public:
    no_op_exec_special no_op_exec_special_cuda
    no_op_exec_special_accumulation_long no_op_exec_special_accumulation_cuda

    op_def static Z startingValue(const X *input) {
        return static_cast<Z>(0);
    }

    op_def static Z merge(Z old, Z opOutput, X *extraParams) {
        return old + opOutput;
    }

    op_def static Z update(Z old, Z opOutput, X *extraParams) {
        return old + opOutput;
    }

    // this op return 1.0 if condition met, 0.0 otherwise
    op_def static Z op(X d1, X *extraParams) {
        X compare = extraParams[0];
        X eps = extraParams[1];

        auto mode = static_cast<int>(extraParams[2]);
        //printf("value: %f; comp: %f; eps: %f; mode: %i;\n", (float) d1, (float) compare, (float) eps, mode);

        switch (mode) {
            case 0: // equals
                return nd4j::math::nd4j_abs<X>(d1 - compare) <= eps ? 1 : 0;
            case 1: // not equals
                return nd4j::math::nd4j_abs<X>(d1 - compare) > eps ? 1 : 0;
            case 2: // less_than
                return d1 < compare ? 1 : 0;
            case 3: // greater_than
                return d1 > compare ? 1 : 0;
            case 4: // less_or_equals_than
                return d1 <= compare ? 1 : 0;
            case 5: // greater_or_equals_than
                return d1 >= compare ? 1 : 0;
            case 6: // abs_less_than
                return nd4j::math::nd4j_abs<X>(d1) < compare ?
1 : 0;
            case 7: // abs_greater_than
                return nd4j::math::nd4j_abs<X>(d1) > compare ? 1 : 0;
            case 8: // is inf
                return nd4j::math::nd4j_isinf(d1) ? 1 : 0;
            case 9: // is nan
                return nd4j::math::nd4j_isnan(d1) ? 1 : 0;
            case 10:
                return (d1 == compare) ? 1 : 0;
            case 11:
                return (d1 != compare) ? 1 : 0;
            case 12: // abs_greater_or_equals_than
                return nd4j::math::nd4j_abs<X>(d1) >= compare ? 1 : 0;
            case 13: // abs_less_or_equals_than
                return nd4j::math::nd4j_abs<X>(d1) <= compare ? 1 : 0;
            case 14: // isFinite
                return !(nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1)) ? 1 : 0;
            case 15: // isInfinite
                return nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1) ? 1 : 0;
            default:
                printf("Undefined match condition: [%i]\n", mode);
        }

        // unknown mode: echo the input value back
        return d1;
    }

    op_def static Z postProcess(Z reduction, Nd4jLong n, X *extraParams) {
        return reduction;
    }
};

// ELU with alpha taken from the second operand.
template <typename X, typename Y, typename Z>
class ELU {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static Z op(X d1, Y d2, Z *params) {
        return nd4j::math::nd4j_elu<X,Z>(d1, static_cast<X>(d2));
    }
};

template <typename X, typename Y, typename Z>
class ELUDerivative {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static Z op(X d1, Y d2, Z *params) {
        return nd4j::math::nd4j_eluderivative<X,Z>(d1, static_cast<X>(d2));
    }
};

// Pairwise RELU: max(d1, d2) after casting both operands to Z.
template <typename X, typename Y, typename Z>
class RELU {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static Z op(X d1, Y d2, Z *params) {
        auto xt = static_cast<Z>(d1);
        auto xf = static_cast<Z>(d2);
        return xt < xf ? xf : xt;
    }
};

// Label smoothing for sigmoid cross-entropy logits: d1*(1 - d2) + 0.5*d2.
template <typename X, typename Y, typename Z>
class SXELogitsSmoother {
public:
    op_def static Z op(X d1, Y d2, Z *params) {
        return d1 * ((X)1.f - (X) d2) + (X)(0.5f) * (X) d2;
    }
};

// RELU capped at 6.
template <typename X, typename Y, typename Z>
class RELU6 {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static Z op(X d1, Y d2, Z *params) {
        auto relu = simdOps::RELU<X,Y,Z>::op(d1, d2, params);
        return relu < static_cast<Z>(6) ? relu : static_cast<Z>(6);
    }
};

// Leaky RELU with the negative-side slope (alpha) taken from the second operand.
template <typename X, typename Y, typename Z>
class LeakyRELU {
public:
    no_op_exec_special no_op_exec_special_cuda

    op_def static Z op(X d1, Y d2, Z *params) {
        auto val = static_cast<Z>(d1);
        auto alpha = static_cast<Z>(d2);
        return val < 0.0f ? alpha * val : val;
    }
};

// Scaled ELU (constants SELU_LAMBDA / SELU_ALPHA defined elsewhere in the project).
template <typename X>
class SELU {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return d1 > static_cast<X>(0.0f) ?
               static_cast<X>(SELU_LAMBDA) * static_cast<X>(d1) :
               static_cast<X>(SELU_LAMBDA) * (static_cast<X>(SELU_ALPHA) * nd4j::math::nd4j_exp<X, X>(d1) - static_cast<X>(SELU_ALPHA));
    }
};

template <typename X>
class SELUDerivative {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return d1 > static_cast<X>(0.f) ?
               static_cast<X>(SELU_LAMBDA) :
               static_cast<X>(SELU_ALPHA) * static_cast<X>(SELU_LAMBDA) * nd4j::math::nd4j_exp<X, X>(d1);
    }
};

// 1 for non-negative inputs, alpha (d2) otherwise.
template <typename X, typename Y, typename Z>
class LeakyRELUDerivative {
public:
    no_op_exec_special no_op_exec_special_cuda

    op_def static Z op(X d1, Y d2, Z *params) {
        if (d1 >= static_cast<X>(0))
            return static_cast<Z>(1);
        else
            return static_cast<Z>(d2);
    }
};

template <typename X>
class ASin {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_asin<X,X>(d1);
    }
};

template <typename X>
class Sinh {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_sinh<X,X>(d1);
    }
};

// d/dx sinh(x) = cosh(x).
template <typename X>
class SinhDerivative {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_cosh<X, X>(d1);
    }
};

template <typename X>
class Cosh {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_cosh<X,X>(d1);
    }
};

template <typename X>
class Tan {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_tan<X,X>(d1);
    }
};

// d/dx tan(x) = 1/cos^2(x).
template <typename X>
class TanDerivative {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return static_cast<X>(1.f) / nd4j::math::nd4j_pow<X, X, X>(nd4j::math::nd4j_cos<X, X>(d1), static_cast<X>(2.0f));
    }
};

template <typename X>
class ATan {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return nd4j::math::nd4j_atan<X, X>(d1);
    }
};

// Two-argument arctangent; note the swapped operand order passed to nd4j_atan2.
template <typename X, typename Y, typename Z>
class Atan2 {
public:
    no_op_exec_special no_op_exec_special_cuda

    op_def static Z op(X d1, Y d2) {
        return nd4j::math::nd4j_atan2<X, Z>(d2, d1);
    }

    op_def static Z op(X d1, Y d2, Z *params) {
        return op(d1, d2);
    }

    // op for
MetaOps
    op_def static Z op(X d1, Y *params) {
        return op(d1, params[0]);
    }
};

// Identity: passes the input through unchanged.
template <typename X>
class Identity {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return d1;
    }
};

// Numerical-stability clamp (constant MIN_CUTFOFF defined elsewhere; k = params[0]).
template <typename X>
class Stabilize {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        X k = params[0];
        if (d1 * k > static_cast<X>(- MIN_CUTFOFF))
            return static_cast<X>(- MIN_CUTFOFF) / k;
        else if (d1 * k < static_cast<X>(MIN_CUTFOFF))
            return static_cast<X>(MIN_CUTFOFF) / k;
        return d1;
    }
};

// Heaviside step with threshold d2.
template <typename X, typename Y, typename Z>
class Step {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static Z op(X d1, Y d2, Z *params) {
        return (d1 > static_cast<X>(d2) ? static_cast<Z>(1) : static_cast<Z>(0));
    }
};

// 1 - x.
template <typename X>
class OneMinus {
public:
    no_op_exec_special_same no_op_exec_special_same_cuda

    op_def static X op(X d1, X *params) {
        return static_cast<X>(1) - d1;
    }
};

// Sum reduction.
template <typename X>
class Sum {
public:
    no_op_exec_special_accumulation_same no_op_exec_special_accumulation_same_cuda

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0.0f);
    }

    op_def static X merge(X old, X opOutput, X *extraParams) {
        return opOutput + old;
    }

    op_def static X update(X old, X opOutput, X *extraParams) {
        return opOutput + old;
    }

    op_def static X op(X d1, X *extraParams) {
        return d1;
    }

    op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
        return reduction;
    }
};

// Synthetic transcendental workload used for reduction benchmarking (same-type variant).
template <typename X>
class ReduceSameBenchmarkOp {
public:
    no_op_exec_special_accumulation_same no_op_exec_special_accumulation_same_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0.0f);
    }

    op_def static X merge(X old, X opOutput, X *extraParams) {
        return opOutput + old;
    }

    op_def static X update(X old, X opOutput, X *extraParams) {
        return opOutput + old;
    }

    op_def static X op(X d1, X *extraParams) {
        auto f1 = static_cast<float>(d1);
        return static_cast<X>(nd4j::math::nd4j_pow<float,float,float>(f1, 3)
                + nd4j::math::nd4j_log<float,float>(f1) * nd4j::math::nd4j_sin<float,float>(f1)
                / nd4j::math::nd4j_tanh<float,float>(static_cast<float>(M_E) * static_cast<float>(M_PI) * f1)
                * nd4j::math::nd4j_sqrt<float,float>(static_cast<float>(M_PI) / f1)
                - nd4j::math::nd4j_atan<float,float>(static_cast<float>(M_E) / f1));
    }

    op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
        return reduction;
    }
};

// Shannon entropy reduce: per element accumulates p*log(p) with p = d1^2; postProcess negates.
template <typename X, typename Z>
class ShannonEntropy {
public:
    no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z update(Z old, Z opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z op(X d1, Z *extraParams) {
        auto p = d1 * d1;
        return static_cast<Z>(p) * nd4j::math::nd4j_log<X, Z>(p);
    }

    op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
        return -reduction;
    }
};

// Logarithm of the entropy: log(-sum(p*log(p))).
template <typename X, typename Z>
class LogEntropy {
public:
    no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z update(Z old, Z opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z op(X d1, Z *extraParams) {
        return static_cast<Z>(d1) * nd4j::math::nd4j_log<X, Z>(d1);
    }

    op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
        //entropy is -sum(p(x) * log(p(x))); log entropy is log of this
        return nd4j::math::nd4j_log<Z, Z>(-reduction);
    }
};

// Entropy reduce: -sum(p(x) * log(p(x))).
template <typename X, typename Z>
class Entropy {
public:
    no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z update(Z old, Z opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z op(X d1, Z *extraParams) {
        return static_cast<Z>(d1) * nd4j::math::nd4j_log<X, Z>(d1);
    }

    op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
        return static_cast<Z>(-reduction); //entropy is -sum(p(x) * log(p(x)))
    }
};

// Sum of absolute values.
template <typename X>
class ASum {
public:
    no_op_exec_special_accumulation_same no_op_exec_special_accumulation_same_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::ASUM;

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    op_def static X merge(X old, X opOutput, X *extraParams) {
        return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
    }

    op_def static X update(X old, X opOutput, X *extraParams) {
        return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
    }

    op_def static X op(X d1, X *extraParams) {
        return nd4j::math::nd4j_abs<X>(d1);
    }

    op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
        return nd4j::math::nd4j_abs<X>(reduction);
    }
};

// Counts elements that are not exactly zero.
template <typename X, typename Z>
class CountNonZero {
public:
    no_op_exec_special_accumulation_long no_op_exec_special_accumulation_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::ASUM;

    op_def static Z startingValue(const X *input) {
        return static_cast<Z>(0);
    }

    op_def static Z merge(Z old, Z opOutput, X *extraParams) {
        return opOutput + old;
    }

    op_def static Z update(Z old, Z opOutput, X *extraParams) {
        return opOutput + old;
    }

    op_def static Z op(X d1, X *extraParams) {
        return d1 == static_cast<X>(0.0f) ?
static_cast<Z>(0.0f) : static_cast<Z>(1.0f);
    }

    op_def static Z postProcess(Z reduction, Nd4jLong n, X *extraParams) {
        return reduction;
    }
};

// Counts elements equal to zero.
template <typename X, typename Z>
class CountZero {
public:
    no_op_exec_special_accumulation_long no_op_exec_special_accumulation_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    op_def static Z startingValue(const X *input) {
        return static_cast<Z>(0.0f);
    }

    op_def static Z merge(Z old, Z opOutput, X *extraParams) {
        return opOutput + old;
    }

    op_def static Z update(Z old, Z opOutput, X *extraParams) {
        return opOutput + old;
    }

    op_def static Z op(X d1, X *extraParams) {
        return d1 == static_cast<X>(0) ? static_cast<X>(1) : static_cast<X>(0);
    }

    op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
        return static_cast<Z>(reduction);
    }
};

// Product reduction.
template <typename X>
class Prod {
public:
    no_op_exec_special_accumulation_same no_op_exec_special_accumulation_same_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::PRODUCT;

    op_def static X startingValue(const X *input) {
        return static_cast<X>(1);
    }

    op_def static X merge(X old, X opOutput, X *extraParams) {
        return opOutput * old;
    }

    op_def static X update(X old, X opOutput, X *extraParams) {
        return opOutput * old;
    }

    op_def static X op(X d1, X *extraParams) {
        return d1;
    }

    op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
        return reduction;
    }
};

// Logical "any": sum reduction whose postProcess reports 1 when the sum is positive.
template <typename X, typename Z>
class Any {
public:
    no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0.0f);
    }

    op_def static Z merge(X old, X opOutput, X *extraParams) {
        return opOutput + old;
    }

    op_def static Z update(X old, X opOutput, X *extraParams) {
        return opOutput + old;
    }

    op_def static Z op(X d1, X *extraParams) {
        return d1;
    }

    op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
        return reduction > static_cast<X>(0) ? static_cast<Z>(1) : static_cast<Z>(0) ;
    }
};

// Logical "all": product reduction, positive only if every element contributed a positive factor.
template <typename X, typename Z>
class All {
public:
    no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::PRODUCT;

    op_def static X startingValue(const X *input) {
        return static_cast<X>(1);
    }

    op_def static Z merge(X old, X opOutput, X *extraParams) {
        return opOutput * old;
    }

    op_def static Z update(X old, X opOutput, X *extraParams) {
        return opOutput * old;
    }

    op_def static Z op(X d1, X *extraParams) {
        return d1;
    }

    op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
        return reduction > static_cast<X>(0) ? static_cast<Z>(1) : static_cast<Z>(0);
    }
};

// Arithmetic mean.
template <typename X, typename Z>
class Mean {
public:
    no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z update(Z old, Z opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z op(X d1, Z *extraParams) {
        return d1;
    }

    op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
        return reduction / (Z) n;
    }
};

// Synthetic transcendental workload used for reduction benchmarking (float-output variant).
template <typename X, typename Z>
class ReduceFloatBenchmarkOp {
public:
    no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z update(Z old, Z opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z op(X d1, Z *extraParams) {
        auto f1 = static_cast<float>(d1);
        return static_cast<Z>(nd4j::math::nd4j_pow<float,float,float>(f1, 3)
                + nd4j::math::nd4j_log<float,float>(f1) * nd4j::math::nd4j_sin<float,float>(f1)
                / nd4j::math::nd4j_tanh<float,float>(static_cast<float>(M_E) * static_cast<float>(M_PI) * f1)
                * nd4j::math::nd4j_sqrt<float,float>(static_cast<float>(M_PI) / f1)
                - nd4j::math::nd4j_atan<float,float>(static_cast<float>(M_E) / f1));
    }

    op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
        return (Z) reduction / (Z) n;
    }
};

// Mean of absolute values.
// NOTE(review): merge() applies nd4j_abs<X> to Z-typed arguments while update() does not —
// looks inconsistent with ASum; confirm against upstream before changing.
template <typename X, typename Z>
class AMean {
public:
    no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
        return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
    }

    op_def static Z update(Z old, Z opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z op(X d1, Z *extraParams) {
        return nd4j::math::nd4j_abs<X>(d1);
    }

    op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
        return nd4j::math::nd4j_abs<Z>(reduction) / static_cast<Z>(n);
    }
};

// Max reduction; starting value is the type's lowest representable value (-inf or -max).
template <typename X>
class Max {
public:
    no_op_exec_special_accumulation_same no_op_exec_special_accumulation_same_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::MAX;

    op_def static X startingValue(const X *input) {
        return -nd4j::DataTypeUtils::infOrMax<X>();
    }

    op_def static X merge(X old, X opOutput, X *extraParams) {
        return nd4j::math::nd4j_max<X>(old, opOutput);
    }

    op_def static X update(X old, X opOutput, X *extraParams) {
        return nd4j::math::nd4j_max<X>(opOutput, old);
    }

    op_def static X op(X d1, X d2, X *params) {
        return nd4j::math::nd4j_max<X>(d1, d2);
    }

    op_def static X op(X d1, X d2) {
        return nd4j::math::nd4j_max<X>(d1, d2);
    }

    // FIXME: this signature overlaps with MetaOp
    op_def static X op(X d1, X *extraParams) {
        return d1;
    }

    op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
        return reduction;
    }
};

// NOTE(review): the next class header continues on the following chunk line; kept verbatim.
template <typename X, typename Y,
typename Z>
// Pairwise: returns whichever operand has the larger absolute value (sign preserved).
class AMaxPairwise {
public:
    op_def static Z op(X d1, Y d2, Z *params) {
        return op(d1, d2);
    }

    op_def static Z op(X d1, Y d2) {
        auto z1 = static_cast<Z>(d1);
        auto z2 = static_cast<Z>(d2);

        if (nd4j::math::nd4j_abs<Z>(z1) > nd4j::math::nd4j_abs<Z>(z2))
            return z1;
        else
            return z2;
    }
};

// Pairwise: returns whichever operand has the smaller absolute value (sign preserved).
template <typename X, typename Y, typename Z>
class AMinPairwise {
public:
    op_def static Z op(X d1, Y d2, Z *params) {
        return op(d1, d2);
    }

    op_def static Z op(X d1, Y d2) {
        auto z1 = static_cast<Z>(d1);
        auto z2 = static_cast<Z>(d2);

        if (nd4j::math::nd4j_abs<Z>(z1) < nd4j::math::nd4j_abs<Z>(z2))
            return z1;
        else
            return z2;
    }
};

// Pairwise max after casting both operands to Z.
template <typename X, typename Y, typename Z>
class MaxPairwise {
public:
    op_def static Z op(X d1, Y d2, Z *params) {
        return nd4j::math::nd4j_max<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
    }

    op_def static Z op(X d1, Y d2) {
        return nd4j::math::nd4j_max<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
    }
};

// Pairwise min after casting both operands to Z.
template <typename X, typename Y, typename Z>
class MinPairwise {
public:
    op_def static Z op(X d1, Y d2, Z *params) {
        return nd4j::math::nd4j_min<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
    }

    op_def static Z op(X d1, Y d2) {
        return nd4j::math::nd4j_min<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
    }
};

// Max-of-absolute-values reduction; seeded with the first element.
template <typename X>
class AMax {
public:
    no_op_exec_special_accumulation_same no_op_exec_special_accumulation_same_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::AMAX;

    op_def static X startingValue(const X *input) {
        return input[0];
    }

    op_def static X merge(X old, X opOutput, X *extraParams) {
        return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(old), nd4j::math::nd4j_abs<X>(opOutput));
    }

    op_def static X update(X old, X opOutput, X *extraParams) {
        return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(opOutput), nd4j::math::nd4j_abs<X>(old));
    }

    op_def static X op(X d1, X d2, X *params) {
        return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2));
    }

    // Unlike the 3-arg overload, this returns the signed operand rather than its absolute value.
    op_def static X op(X d1, X d2) {
        return nd4j::math::nd4j_abs<X>(d1) > nd4j::math::nd4j_abs<X>(d2) ? d1 : d2;
    }

    // FIXME: this signature overlaps with MetaOp
    op_def static X op(X d1, X *extraParams) {
        return nd4j::math::nd4j_abs<X>(d1);
    }

    op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
        return nd4j::math::nd4j_abs<X>(reduction);
    }
};

// Min-of-absolute-values reduction; seeded with the first element.
template <typename X>
class AMin {
public:
    no_op_exec_special_accumulation_same no_op_exec_special_accumulation_same_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::AMIN;

    op_def static X startingValue(const X *input) {
        return input[0];
    }

    op_def static X merge(X old, X opOutput, X *extraParams) {
        return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(old), nd4j::math::nd4j_abs<X>(opOutput));
    }

    op_def static X update(X old, X opOutput, X *extraParams) {
        return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(opOutput), nd4j::math::nd4j_abs<X>(old));
    }

    op_def static X op(X d1, X d2, X *params) {
        return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2));
    }

    op_def static X op(X d1, X d2) {
        return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2));
    }

    // FIXME: this signature overlaps with MetaOp
    op_def static X op(X d1, X *extraParams) {
        return nd4j::math::nd4j_abs<X>(d1);
    }

    op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
        return nd4j::math::nd4j_abs<X>(reduction);
    }
};

// Min reduction; starting value is the type's largest representable value (+inf or max).
template <typename X>
class Min {
public:
    no_op_exec_special_accumulation_same no_op_exec_special_accumulation_same_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::MIN;

    op_def static X startingValue(const X *input) {
        return nd4j::DataTypeUtils::infOrMax<X>();
    }

    op_def static X merge(X old, X opOutput, X *extraParams) {
        return nd4j::math::nd4j_min<X>(old, opOutput);
    }

    op_def static X update(X old, X opOutput, X *extraParams) {
        return nd4j::math::nd4j_min<X>(opOutput, old);
    }

    op_def static X op(X d1, X d2, X *params) {
        return nd4j::math::nd4j_min<X>(d1, d2);
    }

    op_def static X op(X d1, X d2) {
        return nd4j::math::nd4j_min<X>(d1, d2);
    }

    // FIXME: this signature overlaps with MetaOp
    op_def static X op(X d1, X *extraParams) {
        return d1;
    }

    op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
        return reduction;
    }
};

// L1 norm: sum of absolute values.
template <typename X, typename Z>
class Norm1 {
public:
    no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z update(Z old, Z opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z op(X d1, Z *extraParams) {
        return static_cast<Z>(nd4j::math::nd4j_abs<X>(d1));
    }

    op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
        return reduction;
    }
};

// L2 norm: sqrt(sum(x^2)).
template <typename X, typename Z>
class Norm2 {
public:
    no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z update(Z old, Z opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
        return nd4j::math::nd4j_sqrt<Z, Z>(reduction);
    }

    op_def static Z op(X d1, Z *extraParams) {
        return static_cast<Z>(d1 * d1);
    }
};

// Sum of squares (L2 norm without the final sqrt).
template <typename X, typename Z>
class SquaredNorm {
public:
    no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z update(Z old, Z opOutput, Z *extraParams) {
        return opOutput +
old;
    }

    op_def static Z op(X d1, Z *extraParams) {
        return static_cast<Z>(d1 * d1);
    }

    op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
        return reduction;
    }
};

// Frobenius norm: sqrt(sum(|x|^2)).
template <typename X, typename Z>
class NormFrobenius {
public:
    no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z update(Z old, Z opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z op(X d1, Z *extraParams) {
        X v = nd4j::math::nd4j_abs<X>(d1);
        return static_cast<Z>(v * v);
    }

    op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
        return nd4j::math::nd4j_sqrt<Z, Z>(reduction);
    }
};

// p-norm: (sum(|x|^p))^(1/p), with p supplied in extraParams[0].
template <typename X, typename Z>
class NormP {
public:
    no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z update(Z old, Z opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z op(X d1, Z *extraParams) {
        return nd4j::math::nd4j_pow<X, Z, Z>(nd4j::math::nd4j_abs<X>(d1), extraParams[0]);
    }

    op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
        return nd4j::math::nd4j_pow<Z, Z, Z>(reduction, static_cast<Z>(1.0f) / extraParams[0]);
    }
};

// Infinity norm: max over |x| (accumulated by update()).
template <typename X, typename Z>
class NormMax {
public:
    no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0);
    }

    op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
        return opOutput + old;
    }

    op_def static Z update(Z old, Z opOutput, Z *extraParams) {
        return nd4j::math::nd4j_max<Z>(nd4j::math::nd4j_abs<Z>(old), nd4j::math::nd4j_abs<Z>(opOutput));
    }

    op_def static Z op(X d1, Z *extraParams) {
        return static_cast<Z>(d1);
    }

    // NOTE(review): both max() arguments are the same value, so this reduces to |reduction|.
    op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
        return nd4j::math::nd4j_max<Z>(nd4j::math::nd4j_abs<Z>(reduction), nd4j::math::nd4j_abs<Z>(reduction));
    }
};

// Sample variance (divides by n-1); the precomputed mean arrives in extraParams[0].
template <typename X, typename Z>
class Variance {
public:
    no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0.0f);
    }

    op_def static Z merge(X old, X opOutput, Z *extraParams) {
        return old + opOutput;
    }

    op_def static Z update(X old, X opOutput, Z *extraParams) {
        return old + opOutput;
    }

    op_def static X op(X d1, Z *extraParams) {
        X mean = static_cast<X>(extraParams[0]);
        X ret = d1 - mean;
        return ret * ret;
    }

    op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
        // T bias = extraParams[1];
        // return (reduction - (nd4j::math::nd4j_pow<T>(bias, static_cast<T>(2.0f)) / static_cast<T>(n))) / (n - 1)
        return static_cast<Z>(reduction) / static_cast<Z>(n - 1);
    }
};

/**
* Standard deviation of a buffer
*/
template <typename X, typename Z>
class StandardDeviation {
public:
    no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda

    const static functions::ReduceType reduceType = functions::ReduceType::SUM;

    op_def static X startingValue(const X *input) {
        return static_cast<X>(0.0f);
    }

    op_def static Z merge(X old, X opOutput, Z *extraParams) {
        return old + opOutput;
    }

    op_def static Z update(X old, X opOutput, Z *extraParams) {
        return old + opOutput;
    }

    op_def static Z op(X d1, Z *extraParams) {
        X mean = extraParams[0];
        X ret = d1 - mean;
        return ret * ret;
    }

    // sqrt of the sample variance computed by Variance::postProcess.
    op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
        Z ret = Variance<X,Z>::postProcess(reduction, n, extraParams);
        Z sqrtRet = nd4j::math::nd4j_sqrt<X, Z>(ret);
        return sqrtRet;
    }
};

// Cosine similarity reduce3 op: the reduction accumulates the dot product while
// extraParams[0]/[1] accumulate the squared norms of the two inputs.
template <typename X, typename Y>
class CosineSimilarity {
public:
    static const int extraParamsLen = 2;

    op_def static X *generateExtraParams() {
        //T *extraParams = new T[2];
        return nullptr;
    }

    op_def static void finalizeExtraParams(X *extraParams) {
        //delete[] extraParams;
    }

    op_def static Y startingValue(const X *input) {
        return static_cast<Y>(0.0f);
    }

    // dot / (||x|| * ||y||)
    op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
        return reduction / (nd4j::math::nd4j_sqrt<Y, Y>(extraParams[0]) * nd4j::math::nd4j_sqrt<Y, Y>(extraParams[1]));
    }

    op_def static Y op(X d1, X d2, Y *extraParams) {
        extraParams[0] += static_cast<Y>(d1 * d1);
        extraParams[1] += static_cast<Y>(d2 * d2);
        return static_cast<Y>(d1 * d2);
    }

    op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
        extraParamsTotal[0] += extraParamsLocal[0];
        extraParamsTotal[1] += extraParamsLocal[1];
    }

#ifdef __CUDACC__
    // Device-side variant: the norm accumulators are shared, so updates must be atomic.
    static _CUDA_D inline Y opAtomic(X d1, X d2, Y *extraParams) {
        nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],static_cast<Y>(d1 * d1));
        nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1],static_cast<Y>(d2 * d2));
        return static_cast<Y>(d1 * d2);
    }
#endif

    op_def static Y update(Y old, Y opOutput, Y *extraParams) {
        return old + opOutput;
    }

    op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
        return update(old, opOutput, extraParams);
    }
};

// Jaccard distance reduce3 op: 1 - sum(min)/sum(max). Class continues past this chunk.
template <typename X, typename Y>
class JaccardDistance {
public:
    static const int extraParamsLen = 2;

    op_def static X *generateExtraParams() {
        //T *extraParams = new T[2];
        return nullptr;
    }

    op_def static void finalizeExtraParams(X *extraParams) {
        //delete[] extraParams;
    }

    op_def static Y startingValue(const X *input) {
        return static_cast<X>(0.0f);
    }

    op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
        // num / denom
        return (static_cast<Y>(1.0f)) - (extraParams[0] / extraParams[1]);
    }

    op_def static Y num(X d1, X d2) {
        return nd4j::math::nd4j_min<X>(d1, d2);
    }

    op_def
static Y denom(X d1, X d2) { return nd4j::math::nd4j_max<X>(d1, d2); } op_def static Y op(X d1, X d2, Y *extraParams) { extraParams[0] += static_cast<Y>(num(d1, d2)); extraParams[1] += static_cast<Y>(denom(d1, d2)); return static_cast<Y>(0.0f); } op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) { extraParamsTotal[0] += extraParamsLocal[0]; extraParamsTotal[1] += extraParamsLocal[1]; } #ifdef __CUDACC__ __device__ static inline Y opAtomic(X d1, X d2, Y *extraParams) { nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],num(d1, d2)); nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], denom(d1, d2)); return static_cast<Y>(0.0f); } #endif op_def static Y update(Y old, Y opOutput, Y *extraParams) { return old + opOutput; } op_def static Y merge(Y old, Y opOutput, Y *extraParams) { return update(old, opOutput, extraParams); } }; template <typename X, typename Y> class SimpleHammingDistance { public: static const int extraParamsLen = 0; op_def static X *generateExtraParams() { //T *extraParams = new T[2]; return nullptr; } op_def static void finalizeExtraParams(X *extraParams) { //delete[] extraParams; } op_def static Y startingValue(const X *input) { return static_cast<Y>(0.0f); } op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) { return static_cast<Y>(reduction / n); } op_def static Y op(X d1, X d2, Y *extraParams) { return (d1 == d2) ? 
static_cast<Y>(0.0f) : static_cast<Y>(1.0f); } op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) { } #ifdef __CUDACC__ __device__ static inline Y opAtomic(X d1, X d2, Y *extraParams) { return op(d1, d2, extraParams); } #endif op_def static Y update(Y old, Y opOutput, Y *extraParams) { return old + opOutput; } op_def static Y merge(Y old, Y opOutput, Y *extraParams) { return update(old, opOutput, extraParams); } }; template <typename X, typename Y> class CosineDistance { public: static const int extraParamsLen = 2; op_def static X *generateExtraParams() { //T *extraParams = new T[2]; return nullptr; } op_def static void finalizeExtraParams(X *extraParams) { //delete[] extraParams; } op_def static Y startingValue(const X *input) { return static_cast<Y>(0.0f); } op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) { return (static_cast<Y>(1.0f)) - (reduction / (nd4j::math::nd4j_sqrt<Y, Y>(extraParams[0]) * nd4j::math::nd4j_sqrt<Y, Y>(extraParams[1]))); } op_def static Y op(X d1, X d2, Y *extraParams) { extraParams[0] += static_cast<Y>(nd4j::math::nd4j_abs<X>(d1) * nd4j::math::nd4j_abs<X>(d1)); extraParams[1] += static_cast<Y>(nd4j::math::nd4j_abs<X>(d2) * nd4j::math::nd4j_abs<X>(d2)); return (d1 * d2); } op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) { extraParamsTotal[0] += extraParamsLocal[0]; extraParamsTotal[1] += extraParamsLocal[1]; } #ifdef __CUDACC__ static _CUDA_D inline Y opAtomic(X d1, X d2, Y *extraParams) { nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0], nd4j::math::nd4j_abs<Y>(d1) * nd4j::math::nd4j_abs<Y>(d1)); nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], nd4j::math::nd4j_abs<Y>(d2) * nd4j::math::nd4j_abs<Y>(d2)); return (d1 * d2); } #endif op_def static Y update(Y old, Y opOutput, Y *extraParams) { return old + opOutput; } op_def static Y merge(Y old, Y opOutput, Y *extraParams) { return update(old, opOutput, extraParams); } }; /** * Dot product between 2 
arrays */ template <typename X, typename Y> class Dot { public: static const int extraParamsLen = 0; op_def static X * generateExtraParams() { return nullptr; } op_def static void finalizeExtraParams(X *extraParamsRef) { //no-op //delete[] * extraParamsRef; } op_def static Y startingValue(const X *input) { return static_cast<Y>(0.0f); } op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) { return reduction; } op_def static Y op(X d1, X d2, Y *extraParamsRef) { return static_cast<Y>(d1 * d2); } #ifdef __CUDACC__ __device__ static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) { return op(d1, d2, extraParamsRef); } #endif op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) { return opOutput + old; } op_def static Y merge(Y old, Y opOutput, Y *extraParamsRef) { return update(old, opOutput, extraParamsRef); } op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {} }; /** * Op to check equality within arrays */ template <typename X, typename Z> class EqualsWithEps { public: static const int extraParamsLen = 0; op_def static X * generateExtraParams() { return nullptr; } op_def static void finalizeExtraParams(X *extraParamsRef) { //no-op } op_def static Z startingValue(const X *input) { return static_cast<Z>(0.0f); } op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParamsRef) { return reduction; } op_def static Z op(X d1, X d2, Z *extraParamsRef) { double eps = nd4j::math::nd4j_abs<double>(extraParamsRef[2]); return static_cast<Z>(!nd4j::math::nd4j_eq<X>(d1, d2, eps)); } #ifdef __CUDACC__ __device__ static inline Z opAtomic(X d1, X d2, Z *extraParamsRef) { return op(d1, d2, extraParamsRef); } #endif op_def static Z update(Z old, Z opOutput, Z *extraParamsRef) { return opOutput + old; } op_def static Z merge(X old, Z opOutput, Z *extraParamsRef) { return update(old, opOutput, extraParamsRef); } op_def static void aggregateExtraParams(Z *extraParamsTotal, Z *extraParamsLocal) {} }; template <typename X, 
typename Y> class EuclideanDistance { public: static const int extraParamsLen = 0; op_def static X * generateExtraParams() { return nullptr; } op_def static void finalizeExtraParams(X *extraParamsRef) { //no-op } op_def static Y startingValue(const X *input) { return static_cast<Y>(0.0f); } op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) { return nd4j::math::nd4j_sqrt<Y, Y>(reduction); } op_def static Y op(X d1, X d2, Y *extraParamsRef) { X ret = d1 - d2; return static_cast<Y>(ret * ret); } #ifdef __CUDACC__ __device__ static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) { return op(d1, d2, extraParamsRef); } #endif op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) { return opOutput + old; } op_def static Y merge(Y old, Y opOutput, Y *extraParamsRef) { return update(old, opOutput, extraParamsRef); } op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {} }; template <typename X, typename Y> class ManhattanDistance { public: static const int extraParamsLen = 0; op_def static X * generateExtraParams() { return nullptr; } op_def static void finalizeExtraParams(X *extraParamsRef) { //no-op } op_def static Y startingValue(const X *input) { return static_cast<Y>(0.0f); } op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) { return reduction; } op_def static Y op(X d1, X d2, Y *extraParamsRef) { return nd4j::math::nd4j_abs<X>(d1 - d2); } op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) { return old + opOutput; } op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) { } #ifdef __CUDACC__ __device__ static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) { return op(d1, d2, extraParamsRef); } #endif #ifndef __clang__ #pragma omp declare simd uniform(extraParamsRef) #endif op_def static Y merge(X old, X opOutput, X *extraParamsRef) { return update(old, opOutput, extraParamsRef); } }; template <typename X, typename Z> class IndexAbsoluteMax { public: 
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) { return nd4j::math::nd4j_abs<X>(val); } static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) { opOutput.value = nd4j::math::nd4j_abs<X>(opOutput.value); old.value = nd4j::math::nd4j_abs<X>(old.value); if (opOutput.value > old.value) return opOutput; #ifdef __CUDACC__ // workaround for cuda race condition at merge phase else if (opOutput.value == old.value && opOutput.index < old.index) return opOutput; #elif defined(__GNUC__) #endif return old; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge( functions::indexreduce::IndexValue<X> f1, functions::indexreduce::IndexValue<X> f2, X *extraParams) { if (nd4j::math::nd4j_abs<X>(f1.value) > nd4j::math::nd4j_abs<X>(f2.value)) return f2; return f1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess( functions::indexreduce::IndexValue<X> reduction, int n, int xOffset, X *dx, int incx, X *extraParams, X *result) { return reduction; } static _CUDA_HD inline X startingValue(const X *input) { return 0; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) { functions::indexreduce::IndexValue<X> local; local.value = startingValue(input); local.index = 0; return local; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1, functions::indexreduce::IndexValue<X> d2, X *extraParams) { return d1; } }; template <typename X, typename Z> class FirstIndex { public: static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) { return val; } static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X 
*extraParams) { #ifdef __CUDACC__ if (opOutput.index < 0) return old; #endif auto res = simdOps::MatchCondition<X,X>::op(opOutput.value, extraParams); //printf("res: %f; oldIdx: %i; newIdx: %i\n", res, old.index, opOutput.index); if (res == static_cast<X>(0)) return old; if (old.index < 0) return opOutput; if (old.index > opOutput.index) return opOutput; return old; } static _CUDA_HD inline X startingValue(const X *input) { return -nd4j::DataTypeUtils::infOrMax<X>(); } static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) { functions::indexreduce::IndexValue<X> local; local.value = startingValue(input); local.index = -1; return local; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1, functions::indexreduce::IndexValue<X> d2, X *extraParams) { return d1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge( functions::indexreduce::IndexValue<X> f1, functions::indexreduce::IndexValue<X> f2, X *extraParams) { if (f1.index > f2.index) return f2; return f1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess( functions::indexreduce::IndexValue<X> reduction, int n, int xOffset, X *dx, int incx, X *extraParams, X *result) { return reduction; } }; template <typename X, typename Z> class LastIndex { public: static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) { return val; } static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) { #ifdef __CUDACC__ if (opOutput.index < 0) return old; #endif auto res = simdOps::MatchCondition<X,X>::op(opOutput.value, extraParams); if (res == static_cast<X>(0)) return old; if (old.index < 0) return opOutput; if (old.index < opOutput.index) return opOutput; return old; } static _CUDA_HD inline X startingValue(const X *input) { 
return -nd4j::DataTypeUtils::infOrMax<X>(); } static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) { functions::indexreduce::IndexValue<X> local; local.value = startingValue(input); local.index = -1; return local; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1, functions::indexreduce::IndexValue<X> d2, X *extraParams) { return d1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge( functions::indexreduce::IndexValue<X> f1, functions::indexreduce::IndexValue<X> f2, X *extraParams) { if (f1.index < f2.index) return f2; return f1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess( functions::indexreduce::IndexValue<X> reduction, int n, int xOffset, X *dx, int incx, X *extraParams, X *result) { return reduction; } }; template <typename X, typename Z> class IndexMax { public: static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) { return val; } static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) { if (opOutput.value > old.value) { return opOutput; } #ifdef __CUDACC__ // workaround for cuda race condition at merge phase else if (opOutput.value == old.value && opOutput.index < old.index) return opOutput; #elif defined(__GNUC__) #endif return old; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge( functions::indexreduce::IndexValue<X> f1, functions::indexreduce::IndexValue<X> f2, X *extraParams) { if (f1.value > f2.value) return f2; return f1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess( functions::indexreduce::IndexValue<X> reduction, int n, int xOffset, X *dx, int incx, X *extraParams, X *result) { return reduction; } static _CUDA_HD inline X startingValue(const X *input) { return 
-nd4j::DataTypeUtils::infOrMax<X>(); } static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) { functions::indexreduce::IndexValue<X> local; local.value = startingValue(input); local.index = 0; return local; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1, functions::indexreduce::IndexValue<X> d2, X *extraParams) { return d1; } }; template <typename X, typename Z> class IndexAbsoluteMin { public: static _CUDA_HD inline functions::indexreduce::IndexValue<X> op( functions::indexreduce::IndexValue<X> val, X *extraParams) { return val; } static _CUDA_HD inline X startingValue(const X *input) { return nd4j::DataTypeUtils::infOrMax<X>(); } static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) { functions::indexreduce::IndexValue<X> local; local.value = startingValue(input); local.index = 0; return local; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) { opOutput.value = nd4j::math::nd4j_abs<X>(opOutput.value); old.value = nd4j::math::nd4j_abs<X>(old.value); if (opOutput.value < old.value) return opOutput; #ifdef __CUDACC__ // workaround for cuda race condition at merge phase else if (opOutput.value == old.value && opOutput.index < old.index) return opOutput; #elif defined(__GNUC__) #endif return old; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge( functions::indexreduce::IndexValue<X> f1, functions::indexreduce::IndexValue<X> f2, X *extraParams) { if (nd4j::math::nd4j_abs<X>(f1.value) < nd4j::math::nd4j_abs<X>(f2.value)) return f2; return f1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess( functions::indexreduce::IndexValue<X> reduction, int n, int xOffset, X *dx, int incx, X *extraParams, X *result) { return reduction; } static _CUDA_HD inline 
functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1, functions::indexreduce::IndexValue<X> d2, X *extraParams) { return d1; } }; template <typename X, typename Z> class IndexMin { public: static _CUDA_HD inline functions::indexreduce::IndexValue<X> op( functions::indexreduce::IndexValue<X> val, X *extraParams) { return val; } static _CUDA_HD inline X startingValue(const X *input) { return nd4j::DataTypeUtils::infOrMax<X>(); } static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) { functions::indexreduce::IndexValue<X> local; local.value = startingValue(input); local.index = 0; return local; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) { if (opOutput.value < old.value) return opOutput; #ifdef __CUDACC__ // workaround for cuda race condition at merge phase else if (opOutput.value == old.value && opOutput.index < old.index) return opOutput; #elif defined(__GNUC__) #endif return old; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge( functions::indexreduce::IndexValue<X> f1, functions::indexreduce::IndexValue<X> f2, X *extraParams) { if (f1.value < f2.value) return f2; return f1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess( functions::indexreduce::IndexValue<X> reduction, int n, int xOffset, X *dx, int incx, X *extraParams, X *result) { return reduction; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1, functions::indexreduce::IndexValue<X> d2, X *extraParams) { return d1; } }; template <typename X, typename Z> class SummaryStatsVariance { public: static _CUDA_HD inline Z getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<X> val) { if (biasCorrected) { Z ret = static_cast<Z>(val.varianceBiasCorrected()); if (ret < static_cast<Z>(0.0f)) 
return static_cast<Z>(val.variance()); return ret; } return static_cast<Z>(val.variance()); } static _CUDA_HD inline functions::summarystats::SummaryStatsData<X> op(functions::summarystats::SummaryStatsData<X> d1, Z *extraParams) { return d1; } }; template <typename X, typename Z> class SummaryStatsStandardDeviation { public: static _CUDA_HD inline Z getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<X> val) { if (biasCorrected) { auto ret = static_cast<Z>(val.varianceBiasCorrected()); if (ret < static_cast<Z>(0.0f)) return nd4j::math::nd4j_sqrt<double, Z>(val.variance()); else return nd4j::math::nd4j_sqrt<double, Z>(ret); } return nd4j::math::nd4j_sqrt<double, Z>(val.variance()); } static _CUDA_HD inline functions::summarystats::SummaryStatsData<X> op(functions::summarystats::SummaryStatsData<X> d1, Z *extraParams) { return d1; } }; template <typename X> class DropOut { public: no_op_exec_special_same no_op_exec_special_same_cuda inline _CUDA_D static X op(X d1, X *params) { X prob = params[0]; #ifdef __CUDACC__ X length = params[1]; X tid = blockIdx.x * blockDim.x + threadIdx.x; X rnd = nd4j::math::nd4j_abs<X>(nd4j::math::nd4j_cos<X>(static_cast<X>(clock64()) * static_cast<X>(tid) + static_cast<X>(length) * static_cast<X>(tid))); #else X rnd = static_cast<X>(rand() / RAND_MAX); #endif return rnd >= prob ? static_cast<X>(0.0f) : d1; } }; template <typename X, typename Y, typename Z> class DropOutInverted { public: no_op_exec_special no_op_exec_special_cuda #ifdef __CUDACC__ __device__ #endif inline static Z op(X d1, Y d2, Z *params) { Y prob = d2; #ifdef __CUDACC__ X length = params[1]; X tid = blockIdx.x * blockDim.x + threadIdx.x; X rnd = nd4j::math::nd4j_abs<X>(nd4j::math::nd4j_cos<X>(static_cast<X>(clock64()) * static_cast<X>(tid) + static_cast<X>(length) * static_cast<X>(tid))); #else X rnd = static_cast<X>(rand() / RAND_MAX); #endif return rnd >= static_cast<X>(prob) ? 
static_cast<Z>(0.0f) : reinterpret_cast<Z>(d1 / static_cast<X>(prob)); } }; template <typename X, typename Y, typename Z> class ReplaceNans { public: no_op_exec_special no_op_exec_special_cuda op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_isnan(d1) ? static_cast<Z>(d2) : static_cast<Z>(d1) ; } }; // this op is used for conditional pairwise transforms only template <typename X, typename Y, typename Z> class CompareAndReplace{ public: // op definition for PairWise Transform op_def static Z op(X d1, Y d2, Z *params) { auto zd1 = static_cast<Z>(d1); auto zd2 = static_cast<Z>(d2); auto compare = params[0]; auto eps = params[2]; int mode = (int) params[3]; if (mode == 0) // equals if (nd4j::math::nd4j_abs<Z>(zd1 - compare) <= eps) return zd2; else return zd1; else if (mode == 1) // not equals eps if (nd4j::math::nd4j_abs<Z>(zd1 - compare) > eps) return zd2; else return zd1; else if (mode == 2) // less_than eps if (zd1 < compare) return zd2; else return zd1; else if (mode ==3) // greater_than if (zd1 > compare) return zd2; else return zd1; else if (mode == 4) // less_or_equals_than if (zd1 <= compare) return zd2; else return zd1; else if (mode == 5) // greater_or_equals_than if (zd1 >= compare) return zd2; else return zd1; else if (mode == 6) // abs_less_than if (nd4j::math::nd4j_abs<Z>(zd1) < compare) return zd2; else return zd1; else if (mode == 7) // abs_greater_than if (nd4j::math::nd4j_abs<Z>(zd1) > compare) return zd2; else return zd1; else if (mode == 8) // is inf if (nd4j::math::nd4j_isinf(zd1)) return zd2; else return zd1; else if (mode == 9) // is nan if (nd4j::math::nd4j_isnan(zd1)) return zd2; else return zd1; else if (mode == 10) if (zd1 == compare) return zd2; else return zd1; else if (mode == 11) if (zd1 != compare) return zd2; else return zd1; else if (mode == 12) // abs_greater_or_equals_than if (nd4j::math::nd4j_abs<Z>(zd1) >= compare) return zd2; else return zd1; else if (mode == 13) // abs_less_or_equals_than if 
(nd4j::math::nd4j_abs<Z>(zd1) <= compare) return zd2; else return zd1; else printf("Undefined boolean operation: [%i]\n", mode); return zd1; } }; template <typename X, typename Y, typename Z> class CompareAndSet { public: // op definition for PairWise Transform op_def static Z op(X dX, Y dY, Z *params) { auto d1 = static_cast<Z>(dX); auto d2 = static_cast<Z>(dY); auto compare = params[0]; auto eps = params[2]; auto mode = static_cast<int>(params[3]); if (mode == 0) // equals if (nd4j::math::nd4j_abs<Z>(d2 - compare) <= eps) return d2; else return d1; else if (mode == 1) // not equals if (nd4j::math::nd4j_abs<Z>(d2 - compare) > eps) return d2; else return d1; else if (mode == 2) // less_than if (d2 < compare) return d2; else return d1; else if (mode ==3) // greater_than if (d2 > compare) return d2; else return d1; else if (mode == 4) // less_or_equals_than if (d2 <= compare) return d2; else return d1; else if (mode == 5) // greater_or_equals_than if (d2 >= compare) return d2; else return d1; else if (mode == 6) // abs_less_than if (nd4j::math::nd4j_abs<Z>(d2) < compare) return d2; else return d1; else if (mode == 7) // abs_greater_than if (nd4j::math::nd4j_abs<Z>(d2) > compare) return d2; else return d1; else if (mode == 8) // is inf if (nd4j::math::nd4j_isinf(d2)) return d2; else return d1; else if (mode == 9) // is nan if (nd4j::math::nd4j_isnan(d2)) return d2; else return d1; else if (mode == 10) if (d2 == compare) return d2; else return d1; else if (mode == 11) if (d2 != compare) return d2; else return d1; else if (mode == 12) // abs_greater_or_equals_than if (nd4j::math::nd4j_abs<Z>(d1) >= compare) return d2; else return d1; else if (mode == 13) // abs_less_or_equals_than if (nd4j::math::nd4j_abs<Z>(d1) <= compare) return d2; else return d1; else printf("Undefined boolean operation: [%i]\n", mode); return d1; } }; template <typename X> class CompareAndSetTransform { public: no_op_exec_special_same no_op_exec_special_same_cuda // op definition for Transform 
op_def static X op(X d1, X *params) { auto compare = params[0]; auto set = params[1]; auto eps = params[2]; // with mode == 0 we do set if d1 equals to compare, and with mode == 1 - we go otherwise int mode = (int) params[3]; if (mode == 0) // equals if (nd4j::math::nd4j_abs<X>(d1 - compare) <= eps) return set; else return d1; //return nd4j::math::nd4j_abs<T>(d1 - compare) <= eps ? set : d1; else if (mode == 1) // not equals if (nd4j::math::nd4j_abs<X>(d1 - compare) > eps) return set; else return d1; //return nd4j::math::nd4j_abs<T>(d1 - compare) > eps ? set : d1; else if (mode == 2) // less_than if (d1 < compare) return set; else return d1; else if (mode ==3) // greater_than if (d1 > compare) return set; else return d1; else if (mode == 4) // less_or_equals_than if (d1 <= compare) return set; else return d1; else if (mode == 5) // greater_or_equals_than if (d1 >= compare) return set; else return d1; else if (mode == 6) // abs_less_than if (nd4j::math::nd4j_abs<X>(d1) < compare) return set; else return d1; else if (mode == 7) // abs_greater_than if (nd4j::math::nd4j_abs<X>(d1) > compare) return set; else return d1; else if (mode == 8) // is inf if (nd4j::math::nd4j_isinf(d1)) return set; else return d1; else if (mode == 9) // is nan if (nd4j::math::nd4j_isnan(d1)) return set; else return d1; else if (mode == 10) if (d1 == compare) return set; else return d1; else if (mode == 11) if (d1 != compare) return set; else return d1; else if (mode == 12) // abs_greater_or_equals_than if (nd4j::math::nd4j_abs<X>(d1) >= compare) return set; else return d1; else if (mode == 13) // abs_less_or_equals_than if (nd4j::math::nd4j_abs<X>(d1) <= compare) return set; else return d1; else printf("Undefined boolean operation: [%i]\n", mode); return d1; } }; } #endif
shrink_mex.c
/* * Implements an equivalent of: * shrink = @(X,lambda) sign(X).*max( abs(X) - lambda, 0 ); * * but much more memory efficient, and about as fast (and faster for large, * memory-limited systems) * * Variant: * shrink = @(X,lambda,offset) sign(X-offset).*max( abs(X-offset) - lambda, 0 ); * * * eg. x = randn(8e3); *tic; y = shrink( x, .3 ); toc Elapsed time is 1.200497 seconds. *tic; yy = shrink_mex( x, .3 ); toc Elapsed time is 0.418361 seconds. * * * No special compilation instructions needed * Just do: mex shrink_mex.c (assuming you've setup your mex/compiler) * * Stephen Becker, 3/15/18 * */ #include <math.h> #include "mex.h" /* Input Arguments */ #define X_IN prhs[0] #define LAMBDA prhs[1] #define OFFSET prhs[2] /* Output Arguments */ #define Y_OUT plhs[0] #if !defined(MAX) #define MAX(A, B) ((A) > (B) ? (A) : (B)) #endif /* * http://hpac.rwth-aachen.de/teaching/pp-16/material/08.OpenMP-4.pdf * - Aliasing issues * double * __restrict__ a * - Alignment * __assume_aligned(a, 32); // Intel * - *-ffast-math /fp:fast -fp-model fast=2 * - inline functions *#pragma omp declare simd uniform(a) linear(1: b) * *file:///Users/srbecker/Downloads/SIMD%20Vectorization%20with%20OpenMP.PDF * AVX is 4 doubles (256 bit), AVX-512/MIC is twice that (SSE is half that) * "Before OpenMP 4.0..." * */ void shrink( const size_t n, const double lambda, const double *x, double *y ) { size_t i; /* #pragma omp simd */ /*#pragma omp parallel for simd*/ /*#pragma omp simd aligned(a, b: 32)*/ for ( i=0; i < n; i++ ){ y[i] = copysign( MAX( 0.0, fabs(x[i]) - lambda ), x[i] ); /* How much speedup from SIMD could we expect? Not much. Even this simple code takes about the same time */ /* y[i] = 3.0; */ } /* copysign( double x, double y ); C99 standard * Composes a floating point value with the magnitude of x and the sign of y. 
* see also fabs, signbit * */ } void shrink_offset( const size_t n, const double lambda, const double *x, double *y, const double offset ) { size_t i; for ( i=0; i < n; i++ ) y[i] = copysign( MAX( 0.0, fabs(x[i]-offset) - lambda ), x[i]-offset ); } void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray*prhs[] ) { double *xp; /* pointer to input array x */ double *yp; /* pointer to output array y */ double lambda, offset, *offset_ptr; size_t m,n, j; /* Check for proper number of arguments */ if (nrhs < 2) { mexErrMsgIdAndTxt( "MATLAB:shrink_mex:invalidNumInputs", "Two to three input arguments required."); } else if (nlhs > 1) { mexErrMsgIdAndTxt( "MATLAB:shrink_mex:maxlhs", "Too many output arguments."); } if (!mxIsDouble(X_IN) || mxIsComplex(X_IN) || mxIsSparse(X_IN) ) { mexErrMsgIdAndTxt( "MATLAB:shrink_mex:invalidX", "SHRINK_MEX requires a real non-sparse double input."); } /* do these after verifying nrhs >= 2, otherwise error! */ m = mxGetM(X_IN); n = mxGetN(X_IN); lambda = mxGetScalar(LAMBDA); /* Create a matrix for the return argument */ Y_OUT = mxCreateDoubleMatrix( (mwSize)m, (mwSize)n, mxREAL); /* Assign pointers to the various parameters */ yp = mxGetPr(Y_OUT); xp = mxGetPr(X_IN); if ( nrhs > 2 ) { /* the offset could be a scalar or vector */ if ( mxGetNumberOfElements(OFFSET) > 1 ) { if ( mxGetNumberOfElements(OFFSET) != n ) { mexErrMsgIdAndTxt( "MATLAB:shrink_mex:invalidOffset", "Offset should be scalar or vector of size equal to number of columns of main input."); } offset_ptr = mxGetPr(OFFSET); for (j=0; j<n; j++) shrink_offset( m, lambda, xp+j*m, yp+j*m, offset_ptr[j] ); } else { offset = mxGetScalar(OFFSET); shrink_offset( m*n, lambda, xp, yp, offset ); } } else { shrink( m*n, lambda, xp, yp ); } return; }
GB_unop__identity_fc64_int8.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop_apply__identity_fc64_int8
// op(A') function: GB_unop_tran__identity_fc64_int8

// C type: GxB_FC64_t
// A type: int8_t
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop: cij = aij

// This kernel applies the IDENTITY unary operator while typecasting an int8_t
// input to a double-complex (GxB_FC64_t) output: the int8 value becomes the
// real part, the imaginary part is zero.

#define GB_ATYPE \
    int8_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: output equals the already-cast input)
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int8_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
// (set via GB_control.h to shrink the compiled library)
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_fc64_int8
(
    GxB_FC64_t *Cx,       // Cx and Ax may be aliased
    const int8_t *Ax,
    int64_t anz,          // number of entries to process
    int nthreads          // OpenMP thread count for the elementwise loop
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // embarrassingly parallel: each entry is cast and copied independently
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int8_t aij = Ax [p] ;
        GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_fc64_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the actual transpose loop is shared template code, specialized by the
    // GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ch_ompss.c
#include "ch_common.h"
#include "../timing.h"
#include "../timing_override.h"
#include <stdint.h>

/*
 * Distributed tiled Cholesky factorization over MPI + OpenMP, optionally
 * routing the tile kernels (potrf/trsm/gemm/syrk) through the Chameleon
 * task runtime (CHAMELEON / CHAMELEON_TARGET builds).
 *
 * ts         - tile edge length; tiles are ts*ts doubles, stored contiguously
 * nt         - number of tiles per matrix dimension
 * A          - nt x nt array of tile pointers (only locally-owned tiles valid)
 * B          - scratch tile for receiving the remote diagonal block A[k][k]
 * C          - nt scratch tiles for receiving remote panel blocks A[k][i]
 * block_rank - owning MPI rank of each tile, indexed [row*nt + col]
 *
 * Uses globals/helpers from ch_common.h: mype, np, get_send_flags,
 * get_recv_flag, reset_send_flags, wait, waitall.
 *
 * Fixes vs. previous revision: send_reqs was leaked (only send_flags was
 * freed); literal_ts was built via *(void**)(&ts), an 8-byte read from a
 * 4-byte int (out-of-bounds read); unused `res` locals removed.
 */
void cholesky_mpi(const int ts, const int nt, double * SPEC_RESTRICT A[nt][nt], double * SPEC_RESTRICT B, double * SPEC_RESTRICT C[nt], int *block_rank)
{
#if defined(CHAMELEON) || defined(CHAMELEON_TARGET)
    #pragma omp parallel
    {
        chameleon_thread_init();
    }
    // necessary to be aware of binary base addresses to calculate offset for target entry functions
    chameleon_determine_base_addresses((void *)&cholesky_mpi);
#endif
#ifdef USE_TIMING
    #pragma omp parallel
    #pragma omp master
    INIT_TIMING(omp_get_num_threads());
#endif
    int send_cnt = 0;
    char recv_flag = 0, *send_flags = malloc(sizeof(char) * np);
    MPI_Request recv_req, *send_reqs = malloc(sizeof(MPI_Request) * np);
#ifdef TRACE
    static int event_communication = -1;
    char* event_name = "communication";
    if(event_communication == -1) {
        int ierr;
        ierr = VT_funcdef(event_name, VT_NOCLASS, &event_communication);
    }
#endif

    START_TIMING(TIME_TOTAL);
    #pragma omp parallel
    {
        // right-looking factorization: one panel step k per iteration
        for (int k = 0; k < nt; k++) {
#if PRINT_DEBUG
            my_print("Iteration [%03d][000]\tR#%d T#%d (OS_TID:%ld): --> 0 Starting new loop iter\n", k, mype, omp_get_thread_num(), syscall(SYS_gettid));
#endif
            if (block_rank[k*nt+k] == mype) {
                #pragma omp single
                {
                    // first calculate diagonal element
                    omp_potrf(A[k][k], ts, ts);
                }
                // owner broadcasts the factored diagonal tile to every rank
                // that holds a tile in column k below the diagonal
                #pragma omp master
                {
                    START_TIMING(TIME_COMM);
                    send_cnt = 0;
                    reset_send_flags(send_flags);
                    send_cnt = get_send_flags(send_flags, block_rank, k, k, k+1, nt-1, nt);
                    if (send_cnt != 0) {
#ifdef TRACE
                        VT_begin(event_communication);
#endif
                        int exec_wait = 0;
                        send_cnt = 0;
                        for (int dst = 0; dst < np; dst++) {
                            if (send_flags[dst] && dst != mype) {
                                exec_wait = 1;
                                MPI_Isend(A[k][k], ts*ts, MPI_DOUBLE, dst, k*nt+k, MPI_COMM_WORLD, &send_reqs[send_cnt++]);
                            }
                        }
                        if(exec_wait)
                            waitall(send_reqs, send_cnt);
#ifdef TRACE
                        VT_end(event_communication);
#endif
                    }
                    END_TIMING(TIME_COMM);
                }
            } else {
                // non-owner: receive the diagonal tile into scratch B if any
                // of our tiles in column k need it
                #pragma omp single
                {
                    START_TIMING(TIME_COMM);
                    recv_flag = 0;
                    get_recv_flag(&recv_flag, block_rank, k, k, k+1, nt-1, nt);
                    if (recv_flag) {
#ifdef TRACE
                        VT_begin(event_communication);
#endif
                        MPI_Irecv(B, ts*ts, MPI_DOUBLE, block_rank[k*nt+k], k*nt+k, MPI_COMM_WORLD, &recv_req);
                        wait(&recv_req);
#ifdef TRACE
                        VT_end(event_communication);
#endif
                    }
                    END_TIMING(TIME_COMM);
                }
            }

            // ts passed "by value" through the runtime's void* literal-argument
            // slot; zero-extension is well-defined, unlike the former
            // *(void**)(&ts) type-pun which over-read the 4-byte int.
            void* literal_ts = (void*)(uintptr_t)(unsigned int)ts;

            // panel update: triangular solves on column k, spread over threads
            #pragma omp for nowait
            for (int i = k + 1; i < nt; i++) {
                if (block_rank[k*nt+i] == mype) {
                    if (block_rank[k*nt+k] == mype) {
                        // diagonal tile is local
                        double * SPEC_RESTRICT tmp_a_k_k = A[k][k];
                        double * SPEC_RESTRICT tmp_a_k_i = A[k][i];
#ifdef CHAMELEON_TARGET
                        #pragma omp target map(to: tmp_a_k_k[0:ts*ts]) map(tofrom: tmp_a_k_i[0:ts*ts]) device(1002)
                        {
                            omp_trsm(tmp_a_k_k, tmp_a_k_i, ts, ts);
                        }
#elif CHAMELEON
                        chameleon_map_data_entry_t* args = (chameleon_map_data_entry_t*) malloc(4*sizeof(chameleon_map_data_entry_t));
                        args[0] = chameleon_map_data_entry_create(tmp_a_k_k, ts*ts*sizeof(double), CHAM_OMP_TGT_MAPTYPE_TO);
                        args[1] = chameleon_map_data_entry_create(tmp_a_k_i, ts*ts*sizeof(double), CHAM_OMP_TGT_MAPTYPE_TO | CHAM_OMP_TGT_MAPTYPE_FROM);
                        args[2] = chameleon_map_data_entry_create(literal_ts, sizeof(void*), CHAM_OMP_TGT_MAPTYPE_TO | CHAM_OMP_TGT_MAPTYPE_LITERAL);
                        args[3] = chameleon_map_data_entry_create(literal_ts, sizeof(void*), CHAM_OMP_TGT_MAPTYPE_TO | CHAM_OMP_TGT_MAPTYPE_LITERAL);
                        cham_migratable_task_t *cur_task = chameleon_create_task((void *)&omp_trsm, 4, args);
                        chameleon_add_task(cur_task);
                        free(args);
#else
                        #pragma omp task
                        {
                            omp_trsm(tmp_a_k_k, tmp_a_k_i, ts, ts);
                        }
#endif
                    } else {
                        // diagonal tile was received into scratch B
                        double * SPEC_RESTRICT tmp_a_k_i = A[k][i];
#ifdef CHAMELEON_TARGET
                        #pragma omp target map(to: B[0:ts*ts]) map(tofrom: tmp_a_k_i[0:ts*ts]) device(1002)
                        {
                            omp_trsm(B, tmp_a_k_i, ts, ts);
                        }
#elif CHAMELEON
                        chameleon_map_data_entry_t* args = (chameleon_map_data_entry_t*) malloc(4*sizeof(chameleon_map_data_entry_t));
                        args[0] = chameleon_map_data_entry_create(B, ts*ts*sizeof(double), CHAM_OMP_TGT_MAPTYPE_TO);
                        args[1] = chameleon_map_data_entry_create(tmp_a_k_i, ts*ts*sizeof(double), CHAM_OMP_TGT_MAPTYPE_TO | CHAM_OMP_TGT_MAPTYPE_FROM);
                        args[2] = chameleon_map_data_entry_create(literal_ts, sizeof(void*), CHAM_OMP_TGT_MAPTYPE_TO | CHAM_OMP_TGT_MAPTYPE_LITERAL);
                        args[3] = chameleon_map_data_entry_create(literal_ts, sizeof(void*), CHAM_OMP_TGT_MAPTYPE_TO | CHAM_OMP_TGT_MAPTYPE_LITERAL);
                        cham_migratable_task_t *cur_task = chameleon_create_task((void *)&omp_trsm, 4, args);
                        chameleon_add_task(cur_task);
                        free(args);
#else
                        #pragma omp task
                        {
                            omp_trsm(B, tmp_a_k_i, ts, ts);
                        }
#endif
                    }
                }
            }

            // all trsm results must exist before the trailing update exchanges them
#if defined(CHAMELEON) || defined(CHAMELEON_TARGET)
            chameleon_distributed_taskwait(0);
#else
            #pragma omp barrier
#endif

            // trailing-submatrix update: one thread does communication and
            // task creation, worker threads execute the spawned tasks
            #pragma omp single nowait
            {
                for (int i = k + 1; i < nt; i++) {
#if PRINT_DEBUG
                    my_print("Iteration [%03d][%03d]\tR#%d T#%d (OS_TID:%ld): --> 0 Begin\n", k, i, mype, omp_get_thread_num(), syscall(SYS_gettid));
#endif
                    if (block_rank[k*nt+i] != mype) {
                        // receive A[k][i] into scratch C[i] if any local tile
                        // of the trailing update consumes it
                        START_TIMING(TIME_COMM);
                        recv_flag = 0;
                        get_recv_flag(&recv_flag, block_rank, k+1, i-1, i, i, nt);
                        get_recv_flag(&recv_flag, block_rank, i, i, i+1, nt-1, nt);
                        get_recv_flag(&recv_flag, block_rank, i, i, i, i, nt);
                        if (recv_flag) {
#ifdef TRACE
                            VT_begin(event_communication);
#endif
#if PRINT_DEBUG
                            my_print("Iteration [%03d][%03d]\tR#%d T#%d (OS_TID:%ld): --> 1 Recieving from R#%d - Start\n", k, i,mype, omp_get_thread_num(), syscall(SYS_gettid), block_rank[k*nt+i]);
#endif
                            MPI_Irecv(C[i], ts*ts, MPI_DOUBLE, block_rank[k*nt+i], k*nt+i, MPI_COMM_WORLD, &recv_req);
                            wait(&recv_req);
#if PRINT_DEBUG
                            my_print("Iteration [%03d][%03d]\tR#%d T#%d (OS_TID:%ld): --> 2 Recieving from R#%d - zComplete\n", k, i,mype, omp_get_thread_num(), syscall(SYS_gettid), block_rank[k*nt+i]);
#endif
#ifdef TRACE
                            VT_end(event_communication);
#endif
                        }
                        END_TIMING(TIME_COMM);
                    } else {
                        // owner of A[k][i]: send it to every rank that needs it
                        START_TIMING(TIME_COMM);
                        send_cnt = 0;
                        reset_send_flags(send_flags);
                        send_cnt += get_send_flags(send_flags, block_rank, k+1, i-1, i, i, nt);
                        send_cnt += get_send_flags(send_flags, block_rank, i, i, i+1, nt-1, nt);
                        send_cnt += get_send_flags(send_flags, block_rank, i, i, i, i, nt);
                        if (send_cnt != 0) {
#ifdef TRACE
                            VT_begin(event_communication);
#endif
                            send_cnt = 0;
                            int exec_wait = 0;
                            for (int dst = 0; dst < np; dst++) {
                                if (send_flags[dst] && dst != mype) {
#if PRINT_DEBUG
                                    my_print("Iteration [%03d][%03d]\tR#%d T#%d (OS_TID:%ld): --> 1 Sending to R#%d - Start\n", k, i,mype, omp_get_thread_num(), syscall(SYS_gettid), dst);
#endif
                                    exec_wait = 1;
                                    MPI_Isend(A[k][i], ts*ts, MPI_DOUBLE, dst, k*nt+i, MPI_COMM_WORLD, &send_reqs[send_cnt++]);
                                }
                            }
                            if(exec_wait) {
                                waitall(send_reqs, send_cnt);
#if PRINT_DEBUG
                                my_print("Iteration [%03d][%03d]\tR#%d T#%d (OS_TID:%ld): --> 2 Sending zComplete\n", k, i,mype, omp_get_thread_num(), syscall(SYS_gettid));
#endif
                            }
#ifdef TRACE
                            VT_end(event_communication);
#endif
                        }
                        END_TIMING(TIME_COMM);
                    }
#if PRINT_DEBUG
                    my_print("Iteration [%03d][%03d]\tR#%d T#%d (OS_TID:%ld): --> 3 Comm finished\n", k, i,mype, omp_get_thread_num(), syscall(SYS_gettid));
#endif
                    // temporary pointers to be able to define slices for offloading
                    double * SPEC_RESTRICT tmp_a_k_i = A[k][i];
                    double * SPEC_RESTRICT tmp_a_i_i = A[i][i];
                    double * SPEC_RESTRICT tmp_c_i = C[i];
                    {
                        START_TIMING(TIME_CREATE);
                        // gemm updates for column i of the trailing submatrix;
                        // local vs. received (C[...]) operands are chosen per tile
                        for (int j = k + 1; j < i; j++) {
                            // temporary pointers to be able to define slices for offloading
                            double * SPEC_RESTRICT tmp_a_k_j = A[k][j];
                            double * SPEC_RESTRICT tmp_a_j_i = A[j][i];
                            double * SPEC_RESTRICT tmp_c_j = C[j];
                            if (block_rank[j*nt+i] == mype) {
                                if (block_rank[k*nt+i] == mype && block_rank[k*nt+j] == mype) {
#ifdef CHAMELEON_TARGET
                                    #pragma omp target map(to: tmp_a_k_i[0:ts*ts], tmp_a_k_j[0:ts*ts]) map(tofrom: tmp_a_j_i[0:ts*ts]) device(1002)
                                    {
                                        omp_gemm(tmp_a_k_i, tmp_a_k_j, tmp_a_j_i, ts, ts);
                                    }
#elif CHAMELEON
                                    chameleon_map_data_entry_t* args = (chameleon_map_data_entry_t*) malloc(5*sizeof(chameleon_map_data_entry_t));
                                    args[0] = chameleon_map_data_entry_create(tmp_a_k_i, ts*ts*sizeof(double), CHAM_OMP_TGT_MAPTYPE_TO);
                                    args[1] = chameleon_map_data_entry_create(tmp_a_k_j, ts*ts*sizeof(double), CHAM_OMP_TGT_MAPTYPE_TO);
                                    args[2] = chameleon_map_data_entry_create(tmp_a_j_i, ts*ts*sizeof(double), CHAM_OMP_TGT_MAPTYPE_TO | CHAM_OMP_TGT_MAPTYPE_FROM);
                                    args[3] = chameleon_map_data_entry_create(literal_ts, sizeof(void*), CHAM_OMP_TGT_MAPTYPE_TO | CHAM_OMP_TGT_MAPTYPE_LITERAL);
                                    args[4] = chameleon_map_data_entry_create(literal_ts, sizeof(void*), CHAM_OMP_TGT_MAPTYPE_TO | CHAM_OMP_TGT_MAPTYPE_LITERAL);
                                    cham_migratable_task_t *cur_task = chameleon_create_task((void *)&omp_gemm, 5, args);
                                    chameleon_add_task(cur_task);
                                    free(args);
#else
                                    #pragma omp task
                                    {
                                        omp_gemm(tmp_a_k_i, tmp_a_k_j, tmp_a_j_i, ts, ts);
                                    }
#endif
                                } else if (block_rank[k*nt+i] != mype && block_rank[k*nt+j] == mype) {
#ifdef CHAMELEON_TARGET
                                    #pragma omp target map(to: tmp_c_i[0:ts*ts], tmp_a_k_j[0:ts*ts]) map(tofrom: tmp_a_j_i[0:ts*ts]) device(1002)
                                    {
                                        omp_gemm(tmp_c_i, tmp_a_k_j, tmp_a_j_i, ts, ts);
                                    }
#elif CHAMELEON
                                    chameleon_map_data_entry_t* args = (chameleon_map_data_entry_t*) malloc(5*sizeof(chameleon_map_data_entry_t));
                                    args[0] = chameleon_map_data_entry_create(tmp_c_i, ts*ts*sizeof(double), CHAM_OMP_TGT_MAPTYPE_TO);
                                    args[1] = chameleon_map_data_entry_create(tmp_a_k_j, ts*ts*sizeof(double), CHAM_OMP_TGT_MAPTYPE_TO);
                                    args[2] = chameleon_map_data_entry_create(tmp_a_j_i, ts*ts*sizeof(double), CHAM_OMP_TGT_MAPTYPE_TO | CHAM_OMP_TGT_MAPTYPE_FROM);
                                    args[3] = chameleon_map_data_entry_create(literal_ts, sizeof(void*), CHAM_OMP_TGT_MAPTYPE_TO | CHAM_OMP_TGT_MAPTYPE_LITERAL);
                                    args[4] = chameleon_map_data_entry_create(literal_ts, sizeof(void*), CHAM_OMP_TGT_MAPTYPE_TO | CHAM_OMP_TGT_MAPTYPE_LITERAL);
                                    cham_migratable_task_t *cur_task = chameleon_create_task((void *)&omp_gemm, 5, args);
                                    chameleon_add_task(cur_task);
                                    free(args);
#else
                                    #pragma omp task
                                    {
                                        omp_gemm(tmp_c_i, tmp_a_k_j, tmp_a_j_i, ts, ts);
                                    }
#endif
                                } else if (block_rank[k*nt+i] == mype && block_rank[k*nt+j] != mype) {
#ifdef CHAMELEON_TARGET
                                    #pragma omp target map(to: tmp_a_k_i[0:ts*ts], tmp_c_j[0:ts*ts]) map(tofrom: tmp_a_j_i[0:ts*ts]) device(1002)
                                    {
                                        omp_gemm(tmp_a_k_i, tmp_c_j, tmp_a_j_i, ts, ts);
                                    }
#elif CHAMELEON
                                    chameleon_map_data_entry_t* args = (chameleon_map_data_entry_t*) malloc(5*sizeof(chameleon_map_data_entry_t));
                                    args[0] = chameleon_map_data_entry_create(tmp_a_k_i, ts*ts*sizeof(double), CHAM_OMP_TGT_MAPTYPE_TO);
                                    args[1] = chameleon_map_data_entry_create(tmp_c_j, ts*ts*sizeof(double), CHAM_OMP_TGT_MAPTYPE_TO);
                                    args[2] = chameleon_map_data_entry_create(tmp_a_j_i, ts*ts*sizeof(double), CHAM_OMP_TGT_MAPTYPE_TO | CHAM_OMP_TGT_MAPTYPE_FROM);
                                    args[3] = chameleon_map_data_entry_create(literal_ts, sizeof(void*), CHAM_OMP_TGT_MAPTYPE_TO | CHAM_OMP_TGT_MAPTYPE_LITERAL);
                                    args[4] = chameleon_map_data_entry_create(literal_ts, sizeof(void*), CHAM_OMP_TGT_MAPTYPE_TO | CHAM_OMP_TGT_MAPTYPE_LITERAL);
                                    cham_migratable_task_t *cur_task = chameleon_create_task((void *)&omp_gemm, 5, args);
                                    chameleon_add_task(cur_task);
                                    free(args);
#else
                                    #pragma omp task
                                    {
                                        omp_gemm(tmp_a_k_i, tmp_c_j, tmp_a_j_i, ts, ts);
                                    }
#endif
                                } else {
#ifdef CHAMELEON_TARGET
                                    #pragma omp target map(to: tmp_c_i[0:ts*ts], tmp_c_j[0:ts*ts]) map(tofrom: tmp_a_j_i[0:ts*ts]) device(1002)
                                    {
                                        omp_gemm(tmp_c_i, tmp_c_j, tmp_a_j_i, ts, ts);
                                    }
#elif CHAMELEON
                                    chameleon_map_data_entry_t* args = (chameleon_map_data_entry_t*) malloc(5*sizeof(chameleon_map_data_entry_t));
                                    args[0] = chameleon_map_data_entry_create(tmp_c_i, ts*ts*sizeof(double), CHAM_OMP_TGT_MAPTYPE_TO);
                                    args[1] = chameleon_map_data_entry_create(tmp_c_j, ts*ts*sizeof(double), CHAM_OMP_TGT_MAPTYPE_TO);
                                    args[2] = chameleon_map_data_entry_create(tmp_a_j_i, ts*ts*sizeof(double), CHAM_OMP_TGT_MAPTYPE_TO | CHAM_OMP_TGT_MAPTYPE_FROM);
                                    args[3] = chameleon_map_data_entry_create(literal_ts, sizeof(void*), CHAM_OMP_TGT_MAPTYPE_TO | CHAM_OMP_TGT_MAPTYPE_LITERAL);
                                    args[4] = chameleon_map_data_entry_create(literal_ts, sizeof(void*), CHAM_OMP_TGT_MAPTYPE_TO | CHAM_OMP_TGT_MAPTYPE_LITERAL);
                                    cham_migratable_task_t *cur_task = chameleon_create_task((void *)&omp_gemm, 5, args);
                                    chameleon_add_task(cur_task);
                                    free(args);
#else
                                    #pragma omp task
                                    {
                                        omp_gemm(tmp_c_i, tmp_c_j, tmp_a_j_i, ts, ts);
                                    }
#endif
                                }
                            }
                        }
#if PRINT_DEBUG
                        my_print("Iteration [%03d][%03d]\tR#%d T#%d (OS_TID:%ld): --> 4 Gemm Tasks created\n", k, i,mype, omp_get_thread_num(), syscall(SYS_gettid));
#endif
                        // symmetric rank-k update of the diagonal tile A[i][i]
                        if (block_rank[i*nt+i] == mype) {
                            if (block_rank[k*nt+i] == mype) {
#ifdef CHAMELEON_TARGET
                                // NOTE(review): map directions here (tofrom on the
                                // input, to on the updated tile) differ from the
                                // CHAMELEON arg flags below -- looks swapped; confirm
                                #pragma omp target map(tofrom: tmp_a_k_i[0:ts*ts]) map(to: tmp_a_i_i[0:ts*ts]) device(1002)
                                {
                                    omp_syrk(tmp_a_k_i, tmp_a_i_i, ts, ts);
                                }
#elif CHAMELEON
                                chameleon_map_data_entry_t* args = (chameleon_map_data_entry_t*) malloc(4*sizeof(chameleon_map_data_entry_t));
                                args[0] = chameleon_map_data_entry_create(tmp_a_k_i, ts*ts*sizeof(double), CHAM_OMP_TGT_MAPTYPE_TO);
                                args[1] = chameleon_map_data_entry_create(tmp_a_i_i, ts*ts*sizeof(double), CHAM_OMP_TGT_MAPTYPE_TO | CHAM_OMP_TGT_MAPTYPE_FROM);
                                args[2] = chameleon_map_data_entry_create(literal_ts, sizeof(void*), CHAM_OMP_TGT_MAPTYPE_TO | CHAM_OMP_TGT_MAPTYPE_LITERAL);
                                args[3] = chameleon_map_data_entry_create(literal_ts, sizeof(void*), CHAM_OMP_TGT_MAPTYPE_TO | CHAM_OMP_TGT_MAPTYPE_LITERAL);
                                cham_migratable_task_t *cur_task = chameleon_create_task((void *)&omp_syrk, 4, args);
                                chameleon_add_task(cur_task);
                                free(args);
#else
                                #pragma omp task
                                {
                                    omp_syrk(tmp_a_k_i, tmp_a_i_i, ts, ts);
                                }
#endif
                            } else {
#ifdef CHAMELEON_TARGET
                                #pragma omp target map(tofrom: tmp_c_i[0:ts*ts]) map(to: tmp_a_i_i[0:ts*ts]) device(1002)
                                {
                                    omp_syrk(tmp_c_i, tmp_a_i_i, ts, ts);
                                }
#elif CHAMELEON
                                chameleon_map_data_entry_t* args = (chameleon_map_data_entry_t*) malloc(4*sizeof(chameleon_map_data_entry_t));
                                args[0] = chameleon_map_data_entry_create(tmp_c_i, ts*ts*sizeof(double), CHAM_OMP_TGT_MAPTYPE_TO);
                                args[1] = chameleon_map_data_entry_create(tmp_a_i_i, ts*ts*sizeof(double), CHAM_OMP_TGT_MAPTYPE_TO | CHAM_OMP_TGT_MAPTYPE_FROM);
                                args[2] = chameleon_map_data_entry_create(literal_ts, sizeof(void*), CHAM_OMP_TGT_MAPTYPE_TO | CHAM_OMP_TGT_MAPTYPE_LITERAL);
                                args[3] = chameleon_map_data_entry_create(literal_ts, sizeof(void*), CHAM_OMP_TGT_MAPTYPE_TO | CHAM_OMP_TGT_MAPTYPE_LITERAL);
                                cham_migratable_task_t *cur_task = chameleon_create_task((void *)&omp_syrk, 4, args);
                                chameleon_add_task(cur_task);
                                free(args);
#else
                                #pragma omp task
                                {
                                    omp_syrk(tmp_c_i, tmp_a_i_i, ts, ts);
                                }
#endif
                            }
                        }
#if PRINT_DEBUG
                        my_print("Iteration [%03d][%03d]\tR#%d T#%d (OS_TID:%ld): --> 5 Syrk Tasks created\n", k, i,mype, omp_get_thread_num(), syscall(SYS_gettid));
#endif
                        END_TIMING(TIME_CREATE);
                    }
                }
            }
#if PRINT_DEBUG
            my_print("Iteration [%03d][998]\tR#%d T#%d (OS_TID:%ld): --> 6 Proceeding to chameleon_distributed_taskwait(...)/barrier\n", k, mype, omp_get_thread_num(), syscall(SYS_gettid));
#endif
            // drain all trailing-update tasks before advancing to step k+1
#if defined(CHAMELEON) || defined(CHAMELEON_TARGET)
            chameleon_distributed_taskwait(0);
#else
            #pragma omp barrier
#endif
#if PRINT_DEBUG
            my_print("Iteration [%03d][999]\tR#%d T#%d (OS_TID:%ld): --> 7 Finished chameleon_distributed_taskwait(...)/barrier\n", k, mype, omp_get_thread_num(), syscall(SYS_gettid));
#endif
        }
    } /* end omp parallel */
    END_TIMING(TIME_TOTAL);

    MPI_Barrier(MPI_COMM_WORLD);
    #pragma omp parallel
    #pragma omp master
    PRINT_TIMINGS(omp_get_num_threads());
    FREE_TIMING();
#if defined(CHAMELEON) || defined(CHAMELEON_TARGET)
    chameleon_finalize();
#endif
    free(send_flags);
    free(send_reqs);   // fix: was leaked (only send_flags was freed)
}
process.c
#include <stdio.h> #include <string.h> #include <stdint.h> #include <stdlib.h> #include "process.h" #include "util.h" #include "log.h" #include "dupe.h" #include "add_const.h" #include "rleft_const.h" #include "add_pattern.h" #include "fble_rot.h" typedef enum { BUF, BUF_IND, } mode; typedef enum { BUF_FIL_GLO, BUF_FIL_GLO_LOC, } unmode; typedef struct { short pos; short len; void (*func)(); void (*un_func)(); mode args; unmode un_args; short index; } flist; // input to generate dispatch table flist ops[] = { { AUTOPOS, 1, .func = (void (*)())dupe, .args = BUF, .un_func = (void (*)())un_dupe, .un_args = BUF_FIL_GLO }, // required { AUTOPOS, 1, .func = (void (*)())add_const, .args = BUF, .un_func = (void (*)())un_add_const, .un_args = BUF_FIL_GLO }, { AUTOPOS, 7, .func = (void (*)())rleft_const, .args = BUF_IND, .un_func = (void (*)())un_rleft_const, .un_args = BUF_FIL_GLO_LOC }, { AUTOPOS, 112, .func = (void (*)())fble_rot, .args = BUF_IND, .un_func = (void (*)())un_fble_rot, .un_args = BUF_FIL_GLO_LOC }, //~ { AUTOPOS, 1, .func = (void (*)())add_pattern, .args = BUF }, //~ { 128, 128, .func = (void (*)())dupe, .args = BUF_IND } }; int fill_buffer(short buf[BUFLEN], FILE * inc){ log_debug("fb-start"); int eob_pos = -1; int eof_pos = -1; for(int i = 0; i < BUFLEN; i++){ if(buf[i] == DP_EOF){ // nothing left to fill from log_debug("nothing to refill"); eof_pos = i; return eof_pos; } if(buf[i] == DP_EOB){ // start filling at i log_debug("refill buffer at %d", i); if( i == BUFLEN - 1) return eof_pos; // already full eob_pos = i; break; // quit at first EOB, because there's always one at the end } } if(eob_pos == -1){ log_error("eob_pos was not set, abort abort!"); exit(eob_pos); } log_debug("eob_pos: %d", eob_pos); for(int i = eob_pos; i < BUFLEN; i++){ int getrc = fgetc(inc); if(getrc == EOF){ buf[i] = DP_EOF; eof_pos = i; break; } else { buf[i] = getrc; } } log_debug("fb-end"); return eof_pos; } // create full dispatch table int init_fops(flist *f){ int start = 0; 
for(int i = 0; i < (sizeof(ops)/sizeof(ops[0])); i++){ //~ int start = i; //~ if (ops[i].pos != AUTOPOS) //~ start = ops[i].pos; int add = start; for(int j = add; j < add + ops[i].len; j++){ log_debug("inserting ops[%d] at f[%d]", i, j); if ( f[j].func != NULL ){ log_error("failed to insert ops[%d] at f[%d], as something's already there", i, ops[i].pos); return 1; } f[j] = ops[i]; f[j].index = j - add; start++; } } return 0; } int process(FILE * inc, FILE * out, int extract){ uint64_t outlen = 0; short buf[BUFLEN] = { 0 }; buf[0] = DP_EOB; flist f_ops[MODLEN] = { 0 }; int rc = init_fops(f_ops); if (rc != 0) return rc; fill_buffer(buf, inc); // consume until all but EOF have been processed and written out while(buf[0] != DP_EOF){ // debug if(log_get_level() <= LOG_DEBUG){ printf("buf: "); for(int i = 0; i < BUFLEN; i++){ if(buf[i] > 255) break; printf("%x ", buf[i]); } printf("\n"); } unit units[MODLEN] = { 0 }; // extraction funcs unit (*buf_fil_glo_func)(short *, FILE *, short) = NULL; unit (*buf_fil_glo_loc_func)(short *, FILE *, short, short) = NULL; // compression funcs unit (*buf_func)(short *) = NULL; unit (*buf_ind_func)(short *, short) = NULL; int failure = 0; if(extract){ #pragma omp parallel for for(int i = 0; i < MODLEN; i++){ if(f_ops[i].un_func == NULL) continue; // unused op switch(f_ops[i].un_args){ case BUF_FIL_GLO: buf_fil_glo_func = (unit (*)(short *, FILE *, short))f_ops[i].un_func; units[i] = buf_fil_glo_func(buf, out, i); break; case BUF_FIL_GLO_LOC: buf_fil_glo_loc_func = (unit (*)(short *, FILE *, short, short))f_ops[i].un_func; units[i] = buf_fil_glo_loc_func(buf, out, i, f_ops[i].index); break; default: log_error("unknown f_ops.args %d", f_ops[i].args); failure = 1; } } } else { #pragma omp parallel for for(int i = 0; i < MODLEN; i++){ if(f_ops[i].func == NULL) continue; // unused op switch(f_ops[i].args){ case BUF: buf_func = (unit (*)(short *))f_ops[i].func; units[i] = buf_func(buf); break; case BUF_IND: buf_ind_func = (unit (*)(short *, 
short))f_ops[i].func; units[i] = buf_ind_func(buf, f_ops[i].index); break; default: log_error("unknown f_ops.args %d", f_ops[i].args); failure = 1; } } } if(failure) return failure; // find most compressed unit int best = 0; unit best_unit = units[best]; for(int i = 0; i < MODLEN; i++){ if( (units[i].consumed - units[i].payload_used) > (best_unit.consumed - best_unit.payload_used) ){ log_debug("found new best units[%d]", i); best = i; best_unit = units[best]; } } int consume = 1; int outbytes = 1; if(best_unit.consumed > 0){ consume = best_unit.consumed; if(extract){ outbytes = best_unit.payload_used; } else { // write out new chunk if(log_get_level() <= LOG_DEBUG) printf("out:"); for(int i = 0; i < DELLEN; i++){ fputc(best, out); if(log_get_level() <= LOG_DEBUG) printf("%3x", best); } for(int i = 0; i < best_unit.payload_used; i++){ fputc(best_unit.payload[i], out); if(log_get_level() <= LOG_DEBUG) printf("%3x", best_unit.payload[i]); } if(log_get_level() <= LOG_DEBUG) printf("\n"); outbytes = DELLEN + best_unit.payload_used; } } else { fputc(buf[0], out); } outlen += outbytes; // big file prog //~ static int deleteme = 0; //~ deleteme += outbytes; //~ if(deleteme > 1000000){ //~ deleteme %= 1000000; //~ log_info("outlen: %lu\n", outlen); //~ } log_debug("outlen: %lu", outlen); // make room in buffer void *mm_rc = NULL; mm_rc = memmove(buf, buf+consume, sizeof(short) * (BUFLEN - consume) ); if (mm_rc != buf){ log_error("couldn't memmove after dupe"); return 1; } // set new EOB int new_eob = BUFLEN - ((buf + consume) - buf); log_debug("new EOB - %d", new_eob); buf[new_eob] = DP_EOB; // refill buffer fill_buffer(buf, inc); // print new buffer if(log_get_level() <= LOG_DEBUG){ printf("buf: "); for(int i = 0; i < BUFLEN; i++){ if(buf[i] > 255) break; printf("%x ", buf[i]); } printf("\n"); } } log_info("outlen -- %luB", outlen); return 0; }
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 16; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=Nt-1;t1++) { lbp=ceild(t1+1,2); ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1-2,4),ceild(8*t2-Nz-3,16));t3<=min(floord(4*Nt+Ny-9,16),floord(4*t1+Ny-1,16));t3++) { for (t4=max(max(ceild(t1-30,32),ceild(8*t2-Nz-115,128)),ceild(16*t3-Ny-115,128));t4<=min(min(floord(4*Nt+Nx-9,128),floord(4*t1+Nx-1,128)),floord(16*t3+Nx+3,128));t4++) { for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(16*t3-Ny+5,4)),ceild(128*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),4*t3+2),32*t4+30);t5++) { for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(16*t3,4*t5+4);t7<=min(16*t3+15,4*t5+Ny-5);t7++) { lbv=max(128*t4,4*t5+4); ubv=min(128*t4+127,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ 
(-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
convolution_sgemm_pack4to1.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Tiled SGEMM over an im2col buffer using MIPS MSA intrinsics.
// "pack4to1" (from the file naming convention): the input columns arrive in
// pack-4 layout (4 floats per element, elemsize 4u*4) while each output
// channel is written unpacked — TODO confirm against the callers of this header.
//
// bottom_im2col : w = size (spatial positions), h = maxk (kernel taps), c = inch
// top_blob      : output feature map, one channel per output channel
// kernel        : weights pre-interleaved by
//                 convolution_im2col_sgemm_transform_kernel_pack4to1_msa below
// _bias         : optional per-output-channel bias (may be empty)
static void im2col_sgemm_pack4to1_msa(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // Mat bottom_im2col(size, maxk, inch, 4u * 4, 4, opt.workspace_allocator);

    const int size = bottom_im2col.w;
    const int maxk = bottom_im2col.h;
    const int inch = bottom_im2col.c;

    const int outch = top_blob.c;

    const float* bias = _bias;

    // Repack bottom_im2col into tmp so that each 12/8/4/1-column tile is
    // contiguous in memory for the GEMM inner loop below. The channel count of
    // tmp is the number of tiles: full 12s, then an 8, then a 4, then singles.
    Mat tmp;
    if (size >= 12)
        tmp.create(12 * maxk, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + size % 12 % 4, 4u * 4, 4, opt.workspace_allocator);
    else if (size >= 8)
        tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + size % 4, 4u * 4, 4, opt.workspace_allocator);
    else if (size >= 4)
        tmp.create(4 * maxk, inch, size / 4 + size % 4, 4u * 4, 4, opt.workspace_allocator);
    else
        tmp.create(maxk, inch, size, 4u * 4, 4, opt.workspace_allocator);
    {
        int remain_size_start = 0;
        int nn_size = size / 12;

        // ---- repack tiles of 12 columns ----
#pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 12;

            float* tmpptr = tmp.channel(i / 12);

            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;

                for (int k = 0; k < maxk; k++)
                {
                    // transpose 4x12
                    // 12 pack-4 columns are loaded and transposed via
                    // interleave-right/left (ilvr/ilvl) on 32-bit then 64-bit
                    // lanes, so the 4 packed channels become 4 rows of 12.
                    v4f32 _r0 = (v4f32)__msa_ld_w(img0, 0);
                    v4f32 _r1 = (v4f32)__msa_ld_w(img0 + 4, 0);
                    v4f32 _r2 = (v4f32)__msa_ld_w(img0 + 4 * 2, 0);
                    v4f32 _r3 = (v4f32)__msa_ld_w(img0 + 4 * 3, 0);
                    v4f32 _r4 = (v4f32)__msa_ld_w(img0 + 4 * 4, 0);
                    v4f32 _r5 = (v4f32)__msa_ld_w(img0 + 4 * 5, 0);
                    v4f32 _r6 = (v4f32)__msa_ld_w(img0 + 4 * 6, 0);
                    v4f32 _r7 = (v4f32)__msa_ld_w(img0 + 4 * 7, 0);
                    v4f32 _r8 = (v4f32)__msa_ld_w(img0 + 4 * 8, 0);
                    v4f32 _r9 = (v4f32)__msa_ld_w(img0 + 4 * 9, 0);
                    v4f32 _ra = (v4f32)__msa_ld_w(img0 + 4 * 10, 0);
                    v4f32 _rb = (v4f32)__msa_ld_w(img0 + 4 * 11, 0);

                    v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4);
                    v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4);
                    v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6);
                    v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6);
                    v4i32 _r89r = __msa_ilvr_w((v4i32)_r9, (v4i32)_r8);
                    v4i32 _r89l = __msa_ilvl_w((v4i32)_r9, (v4i32)_r8);
                    v4i32 _rabr = __msa_ilvr_w((v4i32)_rb, (v4i32)_ra);
                    v4i32 _rabl = __msa_ilvl_w((v4i32)_rb, (v4i32)_ra);
                    v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r);
                    v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r);
                    v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l);
                    v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l);
                    v2i64 _r89ab_0 = __msa_ilvr_d((v2i64)_rabr, (v2i64)_r89r);
                    v2i64 _r89ab_1 = __msa_ilvl_d((v2i64)_rabr, (v2i64)_r89r);
                    v2i64 _r89ab_2 = __msa_ilvr_d((v2i64)_rabl, (v2i64)_r89l);
                    v2i64 _r89ab_3 = __msa_ilvl_d((v2i64)_rabl, (v2i64)_r89l);

                    __msa_st_w((v4i32)_r0123_0, tmpptr, 0);
                    __msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0);
                    __msa_st_w((v4i32)_r89ab_0, tmpptr + 4 * 2, 0);
                    __msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 3, 0);
                    __msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 4, 0);
                    __msa_st_w((v4i32)_r89ab_1, tmpptr + 4 * 5, 0);
                    __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 6, 0);
                    __msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 7, 0);
                    __msa_st_w((v4i32)_r89ab_2, tmpptr + 4 * 8, 0);
                    __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 9, 0);
                    __msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 10, 0);
                    __msa_st_w((v4i32)_r89ab_3, tmpptr + 4 * 11, 0);

                    img0 += size * 4;   // next kernel tap (rows of bottom_im2col)
                    tmpptr += 48;       // 12 columns x 4 packed channels
                }
            }
        }

        remain_size_start += nn_size * 12;
        nn_size = (size - remain_size_start) >> 3;

        // ---- repack one tile of 8 columns (if any) ----
#pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 8;

            float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);

            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;

                for (int k = 0; k < maxk; k++)
                {
                    // transpose 4x8
                    v4f32 _r0 = (v4f32)__msa_ld_w(img0, 0);
                    v4f32 _r1 = (v4f32)__msa_ld_w(img0 + 4, 0);
                    v4f32 _r2 = (v4f32)__msa_ld_w(img0 + 4 * 2, 0);
                    v4f32 _r3 = (v4f32)__msa_ld_w(img0 + 4 * 3, 0);
                    v4f32 _r4 = (v4f32)__msa_ld_w(img0 + 4 * 4, 0);
                    v4f32 _r5 = (v4f32)__msa_ld_w(img0 + 4 * 5, 0);
                    v4f32 _r6 = (v4f32)__msa_ld_w(img0 + 4 * 6, 0);
                    v4f32 _r7 = (v4f32)__msa_ld_w(img0 + 4 * 7, 0);

                    v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4);
                    v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4);
                    v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6);
                    v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6);
                    v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r);
                    v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r);
                    v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l);
                    v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l);

                    __msa_st_w((v4i32)_r0123_0, tmpptr, 0);
                    __msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0);
                    __msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 2, 0);
                    __msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 3, 0);
                    __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 4, 0);
                    __msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 5, 0);
                    __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 6, 0);
                    __msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 7, 0);

                    img0 += size * 4;
                    tmpptr += 32;
                }
            }
        }

        remain_size_start += nn_size << 3;
        nn_size = (size - remain_size_start) >> 2;

        // ---- repack one tile of 4 columns (if any) ----
#pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 4;

            float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);

            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;

                for (int k = 0; k < maxk; k++)
                {
                    // transpose 4x4
                    v4f32 _r0 = (v4f32)__msa_ld_w(img0, 0);
                    v4f32 _r1 = (v4f32)__msa_ld_w(img0 + 4, 0);
                    v4f32 _r2 = (v4f32)__msa_ld_w(img0 + 4 * 2, 0);
                    v4f32 _r3 = (v4f32)__msa_ld_w(img0 + 4 * 3, 0);

                    v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
                    v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);

                    __msa_st_w((v4i32)_r0123_0, tmpptr, 0);
                    __msa_st_w((v4i32)_r0123_1, tmpptr + 4, 0);
                    __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 2, 0);
                    __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 3, 0);

                    img0 += size * 4;
                    tmpptr += 16;
                }
            }
        }

        remain_size_start += nn_size << 2;

        // ---- repack leftover single columns (no transpose needed) ----
#pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < size; i++)
        {
            float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);

            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;

                for (int k = 0; k < maxk; k++)
                {
                    v4f32 _val = (v4f32)__msa_ld_w(img0, 0);
                    __msa_st_w((v4i32)_val, tmpptr, 0);

                    img0 += size * 4;
                    tmpptr += 4;
                }
            }
        }
    }

    // ---- GEMM: 4 output channels at a time, remainder one at a time ----
    int nn_outch = outch / 4;
    int remain_outch_start = nn_outch * 4;

#pragma omp parallel for num_threads(opt.num_threads)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 4;

        float* outptr0 = top_blob.channel(p);
        float* outptr1 = top_blob.channel(p + 1);
        float* outptr2 = top_blob.channel(p + 2);
        float* outptr3 = top_blob.channel(p + 3);

        const float zeros[4] = {0.f};
        const float* biasptr = bias ? bias + p : zeros;

        int i = 0;
        // 4 outch x 12 columns: twelve accumulators, three per output channel
        for (; i + 11 < size; i += 12)
        {
            const float* tmpptr = tmp.channel(i / 12);
            const float* kptr0 = kernel.channel(p / 4);

            int nn = inch * maxk * 4; // inch always > 0

            v4i32 _bias = __msa_ld_w(biasptr, 0);
            v4f32 _sum0 = (v4f32)__msa_splati_w(_bias, 0);
            v4f32 _sum1 = (v4f32)__msa_splati_w(_bias, 0);
            v4f32 _sum2 = (v4f32)__msa_splati_w(_bias, 0);
            v4f32 _sum3 = (v4f32)__msa_splati_w(_bias, 1);
            v4f32 _sum4 = (v4f32)__msa_splati_w(_bias, 1);
            v4f32 _sum5 = (v4f32)__msa_splati_w(_bias, 1);
            v4f32 _sum6 = (v4f32)__msa_splati_w(_bias, 2);
            v4f32 _sum7 = (v4f32)__msa_splati_w(_bias, 2);
            v4f32 _sum8 = (v4f32)__msa_splati_w(_bias, 2);
            v4f32 _sum9 = (v4f32)__msa_splati_w(_bias, 3);
            v4f32 _suma = (v4f32)__msa_splati_w(_bias, 3);
            v4f32 _sumb = (v4f32)__msa_splati_w(_bias, 3);

            for (int j = 0; j < nn; j++)
            {
                __builtin_prefetch(tmpptr + 96);
                __builtin_prefetch(kptr0 + 32);
                v4f32 _val0 = (v4f32)__msa_ld_w(tmpptr, 0);
                v4f32 _val1 = (v4f32)__msa_ld_w(tmpptr + 4, 0);
                v4f32 _val2 = (v4f32)__msa_ld_w(tmpptr + 8, 0);
                v4i32 _w0123 = __msa_ld_w(kptr0, 0);
                _sum0 = __msa_fmadd_w(_sum0, _val0, (v4f32)__msa_splati_w(_w0123, 0));
                _sum1 = __msa_fmadd_w(_sum1, _val1, (v4f32)__msa_splati_w(_w0123, 0));
                _sum2 = __msa_fmadd_w(_sum2, _val2, (v4f32)__msa_splati_w(_w0123, 0));
                _sum3 = __msa_fmadd_w(_sum3, _val0, (v4f32)__msa_splati_w(_w0123, 1));
                _sum4 = __msa_fmadd_w(_sum4, _val1, (v4f32)__msa_splati_w(_w0123, 1));
                _sum5 = __msa_fmadd_w(_sum5, _val2, (v4f32)__msa_splati_w(_w0123, 1));
                _sum6 = __msa_fmadd_w(_sum6, _val0, (v4f32)__msa_splati_w(_w0123, 2));
                _sum7 = __msa_fmadd_w(_sum7, _val1, (v4f32)__msa_splati_w(_w0123, 2));
                _sum8 = __msa_fmadd_w(_sum8, _val2, (v4f32)__msa_splati_w(_w0123, 2));
                _sum9 = __msa_fmadd_w(_sum9, _val0, (v4f32)__msa_splati_w(_w0123, 3));
                _suma = __msa_fmadd_w(_suma, _val1, (v4f32)__msa_splati_w(_w0123, 3));
                _sumb = __msa_fmadd_w(_sumb, _val2, (v4f32)__msa_splati_w(_w0123, 3));

                tmpptr += 12;
                kptr0 += 4;
            }

            __msa_st_w((v4i32)_sum0, outptr0, 0);
            __msa_st_w((v4i32)_sum1, outptr0 + 4, 0);
            __msa_st_w((v4i32)_sum2, outptr0 + 8, 0);
            __msa_st_w((v4i32)_sum3, outptr1, 0);
            __msa_st_w((v4i32)_sum4, outptr1 + 4, 0);
            __msa_st_w((v4i32)_sum5, outptr1 + 8, 0);
            __msa_st_w((v4i32)_sum6, outptr2, 0);
            __msa_st_w((v4i32)_sum7, outptr2 + 4, 0);
            __msa_st_w((v4i32)_sum8, outptr2 + 8, 0);
            __msa_st_w((v4i32)_sum9, outptr3, 0);
            __msa_st_w((v4i32)_suma, outptr3 + 4, 0);
            __msa_st_w((v4i32)_sumb, outptr3 + 8, 0);

            outptr0 += 12;
            outptr1 += 12;
            outptr2 += 12;
            outptr3 += 12;
        }
        // 4 outch x 8 columns
        for (; i + 7 < size; i += 8)
        {
            const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
            const float* kptr0 = kernel.channel(p / 4);

            int nn = inch * maxk * 4; // inch always > 0

            v4i32 _bias = __msa_ld_w(biasptr, 0);
            v4f32 _sum0 = (v4f32)__msa_splati_w(_bias, 0);
            v4f32 _sum1 = (v4f32)__msa_splati_w(_bias, 0);
            v4f32 _sum2 = (v4f32)__msa_splati_w(_bias, 1);
            v4f32 _sum3 = (v4f32)__msa_splati_w(_bias, 1);
            v4f32 _sum4 = (v4f32)__msa_splati_w(_bias, 2);
            v4f32 _sum5 = (v4f32)__msa_splati_w(_bias, 2);
            v4f32 _sum6 = (v4f32)__msa_splati_w(_bias, 3);
            v4f32 _sum7 = (v4f32)__msa_splati_w(_bias, 3);

            for (int j = 0; j < nn; j++)
            {
                __builtin_prefetch(tmpptr + 64);
                __builtin_prefetch(kptr0 + 32);
                v4f32 _val0 = (v4f32)__msa_ld_w(tmpptr, 0);
                v4f32 _val1 = (v4f32)__msa_ld_w(tmpptr + 4, 0);
                v4i32 _w0123 = __msa_ld_w(kptr0, 0);
                _sum0 = __msa_fmadd_w(_sum0, _val0, (v4f32)__msa_splati_w(_w0123, 0));
                _sum1 = __msa_fmadd_w(_sum1, _val1, (v4f32)__msa_splati_w(_w0123, 0));
                _sum2 = __msa_fmadd_w(_sum2, _val0, (v4f32)__msa_splati_w(_w0123, 1));
                _sum3 = __msa_fmadd_w(_sum3, _val1, (v4f32)__msa_splati_w(_w0123, 1));
                _sum4 = __msa_fmadd_w(_sum4, _val0, (v4f32)__msa_splati_w(_w0123, 2));
                _sum5 = __msa_fmadd_w(_sum5, _val1, (v4f32)__msa_splati_w(_w0123, 2));
                _sum6 = __msa_fmadd_w(_sum6, _val0, (v4f32)__msa_splati_w(_w0123, 3));
                _sum7 = __msa_fmadd_w(_sum7, _val1, (v4f32)__msa_splati_w(_w0123, 3));

                tmpptr += 8;
                kptr0 += 4;
            }

            __msa_st_w((v4i32)_sum0, outptr0, 0);
            __msa_st_w((v4i32)_sum1, outptr0 + 4, 0);
            __msa_st_w((v4i32)_sum2, outptr1, 0);
            __msa_st_w((v4i32)_sum3, outptr1 + 4, 0);
            __msa_st_w((v4i32)_sum4, outptr2, 0);
            __msa_st_w((v4i32)_sum5, outptr2 + 4, 0);
            __msa_st_w((v4i32)_sum6, outptr3, 0);
            __msa_st_w((v4i32)_sum7, outptr3 + 4, 0);

            outptr0 += 8;
            outptr1 += 8;
            outptr2 += 8;
            outptr3 += 8;
        }
        // 4 outch x 4 columns
        for (; i + 3 < size; i += 4)
        {
            const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
            const float* kptr0 = kernel.channel(p / 4);

            int nn = inch * maxk * 4; // inch always > 0

            v4i32 _bias = __msa_ld_w(biasptr, 0);
            v4f32 _sum0 = (v4f32)__msa_splati_w(_bias, 0);
            v4f32 _sum1 = (v4f32)__msa_splati_w(_bias, 1);
            v4f32 _sum2 = (v4f32)__msa_splati_w(_bias, 2);
            v4f32 _sum3 = (v4f32)__msa_splati_w(_bias, 3);

            for (int j = 0; j < nn; j++)
            {
                __builtin_prefetch(tmpptr + 32);
                __builtin_prefetch(kptr0 + 32);
                v4f32 _val0 = (v4f32)__msa_ld_w(tmpptr, 0);
                v4i32 _w0123 = __msa_ld_w(kptr0, 0);
                _sum0 = __msa_fmadd_w(_sum0, _val0, (v4f32)__msa_splati_w(_w0123, 0));
                _sum1 = __msa_fmadd_w(_sum1, _val0, (v4f32)__msa_splati_w(_w0123, 1));
                _sum2 = __msa_fmadd_w(_sum2, _val0, (v4f32)__msa_splati_w(_w0123, 2));
                _sum3 = __msa_fmadd_w(_sum3, _val0, (v4f32)__msa_splati_w(_w0123, 3));

                tmpptr += 4;
                kptr0 += 4;
            }

            __msa_st_w((v4i32)_sum0, outptr0, 0);
            __msa_st_w((v4i32)_sum1, outptr1, 0);
            __msa_st_w((v4i32)_sum2, outptr2, 0);
            __msa_st_w((v4i32)_sum3, outptr3, 0);

            outptr0 += 4;
            outptr1 += 4;
            outptr2 += 4;
            outptr3 += 4;
        }
        // 4 outch x 1 column: one accumulator, lane l holds output channel p+l
        for (; i < size; i++)
        {
            const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
            const float* kptr0 = kernel.channel(p / 4);

            int nn = inch * maxk * 4; // inch always > 0

            v4f32 _sum = (v4f32)__msa_ld_w(biasptr, 0);

            for (int j = 0; j < nn; j++)
            {
                __builtin_prefetch(tmpptr + 8);
                __builtin_prefetch(kptr0 + 32);
                v4f32 _val0 = __msa_fill_w_f32(*tmpptr++);
                v4f32 _w0 = (v4f32)__msa_ld_w(kptr0, 0);
                _sum = __msa_fmadd_w(_sum, _val0, _w0);
                kptr0 += 4;
            }

            outptr0[0] = _sum[0];
            outptr1[0] = _sum[1];
            outptr2[0] = _sum[2];
            outptr3[0] = _sum[3];

            outptr0 += 1;
            outptr1 += 1;
            outptr2 += 1;
            outptr3 += 1;
        }
    }

    // ---- remaining output channels, one at a time ----
#pragma omp parallel for num_threads(opt.num_threads)
    for (int p = remain_outch_start; p < outch; p++)
    {
        float* outptr0 = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        int i = 0;
        for (; i + 11 < size; i += 12)
        {
            const float* tmpptr = tmp.channel(i / 12);
            const float* kptr0 = kernel.channel(p / 4 + p % 4);

            int nn = inch * maxk * 4; // inch always > 0

            v4f32 _sum0 = __msa_fill_w_f32(bias0);
            v4f32 _sum1 = __msa_fill_w_f32(bias0);
            v4f32 _sum2 = __msa_fill_w_f32(bias0);

            for (int j = 0; j < nn; j++)
            {
                __builtin_prefetch(tmpptr + 64);
                __builtin_prefetch(kptr0 + 8);
                v4f32 _val0 = (v4f32)__msa_ld_w(tmpptr, 0);
                v4f32 _val1 = (v4f32)__msa_ld_w(tmpptr + 4, 0);
                v4f32 _val2 = (v4f32)__msa_ld_w(tmpptr + 8, 0);
                v4f32 _w0 = __msa_fill_w_f32(*kptr0);
                _sum0 = __msa_fmadd_w(_sum0, _w0, _val0);
                _sum1 = __msa_fmadd_w(_sum1, _w0, _val1);
                _sum2 = __msa_fmadd_w(_sum2, _w0, _val2);

                tmpptr += 12;
                kptr0 += 1;
            }

            __msa_st_w((v4i32)_sum0, outptr0, 0);
            __msa_st_w((v4i32)_sum1, outptr0 + 4, 0);
            __msa_st_w((v4i32)_sum2, outptr0 + 8, 0);
            outptr0 += 12;
        }
        for (; i + 7 < size; i += 8)
        {
            const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
            const float* kptr0 = kernel.channel(p / 4 + p % 4);

            int nn = inch * maxk * 4; // inch always > 0

            v4f32 _sum0 = __msa_fill_w_f32(bias0);
            v4f32 _sum1 = __msa_fill_w_f32(bias0);

            for (int j = 0; j < nn; j++)
            {
                __builtin_prefetch(tmpptr + 32);
                __builtin_prefetch(kptr0 + 8);
                v4f32 _val0 = (v4f32)__msa_ld_w(tmpptr, 0);
                v4f32 _val1 = (v4f32)__msa_ld_w(tmpptr + 4, 0);
                v4f32 _w0 = __msa_fill_w_f32(*kptr0);
                _sum0 = __msa_fmadd_w(_sum0, _w0, _val0);
                _sum1 = __msa_fmadd_w(_sum1, _w0, _val1);

                tmpptr += 8;
                kptr0 += 1;
            }

            __msa_st_w((v4i32)_sum0, outptr0, 0);
            __msa_st_w((v4i32)_sum1, outptr0 + 4, 0);
            outptr0 += 8;
        }
        for (; i + 3 < size; i += 4)
        {
            const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
            const float* kptr0 = kernel.channel(p / 4 + p % 4);

            int nn = inch * maxk * 4; // inch always > 0

            v4f32 _sum0 = __msa_fill_w_f32(bias0);

            for (int j = 0; j < nn; j++)
            {
                __builtin_prefetch(tmpptr + 16);
                __builtin_prefetch(kptr0 + 8);
                v4f32 _val0 = (v4f32)__msa_ld_w(tmpptr, 0);
                v4f32 _w0 = __msa_fill_w_f32(*kptr0);
                _sum0 = __msa_fmadd_w(_sum0, _w0, _val0);

                tmpptr += 4;
                kptr0 += 1;
            }

            __msa_st_w((v4i32)_sum0, outptr0, 0);
            outptr0 += 4;
        }
        for (; i < size; i++)
        {
            const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
            const float* kptr0 = kernel.channel(p / 4 + p % 4);

            int nn = inch * maxk; // inch always > 0

            float sum0 = bias0;

            v4f32 _sum0 = (v4f32)__msa_fill_w(0);

            for (int j = 0; j < nn; j++)
            {
                __builtin_prefetch(tmpptr + 16);
                __builtin_prefetch(kptr0 + 16);
                v4f32 _val0 = (v4f32)__msa_ld_w(tmpptr, 0);
                v4f32 _w0 = (v4f32)__msa_ld_w(kptr0, 0);
                _sum0 = __msa_fmadd_w(_sum0, _val0, _w0);

                tmpptr += 4;
                kptr0 += 4;
            }

            // horizontal add collapses the 4-lane accumulator to a scalar
            sum0 += __msa_fhadd_w(_sum0);

            outptr0[0] = sum0;
            outptr0 += 1;
        }
    }
}

// Pre-interleave convolution weights for the sgemm above.
// src layout = maxk-inch-outch; dst = pb-pa-maxk-inch/pa-outch/pb with
// pa = 4 (input pack) and pb = 4 (output channel quad) — remainder output
// channels each get their own channel of kernel_tm.
static void convolution_im2col_sgemm_transform_kernel_pack4to1_msa(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    // interleave
    // src = maxk-inch-outch
    // dst = pb-pa-maxk-inch/pa-outch/pb
    Mat kernel = _kernel.reshape(maxk, inch, outch);
    kernel_tm.create(4 * 4 * maxk, inch / 4, outch / 4 + outch % 4);

    int q = 0;
    for (; q + 3 < outch; q += 4)
    {
        float* g00 = kernel_tm.channel(q / 4);

        for (int p = 0; p + 3 < inch; p += 4)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 4; j++)
                    {
                        const float* k00 = kernel.channel(q + j).row(p + i);
                        g00[0] = k00[k];
                        g00++;
                    }
                }
            }
        }
    }
    // leftover output channels (outch % 4): only the input pack is interleaved
    for (; q < outch; q++)
    {
        const Mat k0 = kernel.channel(q);

        float* g00 = kernel_tm.channel(q / 4 + q % 4);

        for (int p = 0; p + 3 < inch; p += 4)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int j = 0; j < 4; j++)
                {
                    const float* k00 = k0.row(p + j);
                    g00[0] = k00[k];
                    g00++;
                }
            }
        }
    }
}

// Entry point: builds the im2col buffer from the pack-4 input blob, then
// calls the sgemm kernel above. Parameters mirror ncnn's convolution options
// (kernel extent, dilation, stride); padding is assumed already applied by
// the caller — TODO confirm (bottom_blob is read without bounds checks).
static void convolution_im2col_sgemm_pack4to1_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh;

    const int maxk = kernel_w * kernel_h;

    // im2col
    Mat bottom_im2col(size, maxk, inch, 4u * 4, 4, opt.workspace_allocator);
    {
        // gap: elements to skip at the end of each output row to reach the
        // start of the next input row (in pack-4 floats)
        const int gap = (w * stride_h - outw * stride_w) * 4;

#pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            float* ptr = bottom_im2col.channel(p);

            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    const float* sptr = img.row(dilation_h * u) + dilation_w * v * 4;

                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        for (; j < outw; j++)
                        {
                            v4f32 _val = (v4f32)__msa_ld_w(sptr, 0);
                            __msa_st_w((v4i32)_val, ptr, 0);

                            sptr += stride_w * 4;
                            ptr += 4;
                        }

                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_pack4to1_msa(bottom_im2col, top_blob, kernel, _bias, opt);
}
OPENMP_bail_out.c
/********************************************************************** Name: bail_out Purpose: Exit gracefully when an OpenMP thread has encountered an error inside a parallel region Arguments: error code (zero for no error). Returns: nothing, but the program terminates with a nonzero exit status Notes: This function must be called by all threads in the team. Multiple threads may have tried to update the shared error variable at the same time, so this needs to be done atomically if we want to guarantee that the value of 1 is put into error. In our case, however, we merely want to know if the value is different from zero, so we do not need atomicity. History: Written by Rob Van der Wijngaart, July 2006 **********************************************************************/ #include <par-res-kern_general.h> void bail_out(int error) { #pragma omp barrier if (error != 0) exit(EXIT_FAILURE); }
micmat.c
// Copyright (c) 2014, Oren Rippel and Ryan P. Adams // All rights reserved. // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include <stdio.h> #include <stdlib.h> #include <malloc.h> #include <stdint.h> #include <omp.h> #include <mkl.h> #include <math.h> #include <offload.h> #ifndef MIC_DEV #define MIC_DEV 0 #endif #define ALLOC alloc_if(1) free_if(0) #define FREE alloc_if(0) free_if(1) #define ALLOC_FREE alloc_if(1) free_if(1) #define REUSE alloc_if(0) free_if(0) void tester(){ int N = 100000; float *A = _mm_malloc(N*sizeof(float), 64); float *B = _mm_malloc(N*sizeof(float), 64); #pragma omp parallel for for (int n = 0; n < N; n++) A[n] = 0.f; #pragma omp parallel for for (int n = 0; n < N; n++) B[n] = A[n]; float S = 0.f; #pragma omp parallel for for (int n = 0; n < N; n++) S = S + B[n]; printf("%f", S); } float *allocate_host(int N){ // float *A = _mm_malloc(N*sizeof(float), 64); float *A = (float *) malloc(N*sizeof(float)); if (A == NULL){ fprintf(stderr, "Out of memory.\n"); } return A; } int *allocate_host_int(int N){ int *A = (int *) malloc(N*sizeof(int)); if (A == NULL){ fprintf(stderr, "Out of memory.\n"); } return A; } void fill_zeros(int N, float *restrict A, int offloaded){ #pragma offload target(mic:MIC_DEV) if(offloaded == 1) \ in(A:length(0) REUSE) { int n; #pragma omp parallel for private(n) for (n = 0; n < N; n++) A[n] = 0.f; } } void fill_zeros_int(int N, int *restrict A, int offloaded){ #pragma offload target(mic:MIC_DEV) if(offloaded == 1) \ in(A:length(0) REUSE) { int n; #pragma omp parallel for private(n) for (n = 0; n < N; n++) A[n] = 0; } } void fill_ones(int N, float *restrict A, int offloaded){ #pragma offload target(mic:MIC_DEV) if(offloaded == 1) \ in(A:length(0) REUSE) { int i; #pragma omp parallel for private(i) for (i = 0; i < N; i++) A[i] = 1.f; } } // convert linear index to tensor index __attribute__((target(mic:MIC_DEV))) int ti(int a, int b, int c, int d, int B, int C, int D){ return (a*B*C*D + b*C*D + c*D + d); } // convert tensor index to linear index __attribute__((target(mic:MIC_DEV))) int it(int i, int dim, int B, int C, int D){ int s; if (dim == 
0) s = i / (B*C*D); else if (dim == 1) s = (i % (B*C*D)) / (C*D); else if (dim == 2) s = (i % (C*D)) / D; else if (dim == 3) s = i % D; return s; } __attribute__((target(mic:MIC_DEV))) float *zeros_mic(int N){ // float *restrict A = _mm_malloc(N*sizeof(float), 64); float *restrict A = (float *) malloc(N*sizeof(float)); int i; #pragma omp parallel for private(i) for (i = 0; i < N; i++) A[i] = 0.f; return A; } __attribute__((target(mic:MIC_DEV))) float *ones_mic(int N){ // float *restrict A = _mm_malloc(N*sizeof(float), 64); float *restrict A = (float *)malloc(N*sizeof(float)); int i; #pragma omp parallel for private(i) for (i = 0; i < N; i++) A[i] = 1.f; return A; } // VSLStreamStatePtr initialize_stream(VSLStreamStatePtr stream){ // #pragma offload target(mic:MIC_DEV) \ // inout(stream) // { // vslNewStream(&stream, VSL_BRNG_MCG31, 1); // } // return stream; // } void fill_randn(int skip_num, int N, float *restrict A, float mu, float sigma){ #pragma offload target(mic:MIC_DEV) \ in(A:length(0) REUSE) { VSLStreamStatePtr stream; vslNewStream(&stream, VSL_BRNG_MCG31, 1); vslSkipAheadStream(stream, skip_num); vsRngGaussian(VSL_RNG_METHOD_GAUSSIAN_ICDF, stream, N, A, mu, sigma); } } void fill_uniform(int skip_num, int N, float *restrict A){ #pragma offload target(mic:MIC_DEV) \ in(A:length(0) REUSE) { VSLStreamStatePtr stream; vslNewStream(&stream, VSL_BRNG_MCG31, 1); vslSkipAheadStream(stream, skip_num); vsRngUniform(VSL_RNG_METHOD_UNIFORM_STD, stream, N, A, 0.0, 1.0); } } float *slice_inds(int N, int *restrict indices, float *restrict A, int indices_offloaded, int offloaded){ float *restrict A_sliced = allocate_host(N); if (indices_offloaded == 0){ #pragma offload target(mic:MIC_DEV) if(offloaded == 1) \ in(A:length(0) REUSE) \ in(indices:length(N) ALLOC_FREE) \ nocopy(A_sliced:length(N) ALLOC) { int n; #pragma omp parallel for private(n) for (n = 0; n < N; n++) A_sliced[n] = A[indices[n]]; } } else{ #pragma offload target(mic:MIC_DEV) if(offloaded == 1)\ 
in(A:length(0) REUSE) \ in(indices:length(0) REUSE) \ nocopy(A_sliced:length(N) ALLOC) { int n; #pragma omp parallel for private(n) for (n = 0; n < N; n++) A_sliced[n] = A[indices[n]]; } } return A_sliced; } float *slice_cols(int N, int *restrict indices, int ROWS, int COLS, float *restrict A, int indices_offloaded, int offloaded){ float *restrict A_sliced = allocate_host(N*ROWS); if (indices_offloaded == 0){ #pragma offload target(mic:MIC_DEV) if(offloaded == 1) \ in(A:length(0) REUSE) \ in(indices:length(N) ALLOC_FREE) \ nocopy(A_sliced:length(N*ROWS) ALLOC) { int n, r; #pragma omp parallel for collapse(2) private(n, r) for (r = 0; r < ROWS; r++) for (n = 0; n < N; n++) A_sliced[r*N + n] = A[r*COLS + indices[n]]; } } else{ #pragma offload target(mic:MIC_DEV) if(offloaded == 1)\ in(A:length(0) REUSE) \ in(indices:length(0) REUSE) \ nocopy(A_sliced:length(N*ROWS) ALLOC) { int n, r; #pragma omp parallel for collapse(2) private(n, r) for (r = 0; r < ROWS; r++) for (n = 0; n < N; n++) A_sliced[r*N + n] = A[r*COLS + indices[n]]; } } return A_sliced; } float *slice_rows(int N, int *restrict indices, int ROWS, int COLS, float *restrict A, int indices_offloaded, int offloaded){ float *restrict A_sliced = allocate_host(N*COLS); if (indices_offloaded == 0){ #pragma offload target(mic:MIC_DEV) if(offloaded == 1) \ in(A:length(0) REUSE) \ in(indices:length(N) ALLOC_FREE) \ nocopy(A_sliced:length(N*COLS) ALLOC) { int c, n; #pragma omp parallel for collapse(2) private(n, c) for (c = 0; c < COLS; c++) for (n = 0; n < N; n++) A_sliced[n*COLS + c] = A[indices[n]*COLS + c]; } } else{ #pragma offload target(mic:MIC_DEV) if(offloaded == 1)\ in(A:length(0) REUSE) \ in(indices:length(0) REUSE) \ nocopy(A_sliced:length(N*COLS) ALLOC) { int c, n; #pragma omp parallel for collapse(2) private(n, c) for (c = 0; c < COLS; c++) for (n = 0; n < N; n++) A_sliced[n*COLS + c] = A[indices[n]*COLS + c]; } } return A_sliced; } void print_slice_small(int ROWS, int COLS, float *A){ printf("["); for 
(int r = 0; r < ROWS; r++){ printf("["); for (int c = 0; c < COLS; c++){ printf("%f", A[r*COLS + c]); if (c < COLS - 1) printf(" "); } printf("]"); if (r < ROWS-1) printf("\n"); } printf("]\n"); } void print_slice_big(float *A){ int COLS = 6, ROWS = 6; printf("["); for (int r = 0; r < 3; r++){ printf("["); for (int c = 0; c < 3; c++){ printf("%f ", A[r*COLS + c]); } printf("... "); for (int c = COLS-3; c < COLS; c++){ printf("%f", A[r*COLS + c]); if (c < COLS - 1) printf(" "); } printf("]\n"); } printf("...\n"); for (int r = ROWS-3; r < ROWS; r++){ printf("["); for (int c = 0; c < 3; c++){ printf("%f ", A[r*COLS + c]); } printf("... "); for (int c = COLS-3; c < COLS; c++){ printf("%f", A[r*COLS + c]); if (c < COLS - 1) printf(" "); } printf("]"); if (r < ROWS-1) printf("\n"); } printf("]"); printf("\n"); } void print_slice_big_col(int ROWS, float *A){ int COLS = 6; printf("["); for (int r = 0; r < ROWS; r++){ printf("["); for (int c = 0; c < 3; c++){ printf("%f", A[r*COLS + c]); if (c < COLS - 1) printf(" "); } printf("... 
"); for (int c = COLS-3; c < COLS; c++){ printf("%f", A[r*COLS + c]); if (c < COLS - 1) printf(" "); } printf("]"); if (r < ROWS-1) printf("\n"); } printf("]"); printf("\n"); } void print_slice_big_row(int COLS, float *A){ int ROWS = 6; printf("["); for (int r = 0; r < 3; r++){ printf("["); for (int c = 0; c < COLS; c++){ printf("%f", A[r*COLS + c]); if (c < COLS - 1) printf(" "); } printf("]\n"); } printf("...\n"); for (int r = ROWS-3; r < ROWS; r++){ printf("["); for (int c = 0; c < COLS; c++){ printf("%f", A[r*COLS + c]); if (c < COLS - 1) printf(" "); } printf("]"); if (r < ROWS-1) printf("\n"); } printf("]"); printf("\n"); } void print_slice(int ROWS, int COLS, float *A, int offloaded){ float *restrict B; if (ROWS <= 6 && COLS <= 6){ B = allocate_host(ROWS*COLS); #pragma offload target(mic:MIC_DEV) if(offloaded == 1) \ in(A:length(0) REUSE) \ out(B:length(ROWS*COLS) ALLOC_FREE) { for (int n = 0; n < ROWS*COLS; n++) B[n] = A[n]; } print_slice_small(ROWS, COLS, B); } else if (ROWS <= 6 && COLS > 6){ B = allocate_host(6*ROWS); #pragma offload target(mic:MIC_DEV) if(offloaded == 1) \ in(A:length(0) REUSE) \ out(B:length(6*ROWS) ALLOC_FREE) { for (int r = 0; r < ROWS; r++) for (int c = 0; c < 3; c++) B[r*6 + c] = A[r*COLS + c]; for (int r = 0; r < ROWS; r++) for (int c = COLS-3; c < COLS; c++) B[r*6 + c-COLS+6] = A[r*COLS + c]; } print_slice_big_col(ROWS, B); } else if (ROWS > 6 && COLS <= 6){ B = allocate_host(6*COLS); #pragma offload target(mic:MIC_DEV) if(offloaded == 1) \ in(A:length(0) REUSE) \ out(B:length(6*COLS) ALLOC_FREE) { for (int r = 0; r < 3; r++) for (int c = 0; c < COLS; c++) B[r*COLS + c] = A[r*COLS + c]; for (int r = ROWS-3; r < ROWS; r++) for (int c = 0; c < COLS; c++) B[(r-ROWS+6)*COLS + c] = A[r*COLS + c]; } print_slice_big_row(COLS, B); } else { B = allocate_host(36); #pragma offload target(mic:MIC_DEV) if(offloaded == 1) \ in(A:length(0) REUSE) \ out(B:length(36) ALLOC_FREE) { for (int r = 0; r < 3; r++) for (int c = 0; c < 3; c++) B[r*6 + c] 
= A[r*COLS + c]; for (int r = 0; r < 3; r++) for (int c = COLS-3; c < COLS; c++) B[r*6 + c-COLS+6] = A[r*COLS + c]; for (int r = ROWS-3; r < ROWS; r++) for (int c = 0; c < 3; c++) B[(r-ROWS+6)*6 + c] = A[r*COLS + c]; for (int r = ROWS-3; r < ROWS; r++) for (int c = COLS-3; c < COLS; c++) B[(r-ROWS+6)*6 + c-COLS+6] = A[r*COLS + c]; } print_slice_big(B); } } void print_slice_mic(int ROWS, int COLS, float *A){ printf("Object on MIC:\n"); float *restrict B = allocate_host(36); #pragma offload target(mic:MIC_DEV)\ in(A:length(0) REUSE) \ out(B:length(36) ALLOC_FREE) { for (int r = 0; r < 3; r++) for (int c = 0; c < 3; c++) B[r*6 + c] = A[r*COLS + c]; for (int r = 0; r < 3; r++) for (int c = COLS-3; c < COLS; c++) B[r*6 + c-COLS+6] = A[r*COLS + c]; for (int r = ROWS-3; r < ROWS; r++) for (int c = 0; c < 3; c++) B[(r-ROWS+6)*6 + c] = A[r*COLS + c]; for (int r = ROWS-3; r < ROWS; r++) for (int c = COLS-3; c < COLS; c++) B[(r-ROWS+6)*6 + c-COLS+6] = A[r*COLS + c]; } // print_slice(6, 6, B); free(B); } void offload_mic(int N, float *restrict A){ _Offload_status mic_status; OFFLOAD_STATUS_INIT(mic_status); #pragma offload_transfer target(mic:MIC_DEV) status(mic_status) \ in(A:length(N) ALLOC) if (!mic_status.result == OFFLOAD_SUCCESS){ printf("Offload failed.\n"); if (mic_status.result == OFFLOAD_OUT_OF_MEMORY) { printf("Offload failed due to insufficient memory.\n"); } } } void offload_mic_int(int N, int *restrict A){ _Offload_status mic_status; OFFLOAD_STATUS_INIT(mic_status); #pragma offload_transfer target(mic:MIC_DEV) status(mic_status) \ in(A:length(N) ALLOC) if (!mic_status.result == OFFLOAD_SUCCESS){ printf("Offload failed.\n"); if (mic_status.result == OFFLOAD_OUT_OF_MEMORY) { printf("Offload failed due to insufficient memory.\n"); } } } void pull_mic(int N, float *restrict A){ #pragma offload_transfer target(mic:MIC_DEV) \ out(A:length(N) REUSE) } float *unalign_host(int N, float *restrict A){ float *restrict B = allocate_host(N); int n; #pragma omp parallel for 
private(n)
    for (n = 0; n < N; n++) B[n] = A[n];
    return B;
}

// Return the first element of A (scalar readout of a length-1 result buffer).
float output_float(float *restrict A){
    float S = A[0];
    return S;
}

// Copy B into A. On the host (offloaded == 0) this is a plain parallel loop;
// otherwise A is freshly allocated on the MIC and filled from the MIC-resident
// B with a BLAS copy.
// NOTE(review): the offload branch ALLOCs A on the card on every call —
// confirm callers expect a new device allocation rather than REUSE.
void copy(int N, float *restrict A, float *restrict B, int offloaded){
    if (offloaded == 0){
        int n;
        #pragma omp parallel for private(n)
        for (n = 0; n < N; n++) A[n] = B[n];
    }
    else{
        #pragma offload target(mic:MIC_DEV) if(offloaded == 1) \
        in(B:length(0) REUSE) \
        nocopy(A:length(N) ALLOC)
        {
            cblas_scopy(N, B, 1, A, 1);
        }
    }
}

// copies B into A on host
void replace_host(int N, float *restrict A, float *restrict B){
    int n;
    #pragma omp parallel for private(n)
    for (n = 0; n < N; n++) A[n] = B[n];
    // cblas_scopy(N, B, 1, A, 1);
}

// Copy the MIC-resident B into the MIC-resident A (both already allocated).
void replace_mic(int N, float *restrict A, float *restrict B){
    #pragma offload target(mic:MIC_DEV) \
    in(B:length(0) REUSE) \
    in(A:length(0) REUSE)
    {
        cblas_scopy(N, B, 1, A, 1);
    }
}

// Integer version of replace_host(): copy B into A on the host.
void replace_host_int(int N, int *restrict A, int *restrict B){
    int n;
    #pragma omp parallel for private(n)
    for (n = 0; n < N; n++) A[n] = B[n];
    // cblas_scopy(N, B, 1, A, 1);
}

// Integer version of replace_mic(): explicit OpenMP loop on the card
// (there is no integer BLAS copy).
void replace_mic_int(int N, int *restrict A, int *restrict B){
    #pragma offload target(mic:MIC_DEV) \
    in(B:length(0) REUSE) \
    in(A:length(0) REUSE)
    {
        int n;
        #pragma omp parallel for private(n)
        for (n = 0; n < N; n++) A[n] = B[n];
    }
}

// copies B into A on host
// Copy N_B elements of B starting at SHIFT_B into A starting at SHIFT_A.
// N_A is not referenced (kept for a signature symmetric with the _mic variant).
void replace_partial_host(int N_A, int SHIFT_A, float *restrict A, int N_B, int SHIFT_B, float *restrict B){
    int n;
    #pragma omp parallel for private(n)
    for (n = 0; n < N_B; n++) A[SHIFT_A + n] = B[SHIFT_B + n];
    // cblas_scopy(N, B, 1, A, 1);
}

// Same as replace_partial_host() but for MIC-resident arrays, via BLAS scopy.
void replace_partial_mic(int N_A, int SHIFT_A, float *restrict A, int N_B, int SHIFT_B, float *restrict B){
    #pragma offload target(mic:MIC_DEV) \
    in(B:length(0) REUSE) \
    in(A:length(0) REUSE)
    {
        cblas_scopy(N_B, B + SHIFT_B, 1, A + SHIFT_A, 1);
    }
}

// Return a pointer into A offset by SHIFT (no copy; N is not referenced).
float *get_partial(int N, int SHIFT, float *restrict A){
    float *restrict S = A + SHIFT;
    return S;
}

// copies A into existing memory of A on MIC
void push_mic(int N, float *restrict A){
    _Offload_status mic_status;
OFFLOAD_STATUS_INIT(mic_status); #pragma offload_transfer target(mic:MIC_DEV) status(mic_status) \ in(A:length(N) REUSE) if (!mic_status.result == OFFLOAD_SUCCESS){ printf("Offload failed.\n"); if (mic_status.result == OFFLOAD_OUT_OF_MEMORY) { printf("Offload failed due to insufficient memory.\n"); } } } float *cast_float(int N, int *restrict A, int offloaded){ float *restrict A_float = allocate_host(N); #pragma offload target(mic:MIC_DEV) if(offloaded == 1) \ in(A:length(0) FREE) \ nocopy(A_float:length(N) ALLOC) { int n; #pragma omp parallel for private(n) for (n = 0; n < N; n++) A_float[n] = (float) A[n]; } free(A); return A_float; } int *cast_int(int N, float *restrict A, int offloaded){ int *restrict A_int = allocate_host_int(N); #pragma offload target(mic:MIC_DEV) if(offloaded == 1) \ in(A:length(0) FREE) \ nocopy(A_int:length(N) ALLOC) { int n; #pragma omp parallel for private(n) for (n = 0; n < N; n++) A_int[n] = (int) A[n]; } free(A); return A_int; } void free_host(int N, float *A){ // _mm_free(A); free(A); } void free_host_int(int N, int *A){ // _mm_free(A); free(A); } void free_mic(int N, float *restrict A){ #pragma offload_transfer target(mic:MIC_DEV) \ nocopy(A:length(N) FREE) } void free_mic_int(int N, int *restrict A){ #pragma offload_transfer target(mic:MIC_DEV) \ nocopy(A:length(N) FREE) } void expo(int N, float *restrict A){ #pragma offload target(mic:MIC_DEV) \ in(A:length(0) REUSE) { int n; #pragma omp parallel for private(n) for (n = 0; n < N; n++) vsExp(1, A+n, A+n); } } void clip(int N, float *restrict A, float LOWER, float UPPER){ #pragma offload target(mic:MIC_DEV) \ in(A:length(0) REUSE) { int n; #pragma omp parallel for private(n) for (n = 0; n < N; n++){ if (A[n] < LOWER) A[n] = LOWER; if (A[n] > UPPER) A[n] = UPPER; } } } void clip_low(int N, float *restrict A, float LOWER){ #pragma offload target(mic:MIC_DEV) \ in(A:length(0) REUSE) { int n; #pragma omp parallel for private(n) for (n = 0; n < N; n++){ if (A[n] < LOWER) A[n] = LOWER; } 
} } void flooro(int N, float *restrict A){ #pragma offload target(mic:MIC_DEV) \ in(A:length(0) REUSE) { int n; #pragma omp parallel for private(n) for (n = 0; n < N; n++) vsFloor(1, A+n, A+n); } } void sign(int N, float *restrict A){ #pragma offload target(mic:MIC_DEV) \ in(A:length(0) REUSE) { int n; #pragma omp parallel for private(n) for (n = 0; n < N; n++){ if (A[n] >= 0) A[n] = 1.f; else A[n] = -1.f; } } } float *equal(int N, float *restrict A, float *restrict B){ float *restrict S = allocate_host(N); float max_AB; #pragma offload target(mic:MIC_DEV) \ in(A:length(0) REUSE) \ in(B:length(0) REUSE) \ nocopy(S:length(N) ALLOC) { int n; #pragma omp parallel for private(n) for (n = 0; n < N; n++){ max_AB = fmaxf(fabsf(A[n]), fabsf(B[n])); if (fabsf(A[n] - B[n]) <= 0.00001*max_AB) S[n] = 1.f; else S[n] = 0.f; } } return S; } float *leq(int N, float *restrict A, float B){ float *restrict S = allocate_host(N); #pragma offload target(mic:MIC_DEV) \ in(A:length(0) REUSE) \ nocopy(S:length(N) ALLOC) { int n; #pragma omp parallel for private(n) for (n = 0; n < N; n++){ if (A[n] <= B) S[n] = 1.f; else S[n] = 0.f; } } return S; } float *geq(int N, float *restrict A, float B){ float *restrict S = allocate_host(N); #pragma offload target(mic:MIC_DEV) \ in(A:length(0) REUSE) \ nocopy(S:length(N) ALLOC) { int n; #pragma omp parallel for private(n) for (n = 0; n < N; n++){ if (A[n] >= B) S[n] = 1.f; else S[n] = 0.f; } } return S; } float *greater(int N, float *restrict A, float B){ float *restrict S = allocate_host(N); #pragma offload target(mic:MIC_DEV) \ in(A:length(0) REUSE) \ nocopy(S:length(N) ALLOC) { int n; #pragma omp parallel for private(n) for (n = 0; n < N; n++){ if (A[n] > B) S[n] = 1.f; else S[n] = 0.f; } } return S; } float *elementwise_or(int N, float *restrict A, float *restrict B){ float *restrict S = allocate_host(N); #pragma offload target(mic:MIC_DEV) \ in(A:length(0) REUSE) \ in(B:length(0) REUSE) \ nocopy(S:length(N) ALLOC) { int n; #pragma omp parallel 
for private(n)
        for (n = 0; n < N; n++){
            if ((A[n] != 0.f) || (B[n] != 0.f)) S[n] = 1.f;
            else S[n] = 0.f;
        }
    }
    return S;
}

// One-hot encode: A holds N class labels (float-encoded integers, assumed to
// lie in [0, K) — TODO confirm no range check is needed); returns an N x K
// matrix S with S[n][label] = 1 and zeros elsewhere. S is allocated on host
// and MIC and filled on the card.
float *labels_to_vectors(int N, int K, float *restrict A){
    float *restrict S = allocate_host(N*K);
    #pragma offload target(mic:MIC_DEV) \
    in(A:length(0) REUSE) \
    nocopy(S:length(N*K) ALLOC)
    {
        int n;
        #pragma omp parallel for private(n)
        for (n = 0; n < K*N; n++) S[n] = 0;
        #pragma omp parallel for private(n)
        for (n = 0; n < N; n++) S[n*K + (int) A[n]] = 1;
    }
    return S;
}

// In-place natural log on the MIC copy of A.
void lg(int N, float *restrict A){
    #pragma offload target(mic:MIC_DEV) \
    in(A:length(0) REUSE)
    {
        int n;
        #pragma omp parallel for private(n)
        for (n = 0; n < N; n++) vsLn(1, A+n, A+n);
        // vsLn(N, A, A);
    }
}

// In-place absolute value.
void abso(int N, float *restrict A){
    #pragma offload target(mic:MIC_DEV) \
    in(A:length(0) REUSE)
    {
        int n;
        #pragma omp parallel for private(n)
        for (n = 0; n < N; n++) vsAbs(1, A+n, A+n);
        // vsAbs(N, A, A);
    }
}

// In-place square root.
void sqrto(int N, float *restrict A){
    #pragma offload target(mic:MIC_DEV) \
    in(A:length(0) REUSE)
    {
        int n;
        #pragma omp parallel for private(n)
        for (n = 0; n < N; n++) vsSqrt(1, A+n, A+n);
        // vsSqrt(N, A, A);
    }
}

// Euclidean (L2) norm of the MIC-resident A, returned to the host.
float normo(int N, float *restrict A){
    float S;
    #pragma offload target(mic:MIC_DEV) \
    in(A:length(0) REUSE)
    {
        S = cblas_snrm2(N, A, 1);
    }
    return S;
}

// In-place element-wise power: A[n] = A[n]^b.
void powo(int N, float *restrict A, float b){
    #pragma offload target(mic:MIC_DEV) \
    in(A:length(0) REUSE)
    {
        int n;
        #pragma omp parallel for private(n)
        for (n = 0; n < N; n++) vsPowx(1, A+n, b, A+n);
        // vsPowx(N, A, b, A);
    }
}

// In-place transpose of the ROWS x COLS row-major MIC-resident matrix A.
void T(int ROWS, int COLS, float *restrict A){
    #pragma offload target(mic:MIC_DEV) \
    in(A:length(0) REUSE)
    {
        mkl_simatcopy('R', 'T', ROWS, COLS, 1.0, A, COLS, ROWS);
    }
}

// Scale the MIC-resident A by the scalar c, in place.
void scale(int N, float *restrict A, float c){
    #pragma offload target(mic:MIC_DEV) \
    in(A:length(0) REUSE)
    {
        cblas_sscal(N, c, A, 1);
    }
}

// In-place broadcast multiply of the ROWS_A x COLS_A matrix A by X, where X
// may be a column vector (per-row factors), a row vector (per-column
// factors), a 1x1 scalar, or a full same-shape matrix; runs on the MIC.
void mult(int ROWS_A, int COLS_A, float *restrict A, int ROWS_X, int COLS_X, float *restrict X){
    if (COLS_X == 1 && ROWS_X > 1){
        // X is a column vector: scale row r of A by X[r]
        #pragma offload target(mic:MIC_DEV) \
        in(A:length(0) REUSE) \
in(X:length(0) REUSE)
        {
            int r, c;
            #pragma omp parallel for collapse(2) private(r, c)
            for (r = 0; r < ROWS_A; r++)
                for (c = 0; c < COLS_A; c++)
                    A[r*COLS_A + c] = A[r*COLS_A + c] * X[r];
        }
    }
    else if (ROWS_X == 1 && COLS_X > 1){
        // X is a row vector: scale column c of A by X[c]
        #pragma offload target(mic:MIC_DEV) \
        in(A:length(0) REUSE) \
        in(X:length(0) REUSE)
        {
            int r, c;
            #pragma omp parallel for collapse(2) private(r, c)
            for (c = 0; c < COLS_A; c++)
                for (r = 0; r < ROWS_A; r++)
                    A[r*COLS_A + c] = A[r*COLS_A + c] * X[c];
        }
    }
    else if (ROWS_X == 1 && COLS_X == 1){
        // X is a scalar: plain BLAS scaling
        #pragma offload target(mic:MIC_DEV) \
        in(A:length(0) REUSE) \
        in(X:length(0) REUSE)
        {
            cblas_sscal(ROWS_A*COLS_A, X[0], A, 1);
        }
    }
    else if (ROWS_X == ROWS_A && COLS_X == COLS_A){
        // same shape: element-wise (Hadamard) product
        #pragma offload target(mic:MIC_DEV) \
        in(A:length(0) REUSE) \
        in(X:length(0) REUSE)
        {
            int n;
            #pragma omp parallel for private(n)
            for (n = 0; n < ROWS_A*COLS_A; n++) vsMul(1, A+n, X+n, A+n);
            // vsMul(ROWS_A*COLS_A, A, X, A);
        }
    }
    else printf("Update matrix dimensions don\'t match.");
}

// In-place element-wise reciprocal A[n] = 1/A[n] on the MIC.
void invert(int N, float *restrict A){
    #pragma offload target(mic:MIC_DEV) \
    in(A:length(0) REUSE)
    {
        int n;
        #pragma omp parallel for private(n)
        for (n = 0; n < N; n++) vsInv(1, A+n, A+n);
        // vsInv(N, A, A);
    }
}

// In-place broadcast divide, the mirror image of mult(): X may be a column
// vector, a row vector, a 1x1 scalar, or a full same-shape matrix.
void divide(int ROWS_A, int COLS_A, float *restrict A, int ROWS_X, int COLS_X, float *restrict X){
    if (COLS_X == 1 && ROWS_X > 1){
        // column vector: divide row r of A by X[r]
        #pragma offload target(mic:MIC_DEV) \
        in(A:length(0) REUSE) \
        in(X:length(0) REUSE)
        {
            int r, c;
            #pragma omp parallel for collapse(2) private(r, c)
            for (r = 0; r < ROWS_A; r++)
                for (c = 0; c < COLS_A; c++)
                    A[r*COLS_A + c] = A[r*COLS_A + c] / X[r];
        }
    }
    else if (ROWS_X == 1 && COLS_X > 1){
        // row vector: divide column c of A by X[c]
        #pragma offload target(mic:MIC_DEV) \
        in(A:length(0) REUSE) \
        in(X:length(0) REUSE)
        {
            int r, c;
            #pragma omp parallel for collapse(2) private(r, c)
            for (c = 0; c < COLS_A; c++)
                for (r = 0; r < ROWS_A; r++)
                    A[r*COLS_A + c] = A[r*COLS_A + c] / X[c];
        }
    }
    else if (ROWS_X == 1 && COLS_X == 1){
        // scalar: scale by the reciprocal
        #pragma offload target(mic:MIC_DEV) \
        in(A:length(0) REUSE) \
        in(X:length(0) REUSE)
        {
cblas_sscal(ROWS_A*COLS_A, 1.0/X[0], A, 1);
        }
    }
    else if (ROWS_X == ROWS_A && COLS_X == COLS_A){
        // same shape: element-wise division
        #pragma offload target(mic:MIC_DEV) \
        in(A:length(0) REUSE) \
        in(X:length(0) REUSE)
        {
            int n;
            #pragma omp parallel for private(n)
            for (n = 0; n < ROWS_A*COLS_A; n++) vsDiv(1, A+n, X+n, A+n);
            // vsDiv(ROWS_A*COLS_A, A, X, A);
        }
    }
    else printf("Update matrix dimensions don\'t match.");
}

// A += ALPHA * X with broadcasting, on the MIC. X may be a column vector
// (added across all columns via a rank-1 update against a ones vector), a row
// vector (added across all rows), a 1x1 scalar, or a full same-shape matrix
// (plain saxpy). Used for gradient updates.
void update(int ROWS_A, int COLS_A, float *restrict A, int ROWS_X, int COLS_X, float *restrict X, float ALPHA){
    if (COLS_X == 1 && ROWS_X > 1){
        // column vector: A += ALPHA * X * ones(1, COLS_A)
        #pragma offload target(mic:MIC_DEV) \
        in(A:length(0) REUSE) \
        in(X:length(0) REUSE)
        {
            float *restrict Y = ones_mic(COLS_A);
            cblas_sger(CblasRowMajor, ROWS_A, COLS_A, ALPHA, X, 1, Y, 1, A, COLS_A);
            // _mm_free(Y);
            free(Y);
        }
    }
    else if (ROWS_X == 1 && COLS_X > 1){
        // row vector: A += ALPHA * ones(ROWS_A, 1) * X
        #pragma offload target(mic:MIC_DEV) \
        in(A:length(0) REUSE) \
        in(X:length(0) REUSE)
        {
            float *restrict Y = ones_mic(ROWS_A);
            cblas_sger(CblasRowMajor, ROWS_A, COLS_A, ALPHA, Y, 1, X, 1, A, COLS_A);
            // _mm_free(Y);
            free(Y);
        }
    }
    else if (ROWS_X == 1 && COLS_X == 1){
        // scalar: A += ALPHA * X[0] everywhere
        #pragma offload target(mic:MIC_DEV) \
        in(A:length(0) REUSE) \
        in(X:length(0) REUSE)
        {
            float *restrict Y = ones_mic(ROWS_A*COLS_A);
            cblas_saxpy(ROWS_A*COLS_A, X[0]*ALPHA, Y, 1, A, 1);
            // _mm_free(Y);
            free(Y);
        }
    }
    else if (ROWS_X == ROWS_A && COLS_X == COLS_A){
        // same shape: plain A += ALPHA * X
        #pragma offload target(mic:MIC_DEV) \
        in(A:length(0) REUSE) \
        in(X:length(0) REUSE)
        {
            cblas_saxpy(ROWS_A*COLS_A, ALPHA, X, 1, A, 1);
        }
    }
    else printf("Update matrix dimensions don\'t match.");
}

// Add the constant c to every element of the MIC-resident A
// (implemented as saxpy against a freshly allocated ones vector).
void update_const(int N, float *restrict A, float c){
    #pragma offload target(mic:MIC_DEV) \
    in(A:length(0) REUSE)
    {
        float *restrict Y = ones_mic(N);
        cblas_saxpy(N, c, Y, 1, A, 1);
        // _mm_free(Y);
        free(Y);
    }
}

// void fill_bernoulli(int skip_num, int N, float *restrict A, float p){
//     fill_uniform(skip_num, N, A);
//     update_const(N, A, p);
//     flooro(N, A);
// }

// Fill A with Bernoulli(p) samples: draw uniforms, add p, and floor — which
// yields 1 with probability p and 0 otherwise. skip_num advances the RNG
// stream (see fill_uniform). A direct viRngBernoulli variant is kept below,
// commented out.
void fill_bernoulli(int skip_num, int N, float *restrict A, float p){
    fill_uniform(skip_num, N, A);
    update_const(N, A, p);
flooro(N, A); // #pragma offload target(mic:MIC_DEV) \ // in(A:length(0) REUSE) // { // int *B = (int *)malloc(N*sizeof(int)); // VSLStreamStatePtr stream; // vslNewStream(&stream, VSL_BRNG_MCG31, 1); // vslSkipAheadStream(stream, skip_num); // viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, stream, N, B, p); // for (int n = 0; n < N; n++) A[n] = (float) B[n]; // free(B); // } } float *dot(int ROWS_A, int COLS_A, int T_A, float *restrict A, int COLS_B, int T_B, float *restrict B){ // float *restrict C = (float *)_mm_malloc(ROWS_A*COLS_B*sizeof(float), 64); float *restrict C = allocate_host(ROWS_A*COLS_B); char TRANSPOSE_A = 'N', TRANSPOSE_B = 'N'; if (T_A == 1) TRANSPOSE_A = 'T'; if (T_B == 1) TRANSPOSE_B = 'T'; float ALPHA = 1.0f, BETA = 0.0f; #pragma offload target(mic:MIC_DEV) \ in(A:length(0) REUSE) \ in(B:length(0) REUSE) \ nocopy(C:length(ROWS_A*COLS_B) ALLOC) { cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, ROWS_A, COLS_B, COLS_A, ALPHA, (float *)A, COLS_A, (float *)B, COLS_B, BETA, (float *)C, COLS_B); } return C; } float *dot_vec(int N, float *restrict A, float *restrict B){ float *restrict S = allocate_host(2); #pragma offload target(mic:MIC_DEV) \ in(A:length(0) REUSE) \ in(B:length(0) REUSE) \ nocopy(S:length(2) ALLOC) { S[0] = cblas_sdot(N, A, 1, B, 1); } return S; } void dot_replace(int ROWS_A, int COLS_A, int T_A, float *restrict A, int ROWS_B, int COLS_B, int T_B, float *restrict B, float BETA, float *restrict C){ CBLAS_TRANSPOSE TRANSPOSE_A = CblasNoTrans, TRANSPOSE_B = CblasNoTrans; int ROWS_LEFT = ROWS_A, ROWS_RIGHT = ROWS_B, COLS_LEFT = COLS_A, COLS_RIGHT = COLS_B; if (T_A == 1){ TRANSPOSE_A = CblasTrans; ROWS_LEFT = COLS_A; COLS_LEFT = ROWS_A; } if (T_B == 1){ TRANSPOSE_B = CblasTrans; ROWS_RIGHT = COLS_B; COLS_RIGHT = ROWS_B; } float ALPHA = 1.0f; #pragma offload target(mic:MIC_DEV) \ in(A:length(0) REUSE) \ in(B:length(0) REUSE) \ in(C:length(0) REUSE) { cblas_sgemm(CblasRowMajor, TRANSPOSE_A, TRANSPOSE_B, ROWS_LEFT, COLS_RIGHT, 
COLS_LEFT, ALPHA, (float *)A, COLS_A, (float *)B, COLS_B,
                    BETA, (float *)C, COLS_RIGHT);
    }
}

// MIC-side GEMM helper compiled for the card (target attribute) so it can be
// called from inside offload regions: C = A * B, row-major, no transposes.
void __attribute__((target(mic:MIC_DEV))) dot_mic(int ROWS_A, int COLS_A, float *restrict A, int COLS_B, float *restrict B, float *restrict C){
    char TRANSPOSE_A = 'N', TRANSPOSE_B = 'N'; // unused; left over from the host version
    float ALPHA = 1.0f, BETA = 0.0f;
    cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
                ROWS_A, COLS_B, COLS_A,
                ALPHA, (float *)A, COLS_A, (float *)B, COLS_B,
                BETA, (float *)C, COLS_B);
}

// Sum the ROWS_A x COLS_A MIC-resident matrix A along an axis, implemented as
// a product with a ones vector. AXIS == 0: column sums (length COLS_A);
// AXIS == 1: row sums (length ROWS_A); AXIS == 2: grand total in S[0].
// The returned buffer is allocated on host and MIC.
float *sum_axis(int ROWS_A, int COLS_A, float *restrict A, int AXIS){
    float *restrict S;
    if (AXIS == 0){
        S = allocate_host(COLS_A);
        #pragma offload target(mic:MIC_DEV) \
        in(A:length(0) REUSE) \
        nocopy(S:length(COLS_A) ALLOC)
        {
            float *restrict Y = ones_mic(ROWS_A);
            dot_mic(1, ROWS_A, Y, COLS_A, A, S); // ones^T * A
            free(Y);
        }
    }
    else if (AXIS == 1){
        S = allocate_host(ROWS_A);
        #pragma offload target(mic:MIC_DEV) \
        in(A:length(0) REUSE) \
        nocopy(S:length(ROWS_A) ALLOC)
        {
            float *restrict Y = ones_mic(COLS_A);
            dot_mic(ROWS_A, COLS_A, A, 1, Y, S); // A * ones
            free(Y);
        }
    }
    else if (AXIS == 2){
        S = allocate_host(2);
        #pragma offload target(mic:MIC_DEV) \
        in(A:length(0) REUSE) \
        nocopy(S:length(2) ALLOC)
        {
            float *restrict Y = ones_mic(ROWS_A*COLS_A);
            S[0] = cblas_sdot(ROWS_A*COLS_A, A, 1, Y, 1);
            free(Y);
        }
    }
    return S;
}

// Argmax of A along an axis, returning linear indices into A. Because
// cblas_isamax selects the largest |value|, A is first shifted by its minimum
// so all entries are non-negative, and the shift is undone afterwards.
// NOTE(review): the min-reduction loop below writes the shared A_MIN from all
// OpenMP threads with no reduction clause — a data race; also A_MIN starts at
// 2^28 rather than FLT_MAX (harmless for the non-negativity goal, but it is
// not the true minimum when all values exceed 2^28). Confirm and consider
// "reduction(min:A_MIN)".
int *max_axis(int ROWS_A, int COLS_A, float *restrict A, int AXIS){
    int *restrict S;
    float A_MIN = 268435456;
    #pragma offload target(mic:MIC_DEV) \
    in(A:length(0) REUSE)
    {
        int n;
        #pragma omp parallel for private(n)
        for (n = 0; n < ROWS_A*COLS_A; n++) {if (A[n] < A_MIN) A_MIN = A[n];}
        #pragma omp parallel for private(n)
        for (n = 0; n < ROWS_A*COLS_A; n++) A[n] = A[n] - A_MIN;
    }
    if (AXIS == 0){
        // per-column argmax: isamax over a strided column, mapped to a linear index
        S = allocate_host_int(COLS_A);
        #pragma offload target(mic:MIC_DEV) \
        in(A:length(0) REUSE) \
        nocopy(S:length(COLS_A) ALLOC)
        {
            int n;
            #pragma omp parallel for private(n)
            for (n = 0; n < COLS_A; n++){
                S[n] = cblas_isamax(ROWS_A, A + n, COLS_A);
                S[n] = S[n]*COLS_A + n;
            }
        }
    }
    else if (AXIS == 1){
        // S = _mm_malloc(ROWS_A*sizeof(float), 64);
        S
= allocate_host_int(ROWS_A);
        #pragma offload target(mic:MIC_DEV) \
        in(A:length(0) REUSE) \
        nocopy(S:length(ROWS_A) ALLOC)
        {
            int n;
            #pragma omp parallel for private(n)
            for (n = 0; n < ROWS_A; n++){
                S[n] = cblas_isamax(COLS_A, A + n*COLS_A, 1); // argmax within row n
                S[n] = S[n] + n*COLS_A;                       // to a linear index
            }
        }
    }
    else if (AXIS == 2){
        // S = _mm_malloc(2*sizeof(float), 64);
        S = allocate_host_int(2);
        #pragma offload target(mic:MIC_DEV) \
        in(A:length(0) REUSE) \
        nocopy(S:length(2) ALLOC)
        {
            S[0] = cblas_isamax(ROWS_A*COLS_A, A, 1); // global argmax
        }
    }
    // undo the min-shift applied at the top of this function
    #pragma offload target(mic:MIC_DEV) \
    in(A:length(0) REUSE)
    {
        int n;
        #pragma omp parallel for private(n)
        for (n = 0; n < ROWS_A*COLS_A; n++) A[n] = A[n] + A_MIN;
    }
    return S;
}

// Convert the linear indices produced by max_axis() into per-axis offsets,
// in place on the MIC-resident index array A: AXIS == 0 -> row index within
// each column, AXIS == 1 -> column index within each row.
void index_global_to_local(int ROWS, int COLS, int *restrict A, int AXIS){
    if (AXIS == 0){
        #pragma offload target(mic:MIC_DEV) \
        in(A:length(0) REUSE)
        {
            int n;
            #pragma omp parallel for private(n)
            for (n = 0; n < COLS; n++){ A[n] = (A[n] - n)/COLS;}
        }
    }
    else if (AXIS == 1){
        #pragma offload target(mic:MIC_DEV) \
        in(A:length(0) REUSE)
        {
            int n;
            #pragma omp parallel for private(n)
            for (n = 0; n < ROWS; n++){ A[n] = A[n] - n*COLS; }
        }
    }
}

// Sum of all N elements of the MIC-resident A (dot product with ones).
float sumo(int N, float *restrict A){
    float S;
    #pragma offload target(mic:MIC_DEV) \
    in(A:length(0) REUSE)
    {
        float *restrict Y = ones_mic(N);
        S = cblas_sdot(N, A, 1, Y, 1);
        // _mm_free(Y);
        free(Y);
    }
    return S;
}

// Fused valid convolution + max pooling on the MIC.
// INPUTS: N x C x H x W, FILTERS: K x C x Y x X, OUTPUTS: N x K x pooled_H x
// pooled_W. ARGMAXS records, per pooled cell, the linear (n,k,h,w) index of
// the winning pre-pool position. With argmaxs_fixed == 1 the stored argmax
// positions are reused and the convolutions are re-accumulated there instead
// of re-running the max. Returns ARGMAXS.
int *convolve_and_pool(int N, int C, int H, int W, float *INPUTS, int K, int Y, int X, float *FILTERS, float *OUTPUTS, int pool_radius, int stride, int *ARGMAXS, int argmaxs_fixed){
    int output_H = H - Y + 1; // "valid" convolution output size
    int output_W = W - X + 1;
    int pooled_H = ceil(((float) output_H)/pool_radius);
    int pooled_W = ceil(((float) output_W)/pool_radius);
    #pragma offload target(mic:MIC_DEV) \
    in(INPUTS:length(0) REUSE) \
    in(FILTERS:length(0) REUSE) \
    in(OUTPUTS:length(0) REUSE) \
    in(ARGMAXS:length(0) REUSE)
    {
        float convolution;
        int n, k, h, w, c, y, x, lin_index, h_arg, w_arg;
        if (argmaxs_fixed == 0){
            // initialise every pooled output to a large negative value
            // before taking running maxima
            #pragma omp parallel for private(n)
            for (n = 0; n < N*K*pooled_H*pooled_W; n++)
OUTPUTS[n] = -1.0e10;
            #pragma omp parallel for collapse(4) private(n, k, h, w, convolution, c, y, x)
            for (n = 0; n < N; n++){
                for (k = 0; k < K; k++){
                    // loop over 2D pre-pooled map given n, k
                    for (h = 0; h < output_H; h += stride){
                        for (w = 0; w < output_W; w += stride){
                            // Code needs to not be parallelized from here on; make sure variables are not shared
                            // compute convolution for a particular set of n, k, h, w
                            // then push to pooled layer
                            convolution = 0.f;
                            for (c = 0; c < C; c++)
                                for (y = 0; y < Y; y++)
                                    for (x = 0; x < X; x++)
                                        convolution += INPUTS[ti(n, c, h + y, w + x, C, H, W)] * FILTERS[ti(k, c, y, x, C, Y, X)];
                            // #pragma omp critical
                            // keep the max over the pool cell, remembering where it came from
                            if (convolution > OUTPUTS[ti(n, k, h/pool_radius, w/pool_radius, K, pooled_H, pooled_W)]){
                                OUTPUTS[ti(n, k, h/pool_radius, w/pool_radius, K, pooled_H, pooled_W)] = convolution;
                                ARGMAXS[ti(n, k, h/pool_radius, w/pool_radius, K, pooled_H, pooled_W)] = ti(n, k, h, w, K, output_H, output_W);
                            }
                        }
                    }
                }
            }
        }
        else{
            // argmax positions are fixed: just re-accumulate the convolution
            // at each recorded winning position
            #pragma omp parallel for private(n)
            for (n = 0; n < N*K*pooled_H*pooled_W; n++) OUTPUTS[n] = 0.f;
            #pragma omp parallel for collapse(7) private(n, k, h, w, c, y, x, lin_index, h_arg, w_arg)
            // loop over pooled elements
            for (n = 0; n < N; n++)
                for (k = 0; k < K; k++)
                    for (h = 0; h < pooled_H; h++)
                        for (w = 0; w < pooled_W; w++)
                            // add up convolution
                            for (c = 0; c < C; c++)
                                for (y = 0; y < Y; y++)
                                    for (x = 0; x < X; x++){
                                        lin_index = ARGMAXS[ti(n, k, h, w, K, pooled_H, pooled_W)];
                                        h_arg = it(lin_index, 2, K, output_H, output_W);
                                        w_arg = it(lin_index, 3, K, output_H, output_W);
                                        OUTPUTS[ti(n, k, h, w, K, pooled_H, pooled_W)] += INPUTS[ti(n, c, h_arg + y, w_arg + x, C, H, W)] * FILTERS[ti(k, c, y, x, C, Y, X)];
                                    }
        }
    }
    return ARGMAXS;
}

// Backward pass of convolve_and_pool(): accumulate the filter gradient
// D_FILTERS and the input gradient D_INPUTS from the pooled-output gradient
// D_POOLED_OUTPUTS, routed through the argmax positions recorded in the
// forward pass.
void convolve_gradient(int N, int C, int H, int W, float *INPUTS, int K, int Y, int X, float *FILTERS, int *ARGMAXS, float *D_POOLED_OUTPUTS, int pool_radius, float *D_INPUTS, float *D_FILTERS){
    int output_H = H - Y + 1;
    int output_W = W - X + 1;
    int pooled_H = ceil(((float) output_H)/pool_radius);
int pooled_W = ceil(((float) output_W)/pool_radius);
    #pragma offload target(mic:MIC_DEV) \
    in(INPUTS:length(0) REUSE) \
    in(FILTERS:length(0) REUSE) \
    in(ARGMAXS:length(0) REUSE) \
    in(D_INPUTS:length(0) REUSE) \
    in(D_POOLED_OUTPUTS:length(0) REUSE) \
    in(D_FILTERS:length(0) REUSE)
    {
        int k, c, x, y, n, h, w, h_pooled, w_pooled, lin_index, h_arg, w_arg;
        // filter gradient computation
        // NOTE(review): the collapse(7) parallel loop includes n among the
        // parallel indices while "+="-accumulating into D_FILTERS (indexed
        // only by k,c,y,x) and into D_INPUTS (whose element can be shared
        // across k) — different threads can update the same element
        // concurrently. Confirm whether this race is tolerated or needs
        // atomics/reduction.
        #pragma omp parallel for collapse(7) private(k, c, x, y, n, h_pooled, w_pooled, lin_index, h_arg, w_arg)
        // loop over elements of filter
        for (k = 0; k < K; k++)
            for (c = 0; c < C; c++)
                for (x = 0; x < X; x++)
                    for (y = 0; y < Y; y++)
                        for (n = 0; n < N; n++)
                            // loop over 2D map of pooled output
                            for (h_pooled = 0; h_pooled < pooled_H; h_pooled++)
                                for (w_pooled = 0; w_pooled < pooled_W; w_pooled++){
                                    // argmax is over the output image
                                    lin_index = ARGMAXS[ti(n, k, h_pooled, w_pooled, K, pooled_H, pooled_W)];
                                    h_arg = it(lin_index, 2, K, output_H, output_W);
                                    w_arg = it(lin_index, 3, K, output_H, output_W);
                                    D_FILTERS[ti(k, c, y, x, C, Y, X)] += D_POOLED_OUTPUTS[ti(n, k, h_pooled, w_pooled, K, pooled_H, pooled_W)] * INPUTS[ti(n, c, h_arg + y, w_arg + x, C, H, W)];
                                    D_INPUTS[ti(n, c, h_arg + y, w_arg + x, C, H, W)] += D_POOLED_OUTPUTS[ti(n, k, h_pooled, w_pooled, K, pooled_H, pooled_W)] * FILTERS[ti(k, c, y, x, C, Y, X)];
                                }

        // // input gradient computation
        // #pragma omp parallel for collapse(7) private(n, c, h_pooled, w_pooled, k, x, y, lin_index, h_arg, w_arg)
        // // loop over elements of input
        // for (n = 0; n < N; n++)
        //     for (c = 0; c < C; c++)
        //         for (h_pooled = 0; h_pooled < pooled_H; h_pooled++)
        //             for (w_pooled = 0; w_pooled < pooled_W; w_pooled++)
        //                 for (k = 0; k < K; k++)
        //                     for (x = 0; x < X; x++)
        //                         for (y = 0; y < Y; y++){
        //                             lin_index = ARGMAXS[ti(n, k, h_pooled, w_pooled, K, pooled_H, pooled_W)];
        //                             h_arg = it(lin_index, 2, K, output_H, output_W);
        //                             w_arg = it(lin_index, 3, K, output_H, output_W);
        //                             D_INPUTS[ti(n, c, h_arg + y, w_arg + x, C, H, W)] += D_POOLED_OUTPUTS[ti(n, k,
h_pooled, w_pooled, K, pooled_H, pooled_W)] // * FILTERS[ti(k, c, y, x, C, Y, X)]; // } } } void convolve(int N, int C, int H, int W, float *INPUTS, int K, int Y, int X, float *FILTERS, float *OUTPUTS, int tight){ // C channels per input, per filter #pragma offload target(mic:MIC_DEV) \ in(INPUTS:length(0) REUSE) \ in(FILTERS:length(0) REUSE) \ in(OUTPUTS:length(0) REUSE) { float *output_scratch = (float *) malloc((H + Y - 1)*(W + X - 1)*sizeof(float)); // convolution mode can also be set manually to be VSL_CONV_MODE_FFT, VSL_CONV_MODE_DIRECT int INPUTS_SHAPE[] = {H, W}; int FILTERS_SHAPE[] = {Y, X}; int OUTPUTS_SHAPE[] = {H + Y - 1, W + X - 1}; int INPUTS_STRIDE[] = {W, 1}; int FILTERS_STRIDE[] = {-X, -1}; int OUTPUTS_STRIDE[] = {W + X - 1, 1}; int output_H = H + Y - 1; int output_W = W + X - 1; if (tight == 1){ output_H = H - Y + 1; output_W = W - X + 1; } VSLConvTaskPtr ConvTask; // #pragma omp parallel for for (int n = 0; n < N; n++){ for (int c = 0; c < C; c++){ float *input = &INPUTS[(n*C + c)*H*W]; vslsConvNewTaskX(&ConvTask, VSL_CONV_MODE_AUTO, 2, INPUTS_SHAPE, FILTERS_SHAPE, OUTPUTS_SHAPE, input, INPUTS_STRIDE); for (int k = 0; k < K; k++){ float *filter = &FILTERS[(k*C + c)*X*Y]; float *output = &OUTPUTS[(n*K + k)*output_H*output_W]; vslsConvExecX(ConvTask, filter, FILTERS_STRIDE, output_scratch, OUTPUTS_STRIDE); // max-pooling here, before tightening convolution even // need to output argmax's of indices (corresponding to indices of padded array?) 
if (tight == 1){ for (int h = 0; h < output_H; h++) for (int w = 0; w < output_W; w++) output_scratch[h*output_W + w] = output_scratch[(h + Y - 1)*(W + X - 1) + (w + X - 1)]; } if (c == 0) cblas_scopy(output_H*output_W, output_scratch, 1, output, 1); else cblas_saxpy(output_H*output_W, 1., output_scratch, 1, output, 1); } } vslConvDeleteTask(&ConvTask); } free(output_scratch); } } void check_mic_status(){ _Offload_status mic_status; OFFLOAD_STATUS_INIT(mic_status); int NUM_MIC; #pragma offload target(mic) status(mic_status) mandatory { NUM_MIC = _Offload_get_device_number(); } if (NUM_MIC < 0) printf("Found no MICs."); if (mic_status.result == OFFLOAD_SUCCESS) { printf("Offload test was successful.\n\n"); } else { printf("Offload failed.\n"); if (mic_status.result == OFFLOAD_OUT_OF_MEMORY) { printf("Offload failed due to insufficient memory.\n"); } } } void ping_each_core(){ #pragma offload target(mic:MIC_DEV) { #pragma omp parallel { #ifdef __MIC__ printf("MIC: greetings from thread %d out of %d.\n", omp_get_thread_num(), omp_get_num_threads()); fflush(0); #else printf("HOST: greetings from thread %d out of %d.\n", omp_get_thread_num(), omp_get_num_threads()); fflush(0); #endif } } }
convolution_1x1_pack4_bf16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv1x1s1_sgemm_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outch = top_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; const int size = w * h; const float* bias = _bias; // interleave #if __aarch64__ Mat tmp(12, inch, size/12 + (size%12)/8 + (size%12%8)/4 + (size%12%4)/2 + size%12%2, elemsize, elempack, opt.workspace_allocator); #else Mat tmp(8, inch, size/8 + (size%8)/4 + (size%4)/2 + size%2, elemsize, elempack, opt.workspace_allocator); #endif { int nn_size; int remain_size_start; #if __aarch64__ nn_size = size / 12; remain_size_start = nn_size * 12; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = ii * 12; const unsigned short* img0 = bottom_blob.channel(0); img0 += i*4; unsigned short* tmpptr = tmp.channel(i/12); for (int q=0; q<inch; q++) { asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "prfm pldl1keep, [%0, #128] \n" "ld4 {v4.4h, v5.4h, v6.4h, v7.4h}, [%0] \n" "st1 {v0.8h}, [%1], #16 \n" "st1 {v4.4h}, [%1], #8 \n" "st1 {v1.8h}, [%1], #16 \n" "st1 {v5.4h}, [%1], #8 \n" "sub %0, 
%0, #64 \n" "st1 {v2.8h}, [%1], #16 \n" "st1 {v6.4h}, [%1], #8 \n" "st1 {v3.8h}, [%1], #16 \n" "st1 {v7.4h}, [%1], #8 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" ); img0 += bottom_blob.cstep * 4; } } #else remain_size_start = 0; #endif nn_size = (size - remain_size_start) >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = remain_size_start + ii * 8; const unsigned short* img0 = bottom_blob.channel(0); img0 += i*4; #if __aarch64__ unsigned short* tmpptr = tmp.channel(i/12+(i%12)/8); #else unsigned short* tmpptr = tmp.channel(i/8); #endif for (int q=0; q<inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3" ); #else asm volatile( "pld [%0, #256] \n" "vld4.u16 {d0-d3}, [%0]! \n" "pld [%0, #256] \n" "vld4.u16 {d4-d7}, [%0] \n" "sub %0, %0, #32 \n" "vst1.u16 {d0}, [%1 :64]! \n" "vst1.u16 {d4}, [%1 :64]! \n" "vst1.u16 {d1}, [%1 :64]! \n" "vst1.u16 {d5}, [%1 :64]! \n" "vst1.u16 {d2}, [%1 :64]! \n" "vst1.u16 {d6}, [%1 :64]! \n" "vst1.u16 {d3}, [%1 :64]! \n" "vst1.u16 {d7}, [%1 :64]! 
\n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0", "q1", "q2", "q3" ); #endif // __aarch64__ img0 += bottom_blob.cstep * 4; } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = remain_size_start + ii * 4; const unsigned short* img0 = bottom_blob.channel(0); img0 += i*4; #if __aarch64__ unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4); #else unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4); #endif for (int q=0; q<inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.8h, v1.8h}, [%0] \n" "st1 {v0.8h, v1.8h}, [%1], #32 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1" ); #else asm volatile( "pld [%0, #256] \n" "vld1.u16 {d0-d3}, [%0 :128] \n" "vst1.u16 {d0-d3}, [%1 :128]! \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0", "q1" ); #endif // __aarch64__ img0 += bottom_blob.cstep * 4; } } remain_size_start += nn_size << 2; nn_size = (size - remain_size_start) >> 1; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = remain_size_start + ii * 2; const unsigned short* img0 = bottom_blob.channel(0); img0 += i*4; #if __aarch64__ unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2); #else unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4 + (i%4)/2); #endif for (int q=0; q<inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.8h}, [%0] \n" "st1 {v0.8h}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0" ); #else asm volatile( "pld [%0, #128] \n" "vld1.u16 {d0-d1}, [%0 :128] \n" "vst1.u16 {d0-d1}, [%1 :128]! 
\n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0" ); #endif // __aarch64__ img0 += bottom_blob.cstep * 4; } } remain_size_start += nn_size << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_size_start; i<size; i++) { const unsigned short* img0 = bottom_blob.channel(0); img0 += i*4; #if __aarch64__ unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2 + i%12%2); #else unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4 + (i%4)/2 + i%2); #endif for (int q=0; q<inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #64] \n" "ld1 {v0.4h}, [%0] \n" "st1 {v0.4h}, [%1], #8 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0" ); #else asm volatile( "pld [%0, #64] \n" "vld1.u16 {d0}, [%0 :64] \n" "vst1.u16 {d0}, [%1 :64]! \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0" ); #endif // __aarch64__ img0 += bottom_blob.cstep * 4; } } } int nn_outch = 0; int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = pp * 2; unsigned short* outptr0 = top_blob.channel(p); unsigned short* outptr1 = top_blob.channel(p+1); const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? 
bias + p * 4 : zeros; int i=0; for (; i+11<size; i+=12) { const unsigned short* tmpptr = tmp.channel(i/12); const unsigned short* kptr01 = (const unsigned short*)kernel.channel(pp); int nn = inch;// inch always > 0 asm volatile( "ld1 {v0.4s, v1.4s}, [%10] \n" "mov v8.16b, v0.16b \n" "mov v9.16b, v0.16b \n" "mov v10.16b, v0.16b \n" "mov v11.16b, v0.16b \n" "mov v12.16b, v0.16b \n" "mov v13.16b, v0.16b \n" "mov v14.16b, v0.16b \n" "mov v15.16b, v0.16b \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "mov v20.16b, v1.16b \n" "mov v21.16b, v1.16b \n" "mov v22.16b, v1.16b \n" "mov v23.16b, v1.16b \n" "mov v24.16b, v1.16b \n" "mov v25.16b, v1.16b \n" "mov v26.16b, v1.16b \n" "mov v27.16b, v1.16b \n" "mov v28.16b, v1.16b \n" "mov v29.16b, v1.16b \n" "mov v30.16b, v1.16b \n" "mov v31.16b, v1.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%4], #32 \n"// w0011_01 "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] \n" "fmla v20.4s, v5.4s, v0.s[0] \n" "fmla v21.4s, v5.4s, v0.s[1] \n" "fmla v22.4s, v5.4s, v0.s[2] \n" "fmla v23.4s, v5.4s, v0.s[3] \n" "fmla v24.4s, v5.4s, v1.s[0] \n" "fmla v25.4s, v5.4s, v1.s[1] \n" "fmla v26.4s, v5.4s, v1.s[2] \n" "fmla v27.4s, v5.4s, v1.s[3] \n" "fmla v28.4s, v5.4s, v2.s[0] \n" "fmla v29.4s, v5.4s, v2.s[1] \n" "fmla 
v30.4s, v5.4s, v2.s[2] \n" "fmla v31.4s, v5.4s, v2.s[3] \n" "fmla v8.4s, v6.4s, v3.s[0] \n" "fmla v9.4s, v6.4s, v3.s[1] \n" "fmla v10.4s, v6.4s, v3.s[2] \n" "fmla v11.4s, v6.4s, v3.s[3] \n" "fmla v20.4s, v7.4s, v3.s[0] \n" "fmla v21.4s, v7.4s, v3.s[1] \n" "fmla v22.4s, v7.4s, v3.s[2] \n" "fmla v23.4s, v7.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v12.4s, v6.4s, v0.s[0] \n" "fmla v13.4s, v6.4s, v0.s[1] \n" "fmla v14.4s, v6.4s, v0.s[2] \n" "fmla v15.4s, v6.4s, v0.s[3] \n" "fmla v16.4s, v6.4s, v1.s[0] \n" "fmla v17.4s, v6.4s, v1.s[1] \n" "fmla v18.4s, v6.4s, v1.s[2] \n" "fmla v19.4s, v6.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v0.s[0] \n" "fmla v25.4s, v7.4s, v0.s[1] \n" "fmla v26.4s, v7.4s, v0.s[2] \n" "fmla v27.4s, v7.4s, v0.s[3] \n" "fmla v28.4s, v7.4s, v1.s[0] \n" "fmla v29.4s, v7.4s, v1.s[1] \n" "fmla v30.4s, v7.4s, v1.s[2] \n" "fmla v31.4s, v7.4s, v1.s[3] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%4], #32 \n"// w2233_01 "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v8.4s, v4.4s, v2.s[0] \n" "fmla v9.4s, v4.4s, v2.s[1] \n" "fmla v10.4s, v4.4s, v2.s[2] \n" "fmla v11.4s, v4.4s, v2.s[3] \n" "fmla v12.4s, v4.4s, v3.s[0] \n" "fmla v13.4s, v4.4s, v3.s[1] \n" "fmla v14.4s, v4.4s, v3.s[2] \n" "fmla v15.4s, v4.4s, v3.s[3] \n" "fmla v20.4s, v5.4s, v2.s[0] \n" "fmla v21.4s, v5.4s, v2.s[1] \n" "fmla v22.4s, v5.4s, v2.s[2] \n" "fmla v23.4s, v5.4s, v2.s[3] \n" "fmla v24.4s, v5.4s, v3.s[0] \n" "fmla v25.4s, v5.4s, v3.s[1] \n" "fmla v26.4s, v5.4s, v3.s[2] \n" "fmla v27.4s, v5.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v16.4s, v4.4s, v0.s[0] \n" 
"fmla v17.4s, v4.4s, v0.s[1] \n" "fmla v18.4s, v4.4s, v0.s[2] \n" "fmla v19.4s, v4.4s, v0.s[3] \n" "fmla v28.4s, v5.4s, v0.s[0] \n" "fmla v29.4s, v5.4s, v0.s[1] \n" "fmla v30.4s, v5.4s, v0.s[2] \n" "fmla v31.4s, v5.4s, v0.s[3] \n" "fmla v8.4s, v6.4s, v1.s[0] \n" "fmla v9.4s, v6.4s, v1.s[1] \n" "fmla v10.4s, v6.4s, v1.s[2] \n" "fmla v11.4s, v6.4s, v1.s[3] \n" "fmla v12.4s, v6.4s, v2.s[0] \n" "fmla v13.4s, v6.4s, v2.s[1] \n" "fmla v14.4s, v6.4s, v2.s[2] \n" "fmla v15.4s, v6.4s, v2.s[3] \n" "fmla v16.4s, v6.4s, v3.s[0] \n" "fmla v17.4s, v6.4s, v3.s[1] \n" "fmla v18.4s, v6.4s, v3.s[2] \n" "fmla v19.4s, v6.4s, v3.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v20.4s, v7.4s, v1.s[0] \n" "fmla v21.4s, v7.4s, v1.s[1] \n" "fmla v22.4s, v7.4s, v1.s[2] \n" "fmla v23.4s, v7.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v2.s[0] \n" "fmla v25.4s, v7.4s, v2.s[1] \n" "fmla v26.4s, v7.4s, v2.s[2] \n" "fmla v27.4s, v7.4s, v2.s[3] \n" "fmla v28.4s, v7.4s, v3.s[0] \n" "fmla v29.4s, v7.4s, v3.s[1] \n" "fmla v30.4s, v7.4s, v3.s[2] \n" "fmla v31.4s, v7.4s, v3.s[3] \n" "bne 0b \n" "shrn v8.4h, v8.4s, #16 \n" "shrn v9.4h, v9.4s, #16 \n" "shrn v10.4h, v10.4s, #16 \n" "shrn v11.4h, v11.4s, #16 \n" "shrn v12.4h, v12.4s, #16 \n" "shrn v13.4h, v13.4s, #16 \n" "shrn v14.4h, v14.4s, #16 \n" "shrn v15.4h, v15.4s, #16 \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "shrn v20.4h, v20.4s, #16 \n" "shrn v21.4h, v21.4s, #16 \n" "shrn v22.4h, v22.4s, #16 \n" "shrn v23.4h, v23.4s, #16 \n" "shrn v24.4h, v24.4s, #16 \n" "shrn v25.4h, v25.4s, #16 \n" "shrn v26.4h, v26.4s, #16 \n" "shrn v27.4h, v27.4s, #16 \n" "shrn v28.4h, v28.4s, #16 \n" "shrn v29.4h, v29.4s, #16 \n" "shrn v30.4h, v30.4s, #16 \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%1], #32 \n" "st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n" "st1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%1], #32 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" "st1 {v16.4h, 
v17.4h, v18.4h, v19.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr01) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr01), "r"(biasptr) // %10 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } for (; i+7<size; i+=8) { unsigned short* tmpptr = tmp.channel(i/12+(i%12)/8); const unsigned short* kptr01 = (const unsigned short*)kernel.channel(pp); int nn = inch;// inch always > 0 asm volatile( "ld1 {v0.4s, v1.4s}, [%10] \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "mov v20.16b, v0.16b \n" "mov v21.16b, v0.16b \n" "mov v22.16b, v0.16b \n" "mov v23.16b, v0.16b \n" "mov v24.16b, v1.16b \n" "mov v25.16b, v1.16b \n" "mov v26.16b, v1.16b \n" "mov v27.16b, v1.16b \n" "mov v28.16b, v1.16b \n" "mov v29.16b, v1.16b \n" "mov v30.16b, v1.16b \n" "mov v31.16b, v1.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n"// r0 r1 r2 r3 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%4], #32 \n"// w0011_01 "shll v8.4s, v8.4h, #16 \n" "shll v9.4s, v9.4h, #16 \n" "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n"// r4 r5 r6 r7 "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v20.4s, v8.4s, v4.s[0] \n" "fmla v21.4s, v8.4s, v5.s[0] \n" "fmla v22.4s, v8.4s, 
v6.s[0] \n" "fmla v23.4s, v8.4s, v7.s[0] \n" "fmla v24.4s, v9.4s, v0.s[0] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v26.4s, v9.4s, v2.s[0] \n" "fmla v27.4s, v9.4s, v3.s[0] \n" "fmla v28.4s, v9.4s, v4.s[0] \n" "fmla v29.4s, v9.4s, v5.s[0] \n" "fmla v30.4s, v9.4s, v6.s[0] \n" "fmla v31.4s, v9.4s, v7.s[0] \n" "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v19.4s, v10.4s, v3.s[1] \n" "fmla v20.4s, v10.4s, v4.s[1] \n" "fmla v21.4s, v10.4s, v5.s[1] \n" "fmla v22.4s, v10.4s, v6.s[1] \n" "fmla v23.4s, v10.4s, v7.s[1] \n" "fmla v24.4s, v11.4s, v0.s[1] \n" "fmla v25.4s, v11.4s, v1.s[1] \n" "fmla v26.4s, v11.4s, v2.s[1] \n" "fmla v27.4s, v11.4s, v3.s[1] \n" "fmla v28.4s, v11.4s, v4.s[1] \n" "fmla v29.4s, v11.4s, v5.s[1] \n" "fmla v30.4s, v11.4s, v6.s[1] \n" "fmla v31.4s, v11.4s, v7.s[1] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%4], #32 \n"// w2233_01 "shll v12.4s, v12.4h, #16 \n" "shll v13.4s, v13.4h, #16 \n" "shll v14.4s, v14.4h, #16 \n" "shll v15.4s, v15.4h, #16 \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v12.4s, v2.s[2] \n" "fmla v19.4s, v12.4s, v3.s[2] \n" "fmla v20.4s, v12.4s, v4.s[2] \n" "fmla v21.4s, v12.4s, v5.s[2] \n" "fmla v22.4s, v12.4s, v6.s[2] \n" "fmla v23.4s, v12.4s, v7.s[2] \n" "fmla v24.4s, v13.4s, v0.s[2] \n" "fmla v25.4s, v13.4s, v1.s[2] \n" "fmla v26.4s, v13.4s, v2.s[2] \n" "fmla v27.4s, v13.4s, v3.s[2] \n" "fmla v28.4s, v13.4s, v4.s[2] \n" "fmla v29.4s, v13.4s, v5.s[2] \n" "fmla v30.4s, v13.4s, v6.s[2] \n" "fmla v31.4s, v13.4s, v7.s[2] \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v14.4s, v2.s[3] \n" "fmla v19.4s, v14.4s, v3.s[3] \n" "fmla v20.4s, v14.4s, v4.s[3] \n" "fmla v21.4s, v14.4s, v5.s[3] \n" "fmla v22.4s, v14.4s, v6.s[3] \n" "fmla v23.4s, v14.4s, v7.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v24.4s, v15.4s, v0.s[3] \n" "fmla v25.4s, v15.4s, v1.s[3] \n" "fmla v26.4s, v15.4s, 
v2.s[3] \n" "fmla v27.4s, v15.4s, v3.s[3] \n" "fmla v28.4s, v15.4s, v4.s[3] \n" "fmla v29.4s, v15.4s, v5.s[3] \n" "fmla v30.4s, v15.4s, v6.s[3] \n" "fmla v31.4s, v15.4s, v7.s[3] \n" "bne 0b \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "shrn v20.4h, v20.4s, #16 \n" "shrn v21.4h, v21.4s, #16 \n" "shrn v22.4h, v22.4s, #16 \n" "shrn v23.4h, v23.4s, #16 \n" "shrn v24.4h, v24.4s, #16 \n" "shrn v25.4h, v25.4s, #16 \n" "shrn v26.4h, v26.4s, #16 \n" "shrn v27.4h, v27.4s, #16 \n" "shrn v28.4h, v28.4s, #16 \n" "shrn v29.4h, v29.4s, #16 \n" "shrn v30.4h, v30.4s, #16 \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" "st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr01) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr01), "r"(biasptr) // %10 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } for (; i+3<size; i+=4) { unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4); const unsigned short* kptr01 = (const unsigned short*)kernel.channel(pp); int nn = inch;// inch always > 0 asm volatile( "ld1 {v0.4s, v1.4s}, [%10] \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "mov v20.16b, v1.16b \n" "mov v21.16b, v1.16b \n" "mov v22.16b, v1.16b \n" "mov v23.16b, v1.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n"// r0 r1 r2 r3 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "prfm pldl1keep, [%4, #256] \n" 
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%4], #32 \n"// w0011_01 "shll v8.4s, v8.4h, #16 \n" "shll v9.4s, v9.4h, #16 \n" "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v20.4s, v9.4s, v0.s[0] \n" "fmla v21.4s, v9.4s, v1.s[0] \n" "fmla v22.4s, v9.4s, v2.s[0] \n" "fmla v23.4s, v9.4s, v3.s[0] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%4], #32 \n"// w2233_01 "shll v12.4s, v12.4h, #16 \n" "shll v13.4s, v13.4h, #16 \n" "shll v14.4s, v14.4h, #16 \n" "shll v15.4s, v15.4h, #16 \n" "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v19.4s, v10.4s, v3.s[1] \n" "fmla v20.4s, v11.4s, v0.s[1] \n" "fmla v21.4s, v11.4s, v1.s[1] \n" "fmla v22.4s, v11.4s, v2.s[1] \n" "fmla v23.4s, v11.4s, v3.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v12.4s, v2.s[2] \n" "fmla v19.4s, v12.4s, v3.s[2] \n" "fmla v20.4s, v13.4s, v0.s[2] \n" "fmla v21.4s, v13.4s, v1.s[2] \n" "fmla v22.4s, v13.4s, v2.s[2] \n" "fmla v23.4s, v13.4s, v3.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v14.4s, v2.s[3] \n" "fmla v19.4s, v14.4s, v3.s[3] \n" "fmla v20.4s, v15.4s, v0.s[3] \n" "fmla v21.4s, v15.4s, v1.s[3] \n" "fmla v22.4s, v15.4s, v2.s[3] \n" "fmla v23.4s, v15.4s, v3.s[3] \n" "bne 0b \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "shrn v20.4h, v20.4s, #16 \n" "shrn v21.4h, v21.4s, #16 \n" "shrn v22.4h, v22.4s, #16 \n" "shrn v23.4h, v23.4s, #16 \n" "st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n" "st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr01) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), 
"3"(tmpptr), "4"(kptr01), "r"(biasptr) // %10 : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); } for (; i+1<size; i+=2) { unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2); const unsigned short* kptr01 = (const unsigned short*)kernel.channel(pp); int nn = inch;// inch always > 0 asm volatile( "ld1 {v0.4s, v1.4s}, [%10] \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v1.16b \n" "mov v19.16b, v1.16b \n" "0: \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4h, v1.4h}, [%3], #16 \n"// r0 r1 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%4], #32 \n"// w0011_01 "shll v8.4s, v8.4h, #16 \n" "shll v9.4s, v9.4h, #16 \n" "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v9.4s, v0.s[0] \n" "fmla v19.4s, v9.4s, v1.s[0] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%4], #32 \n"// w2233_01 "shll v12.4s, v12.4h, #16 \n" "shll v13.4s, v13.4h, #16 \n" "shll v14.4s, v14.4h, #16 \n" "shll v15.4s, v15.4h, #16 \n" "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v11.4s, v0.s[1] \n" "fmla v19.4s, v11.4s, v1.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v13.4s, v0.s[2] \n" "fmla v19.4s, v13.4s, v1.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v15.4s, v0.s[3] \n" "fmla v19.4s, v15.4s, v1.s[3] \n" "bne 0b \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "st1 {v16.4h, v17.4h}, [%1], #16 \n" "st1 {v18.4h, v19.4h}, [%2], #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr01) // %4 : 
"0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr01), "r"(biasptr) // %10 : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19" ); } for (; i<size; i++) { unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2 + i%12%2); const unsigned short* kptr01 = (const unsigned short*)kernel.channel(pp); int nn = inch;// inch always > 0 asm volatile( "ld1 {v16.4s, v17.4s}, [%10] \n" "0: \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v0.4h}, [%3], #8 \n"// r0 "shll v0.4s, v0.4h, #16 \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%4], #32 \n"// w0011_01 "shll v8.4s, v8.4h, #16 \n" "shll v9.4s, v9.4h, #16 \n" "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%4], #32 \n"// w2233_01 "shll v12.4s, v12.4h, #16 \n" "shll v13.4s, v13.4h, #16 \n" "shll v14.4s, v14.4h, #16 \n" "shll v15.4s, v15.4h, #16 \n" "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v11.4s, v0.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v13.4s, v0.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v15.4s, v0.s[3] \n" "bne 0b \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "st1 {v16.4h}, [%1], #8 \n" "st1 {v17.4h}, [%2], #8 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr01) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr01), "r"(biasptr) // %10 : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17" ); } } #endif // __ARM_NEON && __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { unsigned short* outptr0 = top_blob.channel(p); const float zeros[4] = {0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? 
bias + p * 4 : zeros; int i=0; #if __aarch64__ for (; i+11<size; i+=12) { unsigned short* tmpptr = tmp.channel(i/12); const unsigned short* kptr0 = (const unsigned short*)kernel.channel(p/2+p%2); int nn = inch;// inch always > 0 asm volatile( "ld1 {v0.4s}, [%8] \n" "mov v8.16b, v0.16b \n" "mov v9.16b, v0.16b \n" "mov v10.16b, v0.16b \n" "mov v11.16b, v0.16b \n" "mov v12.16b, v0.16b \n" "mov v13.16b, v0.16b \n" "mov v14.16b, v0.16b \n" "mov v15.16b, v0.16b \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n"// w0123_0 "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n" "shll v20.4s, v20.4h, #16 \n" "shll v21.4s, v21.4h, #16 \n" "shll v22.4s, v22.4h, #16 \n" "shll v23.4s, v23.4h, #16 \n" "fmla v8.4s, v5.4s, v3.s[0] \n" "fmla v9.4s, v5.4s, v3.s[1] \n" "fmla v10.4s, v5.4s, v3.s[2] \n" "fmla v11.4s, v5.4s, v3.s[3] \n" "fmla v12.4s, v5.4s, v20.s[0] \n" "fmla v13.4s, v5.4s, v20.s[1] \n" "fmla v14.4s, v5.4s, v20.s[2] \n" "fmla v15.4s, v5.4s, v20.s[3] \n" "fmla v16.4s, v5.4s, v21.s[0] \n" "fmla v17.4s, v5.4s, v21.s[1] \n" "fmla v18.4s, v5.4s, v21.s[2] \n" "fmla v19.4s, v5.4s, v21.s[3] \n" "prfm pldl1keep, [%2, #256] 
\n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v8.4s, v6.4s, v22.s[0] \n" "fmla v9.4s, v6.4s, v22.s[1] \n" "fmla v10.4s, v6.4s, v22.s[2] \n" "fmla v11.4s, v6.4s, v22.s[3] \n" "fmla v12.4s, v6.4s, v23.s[0] \n" "fmla v13.4s, v6.4s, v23.s[1] \n" "fmla v14.4s, v6.4s, v23.s[2] \n" "fmla v15.4s, v6.4s, v23.s[3] \n" "fmla v16.4s, v6.4s, v24.s[0] \n" "fmla v17.4s, v6.4s, v24.s[1] \n" "fmla v18.4s, v6.4s, v24.s[2] \n" "fmla v19.4s, v6.4s, v24.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v7.4s, v25.s[0] \n" "fmla v9.4s, v7.4s, v25.s[1] \n" "fmla v10.4s, v7.4s, v25.s[2] \n" "fmla v11.4s, v7.4s, v25.s[3] \n" "fmla v12.4s, v7.4s, v26.s[0] \n" "fmla v13.4s, v7.4s, v26.s[1] \n" "fmla v14.4s, v7.4s, v26.s[2] \n" "fmla v15.4s, v7.4s, v26.s[3] \n" "fmla v16.4s, v7.4s, v27.s[0] \n" "fmla v17.4s, v7.4s, v27.s[1] \n" "fmla v18.4s, v7.4s, v27.s[2] \n" "fmla v19.4s, v7.4s, v27.s[3] \n" "bne 0b \n" "shrn v8.4h, v8.4s, #16 \n" "shrn v9.4h, v9.4s, #16 \n" "shrn v10.4h, v10.4s, #16 \n" "shrn v11.4h, v11.4s, #16 \n" "shrn v12.4h, v12.4s, #16 \n" "shrn v13.4h, v13.4s, #16 \n" "shrn v14.4h, v14.4s, #16 \n" "shrn v15.4h, v15.4s, #16 \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "st1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%1], #32 \n" "st1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%1], #32 \n" "st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27" ); } #endif for (; i+7<size; i+=8) { #if __aarch64__ unsigned short* tmpptr = tmp.channel(i/12+(i%12)/8); const 
unsigned short* kptr0 = (const unsigned short*)kernel.channel(p/2+p%2); #else unsigned short* tmpptr = tmp.channel(i/8); const unsigned short* kptr0 = (const unsigned short*)kernel.channel(p); #endif int nn = inch;// inch always > 0 #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%8] \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "mov v20.16b, v0.16b \n" "mov v21.16b, v0.16b \n" "mov v22.16b, v0.16b \n" "mov v23.16b, v0.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n"// r0 r1 r2 r3 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%3], #32 \n"// w0123 "shll v8.4s, v8.4h, #16 \n" "shll v9.4s, v9.4h, #16 \n" "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%2], #32 \n"// r4 r5 r6 r7 "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v20.4s, v8.4s, v4.s[0] \n" "fmla v21.4s, v8.4s, v5.s[0] \n" "fmla v22.4s, v8.4s, v6.s[0] \n" "fmla v23.4s, v8.4s, v7.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] \n" "fmla v20.4s, v9.4s, v4.s[1] \n" "fmla v21.4s, v9.4s, v5.s[1] \n" "fmla v22.4s, v9.4s, v6.s[1] \n" "fmla v23.4s, v9.4s, v7.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "fmla v20.4s, v10.4s, v4.s[2] \n" "fmla v21.4s, v10.4s, v5.s[2] \n" "fmla v22.4s, v10.4s, v6.s[2] \n" "fmla v23.4s, v10.4s, v7.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, 
v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "fmla v20.4s, v11.4s, v4.s[3] \n" "fmla v21.4s, v11.4s, v5.s[3] \n" "fmla v22.4s, v11.4s, v6.s[3] \n" "fmla v23.4s, v11.4s, v7.s[3] \n" "bne 0b \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "shrn v20.4h, v20.4s, #16 \n" "shrn v21.4h, v21.4s, #16 \n" "shrn v22.4h, v22.4s, #16 \n" "shrn v23.4h, v23.4s, #16 \n" "st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n" "st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); #else asm volatile( "vld1.f32 {d0-d1}, [%8] \n" "vmov q8, q0 \n" "vmov q9, q0 \n" "vmov q10, q0 \n" "vmov q11, q0 \n" "vmov q12, q0 \n" "vmov q13, q0 \n" "vmov q14, q0 \n" "vmov q15, q0 \n" "0: \n" "pld [%2, #256] \n" "vld1.u16 {d4-d7}, [%2]! \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "pld [%3, #256] \n" "vld1.u16 {d12-d15}, [%3]! \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "vmla.f32 q12, q4, d2[0] \n" "vmla.f32 q13, q4, d2[1] \n" "vmla.f32 q14, q4, d3[0] \n" "vmla.f32 q15, q4, d3[1] \n" "vmla.f32 q8, q5, d4[0] \n" "vmla.f32 q9, q5, d4[1] \n" "vmla.f32 q10, q5, d5[0] \n" "vmla.f32 q11, q5, d5[1] \n" "vmla.f32 q12, q5, d6[0] \n" "vmla.f32 q13, q5, d6[1] \n" "vmla.f32 q14, q5, d7[0] \n" "vmla.f32 q15, q5, d7[1] \n" "pld [%2, #256] \n" "vld1.u16 {d4-d7}, [%2]! 
\n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q8, q6, d0[0] \n" "vmla.f32 q9, q6, d0[1] \n" "vmla.f32 q10, q6, d1[0] \n" "vmla.f32 q11, q6, d1[1] \n" "vmla.f32 q12, q6, d2[0] \n" "vmla.f32 q13, q6, d2[1] \n" "vmla.f32 q14, q6, d3[0] \n" "vmla.f32 q15, q6, d3[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d4[0] \n" "vmla.f32 q9, q7, d4[1] \n" "vmla.f32 q10, q7, d5[0] \n" "vmla.f32 q11, q7, d5[1] \n" "vmla.f32 q12, q7, d6[0] \n" "vmla.f32 q13, q7, d6[1] \n" "vmla.f32 q14, q7, d7[0] \n" "vmla.f32 q15, q7, d7[1] \n" "bne 0b \n" "vshrn.u32 d16, q8, #16 \n" "vshrn.u32 d17, q9, #16 \n" "vshrn.u32 d18, q10, #16 \n" "vshrn.u32 d19, q11, #16 \n" "vshrn.u32 d20, q12, #16 \n" "vshrn.u32 d21, q13, #16 \n" "vshrn.u32 d22, q14, #16 \n" "vshrn.u32 d23, q15, #16 \n" "vstm %1!, {d16-d23} \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif } for (; i+3<size; i+=4) { #if __aarch64__ unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4); const unsigned short* kptr0 = (const unsigned short*)kernel.channel(p/2+p%2); #else unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4); const unsigned short* kptr0 = (const unsigned short*)kernel.channel(p); #endif int nn = inch;// inch always > 0 #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%8] \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n"// r0 r1 r2 r3 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%3], #32 \n"// w0123 "shll v8.4s, v8.4h, #16 \n" "shll v9.4s, 
v9.4h, #16 \n" "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "bne 0b \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19" ); #else asm volatile( "vld1.f32 {d0-d1}, [%8] \n" "vmov q8, q0 \n" "vmov q9, q0 \n" "vmov q10, q0 \n" "vmov q11, q0 \n" "0: \n" "pld [%2, #256] \n" "vld1.u16 {d4-d7}, [%2]! \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "pld [%3, #256] \n" "vld1.u16 {d12-d15}, [%3]! 
\n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d2[0] \n" "vmla.f32 q10, q4, d4[0] \n" "vmla.f32 q11, q4, d6[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q10, q5, d4[1] \n" "vmla.f32 q11, q5, d6[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q9, q6, d3[0] \n" "vmla.f32 q10, q6, d5[0] \n" "vmla.f32 q11, q6, d7[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d1[1] \n" "vmla.f32 q9, q7, d3[1] \n" "vmla.f32 q10, q7, d5[1] \n" "vmla.f32 q11, q7, d7[1] \n" "bne 0b \n" "vshrn.u32 d16, q8, #16 \n" "vshrn.u32 d17, q9, #16 \n" "vshrn.u32 d18, q10, #16 \n" "vshrn.u32 d19, q11, #16 \n" "vst1.u16 {d16-d19}, [%1]! \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11" ); #endif } for (; i+1<size; i+=2) { #if __aarch64__ unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2); const unsigned short* kptr0 = (const unsigned short*)kernel.channel(p/2+p%2); #else unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4 + (i%4)/2); const unsigned short* kptr0 = (const unsigned short*)kernel.channel(p); #endif int nn = inch;// inch always > 0 #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%8] \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "0: \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4h, v1.4h}, [%2], #16 \n"// r0 r1 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%3], #32 \n"// w0123 "shll v8.4s, v8.4h, #16 \n" "shll v9.4s, v9.4h, #16 \n" "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" 
"fmla v17.4s, v10.4s, v1.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "bne 0b \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "st1 {v16.4h, v17.4h}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v16", "v17" ); #else asm volatile( "vld1.f32 {d0-d1}, [%8] \n" "vmov q8, q0 \n" "vmov q9, q0 \n" "0: \n" "pld [%2, #128] \n" "vld1.u16 {d4-d5}, [%2 :128]! \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "pld [%3, #256] \n" "vld1.u16 {d12-d15}, [%3]! \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d2[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q9, q6, d3[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d1[1] \n" "vmla.f32 q9, q7, d3[1] \n" "bne 0b \n" "vshrn.u32 d16, q8, #16 \n" "vshrn.u32 d17, q9, #16 \n" "vst1.u16 {d16-d17}, [%1 :128]! 
\n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9" ); #endif } for (; i<size; i++) { #if __aarch64__ unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2 + i%12%2); const unsigned short* kptr0 = (const unsigned short*)kernel.channel(p/2+p%2); #else unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4 + (i%4)/2 + i%2); const unsigned short* kptr0 = (const unsigned short*)kernel.channel(p); #endif int nn = inch;// inch always > 0 #if __aarch64__ asm volatile( "ld1 {v16.4s}, [%8] \n" "0: \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v0.4h}, [%2], #8 \n"// r0 "shll v0.4s, v0.4h, #16 \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%3], #32 \n"// w0123 "shll v8.4s, v8.4h, #16 \n" "shll v9.4s, v9.4h, #16 \n" "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "bne 0b \n" "shrn v16.4h, v16.4s, #16 \n" "st1 {v16.4h}, [%1], #8 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v16" ); #else asm volatile( "vld1.f32 {d16-d17}, [%8] \n" "0: \n" "pld [%2, #64] \n" "vld1.u16 {d1}, [%2 :64]! \n" "vshll.u16 q0, d1, #16 \n" "pld [%3, #256] \n" "vld1.u16 {d12-d15}, [%3]! \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q8, q5, d0[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q8, q7, d1[1] \n" "bne 0b \n" "vshrn.u32 d16, q8, #16 \n" "vst1.u16 {d16}, [%1 :64]! 
\n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8" ); #endif } } // // NOTE sgemm // for (; p<outch; p++) // { // Mat out0 = top_blob.channel(p); // // const short bias0 = bias ? bias[p] : 0.f; // // unsigned short* outptr0 = out0; // // for (int i=0; i<size; i++) // { // short sum = bias0; // // const unsigned short* kptr = _kernel.channel(p); // // for (int q=0; q<inch; q++) // { // const unsigned short* img0 = bottom_blob.channel(q); // // sum += img0[i] * kptr[0]; // kptr ++; // } // // outptr0[i] = sum; // } // } } static void conv1x1s2_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int channels = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; const int tailstep = (w - 2*outw + w) * 4; Mat bottom_blob_shrinked; bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<channels; p++) { const unsigned short* r0 = bottom_blob.channel(p); unsigned short* outptr = bottom_blob_shrinked.channel(p); for (int i = 0; i < outh; i++) { int j = 0; for (; j+3 < outw; j+=4) { uint16x4_t _v0 = vld1_u16(r0); uint16x4_t _v1 = vld1_u16(r0+8); uint16x4_t _v2 = vld1_u16(r0+16); uint16x4_t _v3 = vld1_u16(r0+24); uint16x8_t _v01 = vcombine_u16(_v0, _v1); uint16x8_t _v23 = vcombine_u16(_v2, _v3); vst1q_u16(outptr, _v01); vst1q_u16(outptr+8, _v23); r0 += 32; outptr += 16; } for (; j+1 < outw; j+=2) { uint16x4_t _v0 = vld1_u16(r0); uint16x4_t _v1 = vld1_u16(r0+8); uint16x8_t _v = vcombine_u16(_v0, _v1); vst1q_u16(outptr, _v); r0 += 16; outptr += 8; } for (; j < outw; j++) { uint16x4_t _v = vld1_u16(r0); vst1_u16(outptr, _v); r0 += 8; outptr += 4; } r0 
+= tailstep; } } conv1x1s1_sgemm_pack4_bf16s_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt); }
// ==== file boundary: data_provider.h ====
#ifndef DATAPROVIDER_H__ #define DATAPROVIDER_H__ #include <boost/interprocess/allocators/allocator.hpp> #include <boost/interprocess/containers/vector.hpp> #include <boost/interprocess/managed_shared_memory.hpp> #include <boost/interprocess/sync/named_mutex.hpp> #include <opencv2/opencv.hpp> #include <chrono> #include <sstream> #include <string> #include <thread> #include <vector> #include "net_config.h" #include "misc.h" namespace caffe2 { using namespace boost::interprocess; template<typename T> class DataProvider { public: DataProvider(const int batch_size, const string& data_order, const bool dummy_data, const int iterations, const string& net_conf, const string& shared_memory_option, const string& numa_id, const bool use_index); DataProvider(const vector<string>& imgNames, const vector<int>& labels, const int batch_size, const string& data_order, const bool dummy_data, const int iterations, const string& net_conf, const string& shared_memory_option, const string& numa_id, const bool use_index); DataProvider(const string& file_list, const string& image_path, const string& label_path, const int batch_size, const string& data_order, const bool dummy_data, const int iterations, const string& net_conf, const string& shared_memory_option, const string& numa_id, const bool use_index); ~DataProvider(); T* get_data() { return inputData_; } const vector<int>& get_labels() { return labels_; } const int get_iterations() { return iterations_; } void clean_shared_memory(const string& numa_id); void load_sample(size_t* samples, size_t sample_size, bool dummy_data, const string& file_list, const string& image_path, const string& label_path); void load_sample(size_t* sample, size_t sample_size); private: std::unique_ptr<NetConf> net_conf_; T* inputData_; managed_shared_memory managed_shm_; int batchSize_ = 1; int iterations_ = 1; int sample_offset_ = 0; unsigned long long inputSize_ = 0; string dataOrder_ = "NCHW"; string sharedMemory_; string numaId_; vector<int> 
input_shape_; vector<string> imgNames_; vector<int> labels_; vector<T> inputImgs_; // use for mlperf random index load sample const size_t IMAGENET_IMAGE_SIZE = 50000; bool useIndex_ = false; vector<T> loadBuffer_; void ParseImageLabel(const string& file_list, const string& image_path, const string& label_path, const size_t sample_size, const bool dummy_data); void ParseImageLabel(const string& file_list); // methods for preprocessing void SetMeanScale(); void CenterCrop(cv::Mat* sample_resized, cv::Mat* sample_roi); void ResizeWithAspect(cv::Mat* sample, cv::Mat* sample_resized); void ResizeWithRescale(cv::Mat* sample, cv::Mat* sample_resized); void PreprocessSingleIteration(T* inputImgs, const vector<string>& imgNames); void PreprocessUsingCVMethod(T* inputImgs, const vector<string>& imgNames); void Preprocess(const bool dummy, T* inputImgs, const vector<string>& imgNames); // methods for memory used void WrapInput(const bool dummy_data); void WrapSHMInput(const bool dummy_data); void CreateUseSharedMemory(const bool dummy_data); void DirectUseSharedMemory(const bool dummy_data); void WrapLocalInput(const bool dummy_data); void CleanSharedMemory(const string& numa_id); }; template<typename T> DataProvider<T>::DataProvider(const vector<string>& imgNames,const vector<int>& labels, const int batch_size, const string& data_order, const bool dummy_data, const int iterations, const string& net_conf, const string& shared_memory_option, const string& numa_id, const bool use_index) : inputData_(nullptr), batchSize_(batch_size), iterations_(iterations), dataOrder_(data_order), sharedMemory_(shared_memory_option), numaId_(numa_id), imgNames_(imgNames), labels_(labels), useIndex_(use_index) { net_conf_ = get_net_conf(net_conf); WrapInput(dummy_data); } template<typename T> void DataProvider<T>::load_sample(size_t* samples, size_t sample_size, bool dummy_data, const string& file_list, const string& image_path, const string& label_path) { size_t imagenet_size = 50000; 
ParseImageLabel(file_list, image_path, label_path, imagenet_size, dummy_data); vector<string> sample_names; vector<int> sample_labels; auto use_size = sample_size % batchSize_ ? (sample_size / batchSize_ + 1 ) * batchSize_ : sample_size; auto flexible_size = sample_size < imagenet_size ? sample_size : use_size; for (size_t i = 0; i < flexible_size; ++i) { sample_names.push_back(imgNames_[samples[i % sample_size]]); sample_labels.push_back(labels_[samples[i % sample_size]]); } if (sample_size != batchSize_ * iterations_) { LOG(ERROR) << "batchsize * iteration is not equal to sampled images"; iterations_ = sample_size / batchSize_; sample_names.resize(batchSize_ * iterations_); sample_labels.resize(batchSize_ * iterations_); } imgNames_ = sample_names; labels_ = sample_labels; WrapInput(dummy_data); } template<typename T> void DataProvider<T>::load_sample(size_t* samples, size_t sample_size) { if (useIndex_) { vector<int> sample_labels(batchSize_ * iterations_, 0); if (sharedMemory_ == "CREATE_USE_SHM" || sharedMemory_ == "USE_LOCAL") { loadBuffer_.resize(iterations_ * batchSize_ * inputSize_, 0); if (sample_size != batchSize_ * iterations_) LOG(FATAL) << "sample size is not equal to batchsize * iterations"; #pragma omp parallel for for (size_t i = 0; i < sample_size; ++i) { std::memcpy(loadBuffer_.data() + i * inputSize_, inputData_ + samples[i] * inputSize_, inputSize_ * sizeof(T)); sample_labels[i] = labels_[samples[i]]; } std::memcpy(inputData_, loadBuffer_.data(), batchSize_ * iterations_ * inputSize_ * sizeof(T)); labels_ = sample_labels; if (sharedMemory_ == "CREATE_USE_SHM") { *(managed_shm_.find<bool>(("SharedMemorySwap" + numaId_).c_str()).first) = true; } } else { int temp_status = 0; // check whether images has been preprocessed #pragma omp parallel for for (size_t i = 0; i < sample_size; ++i) { sample_labels[i] = labels_[samples[i]]; } labels_ = sample_labels; while (!(*(managed_shm_.find<bool>(("SharedMemorySwap" + numaId_).c_str()).first))) { if 
(temp_status == 0) { LOG(INFO) << "image swapping not ready, wait image memory swapping completed"; temp_status++; } std::this_thread::sleep_for(std::chrono::milliseconds(1)); } } } } template<typename T> DataProvider<T>::DataProvider(const int batch_size, const string& data_order, const bool dummy_data, const int iterations, const string& net_conf, const string& shared_memory_option, const string& numa_id, const bool use_index) : inputData_(nullptr), batchSize_(batch_size), iterations_(iterations), dataOrder_(data_order), sharedMemory_(shared_memory_option), numaId_(numa_id), useIndex_(use_index) { net_conf_ = get_net_conf(net_conf); } template<typename T> DataProvider<T>::DataProvider(const string& file_list, const string& image_path, const string& label_path, const int batch_size, const string& data_order, const bool dummy_data, const int iterations, const string& net_conf, const string& shared_memory_option, const string& numa_id, const bool use_index) : inputData_(nullptr), batchSize_(batch_size), iterations_(iterations), dataOrder_(data_order), sharedMemory_(shared_memory_option), numaId_(numa_id), useIndex_(use_index) { net_conf_ = get_net_conf(net_conf); size_t parse_size = batchSize_ * iterations_ ; if ((batchSize_ * iterations_ < IMAGENET_IMAGE_SIZE) && useIndex_) parse_size = IMAGENET_IMAGE_SIZE; ParseImageLabel(file_list, image_path, label_path, parse_size, dummy_data); WrapInput(dummy_data); } template<typename T> DataProvider<T>::~DataProvider() { } template<typename T> void DataProvider<T>::clean_shared_memory(const string& numa_id) { CleanSharedMemory(numa_id); } template<typename T> void DataProvider<T>::CleanSharedMemory(const string& numa_id) { shared_memory_object::remove(("SharedMemory" + numa_id).c_str()); } template<typename T> void DataProvider<T>::CreateUseSharedMemory(const bool dummy_data) { const size_t TOTAL_IMAGE_SIZE = 50001 * 8; managed_shm_ = managed_shared_memory(open_or_create, ("SharedMemory" + numaId_).c_str() , TOTAL_IMAGE_SIZE 
* inputSize_); // check whether shared memory has prepared target image data, if not, prepare target data. auto shared_image_size = managed_shm_.find_or_construct<int>(("SharedImageSize" + numaId_).c_str())(0); managed_shm_.find_or_construct<bool>(("SharedMemorySwap" + numaId_).c_str())(false); const allocator<T, managed_shared_memory::segment_manager> alloc_inst(managed_shm_.get_segment_manager()); auto shared_input_images = managed_shm_.find_or_construct<vector<T, allocator<T,managed_shared_memory::segment_manager>>>(("SharedInputImgs" + numaId_).c_str())(alloc_inst); // do preprocess only when shared memory don't has enough images buffered. if (*shared_image_size != iterations_ * batchSize_) { size_t parse_size = batchSize_ * iterations_ ; if ((batchSize_ * iterations_ < IMAGENET_IMAGE_SIZE) && useIndex_) parse_size = IMAGENET_IMAGE_SIZE; shared_input_images->resize(parse_size * inputSize_); inputData_ = shared_input_images->data(); Preprocess(dummy_data, inputData_, imgNames_); *shared_image_size = iterations_ * batchSize_; } else { inputData_ = shared_input_images->data(); } } template<typename T> void DataProvider<T>::DirectUseSharedMemory(const bool dummy_data) { int temp_status = 0; while (temp_status == 0) { try { managed_shm_ = managed_shared_memory(open_only, ("SharedMemory" + numaId_).c_str()); temp_status = 1; } catch(boost::interprocess::interprocess_exception) { LOG(INFO) << "check whether shared memory created, use CREATE_USE_SHM in command line"; std::this_thread::sleep_for(std::chrono::milliseconds(100)); } } temp_status = 0; // check whether images has been preprocessed while (*(managed_shm_.find<int>(("SharedImageSize" + numaId_).c_str()).first) != batchSize_ * iterations_) { if (temp_status == 0) { LOG(INFO) << "shared image size not satisfied, wait preprocess completed"; temp_status++; } std::this_thread::sleep_for(std::chrono::milliseconds(10)); } auto shared_input_images = managed_shm_.find<vector<T, allocator<T, 
managed_shared_memory::segment_manager>>>(("SharedInputImgs" + numaId_).c_str()); inputData_ = shared_input_images.first->data(); } template<typename T> void DataProvider<T>::WrapSHMInput(const bool dummy_data) { LOG(INFO) << "use shared memory: " << sharedMemory_; if (sharedMemory_ == "CREATE_USE_SHM") { CleanSharedMemory(numaId_); CreateUseSharedMemory(dummy_data); } else { DirectUseSharedMemory(dummy_data); } } template<typename T> void DataProvider<T>::WrapLocalInput(const bool dummy_data) { LOG(INFO) << "use local memory"; if (dummy_data) inputImgs_.resize(batchSize_ * inputSize_, 0); else inputImgs_.resize(iterations_ * batchSize_ * inputSize_, 0); inputData_ = inputImgs_.data(); Preprocess(dummy_data, inputData_, imgNames_); } template<typename T> void DataProvider<T>::WrapInput(const bool dummy_data) { inputSize_ = net_conf_->channels * net_conf_->height * net_conf_->width; if (sharedMemory_ == "USE_LOCAL") { WrapLocalInput(dummy_data); } else { WrapSHMInput(dummy_data); } } template<typename T> void DataProvider<T>::SetMeanScale() { } template<typename T> void DataProvider<T>::ResizeWithAspect(cv::Mat* sample, cv::Mat* sample_resized) { auto scale = net_conf_->aspect_scale; auto new_height = static_cast<int>(100. * net_conf_->height / scale); auto new_width = static_cast<int>(100. * net_conf_->width / scale); auto inter_pol = net_conf_->net_name == "resnet50" ? 
cv::INTER_AREA: cv::INTER_LINEAR; if ((*sample).rows > (*sample).cols) { auto res = static_cast<int>((*sample).rows * new_width / (*sample).cols); cv::resize((*sample), (*sample_resized), cv::Size(new_width, res), (0, 0), (0, 0), inter_pol); } else { auto res = static_cast<int>((*sample).cols * new_height / (*sample).rows); cv::resize((*sample), (*sample_resized), cv::Size(res, new_height), (0, 0), (0, 0), inter_pol); } } // resize image using rescale template<typename T> void DataProvider<T>::ResizeWithRescale(cv::Mat* sample, cv::Mat* sample_rescale) { auto aspect = static_cast<float>((*sample).cols) / (*sample).rows; if (aspect > 1) { auto res = static_cast<int>(net_conf_->rescale_size * aspect); cv::resize((*sample), (*sample_rescale), cv::Size(res, net_conf_->rescale_size)); } else { auto res = static_cast<int>(net_conf_->rescale_size / aspect); cv::resize((*sample), (*sample_rescale), cv::Size(net_conf_->rescale_size, res)); } } template<typename T> void DataProvider<T>::CenterCrop(cv::Mat* sample_resized, cv::Mat* sample_roi) { int x = (*sample_resized).cols; int y = (*sample_resized).rows; int startx = static_cast<int>(std::floor(x * 0.5 - net_conf_->width * 0.5)); int starty = static_cast<int>(std::floor(y * 0.5 - net_conf_->height * 0.5)); cv::Rect roi(startx, starty, net_conf_->width, net_conf_->height); // roi image (*sample_roi) = (*sample_resized)(roi); } template<typename T> void DataProvider<T>::PreprocessUsingCVMethod(T* inputImgs, const vector<string>& imgNames) { // wrap and process image files. 
cv::Mat mean(net_conf_->width, net_conf_->height, CV_32FC3, cv::Scalar(net_conf_->mean_value[0], net_conf_->mean_value[1], net_conf_->mean_value[2])); cv::Mat scale(net_conf_->width, net_conf_->height, CV_32FC3, cv::Scalar(net_conf_->scale, net_conf_->scale, net_conf_->scale)); bool quantized_ = false; if (sizeof(T) == sizeof(char)) quantized_ = true; int converted_type; if (quantized_) { if (dataOrder_ == "NCHW") converted_type = CV_8SC1; else if (dataOrder_ == "NHWC") converted_type = CV_8SC3; } else { if (dataOrder_ == "NCHW") converted_type = CV_32FC1; else if (dataOrder_ == "NHWC") converted_type = CV_32FC3; } #pragma omp parallel for for (size_t i = 0; i < imgNames.size(); ++i) { auto input_data = inputImgs + i * inputSize_; cv::Mat img = cv::imread(imgNames[i]); // convert the input image to the input image format of the network. cv::Mat sample; if (img.channels() == 3 && net_conf_->channels == 1) cv::cvtColor(img, sample, cv::COLOR_BGR2GRAY); else if (img.channels() == 4 && net_conf_->channels == 1) cv::cvtColor(img, sample, cv::COLOR_BGRA2GRAY); else if (img.channels() == 4 && net_conf_->channels == 3) cv::cvtColor(img, sample, cv::COLOR_BGRA2BGR); else if (img.channels() == 1 && net_conf_->channels == 3) cv::cvtColor(img, sample, cv::COLOR_GRAY2BGR); else sample = img; cv::Mat sample_resized; cv::Mat sample_roi; if (net_conf_->preprocess_method == "ResizeWithAspect") ResizeWithAspect(&sample, &sample_resized); else ResizeWithRescale(&sample, &sample_resized); CenterCrop(&sample_resized, &sample_roi); cv::Mat sample_float; if (net_conf_->channels == 3) { sample_roi.convertTo(sample_float, CV_32FC3); } else sample_roi.convertTo(sample_float, CV_32FC1); cv::Mat sample_subtract, sample_normalized; if (net_conf_->net_name=="resnet50") { cv::subtract(sample_float, mean, sample_subtract); cv::multiply(sample_subtract, scale, sample_normalized); } else if (net_conf_->net_name=="mobilenetv1") { cv::subtract(sample_float, mean, sample_subtract); 
cv::divide(sample_subtract, mean, sample_normalized); } vector<cv::Mat> input_channels; if (net_conf_->bgr2rgb) cv::cvtColor(sample_normalized, sample_normalized, cv::COLOR_RGB2BGR); if (quantized_) sample_normalized.convertTo(sample_normalized, converted_type, 1.0 / net_conf_->input_scale); if (dataOrder_ == "NCHW") { for (auto j = 0; j < net_conf_->channels; ++j) { cv::Mat channel(net_conf_->height, net_conf_->width, converted_type, input_data); input_channels.push_back(channel); input_data += net_conf_->width * net_conf_->height; } /* This operation will write the separate BGR planes directly to the * input layer of the network because it is wrapped by the cv::Mat * objects in input_channels. */ cv::split(sample_normalized, input_channels); } else if (dataOrder_ == "NHWC") { cv::Mat channel(net_conf_->height, net_conf_->width, converted_type, input_data); sample_normalized.copyTo(channel); } // add zero_point 128 to u8 format auto u8_input_opt_option = getenv("U8_INPUT_OPT"); if (quantized_ && (u8_input_opt_option != NULL) && (atoi(u8_input_opt_option) != 0)) for(size_t i = 0; i < inputSize_; ++i) input_data[i] += 128; } } // preprocess the img according to batch_size * iterations template<typename T> void DataProvider<T>::Preprocess(const bool dummy, T* inputImgs, const vector<string>& imgNames) { if (!dummy) { LOG(INFO) << "this process will preprocess images"; PreprocessUsingCVMethod(inputImgs, imgNames); } else { // only use one batch dummy for (int i = 0; i < batchSize_ * inputSize_; ++i) inputImgs[i] = static_cast<T>(std::rand()) / RAND_MAX; } } // preprocess the img according to batch size template<typename T> void DataProvider<T>::PreprocessSingleIteration(T* inputImgs, const vector<string>& imgNames) { // wrap and process image files. 
int img_size = net_conf_->channels * net_conf_->height * net_conf_->width; cv::Mat float_img; for (size_t i = 0; i < imgNames.size(); ++i) { cv::Mat raw_img = cv::imread(imgNames[i]); cv::Mat resized_img; cv::resize(raw_img, resized_img, cv::Size(net_conf_->width, net_conf_->height), 0, 0, cv::INTER_LINEAR); resized_img.convertTo(float_img, CV_32FC3); int tran_c = 0; int index = 0; for (int h = 0; h < net_conf_->height; ++h) { for (int w = 0; w < net_conf_->width; ++w) { for ( int c = 0; c < net_conf_->channels; ++c) { tran_c = net_conf_->bgr2rgb ? (2-c) : c; if (dataOrder_ == "NHWC") { index = img_size * i + h * net_conf_->width * net_conf_->channels + w * net_conf_->channels + c; } else if (dataOrder_ == "NCHW") { index = img_size * i + c * net_conf_->width * net_conf_->height + h * net_conf_->width + w; } inputImgs[index] = static_cast<T>((float_img.ptr<cv::Vec3f>(h)[w][tran_c] - net_conf_->mean_value[c]) * net_conf_->scale); } } } } } template<typename T> void DataProvider<T>::ParseImageLabel(const string& file_list) { // wrap and process image files. std::ifstream image_file(file_list); string val; while (getline(image_file, val)) { auto pos = val.find(" "); auto label = std::atoi(val.substr(pos+1).c_str()); labels_.push_back(label); imgNames_.push_back(val.substr(0, pos)); if (imgNames_.size() == batchSize_ * iterations_) break; } image_file.close(); if (imgNames_.size() < batchSize_ * iterations_) { LOG(ERROR) << "check val.txt to prepare proper quantity of images!"; LOG(FATAL) << "batch * iterations_ size is too large!"; } } template<typename T> void DataProvider<T>::ParseImageLabel(const string& file_list, const string& image_path, const string& label_path, const size_t sample_size, const bool dummy_data) { if (dummy_data) { LOG(INFO) << "dummy data will not parse the image"; labels_.resize(sample_size); return; } string val; // wrap and process image files. 
if (!file_list.empty() || (!label_path.empty() && !image_path.empty())) { string file_name; if (!file_list.empty()) file_name = file_list; else file_name = label_path; std::ifstream image_file(file_name); while (getline(image_file, val)) { auto pos = val.find(" "); auto label = std::atoi(val.substr(pos + 1).c_str()); labels_.push_back(label); if (!file_list.empty()) { imgNames_.push_back(val.substr(0, pos)); } else { string image_val = val.substr(0, pos); auto image_pos = (image_val.find_last_of("/")); imgNames_.push_back(image_path + image_val.substr(image_pos + 1)); } if (imgNames_.size() == sample_size) break; } image_file.close(); } else if (image_path == "") { LOG(FATAL) << "image path should be given!"; } else { if (label_path == "") { LOG(WARNING) << "label path not given, accuracy not caculated!"; DIR* image_dir = opendir(image_path.c_str()); if (image_dir == nullptr) LOG(FATAL) << "can't read image path!"; struct dirent* file_name; int filter_dir = 0; while((file_name = readdir(image_dir)) != nullptr) { // linux dir will read dir "." and ".." as file_name, so filter that if ((filter_dir++) < 2) continue; imgNames_.push_back(image_path + file_name->d_name); if (imgNames_.size() == sample_size) break; } } } if (imgNames_.size() < sample_size) { LOG(ERROR) << "image size is " << imgNames_.size() << " sample size is " << sample_size; size_t append_count = sample_size - imgNames_.size(); size_t real_image_size = imgNames_.size(); for (size_t i = 0; i < append_count; ++i) { imgNames_.push_back(imgNames_[i % real_image_size]); labels_.push_back(labels_[i % real_image_size]); } return; } } } // using namespace caffe2 #endif // DATAPROVIDER_H__
pack.c
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * Matrix re-packing (transpose) helpers, one pair per value type
 * (s = float, d = double, c = ALPHA_Complex8, z = ALPHA_Complex16).
 *
 *   pack_matrix_col2row_*: element (r, c) is read from
 *       (&X[index2(c, 0, ldX)])[r]  and stored to  Y[index2(r, c, ldY)].
 *   pack_matrix_row2col_*: element (r, c) is read from
 *       (&X[index2(r, 0, ldX)])[c]  and stored to  Y[index2(c, r, ldY)].
 *
 * The outer dimension is processed in blocks of 4 so every inner-loop
 * iteration writes 4 consecutive destination elements (cache-friendly and
 * easy for the compiler to vectorize).  This replaces the previous
 * hand-unrolled 8/16-wide bodies, which carried large blocks of
 * commented-out dead code, unused num_threads locals in the serial
 * variants, and remainder loops that redundantly rewrote rows already
 * covered by the blocked loop.  Only the row2col float/double variants
 * were (and still are) parallelized with OpenMP.
 */

void pack_matrix_col2row_s(const ALPHA_INT rowX, const ALPHA_INT colX, const float *X, const ALPHA_INT ldX, float *Y, ALPHA_INT ldY)
{
    const ALPHA_INT cfull = (colX / 4) * 4; /* columns covered by full 4-wide blocks */
    for (ALPHA_INT c = 0; c < cfull; c += 4)
    {
        const float *x0 = &X[index2(c, 0, ldX)];
        const float *x1 = &X[index2(c + 1, 0, ldX)];
        const float *x2 = &X[index2(c + 2, 0, ldX)];
        const float *x3 = &X[index2(c + 3, 0, ldX)];
        for (ALPHA_INT r = 0; r < rowX; ++r)
        {
            float *dst = &Y[index2(r, c, ldY)];
            dst[0] = x0[r];
            dst[1] = x1[r];
            dst[2] = x2[r];
            dst[3] = x3[r];
        }
    }
    for (ALPHA_INT c = cfull; c < colX; ++c)
    {
        const float *x0 = &X[index2(c, 0, ldX)];
        for (ALPHA_INT r = 0; r < rowX; ++r)
            Y[index2(r, c, ldY)] = x0[r];
    }
}

void pack_matrix_row2col_s(const ALPHA_INT rowX, const ALPHA_INT colX, const float *X, const ALPHA_INT ldX, float *Y, ALPHA_INT ldY)
{
    /* cap the thread count at 8, as the original tuning did */
    const ALPHA_INT num_threads = 8 > alpha_get_thread_num() ? alpha_get_thread_num() : 8;
    const ALPHA_INT rfull = (rowX / 4) * 4; /* rows covered by full 4-wide blocks */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT r = 0; r < rfull; r += 4)
    {
        const float *x0 = &X[index2(r, 0, ldX)];
        const float *x1 = &X[index2(r + 1, 0, ldX)];
        const float *x2 = &X[index2(r + 2, 0, ldX)];
        const float *x3 = &X[index2(r + 3, 0, ldX)];
        for (ALPHA_INT c = 0; c < colX; ++c)
        {
            float *dst = &Y[index2(c, r, ldY)];
            dst[0] = x0[c];
            dst[1] = x1[c];
            dst[2] = x2[c];
            dst[3] = x3[c];
        }
    }
    for (ALPHA_INT r = rfull; r < rowX; ++r)
    {
        const float *x0 = &X[index2(r, 0, ldX)];
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
        for (ALPHA_INT c = 0; c < colX; ++c)
            Y[index2(c, r, ldY)] = x0[c];
    }
}

void pack_matrix_col2row_d(const ALPHA_INT rowX, const ALPHA_INT colX, const double *X, const ALPHA_INT ldX, double *Y, ALPHA_INT ldY)
{
    const ALPHA_INT cfull = (colX / 4) * 4;
    for (ALPHA_INT c = 0; c < cfull; c += 4)
    {
        const double *x0 = &X[index2(c, 0, ldX)];
        const double *x1 = &X[index2(c + 1, 0, ldX)];
        const double *x2 = &X[index2(c + 2, 0, ldX)];
        const double *x3 = &X[index2(c + 3, 0, ldX)];
        for (ALPHA_INT r = 0; r < rowX; ++r)
        {
            double *dst = &Y[index2(r, c, ldY)];
            dst[0] = x0[r];
            dst[1] = x1[r];
            dst[2] = x2[r];
            dst[3] = x3[r];
        }
    }
    for (ALPHA_INT c = cfull; c < colX; ++c)
    {
        const double *x0 = &X[index2(c, 0, ldX)];
        for (ALPHA_INT r = 0; r < rowX; ++r)
            Y[index2(r, c, ldY)] = x0[r];
    }
}

void pack_matrix_row2col_d(const ALPHA_INT rowX, const ALPHA_INT colX, const double *X, const ALPHA_INT ldX, double *Y, ALPHA_INT ldY)
{
    const ALPHA_INT num_threads = 8 > alpha_get_thread_num() ? alpha_get_thread_num() : 8;
    const ALPHA_INT rfull = (rowX / 4) * 4;
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT r = 0; r < rfull; r += 4)
    {
        const double *x0 = &X[index2(r, 0, ldX)];
        const double *x1 = &X[index2(r + 1, 0, ldX)];
        const double *x2 = &X[index2(r + 2, 0, ldX)];
        const double *x3 = &X[index2(r + 3, 0, ldX)];
        for (ALPHA_INT c = 0; c < colX; ++c)
        {
            double *dst = &Y[index2(c, r, ldY)];
            dst[0] = x0[c];
            dst[1] = x1[c];
            dst[2] = x2[c];
            dst[3] = x3[c];
        }
    }
    for (ALPHA_INT r = rfull; r < rowX; ++r)
    {
        const double *x0 = &X[index2(r, 0, ldX)];
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
        for (ALPHA_INT c = 0; c < colX; ++c)
            Y[index2(c, r, ldY)] = x0[c];
    }
}

void pack_matrix_col2row_c(const ALPHA_INT rowX, const ALPHA_INT colX, const ALPHA_Complex8 *X, const ALPHA_INT ldX, ALPHA_Complex8 *Y, ALPHA_INT ldY)
{
    const ALPHA_INT cfull = (colX / 4) * 4;
    for (ALPHA_INT c = 0; c < cfull; c += 4)
    {
        const ALPHA_Complex8 *x0 = &X[index2(c, 0, ldX)];
        const ALPHA_Complex8 *x1 = &X[index2(c + 1, 0, ldX)];
        const ALPHA_Complex8 *x2 = &X[index2(c + 2, 0, ldX)];
        const ALPHA_Complex8 *x3 = &X[index2(c + 3, 0, ldX)];
        for (ALPHA_INT r = 0; r < rowX; ++r)
        {
            ALPHA_Complex8 *dst = &Y[index2(r, c, ldY)];
            dst[0] = x0[r];
            dst[1] = x1[r];
            dst[2] = x2[r];
            dst[3] = x3[r];
        }
    }
    for (ALPHA_INT c = cfull; c < colX; ++c)
    {
        const ALPHA_Complex8 *x0 = &X[index2(c, 0, ldX)];
        for (ALPHA_INT r = 0; r < rowX; ++r)
            Y[index2(r, c, ldY)] = x0[r];
    }
}

void pack_matrix_row2col_c(const ALPHA_INT rowX, const ALPHA_INT colX, const ALPHA_Complex8 *X, const ALPHA_INT ldX, ALPHA_Complex8 *Y, ALPHA_INT ldY)
{
    const ALPHA_INT rfull = (rowX / 4) * 4;
    for (ALPHA_INT r = 0; r < rfull; r += 4)
    {
        const ALPHA_Complex8 *x0 = &X[index2(r, 0, ldX)];
        const ALPHA_Complex8 *x1 = &X[index2(r + 1, 0, ldX)];
        const ALPHA_Complex8 *x2 = &X[index2(r + 2, 0, ldX)];
        const ALPHA_Complex8 *x3 = &X[index2(r + 3, 0, ldX)];
        for (ALPHA_INT c = 0; c < colX; ++c)
        {
            ALPHA_Complex8 *dst = &Y[index2(c, r, ldY)];
            dst[0] = x0[c];
            dst[1] = x1[c];
            dst[2] = x2[c];
            dst[3] = x3[c];
        }
    }
    for (ALPHA_INT r = rfull; r < rowX; ++r)
    {
        const ALPHA_Complex8 *x0 = &X[index2(r, 0, ldX)];
        for (ALPHA_INT c = 0; c < colX; ++c)
            Y[index2(c, r, ldY)] = x0[c];
    }
}

void pack_matrix_col2row_z(const ALPHA_INT rowX, const ALPHA_INT colX, const ALPHA_Complex16 *X, const ALPHA_INT ldX, ALPHA_Complex16 *Y, ALPHA_INT ldY)
{
    const ALPHA_INT cfull = (colX / 4) * 4;
    for (ALPHA_INT c = 0; c < cfull; c += 4)
    {
        const ALPHA_Complex16 *x0 = &X[index2(c, 0, ldX)];
        const ALPHA_Complex16 *x1 = &X[index2(c + 1, 0, ldX)];
        const ALPHA_Complex16 *x2 = &X[index2(c + 2, 0, ldX)];
        const ALPHA_Complex16 *x3 = &X[index2(c + 3, 0, ldX)];
        for (ALPHA_INT r = 0; r < rowX; ++r)
        {
            ALPHA_Complex16 *dst = &Y[index2(r, c, ldY)];
            dst[0] = x0[r];
            dst[1] = x1[r];
            dst[2] = x2[r];
            dst[3] = x3[r];
        }
    }
    for (ALPHA_INT c = cfull; c < colX; ++c)
    {
        const ALPHA_Complex16 *x0 = &X[index2(c, 0, ldX)];
        for (ALPHA_INT r = 0; r < rowX; ++r)
            Y[index2(r, c, ldY)] = x0[r];
    }
}

void pack_matrix_row2col_z(const ALPHA_INT rowX, const ALPHA_INT colX, const ALPHA_Complex16 *X, const ALPHA_INT ldX, ALPHA_Complex16 *Y, ALPHA_INT ldY)
{
    const ALPHA_INT rfull = (rowX / 4) * 4;
    for (ALPHA_INT r = 0; r < rfull; r += 4)
    {
        const ALPHA_Complex16 *x0 = &X[index2(r, 0, ldX)];
        const ALPHA_Complex16 *x1 = &X[index2(r + 1, 0, ldX)];
        const ALPHA_Complex16 *x2 = &X[index2(r + 2, 0, ldX)];
        const ALPHA_Complex16 *x3 = &X[index2(r + 3, 0, ldX)];
        for (ALPHA_INT c = 0; c < colX; ++c)
        {
            ALPHA_Complex16 *dst = &Y[index2(c, r, ldY)];
            dst[0] = x0[c];
            dst[1] = x1[c];
            dst[2] = x2[c];
            dst[3] = x3[c];
        }
    }
    for (ALPHA_INT r = rfull; r < rowX; ++r)
    {
        const ALPHA_Complex16 *x0 = &X[index2(r, 0, ldX)];
        for (ALPHA_INT c = 0; c < colX; ++c)
            Y[index2(c, r, ldY)] = x0[c];
    }
}
4284.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "atax.h" /* Array initialization. */ static void init_array (int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny)) { int i, j; for (i = 0; i < ny; i++) x[i] = i * M_PI; for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) A[i][j] = ((DATA_TYPE) i*(j+1)) / nx; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int nx, DATA_TYPE POLYBENCH_1D(y,NX,nx)) { int i; for (i = 0; i < nx; i++) { fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]); if (i % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_atax(int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny), DATA_TYPE POLYBENCH_1D(y,NY,ny), DATA_TYPE POLYBENCH_1D(tmp,NX,nx)) { int i, j; #pragma scop { #pragma omp parallel for schedule(static, 2) simd num_threads(2) for (i = 0; i < _PB_NY; i++) { y[i] = 0; } #pragma omp parallel for schedule(static, 2) simd num_threads(2) for (i = 0; i < _PB_NX; i++) { tmp[i] = 0; for (j = 0; j < _PB_NY; j++) tmp[i] = tmp[i] + A[i][j] * x[j]; for (j = 0; j < _PB_NY; j++) y[j] = y[j] + A[i][j] * tmp[i]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int nx = NX; int ny = NY; /* Variable declaration/allocation. 
*/ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny); POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx); /* Initialize array(s). */ init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_atax (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x), POLYBENCH_ARRAY(y), POLYBENCH_ARRAY(tmp)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(x); POLYBENCH_FREE_ARRAY(y); POLYBENCH_FREE_ARRAY(tmp); return 0; }
luks_fmt_plug.c
/* luks.c * * hashkill - a hash cracking tool * Copyright (C) 2010 Milen Rangelov <gat3way@gat3way.eu> * * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_luks; #elif FMT_REGISTERS_H john_register_one(&fmt_luks); #else #if AC_BUILT #include "autoconfig.h" #else #define _LARGEFILE64_SOURCE 1 #endif #include "jumbo.h" // large file support #include "os.h" #include <stdio.h> #include <string.h> #include <assert.h> #include <errno.h> #include <stdint.h> #include <stdlib.h> #include <sys/types.h> #include "aes.h" #include "sha.h" #include "sha2.h" #include <string.h> #include "arch.h" #include "johnswap.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "memory.h" #include "base64.h" #include "pbkdf2_hmac_sha1.h" #include "dyna_salt.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 1 #endif #endif #include "memdbg.h" #define LUKS_MAGIC_L 6 #define LUKS_CIPHERNAME_L 32 #define LUKS_CIPHERMODE_L 32 #define LUKS_HASHSPEC_L 32 #define UUID_STRING_L 40 #define LUKS_DIGESTSIZE 20 #define LUKS_SALTSIZE 32 #define LUKS_NUMKEYS 8 #define FORMAT_LABEL "LUKS" #define FORMAT_NAME "" #define FORMAT_TAG "$luks$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) 
#ifdef SIMD_COEF_32 #define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME #else #define ALGORITHM_NAME "PBKDF2-SHA1 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define PLAINTEXT_LENGTH 125 #define BENCHMARK_LENGTH -1 #define BINARY_SIZE LUKS_DIGESTSIZE #define BINARY_ALIGN 4 #define SALT_SIZE sizeof(struct custom_salt_LUKS*) #define SALT_ALIGN sizeof(struct custom_salt_LUKS*) #if SIMD_COEF_32 #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif #if ARCH_LITTLE_ENDIAN #define john_htonl(x) ((((x)>>24) & 0xffL) | (((x)>>8) & 0xff00L) | \ (((x)<<8) & 0xff0000L) | (((x)<<24) & 0xff000000L)) #define john_ntohl(x) ((((x)>>24) & 0xffL) | (((x)>>8) & 0xff00L) | \ (((x)<<8) & 0xff0000L) | (((x)<<24) & 0xff000000L)) #else #define john_htonl(x) (x) #define john_ntohl(x) (x) #endif #include "luks_insane_tests.h" /* taken from LUKS on disk format specification */ struct luks_phdr { char magic[LUKS_MAGIC_L]; uint16_t version; char cipherName[LUKS_CIPHERNAME_L]; char cipherMode[LUKS_CIPHERMODE_L]; char hashSpec[LUKS_HASHSPEC_L]; uint32_t payloadOffset; uint32_t keyBytes; char mkDigest[LUKS_DIGESTSIZE]; char mkDigestSalt[LUKS_SALTSIZE]; uint32_t mkDigestIterations; char uuid[UUID_STRING_L]; struct { uint32_t active; uint32_t passwordIterations; char passwordSalt[LUKS_SALTSIZE]; uint32_t keyMaterialOffset; uint32_t stripes; } keyblock[LUKS_NUMKEYS]; }; static struct custom_salt_LUKS { dyna_salt dsalt; char path[8192]; int loaded; struct luks_phdr myphdr; int afsize; int bestslot; int bestiter; unsigned char cipherbuf[1]; } *cur_salt; static void XORblock(char *src1, char *src2, char *dst, int n) { int j; for (j = 0; j < n; j++) dst[j] = src1[j] ^ src2[j]; } static int diffuse(unsigned char *src, unsigned char *dst, int size) { uint32_t i; uint32_t IV; /* host byte order independent hash IV */ SHA_CTX ctx; int fullblocks = (size) / 20; int padding = size % 20; 
for (i = 0; i < fullblocks; i++) { IV = john_htonl(i); SHA1_Init(&ctx); SHA1_Update(&ctx, &IV, 4); SHA1_Update(&ctx, src + 20 * i, 20); SHA1_Final(dst + 20 * i, &ctx); } if (padding) { IV = john_htonl(fullblocks); SHA1_Init(&ctx); SHA1_Update(&ctx, &IV, 4); SHA1_Update(&ctx, src + 20 * fullblocks, padding); SHA1_Final(dst + 20 * fullblocks, &ctx); } return 0; } static int AF_merge(unsigned char *src, unsigned char *dst, int afsize, int stripes) { int i; char *bufblock; int blocksize = afsize / stripes; bufblock = mem_calloc(1, blocksize + 20); for (i = 0; i < (stripes - 1); i++) { XORblock((char *) (src + (blocksize * i)), bufblock, bufblock, blocksize); diffuse((unsigned char *) bufblock, (unsigned char *) bufblock, blocksize); } XORblock((char *) (src + blocksize * (stripes - 1)), bufblock, (char *) dst, blocksize); MEM_FREE(bufblock); return 0; } static int af_sectors(int blocksize, int blocknumbers) { int af_size; af_size = blocksize * blocknumbers; af_size = (af_size + 511) / 512; af_size *= 512; return af_size; } static void decrypt_aes_cbc_essiv(unsigned char *src, unsigned char *dst, unsigned char *key, int size, struct custom_salt_LUKS *cs) { AES_KEY aeskey; unsigned char essiv[16]; unsigned char essivhash[32]; unsigned a; SHA256_CTX ctx; unsigned char sectorbuf[16]; unsigned char zeroiv[16]; // This should NEVER be done in the loop!! This never changed. 
SHA256_Init(&ctx); SHA256_Update(&ctx, key, john_ntohl(cs->myphdr.keyBytes)); SHA256_Final(essivhash, &ctx); memset(sectorbuf, 0, 16); memset(essiv, 0, 16); for (a = 0; a < (size / 512); a++) { memset(zeroiv, 0, 16); #if ARCH_LITTLE_ENDIAN memcpy(sectorbuf, &a, 4); #else { unsigned b = JOHNSWAP(a); memcpy(sectorbuf, &b, 4); } #endif AES_set_encrypt_key(essivhash, 256, &aeskey); AES_cbc_encrypt(sectorbuf, essiv, 16, &aeskey, zeroiv, AES_ENCRYPT); AES_set_decrypt_key(key, john_ntohl(cs->myphdr.keyBytes)*8, &aeskey); AES_cbc_encrypt((src+a*512), (dst+a*512), 512, &aeskey, essiv, AES_DECRYPT); } } static int hash_plugin_parse_hash(char *filename, unsigned char **cp, int afsize, int is_critical) { FILE *myfile; int readbytes; myfile = jtr_fopen(filename, "rb"); if (!myfile) { fprintf(stderr, "\n%s : %s!\n", filename, strerror(errno)); return -1; } // can this go over 4gb? *cp =(unsigned char*) mem_calloc(1, afsize + 1); if (!*cp) goto bad; // printf(">>> %d\n", cs->afsize); readbytes = fread(*cp, afsize, 1, myfile); if (readbytes < 0) { fprintf(stderr, "%s : unable to read required data\n", filename); goto bad; } fclose(myfile); return afsize+1; bad: fclose(myfile); if (is_critical) { fprintf(stderr, "\nLUKS plug-in is unable to continue due to errors!\n"); error(); } return -1; } static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)]; static void init(struct fmt_main *self) { static int warned = 0; // extern struct fmt_main fmt_luks; #ifdef _OPENMP int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt); crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt); /* * LUKS format will need to be redesigned to address the issues mentioned in * https://github.com/magnumripper/JohnTheRipper/issues/557. 
* This will require a change in john's hash representation for LUKS format. * The redesign will happen after the next official jumbo release. * To avoid having to support the current LUKS hash representation forever, * just print a warning that the hash representation will change in future releases. * * So far, no "official" jumbo release supports the LUKS format, currently only * users of bleeding-jumbo may have used LUKS format. These users should be able * to re-run luks2john and retry the passwords that have been stored for the current LUKS hashes * once the redesign of john's LUKS format implementation has been completed.) */ if (!options.listconf && !(options.flags & FLG_TEST_CHK) && warned++ == 0) { fprintf(stderr, "WARNING, LUKS format hash representation will change in future releases,\n" "see doc/README.LUKS\n"); // FIXME: address github issue #557 after 1.8.0-jumbo-1 fflush(stderr); } // This printf will 'help' debug a system that truncates that monster hash, but does not cause compiler to die. 
// printf ("length=%d end=%s\n", strlen(fmt_luks.params.tests[0].ciphertext), &((fmt_luks.params.tests[0].ciphertext)[strlen(fmt_luks.params.tests[0].ciphertext)-30])); #ifdef _MSC_VER LUKS_test_fixup(); #endif } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy; char *keeptr; char *p, *q; unsigned char *buf; int is_inlined, i, bestslot=0; int res; int afsize; unsigned char *out; struct custom_salt_LUKS cs; uint64_t keybytes, stripes; unsigned int bestiter = 0xFFFFFFFF; out = (unsigned char*)&cs.myphdr; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += FORMAT_TAG_LEN; if ((p = strtokm(ctcopy, "$")) == NULL) /* is_inlined */ goto err; if (!isdec(p)) goto err; is_inlined = atoi(p); if ((p = strtokm(NULL, "$")) == NULL) goto err; if (!isdec(p)) goto err; afsize = atoi(p); if (afsize != sizeof(struct luks_phdr)) goto err; if ((p = strtokm(NULL, "$")) == NULL) goto err; if (afsize != strlen(p) / 2) goto err; if (!ishexlc(p)) goto err; for (i = 0; i < afsize; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } keybytes = john_ntohl(cs.myphdr.keyBytes); for (i = 0; i < LUKS_NUMKEYS; i++) { if ((john_ntohl(cs.myphdr.keyblock[i].passwordIterations) < bestiter) && (john_ntohl(cs.myphdr.keyblock[i].passwordIterations) > 1) && (john_ntohl(cs.myphdr.keyblock[i].active) == 0x00ac71f3)) { bestslot = i; bestiter = john_ntohl(cs.myphdr.keyblock[i].passwordIterations); } } stripes = john_ntohl(cs.myphdr.keyblock[bestslot].stripes); if ( (uint64_t)(john_ntohl(cs.myphdr.keyBytes)*john_ntohl(cs.myphdr.keyblock[bestslot].stripes)) != keybytes*stripes) goto err; if ((p = strtokm(NULL, "$")) == NULL) goto err; if (!isdec(p)) goto err; res = atoi(p); if (res != keybytes*stripes) goto err; if (is_inlined) { if ((p = strtokm(NULL, "$")) == NULL) goto err; if ((p = strtokm(NULL, "$")) == NULL) goto err; 
if (strlen(p) != LUKS_DIGESTSIZE * 2) goto err; if (!ishexlc(p)) goto err; } else { if ((p = strtokm(NULL, "$")) == NULL) /* LUKS file */ goto err; if ((p = strtokm(NULL, "$")) == NULL) /* dump file */ goto err; q = p; if ((p = strtokm(NULL, "$")) == NULL) /* mkDigest */ goto err; if (strlen(p) != LUKS_DIGESTSIZE * 2) goto err; if (!ishexlc(p)) goto err; /* more tests */ if (hash_plugin_parse_hash(q, &buf, afsize, 0) == -1) { return 0; } MEM_FREE(buf); } MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; int is_inlined; int res; int i; int cnt; unsigned char *out; unsigned char *buf; struct custom_salt_LUKS cs, *psalt; static unsigned char *ptr; unsigned int bestiter = 0xFFFFFFFF; size_t size = 0; ctcopy += FORMAT_TAG_LEN; if (!ptr) ptr = mem_alloc_tiny(sizeof(struct custom_salt*),sizeof(struct custom_salt*)); memset(&cs, 0, sizeof(cs)); out = (unsigned char*)&cs.myphdr; p = strtokm(ctcopy, "$"); is_inlined = atoi(p); /* common handling */ p = strtokm(NULL, "$"); res = atoi(p); assert(res == sizeof(struct luks_phdr)); p = strtokm(NULL, "$"); for (i = 0; i < res; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } p = strtokm(NULL, "$"); res = atoi(p); if (is_inlined) { p = strtokm(NULL, "$"); size = strlen(p) / 4 * 3 + 1; buf = mem_calloc(1, size+4); base64_decode(p, strlen(p), (char*)buf); cs.afsize = size; } else { cs.afsize = res; p = strtokm(NULL, "$"); p = strtokm(NULL, "$"); strcpy(cs.path, p); size = hash_plugin_parse_hash(cs.path, &buf, cs.afsize, 1); } for (cnt = 0; cnt < LUKS_NUMKEYS; cnt++) { if ((john_ntohl(cs.myphdr.keyblock[cnt].passwordIterations) < bestiter) && (john_ntohl(cs.myphdr.keyblock[cnt].passwordIterations) > 1) && (john_ntohl(cs.myphdr.keyblock[cnt].active) == 0x00ac71f3)) { cs.bestslot = cnt; cs.bestiter = john_ntohl(cs.myphdr.keyblock[cnt].passwordIterations); } } cs.afsize = 
af_sectors(john_ntohl(cs.myphdr.keyBytes), john_ntohl(cs.myphdr.keyblock[cs.bestslot].stripes)); assert(res == cs.afsize); MEM_FREE(keeptr); psalt = (struct custom_salt_LUKS*)mem_alloc_tiny(sizeof(struct custom_salt_LUKS)+size, 4); memcpy(psalt, &cs, sizeof(cs)); memcpy(psalt->cipherbuf, buf, size); MEM_FREE(buf); psalt->dsalt.salt_alloc_needs_free = 0; // set the JtR core linkage stuff for this dyna_salt psalt->dsalt.salt_cmp_offset = SALT_CMP_OFF(struct custom_salt_LUKS, myphdr); psalt->dsalt.salt_cmp_size = SALT_CMP_SIZE(struct custom_salt_LUKS, myphdr, cipherbuf, size); memcpy(ptr, &psalt, sizeof(struct custom_salt*)); return (void*)ptr; } static void *get_binary(char *ciphertext) { static union { unsigned char c[LUKS_DIGESTSIZE]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; p = strrchr(ciphertext, '$') + 1; for (i = 0; i < LUKS_DIGESTSIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static void set_salt(void *salt) { cur_salt = *(struct custom_salt_LUKS **)salt; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) { unsigned char *af_decrypted = (unsigned char *)mem_alloc(cur_salt->afsize + 20); int i, iterations = cur_salt->bestiter; int dklen = john_ntohl(cur_salt->myphdr.keyBytes); uint32_t keycandidate[MAX_KEYS_PER_CRYPT][256/4]; uint32_t masterkeycandidate[MAX_KEYS_PER_CRYPT][256/4]; #ifdef SIMD_COEF_32 int lens[MAX_KEYS_PER_CRYPT]; unsigned char *pin[MAX_KEYS_PER_CRYPT]; union { uint32_t *pout[MAX_KEYS_PER_CRYPT]; unsigned char *poutc; } x; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { lens[i] = strlen(saved_key[index+i]); pin[i] = (unsigned char*)saved_key[index+i]; x.pout[i] = keycandidate[i]; } pbkdf2_sha1_sse((const unsigned char **)pin, lens, (const unsigned 
char*)(cur_salt->myphdr.keyblock[cur_salt->bestslot].passwordSalt), LUKS_SALTSIZE, iterations, &(x.poutc), dklen, 0); #else pbkdf2_sha1((const unsigned char *)saved_key[index], strlen(saved_key[index]), (const unsigned char*)(cur_salt->myphdr.keyblock[cur_salt->bestslot].passwordSalt), LUKS_SALTSIZE, iterations, (unsigned char*)keycandidate[0], dklen, 0); #endif for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { // Decrypt the blocksi decrypt_aes_cbc_essiv(cur_salt->cipherbuf, af_decrypted, (unsigned char*)keycandidate[i], cur_salt->afsize, cur_salt); // AFMerge the blocks AF_merge(af_decrypted, (unsigned char*)masterkeycandidate[i], cur_salt->afsize, john_ntohl(cur_salt->myphdr.keyblock[cur_salt->bestslot].stripes)); } // pbkdf2 again #ifdef SIMD_COEF_32 for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { lens[i] = john_ntohl(cur_salt->myphdr.keyBytes); pin[i] = (unsigned char*)masterkeycandidate[i]; x.pout[i] = crypt_out[index+i]; } pbkdf2_sha1_sse((const unsigned char **)pin, lens, (const unsigned char*)cur_salt->myphdr.mkDigestSalt, LUKS_SALTSIZE, john_ntohl(cur_salt->myphdr.mkDigestIterations), &(x.poutc), LUKS_DIGESTSIZE, 0); #else pbkdf2_sha1((unsigned char*)masterkeycandidate[0], john_ntohl(cur_salt->myphdr.keyBytes), (const unsigned char*)cur_salt->myphdr.mkDigestSalt, LUKS_SALTSIZE, john_ntohl(cur_salt->myphdr.mkDigestIterations), (unsigned char*)crypt_out[index], LUKS_DIGESTSIZE, 0); #endif MEM_FREE(af_decrypted); } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], LUKS_DIGESTSIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], LUKS_DIGESTSIZE); } static int cmp_exact(char *source, int index) { return 1; } static void luks_set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] 
= 0; } static char *get_key(int index) { return saved_key[index]; } struct fmt_main fmt_luks = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_DYNA_SALT | FMT_HUGE_INPUT, { NULL }, { FORMAT_TAG }, luks_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_dyna_salt_hash, NULL, set_salt, luks_set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
node-core.h
#ifndef ARGOS_NODE_CORE #define ARGOS_NODE_CORE #include <iostream> #include <sstream> #include <boost/lexical_cast.hpp> #include "array.h" #include "argos.h" #include "blas-wrapper.h" namespace argos { namespace role { class ArrayLabelInput: public virtual Role { public: virtual Array<double> const &labels () const = 0; }; } namespace core { class Meta: public Node { double m_mom; double m_eta; double m_lambda; public: Meta (Model *model, Config const &config) : Node(model, config), m_mom(getConfig<double>("mom", "argos.global.mom", 0)), m_eta(getConfig<double>("eta", "argos.global.eta", 0.0005)), m_lambda(getConfig<double>("lambda", "argos.global.lambda", 0.5)) { } double mom () const { return m_mom; } double eta () const { return m_eta; } double lambda () const { return m_lambda; } }; class ArrayNode: public Node { public: enum { FLAT = 0, SOUND = 1, IMAGE = 2, }; private: vector<size_t> m_size; Array<> m_data; Array<> m_delta; int m_type; protected: void resize (ArrayNode const &node) { m_size = node.m_size; m_data.resize(m_size); m_delta.resize(m_size); } void resize (vector<size_t> const &size) { m_size = size; m_data.resize(size); m_delta.resize(size); } void setType (int type) { m_type = type; } public: ArrayNode (Model *model, Config const &config) : Node(model, config), m_type(FLAT) { } vector<size_t> const& size () const { return m_size; } Array<> &data () { return m_data; } Array<> &delta () { return m_delta; } Array<> const &data () const { return m_data; } Array<> const &delta () const { return m_delta; } void preupdate () { delta().fill(0); } int type () const { return m_type; } void report (ostream &os) const { os << name() << ":\tdata/" << data().l2() << "\tdelta/" << delta().l2() << endl; } virtual void handle (http::server::request const &req, http::server::reply &rep) const { rep.status = http::server::reply::ok; ostringstream ss; size_t sz = data().size(); size_t samples = data().size(size_t(0)); size_t dim = sz / samples; for (unsigned i = 0; 
i < samples; ++i) { Array<>::value_type const *x = data().at(i); for (unsigned j = 0; j < dim; ++j) { if (j) ss << '\t'; ss << x[j]; } ss << endl; } rep.content = ss.str(); rep.headers.resize(2); rep.headers[0].name = "Content-Length"; rep.headers[0].value = boost::lexical_cast<string>(rep.content.size()); rep.headers[1].name = "Content-Type"; rep.headers[1].value = "text/plain"; } }; /* class InputNode: public ArrayNode { public: InputNode (Model *model, Config const &config): ArrayNode(model, config) { vector<size_t> size; size.push_back(model->config().get<size_t>("argos.batch")); try { try { size.push_back(config.get<size_t>("height")); setType(IMAGE); } catch (...) { setType(SOUND); } size.push_back(config.get<size_t>("width")); } catch (...) { // default is flat } size.push_back(config.get<size_t>("channel")); resize(size); } }; */ /* class GaussianOutputNode: public ArrayOutputNode { ArrayNode *m_input; public: GaussianOutputNode (Model *model, Config const &config) : ArrayOutputNode(model, config) { m_input = findInputAndAdd<ArrayNode>("input", "input"); resize(*m_input); } void update (Mode mode) { m_input->delta().add_diff(m_input->data(), this->data()); } void cost (vector<double> *c) const { c->resize(1); c->at(0) = data().l2sqr(m_input->data()) / 2; } }; */ class MaxScoreOutputNode: public LabelOutputNode<int> { protected: ArrayNode *m_input; size_t m_samples; size_t m_stride; public: MaxScoreOutputNode (Model *model, Config const &config) : LabelOutputNode(model, config) { m_input = findInputAndAdd<ArrayNode>("input", "input"); m_samples = m_input->data().size(size_t(0)); BOOST_VERIFY(m_input->data().size() % m_samples == 0); m_stride = m_input->data().size() / m_samples; } void predict () { m_labels.resize(m_samples); Array<>::value_type const *x = m_input->data().addr(); for (size_t i = 0; i < m_samples; ++i) { int best = 0; for (unsigned l = 1; l < m_stride; ++l) { if (x[l] > x[best]) { best = l; } } m_labels[i] = best; x += m_stride; } } }; // 
class LogPOutputNode: public MaxScoreOutputNode, public role::Loss { public: LogPOutputNode (Model *model, Config const &config) : MaxScoreOutputNode(model, config) { role::Loss::init({"loss", "error"}); } void predict () { MaxScoreOutputNode::predict(); Array<>::value_type const *x = m_input->data().addr(); vector<int> const &truth = inputLabels(); m_labels.resize(truth.size()); for (size_t i = 0; i < truth.size(); ++i) { Array<>::value_type mm = x[0]; int cmax = 1; for (size_t j = 1; j < m_stride; ++j) { if (x[j] > mm) { mm = x[j]; cmax = 1; } else if (x[j] == mm) { ++cmax; } } int l = truth[i]; acc(0)(-log(x[l])); if (x[l] < mm) { acc(1)(1.0); } else { acc(1)(1.0 - 1.0/cmax); } x += m_stride; } } void update () { Array<>::value_type const *x = m_input->data().addr(); Array<>::value_type *dx = m_input->delta().addr(); vector<int> const &truth = inputLabels(); for (size_t i = 0; i < m_samples; ++i) { int l = truth[i]; dx[l] += -1.0/x[l]; x += m_stride; dx += m_stride; } } }; class HingeLossOutputNode: public MaxScoreOutputNode, public role::Loss { double m_margin; public: HingeLossOutputNode (Model *model, Config const &config) : MaxScoreOutputNode(model, config), m_margin(config.get<double>("margin", 0.25)) { role::Loss::init({"loss", "error"}); } void predict () { MaxScoreOutputNode::predict(); Array<>::value_type const *x = m_input->data().addr(); vector<int> const &truth = inputLabels(); m_labels.resize(truth.size()); for (size_t i = 0; i < truth.size(); ++i) { int l = truth[i]; unsigned bad = 0; // sizeof left set double t = x[l]; double total = 0; for (unsigned c = 0; c < m_stride; ++c) { if (c == unsigned(l)) continue; if (x[c] >= t) ++bad; if (((x[c] + m_margin) >= t)) { total += x[c] + m_margin - t; } } acc(0)(total); acc(1)(bad ? 
1.0 : 0.0); x += m_stride; } } void update () { Array<>::value_type const *x = m_input->data().addr(); Array<>::value_type *dx = m_input->delta().addr(); vector<int> const &truth = inputLabels(); for (size_t i = 0; i < m_samples; ++i) { int l = truth[i]; unsigned left = 0; // sizeof left set double t = x[l]; for (unsigned c = 0; c < m_stride; ++c) { if (c == unsigned(l)) continue; if (((x[c] + m_margin) >= t)) { dx[c] += 1.0; ++left; } } dx[l] += -1.0 * left; x += m_stride; dx += m_stride; } } }; /** * loss = 0.5 | x - t| ^2 * d loss/ /dx = x - t */ class RegressionOutputNode: public LabelOutputNode<double>, public role::Loss { ArrayNode *m_input; double m_margin; public: RegressionOutputNode (Model *model, Config const &config) : LabelOutputNode<double>(model, config), m_input(findInputAndAdd<ArrayNode>("input", "input")), m_margin(config.get<double>("margin", 0)) { role::Loss::init({"loss", "error"}); } void predict () { Array<>::value_type const *x = m_input->data().addr(); vector<double> const &truth = inputLabels(); m_labels.resize(truth.size()); for (size_t i = 0; i < truth.size(); ++i) { m_labels[i] = x[i]; double diff = x[i] - truth[i]; double n2 = diff * diff; if (std::abs(diff) >= m_margin) { acc(0)(0.5 * n2); } else { acc(0)(0); } acc(1)(std::abs(diff)); } } void update () { Array<>::value_type const *x = m_input->data().addr(); Array<>::value_type *dx = m_input->delta().addr(); vector<double> const &truth = inputLabels(); for (size_t i = 0; i < truth.size(); ++i) { double diff = x[i] - truth[i]; if (std::abs(diff) >= m_margin) { dx[i] = diff; } else { dx[i] = 0; } } } }; class ArrayOutputNode: public ArrayNode { role::ArrayLabelInput const *m_label_input; public: ArrayOutputNode (Model *model, Config const &config): ArrayNode(model, config) { m_label_input = model->findNode<role::ArrayLabelInput>(config.get<string>("label")); BOOST_VERIFY(m_label_input); } Array<double> const &inputLabels () const { return m_label_input->labels(); } }; class 
MultiRegressionOutputNode: public ArrayOutputNode, public role::Loss { ArrayNode *m_input; double m_margin; double m_rho; public: MultiRegressionOutputNode (Model *model, Config const &config) : ArrayOutputNode(model, config), m_input(findInputAndAdd<ArrayNode>("input", "input")), m_margin(config.get<double>("margin", 0)), m_rho(config.get<double>("rho", 1.0)) { role::Loss::init({"loss", "error"}); } void predict () { Array<>::value_type const *x = m_input->data().addr(); Array<>::value_type const *y = inputLabels().addr(); vector<size_t> sz_x; vector<size_t> sz_y; m_input->data().size(&sz_x); inputLabels().size(&sz_y); BOOST_VERIFY(sz_x.size() == 2); BOOST_VERIFY(sz_x == sz_y); unsigned i = 0; for (unsigned row = 0; row < sz_x[0]; ++row) { double l = 0; double e = 0; for (unsigned col = 0; col < sz_x[1]; ++col) { double diff = std::abs(x[i] - y[i]); ++i; double n2 = diff * diff; if (diff >= m_margin) { l += 0.5 * n2; } e += diff; } acc(0)(0); acc(1)(0); /* acc(0)(l); acc(1)(e); */ } } void update () { Array<>::value_type const *x = m_input->data().addr(); Array<>::value_type *dx = m_input->delta().addr(); Array<>::value_type const *y = inputLabels().addr(); vector<size_t> sz_x; vector<size_t> sz_y; m_input->data().size(&sz_x); inputLabels().size(&sz_y); BOOST_VERIFY(sz_x.size() == 2); BOOST_VERIFY(sz_x == sz_y); size_t total = sz_x[0] * sz_x[1]; for (size_t i = 0; i < total; ++i) { double diff = x[i] - y[i]; if (std::abs(diff) >= m_margin) { dx[i] = diff * m_rho; } else { dx[i] = 0; } } } void report (ostream &os) const { os << name() << ":\tlabel/" << inputLabels().l2() << "\tdata/" << m_input->data().l2() << endl; } }; namespace function { // each struct implement a forward function // which is the activate function itself, // and a backward function, which is the derivative // of the activate function as a function of y. 
// struct id { // identity, for testing static string name () { return "id"; } template <typename T> static T forward (T x) { return x; } template <typename T> static T backward (T x, T y) { return 1; } }; struct relu { static string name () { return "relu"; } template <typename T> static T forward (T x) { return x > 0 ? x : 0; } template <typename T> static T backward (T x, T y) { return x > 0 ? 1 : 0; } }; struct softrelu { static string name () { return "softrelu"; } template <typename T> static T forward (T x) { return log(1+exp(x)); } template <typename T> static T backward (T x, T y) { T e = exp(x); return e/(1+e); } }; struct tanh { static string name () { return "tanh"; } template <typename T> static T forward (T x) { return std::tanh(x); } template <typename T> static T backward (T x, T y) { return 1 - y * y; } }; struct logistic { static string name () { return "logistic"; } template <typename T> static T forward (T x) { return 1/(1 + std::exp(-x)); } template <typename T> static T backward (T x, T y) { return y * (1 - y); } }; } template <typename F> class FunctionNode: public ArrayNode { ArrayNode *m_input; public: FunctionNode (Model *model, Config const &config) : ArrayNode(model, config) { m_input = findInputAndAdd<ArrayNode>("input", "input"); resize(*m_input); setType(m_input->type()); } void predict () { data().apply(m_input->data(), [](Array<>::value_type &y, Array<>::value_type x){y = F::forward(x);}); } void update () { m_input->delta().apply(m_input->data(), data(), delta(), [](Array<>::value_type &dx, Array<>::value_type x, Array<>::value_type y, Array<>::value_type dy) { dx += F::backward(x, y) * dy; }); } }; class ParamNode: public ArrayNode, public role::Params { Meta *m_meta; double m_init; public: ParamNode (Model *model, Config const &config) : ArrayNode(model, config), m_meta(findInputAndAdd<Meta>("meta", "meta", "$meta")), m_init(config.get<double>("init", model->config().get<double>("argos.global.init", 0))) { vector<size_t> size; 
size.push_back(config.get<size_t>("size")); resize(size); } void sync (Node const *fromNode) { ParamNode const *from = dynamic_cast<ParamNode const *>(fromNode); BOOST_VERIFY(from); data().sync(from->data()); delta().sync(from->delta()); } void save (ostream &os) const { os.write((char const *)this->data().addr(), sizeof(Array<>::value_type) * this->data().size()); os.write((char const *)this->delta().addr(), sizeof(Array<>::value_type) * this->delta().size()); } void load (istream &is) { is.read((char *)this->data().addr(), sizeof(Array<>::value_type) * this->data().size()); is.read((char *)this->delta().addr(), sizeof(Array<>::value_type) * this->delta().size()); } void init () { delta().fill(0); if (m_init == 0) { //cerr << "INIT0 " << name() << endl; data().fill(0); } else { //cerr << "INIT " << name() << endl; std::normal_distribution<Array<>::value_type> normal(0, m_init); Model::Random &random = model()->random(); data().apply_serial([&normal, &random](Array<>::value_type &y) {y = normal(random);}); } } void predict () { if (mode() == MODE_TRAIN) { double dl2 = delta().l2(); double xl2 = data().l2(); if (m_meta->lambda()) { data().scale(1.0 - m_meta->lambda()); } if (m_meta->eta() * xl2 < dl2) { data().add_scaled(-m_meta->eta() * xl2 / dl2, delta()); } else { data().add_scaled(-m_meta->eta(), delta()); } } } void preupdate () { if (mode() == MODE_TRAIN) { if (m_meta->mom() == 0) { // m_mom has to be 0 for verify mode delta().fill(0); } else { delta().scale(m_meta->mom()); } // get norm scale /* double dl2 = delta().l2(); if (dl2 == 0) return; double rate = data().l2() / dl2 * m_meta->lambda(); */ /* if (m_meta->lambda()) { delta().add_scaled(m_meta->lambda(), data()); //delta().add_scaled(rate, data()); } */ } } size_t dim () const { return data().size(); } void perturb (size_t index, double epsilon) { auto addr = data().addr(); addr[index] += epsilon; } double gradient (size_t index) const { auto addr = delta().addr(); return addr[index]; } double value 
(size_t index) const { auto addr = data().addr(); return addr[index]; } }; class PadNode: public ArrayNode { ArrayNode *m_input; size_t m_pad_w, m_pad_h; vector<size_t> m_input_shape; vector<size_t> m_output_shape; public: PadNode (Model *model, Config const &config): ArrayNode(model, config) { m_input = findInputAndAdd<ArrayNode>("input", "input"); m_input->data().size(&m_input_shape); m_output_shape = m_input_shape; setType(m_input->type()); m_pad_w = config.get<size_t>("width"); m_pad_h = 0; if (type() == IMAGE) { m_pad_h = config.get<size_t>("height"); m_output_shape[1] += m_pad_h * 2; m_output_shape[2] += m_pad_w * 2; } else { m_output_shape[1] += m_pad_w * 2; } resize(m_output_shape); data().fill(0); delta().fill(0); } void predict () { if (type() == IMAGE) { Array<>::value_type const *in = m_input->data().addr(); for (size_t s = 0; s < m_input_shape[0]; ++s) { Array<>::value_type *out = data().addr(); out = data().walk<0>(out, s); out = data().walk<1>(out, m_pad_h); out = data().walk<2>(out, m_pad_w); for (size_t h = 0; h < m_input_shape[1]; ++h) { Array<>::value_type const *in_next = m_input->data().walk<1>(in); copy(in, in_next, out); in = in_next; out = data().walk<1>(out); } } } else { BOOST_VERIFY(0); } } void update () { if (type() == IMAGE) { Array<>::value_type *in = m_input->delta().addr(); for (size_t s = 0; s < m_input_shape[0]; ++s) { Array<>::value_type const *out = delta().addr(); out = delta().walk<0>(out, s); out = delta().walk<1>(out, m_pad_h); out = delta().walk<2>(out, m_pad_w); for (size_t h = 0; h < m_input_shape[1]; ++h) { Array<>::value_type *in_next = m_input->delta().walk<1>(in); size_t sz = in_next - in; for (size_t o = 0; o < sz; ++o) { in[o] += out[o]; } in = in_next; out = delta().walk<1>(out); } } BOOST_VERIFY(in == m_input->delta().addr() + m_input->delta().size()); } else { BOOST_VERIFY(0); } } }; class LinearNode: public ArrayNode { ArrayNode *m_input; ParamNode *m_weight; ParamNode *m_bias; bool m_local; size_t m_samples; 
size_t m_rows; // global: m_rows = m_samples // local: m_rows = m_samples * height [* width] size_t m_input_size; size_t m_output_size; public: LinearNode (Model *model, Config const &config) : ArrayNode(model, config) { m_input = findInputAndAdd<ArrayNode>("input", "input"); if (config.get<int>("local", 0) != 0) { //cerr << "LOCAL " << name() << endl; m_local = true; vector<size_t> size; m_input->data().size(&size); /* cerr << size.size() << ':'; for (auto const &v: size) { cerr << " " << v; } cerr << endl; */ m_input_size = size.back(); m_output_size = config.get<size_t>("channel"); BOOST_VERIFY(m_input->data().size() % m_input_size == 0); m_rows = m_input->data().size() / m_input_size; m_samples = size[0]; size.back() = m_output_size; resize(size); /* cerr << size.size() << ':'; for (auto const &v: size) { cerr << " " << v; } cerr << endl; */ //cerr << "ROWS: " << m_rows << endl; setType(m_input->type()); } else { m_local = false; m_samples = m_rows = m_input->data().size(size_t(0)); m_input_size = m_input->data().size() / m_samples; m_output_size = config.get<size_t>("channel"); vector<size_t> size; size.push_back(m_samples); size.push_back(m_output_size); resize(size); } m_weight = nullptr; try { //m_weight = model->findNode<ParamNode>("weight"); m_weight = findInputAndAdd<ParamNode>("weight", "weight"); } catch (...) { } if (m_weight == nullptr) { Config wconfig = config; wconfig.put("name", name() + "_weight"); wconfig.put("type", "param"); wconfig.put("size", m_input_size * m_output_size); wconfig.put("meta", config.get<string>("meta", "$meta")); m_weight = model->createNode<ParamNode>(wconfig); BOOST_VERIFY(m_weight); } addInput(m_weight, "weight"); m_bias = nullptr; try { m_bias = findInputAndAdd<ParamNode>("bias", "bias"); } catch (...) 
{ } if (m_bias == nullptr) { Config bconfig = config; bconfig.put("name", name() + "_bias"); bconfig.put("type", "param"); bconfig.put("size", m_output_size); bconfig.put("init", 0); bconfig.put("meta", config.get<string>("meta", "$meta")); m_bias = model->createNode<ParamNode>(bconfig); BOOST_VERIFY(m_bias); } addInput(m_bias, "bias"); BOOST_VERIFY(m_bias->data().size() == m_output_size); BOOST_VERIFY(m_weight->data().size() == m_input_size * m_output_size); } void predict () { data().tile(m_bias->data()); blas::gemm<Array<>::value_type>(m_input->data().addr(), m_rows, m_input_size, false, m_weight->data().addr(), m_input_size, m_output_size, false, this->data().addr(), m_rows, m_output_size, 1.0, 1.0); } void update () { //cerr << "UPDATE " << name() << endl; // update input data blas::gemm<Array<>::value_type>(this->delta().addr(), m_rows, m_output_size, false, m_weight->data().addr(), m_input_size, m_output_size, true, m_input->delta().addr(), m_rows, m_input_size, 1.0, 1.0); // update weight data blas::gemm<Array<>::value_type>(m_input->data().addr(), m_rows, m_input_size, true, this->delta().addr(), m_rows, m_output_size, false, m_weight->delta().addr(), m_input_size, m_output_size, 1.0/m_samples, 1.0); m_bias->delta().add_scaled_wrapping(1.0/m_samples, delta()); } }; // global only, no local class MultiLinearNode: public ArrayNode { ArrayNode *m_input; ParamNode *m_weight; ParamNode *m_bias; size_t m_samples; size_t m_input_size; size_t m_fan_in; size_t m_output_size; public: MultiLinearNode (Model *model, Config const &config) : ArrayNode(model, config) { m_input = findInputAndAdd<ArrayNode>("input", "input"); m_samples = m_input->data().size(size_t(0)); m_input_size = m_input->data().size() / m_samples; m_output_size = config.get<size_t>("channel"); BOOST_VERIFY(m_input_size % m_output_size == 0); m_fan_in = m_input_size / m_output_size; vector<size_t> size; size.push_back(m_samples); size.push_back(m_output_size); resize(size); m_weight = nullptr; try { 
//m_weight = model->findNode<ParamNode>("weight"); m_weight = findInputAndAdd<ParamNode>("weight", "weight"); } catch (...) { } if (m_weight == nullptr) { Config wconfig = config; wconfig.put("name", name() + "_weight"); wconfig.put("type", "param"); wconfig.put("size", m_input_size); wconfig.put("meta", config.get<string>("meta", "$meta")); m_weight = model->createNode<ParamNode>(wconfig); BOOST_VERIFY(m_weight); } addInput(m_weight, "weight"); m_bias = nullptr; try { m_bias = findInputAndAdd<ParamNode>("bias", "bias"); } catch (...) { } if (m_bias == nullptr) { Config bconfig = config; bconfig.put("name", name() + "_bias"); bconfig.put("type", "param"); bconfig.put("size", m_output_size); bconfig.put("init", 0); bconfig.put("meta", config.get<string>("meta", "$meta")); m_bias = model->createNode<ParamNode>(bconfig); BOOST_VERIFY(m_bias); } addInput(m_bias, "bias"); BOOST_VERIFY(m_bias->data().size() == m_output_size); BOOST_VERIFY(m_weight->data().size() == m_input_size); } void predict () { data().tile(m_bias->data()); size_t rows = m_samples * m_output_size; size_t cols = m_fan_in; Array<>::value_type *input = m_input->data().addr(); BOOST_VERIFY(rows * cols == m_input->data().size()); Array<>::value_type *output = data().addr(); Array<>::value_type *weight = m_weight->data().addr(); BOOST_VERIFY(cols * m_output_size == m_weight->data().size()); #pragma omp parallel for for (size_t r = 0; r < rows; ++r) { Array<>::value_type *i = input + r * cols; Array<>::value_type *o = output + r; Array<>::value_type *w = weight + (r % m_output_size) * cols; for (unsigned d = 0; d < m_fan_in; ++d) { *o += i[d] * w[d]; } } } void update () { size_t rows = m_samples * m_output_size; size_t cols = m_fan_in; Array<>::value_type const *input = m_input->data().addr(); Array<>::value_type *input_delta = m_input->delta().addr(); Array<>::value_type const *output_delta = delta().addr(); Array<>::value_type const *weight = m_weight->data().addr(); Array<>::value_type *weight_delta = 
m_weight->delta().addr(); //#pragma omp parallel for for (size_t r = 0; r < rows; ++r) { Array<>::value_type const *i = input + r * cols; Array<>::value_type *id = input_delta + r * cols; Array<>::value_type const *od = output_delta + r; Array<>::value_type const *w = weight + (r % m_output_size) * cols; Array<>::value_type *wd = weight_delta + (r % m_output_size) * cols; for (unsigned d = 0; d < cols; ++d) { id[d] += w[d] * od[0]; wd[d] += i[d] * od[0] / m_samples; } } m_bias->delta().add_scaled_wrapping(1.0/m_samples, delta()); } }; namespace pool { struct max { static string name () { return "max"; } typedef unsigned state_type; template <typename T> static void predict (T const *in, state_type *s, T *out, size_t iw, size_t ow) { unsigned n = iw / ow; copy(in, in + ow, out); fill(s, s + ow, 0); in += ow; for (unsigned i = 1; i < n; ++i) { for (unsigned j = 0; j < ow; ++j) { if (in[j] > out[j]) { out[j] = in[j]; s[j] = i; } } in += ow; } } template <typename T> static void update (T *in, state_type const *s, T const *out, size_t iw, size_t ow) { for (unsigned j = 0; j < ow; ++j) { unsigned i = s[j]; in[i * ow + j] += out[j]; } } }; struct avg { static string name () { return "avg"; } typedef char state_type; template <typename T> static void predict (T const *in, state_type *s, T *out, size_t iw, size_t ow) { unsigned n = iw / ow; fill(out, out+ow, 0); for (unsigned i = 0; i < n; ++i) { for (unsigned j = 0; j < ow; ++j) { out[j] += in[j]; } in += ow; } for (unsigned j = 0; j < ow; ++j) { out[j] /= n; } } template <typename T> static void update (T *in, state_type const *s, T const *out, size_t iw, size_t ow) { unsigned n = iw / ow; for (unsigned i = 0; i < n; ++i) { for (unsigned j = 0; j < ow; ++j) { in[j] += out[j] / n; } in += ow; } } }; } template <typename POOL> class PoolNode: public ArrayNode { ArrayNode *m_input; Array<typename POOL::state_type> m_state; size_t m_input_channel; size_t m_output_channel; public: PoolNode (Model *model, Config const 
&config): ArrayNode(model, config) { m_input = findInputAndAdd<ArrayNode>("input", "input"); m_output_channel = config.get<size_t>("channel"); vector<size_t> size; m_input->data().size(&size); m_input_channel = size.back(); size.back() = m_output_channel; resize(size); m_state.resize(size); setType(m_input->type()); } void predict () { size_t n = m_input->data().size() / m_input_channel; #pragma omp parallel for for (size_t i = 0; i < n; ++i) { Array<>::value_type const *input = m_input->data().addr() + i * m_input_channel; typename POOL::state_type *state = m_state.addr() + i * m_output_channel;; Array<>::value_type *output = data().addr() + i * m_output_channel; POOL::predict(input, state, output, m_input_channel, m_output_channel); } } void update () { size_t samples = m_input->data().size(size_t(0)); #pragma omp parallel for for (size_t s = 0; s < samples; ++s) { Array<>::value_type *input = m_input->delta().at(s); typename POOL::state_type const *state = m_state.at(s); Array<>::value_type const *output = delta().at(s); size_t n = m_input->data().size() / m_input_channel / samples; for (size_t i = 0; i < n; ++i) { POOL::update(input, state, output, m_input_channel, m_output_channel); input += m_input_channel; state += m_output_channel; output += m_output_channel; } } } }; class WindowNode: public ArrayNode { size_t m_bin; size_t m_step; size_t m_samples; ArrayNode *m_input; vector<size_t> m_input_shape; vector<size_t> m_output_shape; public: WindowNode (Model *model, Config const &config): ArrayNode(model, config) { m_bin = config.get<size_t>("bin"); m_step = config.get<size_t>("step"); m_input = findInputAndAdd<ArrayNode>("input", "input"); m_input_shape.resize(m_input->data().dim()); for(size_t i = 0; i < m_input_shape.size(); ++i) { m_input_shape[i] = m_input->data().size(i); } m_samples = m_input_shape[0]; BOOST_VERIFY(m_input->type() == IMAGE || m_input->type() == SOUND); setType(m_input->type()); m_output_shape.push_back(m_input_shape[0]); 
m_output_shape.push_back(1 + (m_input_shape[1] - m_bin) / m_step);
        size_t channels = m_input_shape.back() * m_bin;
        if (m_input->type() == IMAGE) {
            // 2-D windows: second spatial dimension plus bin*bin patch area.
            m_output_shape.push_back(1 + (m_input_shape[2] - m_bin) / m_step);
            setType(IMAGE);
            channels *= m_bin;
        }
        else {
            setType(SOUND);
        }
        m_output_shape.push_back(channels);
        resize(m_output_shape);
        /*{
            vector<size_t> &size = m_output_shape;
            cerr << name() << ' ';
            cerr << size.size() << ':';
            for (auto const &v: size) {
                cerr << " " << v;
            }
            cerr << endl;
        }*/
    }
    // Forward: copy every bin x bin patch row-by-row into the output channel
    // slot. Only the IMAGE path is implemented; SOUND asserts.
    void predict () {
        Array<>::value_type const *packed = m_input->data().addr();
        if (type() == IMAGE) {
            size_t patch_width = m_input->data().walk<2>(packed, m_bin) - packed;
            //std::cout << patch_width << std::endl;
            BOOST_VERIFY(patch_width == m_output_shape.back() / m_bin);
#pragma omp parallel for
            for (size_t i = 0; i < m_samples; ++i) {
                Array<>::value_type const *packed_1 = m_input->data().walk<0>(packed, i);
                Array<>::value_type *unpacked = data().walk<0>(data().addr(), i);
                for (size_t j = 0; j + m_bin <= m_input_shape[1]; j += m_step) { // row
                    Array<>::value_type const *packed_2 = packed_1;
                    // TODO??? maybe start from an offset
                    for (size_t k = 0; k + m_bin <= m_input_shape[2]; k += m_step) { // col
                        ////
                        Array<>::value_type const *packed_3 = packed_2;
                        // TODO??? maybe start from an offset
                        for (size_t l = 0; l < m_bin; ++l) { // m_bin rows
                            copy(packed_3, packed_3 + patch_width, unpacked);
                            unpacked += patch_width;
                            packed_3 = m_input->data().walk<1>(packed_3);
                        }
                        packed_2 = m_input->data().walk<2>(packed_2, m_step);
                    }
                    packed_1 = m_input->data().walk<1>(packed_1, m_step);
                }
            }
        }
        else {
            // SOUND path sketched but never finished:
            /*
            for (size_t i = 0; i < m_samples; ++i) {
                Array<>::value_type *unpacked = data().at(i);
                Array<>::value_type const *packed = m_input->data().at(i);
                for (size_t j = 0; j < m_input_shape[1]; j += m_bin;) {
                    unpacked += m_output_shape.back();
                }
            }
            */
            BOOST_VERIFY(0);
        }
    }
    // Backward: mirror of predict -- accumulate (not copy) each output patch
    // delta back into the overlapping input positions.
    void update () {
        Array<>::value_type *packed = m_input->delta().addr();
        if (type() == IMAGE) {
            size_t patch_width = m_input->data().walk<2>(packed, m_bin) - packed;
            //std::cout << patch_width << std::endl;
            BOOST_VERIFY(patch_width == m_output_shape.back() / m_bin);
#pragma omp parallel for
            for (size_t i = 0; i < m_samples; ++i) {
                Array<>::value_type *packed_1 = m_input->data().walk<0>(packed, i);
                Array<>::value_type const *unpacked = delta().walk<0>(delta().addr(), i);
                for (size_t j = 0; j + m_bin <= m_input_shape[1]; j += m_step) { // row
                    Array<>::value_type *packed_2 = packed_1;
                    // TODO??? maybe start from an offset
                    for (size_t k = 0; k + m_bin <= m_input_shape[2]; k += m_step) { // col
                        ////
                        Array<>::value_type *packed_3 = packed_2;
                        // TODO??? maybe start from an offset
                        for (size_t l = 0; l < m_bin; ++l) { // m_bin rows
                            //copy(packed_3, packed_3 + patch_width, unpacked);
                            for (unsigned m = 0; m < patch_width; ++m) {
                                packed_3[m] += unpacked[m];
                            }
                            unpacked += patch_width;
                            packed_3 = m_input->data().walk<1>(packed_3);
                        }
                        packed_2 = m_input->data().walk<2>(packed_2, m_step);
                    }
                    packed_1 = m_input->data().walk<1>(packed_1, m_step);
                }
            }
        }
        else {
            // SOUND path sketched but never finished:
            /*
            for (size_t i = 0; i < m_samples; ++i) {
                Array<>::value_type *unpacked = data().at(i);
                Array<>::value_type const *packed = m_input->data().at(i);
                for (size_t j = 0; j < m_input_shape[1]; j += m_bin;) {
                    unpacked += m_output_shape.back();
                }
            }
            */
            BOOST_VERIFY(0);
        }
    }
};

/**
 * Row-wise softmax. update() contains a fused fast path: when the only
 * consumer is a LogPOutputNode, the combined softmax + log-loss gradient
 * (p - one_hot(label)) is emitted directly, skipping the generic Jacobian.
 */
class SoftMaxNode: public ArrayNode {
    ArrayNode *m_input;
public:
    SoftMaxNode (Model *model, Config const &config)
        : ArrayNode(model, config)
    {
        m_input = findInputAndAdd<ArrayNode>("input", "input");
        resize(*m_input);
        setType(m_input->type());
    }
    // Forward: standard softmax per sample.
    // NOTE(review): exp(in[j]) is not max-shifted, so large inputs can overflow.
    void predict () {
        size_t samples = m_input->data().size(size_t(0));
        size_t sz = m_input->data().size() / samples;
        Array<>::value_type const *in = m_input->data().addr();
        Array<>::value_type *out = data().addr();
        vector<Array<>::value_type> e(sz);
        for (size_t i = 0; i < samples; ++i) {
            Array<>::value_type sum = 0;
            for (size_t j = 0; j < sz; ++j) {
                e[j] = exp(in[j]);
                sum += e[j];
            }
            for (size_t j = 0; j < sz; ++j) {
                out[j] = e[j]/sum;
            }
            in += sz;
            out += sz;
        }
    }
    void update () {
        size_t samples = m_input->data().size(size_t(0));
        size_t sz = m_input->data().size() / samples;
        Array<>::value_type const *in = m_input->data().addr();
        Array<>::value_type *in_delta = m_input->delta().addr();
        Array<>::value_type const *out = data().addr();
        Array<>::value_type const *out_delta = delta().addr();
        //cerr << "SOFTMAX UPDATE" << endl;
        // breakable one-shot block: falls through to the generic path below
        for (;;) {
            if (outputs().size() != 1) break;
            Node *node = outputs()[0].node;
            LogPOutputNode *logp = dynamic_cast<LogPOutputNode *>(node);
            if (logp == nullptr) break;
            //cerr << "OPTIMIZE SOFTMAX + LOGP" << endl;
            vector<int> const &labels =
logp->inputLabels(); BOOST_VERIFY(labels.size() == samples); for (size_t i = 0; i < samples; ++i) { for (size_t j = 0; j < sz; ++j) { in_delta[j] += out[j]; } int l = labels[i]; BOOST_VERIFY(l < int(sz)); BOOST_VERIFY(l >= 0); if (l < int(sz)) { in_delta[l] -= 1.0; } in_delta += sz; out += sz; } return; } for (size_t i = 0; i < samples; ++i) { Array<>::value_type sum = 0; for (unsigned j = 0; j < sz; ++j) { sum += out_delta[j] * out[j]; } for (unsigned j = 0; j < sz; ++j) { in_delta[j] += out[j] * (out_delta[j] - sum); } in += sz; in_delta += sz; out += sz; out_delta += sz; } } }; class NormalizeNode: public ArrayNode { ArrayNode *m_input; vector<Array<>::value_type> m_rate; size_t m_samples; size_t m_dim; public: NormalizeNode (Model *model, Config const &config) : ArrayNode(model, config) { m_input = findInputAndAdd<ArrayNode>("input", "input"); resize(*m_input); setType(m_input->type()); m_samples = data().size(size_t(0)); m_dim = data().size() / m_samples; m_rate.resize(m_samples); } void predict () { Array<>::value_type const *in = m_input->data().addr(); Array<>::value_type *out = data().addr(); for (size_t i = 0; i < m_samples; ++i) { Array<>::value_type sum = 0; for (size_t j = 0; j < m_dim; ++j) { sum += in[j] * in[j];; } Array<>::value_type r = 1.0 / sqrt(sum / m_dim); m_rate[i] = r; for (size_t j = 0; j < m_dim; ++j) { out[j] = in[j] * r; } in += m_dim; out += m_dim; } } void update () { Array<>::value_type const *in = m_input->data().addr(); Array<>::value_type *in_delta = m_input->delta().addr(); Array<>::value_type const *out = data().addr(); Array<>::value_type const *out_delta = delta().addr(); for (size_t i = 0; i < m_samples; ++i) { for (size_t j = 0; j < m_dim; ++j) { in_delta[j] = out_delta[j] * m_rate[i] * (1.0 - out[j] * out[j] / m_dim); } in += m_dim; out += m_dim; in_delta += m_dim; out_delta += m_dim; } } }; class DropOutNode: public ArrayNode { ArrayNode *m_input; double m_rate; int m_freq; vector<Array<>::value_type> m_mask; size_t 
m_samples; size_t m_sample_size; size_t m_cnt; public: DropOutNode (Model *model, Config const &config) : ArrayNode(model, config) { m_input = findInputAndAdd<ArrayNode>("input", "input"); m_rate = config.get<double>("rate", 0.5); m_freq = config.get<double>("freq", 1); m_cnt = 0; resize(*m_input); setType(m_input->type()); m_samples = data().size(size_t(0)); m_sample_size = data().size() / m_samples; m_mask.resize(m_sample_size, 0); size_t nz = m_mask.size() * m_rate; m_rate = double(nz) / m_mask.size(); for (size_t i = 0; i < nz; ++i) { m_mask[i] = 1.0; } } void predict () { if (mode() == MODE_PREDICT) { #pragma omp parallel for for (size_t i = 0; i < m_samples; ++i) { Array<>::value_type const *in = m_input->data().at(i); Array<>::value_type *out = data().at(i); for (size_t j = 0; j < m_sample_size; ++j) { out[j] = in[j] * m_rate; } } } else { if (m_cnt % m_freq == 0) { random_shuffle(m_mask.begin(), m_mask.end()); } ++m_cnt; #pragma omp parallel for for (size_t i = 0; i < m_samples; ++i) { Array<>::value_type const *in = m_input->data().at(i); Array<>::value_type *out = data().at(i); for (size_t j = 0; j < m_sample_size; ++j) { out[j] = m_mask[j] * in[j]; } } } //data().apply(m_input->data(), [](Array<>::value_type &y, Array<>::value_type x){y = F::forward(x);}); } void update () { #pragma omp parallel for for (size_t i = 0; i < m_samples; ++i) { Array<>::value_type *in = m_input->delta().at(i); Array<>::value_type const *out = delta().at(i); for (size_t j = 0; j < m_sample_size; ++j) { in[j] += m_mask[j] * out[j]; } } } }; } } #endif
elkan_commons.c
#include "elkan_commons.h" #include "../../utils/vector/sparse/sparse_vector_math.h" #include "../../utils/global_defs.h" void calculate_cluster_distance_matrix(struct general_kmeans_context* ctx , VALUE_TYPE** dist_clusters_clusters , VALUE_TYPE* min_dist_cluster_clusters , uint32_t* stop) { uint64_t i; uint64_t j; VALUE_TYPE dist_eval; #pragma omp parallel for for(i = 0; i < ctx->no_clusters; i++) { /* reset min_dist_cluster_clusters */ min_dist_cluster_clusters[i] = VALUE_TYPE_MAX; } /* evaluate block vector approximation. */ #pragma omp parallel for private(j) for(i = 0; i < ctx->no_clusters; i++) { if (*stop) continue; if (omp_get_thread_num() == 0) check_signals(stop); for(j = 0; j < ctx->no_clusters; j++) { if (i > j) { if (!(ctx->clusters_not_changed[i] && ctx->clusters_not_changed[j])) { /* if none of the two clusters moved, dont recalculate the distance */ dist_clusters_clusters[i][j] = euclid_vector( ctx->cluster_vectors[i].keys , ctx->cluster_vectors[i].values , ctx->cluster_vectors[i].nnz , ctx->cluster_vectors[j].keys , ctx->cluster_vectors[j].values , ctx->cluster_vectors[j].nnz , ctx->vector_lengths_clusters[i] , ctx->vector_lengths_clusters[j]); ctx->done_calculations += 1; } dist_clusters_clusters[j][i] = dist_clusters_clusters[i][j]; dist_eval = 0.5 * dist_clusters_clusters[i][j]; min_dist_cluster_clusters[i] = (dist_eval < min_dist_cluster_clusters[i]) ? dist_eval : min_dist_cluster_clusters[i]; min_dist_cluster_clusters[j] = (dist_eval < min_dist_cluster_clusters[j]) ? dist_eval : min_dist_cluster_clusters[j]; } } } }
mandel_openmp.h
// The MIT License (MIT) // // Copyright (c) 2015-2017 CERN // // Authors: Przemyslaw Karpinski, Mathieu Gravey // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. // // // This piece of code was developed as part of ICE-DIP project at CERN. // "ICE-DIP is a European Industrial Doctorate project funded by the European Community's // 7th Framework programme Marie Curie Actions under grant PITN-GA-2012-316596". 
#ifndef UME_MANDEL_OPENMP_H_
#define UME_MANDEL_OPENMP_H_

// SIMD lane count: pixels are processed in batches of N via `omp simd`.
#define N 16

/**
 * Computes a Mandelbrot escape-time image for the window (x1,y1)-(x2,y2)
 * into `image` (one iteration count per pixel), vectorized with OpenMP SIMD.
 *
 * Strategy: iterate all N lanes together in chunks of `stepSize` iterations
 * while most lanes are still active (masked updates keep escaped lanes
 * frozen); once few lanes remain (`todo <= 2`) finish each remaining pixel
 * with a precise scalar loop.
 */
void mandel_openmp(
    float x1, float y1, float x2, float y2,
    uint32_t width, uint32_t height, uint32_t maxIters, uint16_t * image)
{
    float dx = (x2 - x1) / width, dy = (y2 - y1) / height;

    for (uint32_t index = 0; index < height*width; index+=N) {
        uint32_t i[N];
        uint32_t j[N];
        float cx[N];
        float cy[N];
        float x[N];
        float y[N];
        uint32_t val[N];

        // Initialize the N-pixel batch: pixel coordinates -> complex plane.
        #pragma omp simd
        for (uint32_t subIndex = 0; subIndex < N; ++subIndex) {
            i[subIndex]=(index+subIndex)%width;
            j[subIndex]=(index+subIndex)/width;
            cx[subIndex] = x1 + dx*i[subIndex];
            cy[subIndex] = y1 + dy*j[subIndex];
            x[subIndex] = cx[subIndex];
            y[subIndex] = cy[subIndex];
            val[subIndex]=0;
        }

        unsigned char todo=16;   // lanes believed still inside the set
        uint32_t count = 1;
        uint32_t stepSize=16;    // iterations advanced per vector pass

        while ((todo>2) && (count < maxIters)) {
            todo=false;
            #pragma omp simd reduction(+:todo)
            for (uint32_t subIndex = 0; subIndex < N; ++subIndex) {
                bool localTodo;
                #pragma unroll (4)
                for (uint32_t k = 0; k < stepSize ; ++k) {
                    float x2 = x[subIndex] * x[subIndex];
                    float y2 = y[subIndex] * y[subIndex];
                    localTodo = (x2 + y2 < 4.0f);
                    float xy = x[subIndex]*y[subIndex];
                    // Masked update: when localTodo is 0 the multiplication
                    // cancels the step, leaving an escaped lane unchanged.
                    x[subIndex] +=(localTodo) * (x2 - y2 + cx[subIndex] - x[subIndex]);
                    y[subIndex] +=(localTodo) * (2 * xy + cy[subIndex] - y[subIndex]);
                    val[subIndex]=val[subIndex]+localTodo;
                }
                // NOTE(review): only the last inner iteration's localTodo is
                // summed here, so `todo` is a snapshot after stepSize steps.
                todo+=localTodo;
            }
            count+=stepSize;
        }

        // Scalar tail: finish any lanes that are still iterating exactly.
        uint32_t stopCount=count;
        for (uint32_t subIndex = 0; subIndex < N; ++subIndex) {
            float x2 = x[subIndex] * x[subIndex];
            float y2 = y[subIndex] * y[subIndex];
            bool localTodo = (x2 + y2 < 4.0f);
            count=stopCount;
            while ((localTodo) && (count < maxIters)) {
                float x2 = x[subIndex] * x[subIndex];
                float y2 = y[subIndex] * y[subIndex];
                if (!(x2 + y2 < 4.0f)) break;
                float xy = x[subIndex]*y[subIndex];
                x[subIndex] +=(x2 - y2 + cx[subIndex]);
                y[subIndex] += (2 * xy + cy[subIndex]);
                val[subIndex]=val[subIndex]+1;
                count++;
            }
        }

        // Store the batch's iteration counts.
        #pragma omp simd
        for (uint32_t subIndex = 0; subIndex < N; ++subIndex) {
            image[index+subIndex] = val[subIndex];
        }
    }
}

/**
 * Benchmark driver: runs mandel_openmp `iterations` times over a fixed
 * window, timing each run with rdtsc, converting the raw iteration counts to
 * a color bitmap each pass, and printing average/stddev/speedup statistics.
 */
void benchmarkOpenMP(int width, int height, int depth, char * filename, char * resultPrefix, int iterations, TimingStatistics & reference)
{
    TimingStatistics stats;
    UME::Bitmap bmp(width, height, UME::PIXEL_TYPE_RGB);
    uint8_t* image = bmp.GetRasterData();

    unsigned short *raw_image;
    // Using 64B alignment in this case will not incur visible memory penalty,
    // but will guarantee proper alignment.
    raw_image = (unsigned short *)UME::DynamicMemory::AlignedMalloc(width*height*sizeof(unsigned short), 64);

    for (int i = 0; i < iterations; i++) {
        TIMING_RES start, end;
        memset(raw_image, 0, width*height *sizeof(uint16_t));

        start = __rdtsc();
        mandel_openmp(0.29768f, 0.48364f, 0.29778f, 0.48354f, width, height, depth, raw_image);
        end = __rdtsc();
        stats.update(end - start);

        // Rewrite algorithm output to BMP format
        for (int h = 0; h < height; h++) {
            for (int w = 0; w < width; w++) {
                int value = raw_image[h*width + w];
                Color c = getColor(value);
                image[3 * (h*width + w) + 0] = c.r;
                image[3 * (h*width + w) + 1] = c.g;
                image[3 * (h*width + w) + 2] = c.b;
            }
        }

        // Saving to file to make sure the results generated are correct
        bmp.SaveToFile(filename);
        bmp.ClearTarget(0, 255, 0);
    }

    std::cout << resultPrefix << (unsigned long long) stats.getAverage() << ", dev: " << (unsigned long long) stats.getStdDev() << " (speedup: " << stats.calculateSpeedup(reference) << ")" << std::endl;

    UME::DynamicMemory::AlignedFree(raw_image);
}

#undef N
#endif
stresslet_real_rc.c
#include "stresslet_real_rc.h"
#include "cell_list.h"

#ifdef BEENAKKER
#include "beenakker_op_fd.h"
#else
#error "Must provide -D<method> to compiler"
#endif

// CRITICAL expands to an OpenMP critical section when compiled with OpenMP,
// and to nothing in a serial build, so the same code works in both.
#ifdef _OPENMP
#define CRITICAL _Pragma("omp critical")
#else
#define CRITICAL
#endif

// NOTE: SWAP relies on a variable 'tmp' of a compatible type being in scope
// at the point of use (see quicksort()).
#define SWAP(x,y) { tmp=x;x=y;y=tmp; }

static void quicksort(int* restrict list, int* restrict slave, int m, int n);
static void barrier(int bar_num, int *barrier_in, int *barrier_out, int *num_procs);
static void transpose(const double* restrict in, double* restrict out, const int N);

// ==== GENERATE TRIPLETS FOR MATRIX ASSEMBLY
//
// Build the sparse-matrix triplets (row, col, val[i][j]) for all pairwise
// real-space interactions within cutoff rc, under periodic boundary
// conditions given by 'box'.  Only the upper-triangular value components
// val[i][j], j >= i, are filled (the operator is symmetric in i,j).
//
// Inputs:
//   x_in, nvec_in : point coordinates and normal vectors, legacy N-major
//                   layout (transposed to xyz-major internally)
//   N             : number of points
//   box           : periodic box lengths (x,y,z)
//   xi            : operator split parameter passed to op_A_* (presumably the
//                   Ewald decomposition parameter — confirm in beenakker_op_fd)
//   rc            : real-space truncation radius
//   nlhs          : number of outputs requested (MEX-style); if 1, the sorted
//                   column list is not returned and is freed here
// Outputs (all allocated here, ownership passes to caller):
//   *row_p, *col_p        : triplet indices, sorted by column then row
//   val[i][j]             : triplet values, in pre-sort (insertion) order --
//                           use *idx_in_array_p to permute them
//   *buck_size_p          : per-column element counts from the counting sort
//   *idx_in_array_p       : permutation mapping sorted position -> original index
//   *numel_p              : number of triplets
void get_rs_triplets ( const double* restrict x_in,
                       const double* restrict nvec_in,
                       const int N,
                       const double* restrict box,
                       const double xi,
                       const double rc,
                       const int nlhs,
                       int* restrict *row_p,
                       int* restrict *col_p,
                       double* restrict val[3][3],
                       int* restrict *buck_size_p,
                       int* restrict *idx_in_array_p,
                       int* numel_p )
{
    // Fix input (legacy format gives bad memory access)
    double* restrict x = __MALLOC(3*N*sizeof(double));
    double* restrict nvec = __MALLOC(3*N*sizeof(double));
    transpose(x_in, x, N);
    transpose(nvec_in, nvec, N);

    // Setup output variables
    int* restrict row;
    int* restrict col;
    int* restrict idx_in_array;
    int* restrict buck_size;

    // Setup variables
    int i,j;
    int ncell[3];
    int* restrict ll;
    int* restrict head;
    double rn;

    // Offsets of the 27 neighbour cells (home cell included at offset 0,0,0).
    int px[27] = {-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1};
    int py[27] = {-1,-1,-1, 0, 0, 0, 1, 1, 1,-1,-1,-1, 0, 0, 0, 1, 1, 1,-1,-1,-1, 0, 0, 0, 1, 1, 1};
    int pz[27] = {-1,-1,-1,-1,-1,-1,-1,-1,-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1};

    struct timeval tic, toc;
    gettimeofday(&tic, NULL);
    double time_spent;

    // Build cell list
    build_linked_cell_list(x, N, box, rc, &rn, ncell, &ll, &head);

    if(VERBOSE)
    {
        __PRINTF("[RSRC] SPARSE MATRIX\n");
        __PRINTF("[RSRC] %s, xi=%g\n", OP_TAG, xi);
        __PRINTF("[RSRC] rc=%.3f, rn=%.3f\n", rc, rn);
        __PRINTF("[RSRC] box=(%g,%g,%g), ncell=(%d,%d,%d)\n",
                 box[0],box[1],box[2],
                 ncell[0],ncell[1],ncell[2]);
    }

    //============================================================
    // CALCULATE INTERACTIONS
    //
    // For all vectors, go through neighbors and save interactions
    // in vectors that are used to create a sparse matrix

    // Allocate a guess based on average density +50%
    int maxel = round( 1.5 * N*N*4*PI*rc*rc*rc/3/(box[0]*box[1]*box[2]) );
    int numel = 0;
    size_t malloc_tot = maxel * (2*sizeof(int) + 6*sizeof(double));
    ASSERT(malloc_tot < MALLOC_MAX, "MALLOC_MAX exceeded");
    row = __MALLOC(maxel*sizeof(int));
    col = __MALLOC(maxel*sizeof(int));
    for(i=0;i<=2;i++)
        for(j=i;j<=2;j++)
        {
            val[i][j] = __MALLOC(maxel*sizeof(double));
        }

#ifdef _OPENMP
    // Shared state for the hand-rolled two-phase barrier used while
    // reallocating the (shared) triplet arrays mid-loop.
    int barrier_in[2] = {0,0};
    int barrier_out[2] = {0,0};
    int realloc_done=0;
    int num_procs;
#pragma omp parallel private(i,j) \
    shared(numel,maxel,row,col,val,box,x,nvec,head,ll,px,py,pz,ncell,rn,barrier_in,barrier_out,realloc_done,num_procs) \
    default(none)
#endif
    { // Begin parallel section
        int head_idx;
        int icell[3], home_cell[3];
        int idx_s,idx_t,ip;
        double rsq;
        double pshift[3], xs[3], ns[3], nt[3], xr[3];
        double A1[3][3], A2[3][3];
        const double rcsq = rc*rc;

        // Allocate a bufffer of interactions to be written
        // into triplet list
        const int buf_size = 256;
        int buf_cnt = 0;
        int idx_buf, next_idx_t;
        int* restrict buf_idx_t;
        double* restrict buf_xr;
        double* restrict buf_rsq;
        double* restrict C;
        double* restrict D;
        int tnum = 0;
#ifdef _OPENMP
        tnum = omp_get_thread_num();
#pragma omp single
        num_procs = omp_get_num_threads();
        if(VERBOSE)
        {
#pragma omp master
            __PRINTF("[RSRC] Running on %d threads.\n",num_procs);
        }
        // Seems mxMalloc/mxFree are not thread safe
#pragma omp critical
        {
#endif
            buf_idx_t = __MALLOC(buf_size*sizeof(int));
            buf_xr = __MALLOC(3*buf_size*sizeof(double));
            buf_rsq = __MALLOC(buf_size*sizeof(double));
            C = __MALLOC(buf_size*sizeof(double));
            D = __MALLOC(buf_size*sizeof(double));
#ifdef _OPENMP
        }
#pragma omp for schedule(dynamic) nowait
#endif
        // Loop over all points
        for(idx_s=0;idx_s<N;idx_s++)
        {
            for(j=0; j<3; j++)
            {
                // Source point
                xs[j] = x[idx_s*3+j];
                // Determine home cell
                home_cell[j] = xs[j]/rn;
                // Source point normal vector
                ns[j] = nvec[idx_s*3+j];
            }
            // Iterate through near cells (including home cell)
            for(ip=0; ip<27; ip++)
            {
                // Get neigh cell
                icell[0] = home_cell[0] + px[ip];
                icell[1] = home_cell[1] + py[ip];
                icell[2] = home_cell[2] + pz[ip];
                // Periodic wrap
                for(j=0; j<3; j++)
                {
                    // (Could do this with mod)
                    pshift[j] = 0;
                    if(icell[j] >= ncell[j])
                    {
                        icell[j] = 0;
                        pshift[j] = box[j];
                    }
                    else if(icell[j]<0)
                    {
                        icell[j] = ncell[j]-1;
                        pshift[j] = -box[j];
                    }
                }
                head_idx = icell[0] + icell[1]*ncell[0] + icell[2]*ncell[1]*ncell[0];
                // Go through cell list
                idx_t = head[head_idx];
                while(1)
                {
                    // Only process each pair once (idx_t > idx_s); both
                    // orientations are emitted together further down.
                    if(idx_t > idx_s)
                    {
                        // r points from s to t
                        for(j=0; j<3; j++)
                            xr[j] = x[idx_t*3+j] + pshift[j] - xs[j];
                        // Check if we are within truncation radius
                        rsq = xr[0]*xr[0] + xr[1]*xr[1] + xr[2]*xr[2];
                        if(rsq <= rcsq)
                        {
                            // Yes, so put interaction in buffer
                            buf_idx_t[buf_cnt] = idx_t;
                            buf_rsq[buf_cnt] = rsq;
                            for(i=0;i<3;i++)
                                buf_xr[3*buf_cnt+i] = xr[i];
                            buf_cnt++;
                        }
                    }
                    // Save location of next point in cell chain
                    if(idx_t == -1)
                        next_idx_t = -1;
                    else
                        next_idx_t = ll[idx_t];
                    // Empty buffer if last point of last neighbour,
                    // or buffer full
                    if ( (ip==26 && next_idx_t==-1) || buf_cnt==buf_size)
                    {
                        // Check if we have enough space to hold buffer contents
                        int idx_write, can_write;
#ifdef _OPENMP
#pragma omp critical
#endif
                        { /* begin critical section */
                            // Check if buffer holds writing space for me.
                            // Each buffered pair produces 2 triplets (s->t and t->s).
                            if(maxel-numel <= 2*buf_cnt)
                            {
                                can_write = 0;
                                //__PRINTF("[%d] Can't write, reallocation needed! \n",tnum);
                            }
                            else
                                can_write = 1;
                            // Reserve writing in either case
                            // (reservation stays valid across the realloc below,
                            // since realloc only grows the arrays).
                            idx_write = numel;
                            numel += 2*buf_cnt;
                        } /* end critical section */

                        /* Begin can_write==0 */
                        if(can_write==0)
                        {
                            int alloc_add = buf_size; // How much to add to allocation (single thread)
#ifdef _OPENMP
                            // Everybody has to wait here before reallocation
                            // Allocate more than a fuller buffer for every thread
                            alloc_add = num_procs*buf_size;
#pragma omp critical
                            realloc_done = 0; // Everybody agrees reallocation has not been done
                            barrier(0, barrier_in, barrier_out, &num_procs);
#pragma omp critical
                            { // Critical section
                                // First thread through does the realloc; the
                                // others see realloc_done==1 and skip it.
                                if(realloc_done==0)
                                {
                                    realloc_done=1;
#endif
                                    // Allocate for full buffer(s) + 20% more
                                    int new_maxel = ceil(1.2*(maxel+alloc_add));
                                    if (VERBOSE)
                                        __PRINTF("[RSRC][%d] Reallocating triplet vectors %d -> %d\n",tnum,maxel,new_maxel);
                                    maxel = new_maxel;
                                    row = __REALLOC(row, maxel*sizeof(int));
                                    col = __REALLOC(col, maxel*sizeof(int));
                                    for(i=0;i<=2;i++)
                                        for(j=i;j<=2;j++)
                                            val[i][j] = __REALLOC(val[i][j], maxel*sizeof(double));
#ifdef _OPENMP
                                    //__PRINTF("[%d] Done \n",tnum);
                                }
                                else
                                {
                                    //__PRINTF("[%d] Someone else reallocated \n",tnum);
                                }
                            }
                            barrier(1, barrier_in, barrier_out, &num_procs);
#endif
                        } /* End can_write==0 */

                        // Do delayed calculations
                        op_A_CD(C,D,buf_rsq,buf_cnt,xi);
                        //#pragma omp critical
                        //__PRINTF("[%d] Begin write \n",tnum);
                        // Write triplets
                        for(idx_buf=0;idx_buf<buf_cnt;idx_buf++)
                        {
                            idx_t = buf_idx_t[idx_buf];
                            for(i=0;i<3;i++)
                            {
                                xr[i] = buf_xr[3*idx_buf+i];
                                // Source point normal vector
                                nt[i] = nvec[idx_t*3+i];
                            }
                            // Calculate interactions t->s and s<-t
                            op_A_symm_CD(A1,A2,xr,ns,nt,xi,C[idx_buf],D[idx_buf]);
                            // Append results to row,col,val vectors
                            row[idx_write] = idx_t;
                            col[idx_write] = idx_s;
                            for(i=0; i<=2; i++)
                                for(j=i; j<=2; j++)
                                {
                                    val[i][j][idx_write] = A1[i][j];
                                }
                            idx_write++;
                            row[idx_write] = idx_s;
                            col[idx_write] = idx_t;
                            for(i=0; i<=2; i++)
                                for(j=i; j<=2; j++)
                                {
                                    val[i][j][idx_write] = A2[i][j];
                                }
                            idx_write++;
                        } // endfor buffer
                        //#pragma omp critical
                        //__PRINTF("[%d] End write \n",tnum);
                        buf_cnt = 0;
                    } // endif chainend or buffull
                    idx_t = next_idx_t;
                    if(idx_t == -1)
                        break; // Chain ended
                } // End of neighbours in this cell
            } // End of cells
        } // End of particles
#ifdef _OPENMP
#pragma omp critical
        {
            //__PRINTF("[%d] Exit loop , barrier_in={%d,%d}\n",tnum, barrier_in[0], barrier_in[1]);
#pragma omp atomic // One less thread going around in loop
            num_procs--;
        }
#pragma omp critical
#endif
        {
            __FREE(buf_idx_t);
            __FREE(buf_xr);
            __FREE(buf_rsq);
            __FREE(C);
            __FREE(D);
        }
    } // End parallel section

    // Free allocations
    __FREE(head);
    __FREE(ll);
    __FREE(x);
    __FREE(nvec);

    if(VERBOSE)
    {
        gettimeofday(&toc, NULL);
        time_spent = DELTA(tic,toc);
        __PRINTF("[RSRC] Triplets generated in %.3f seconds.\n", time_spent);
    }

    // Reallocate (shrink) values to actual size used
    gettimeofday(&tic, NULL);
    for(i=0;i<=2;i++)
        for(j=i;j<=2;j++)
        {
            double* tmp = val[i][j];
            val[i][j] = __REALLOC(val[i][j], numel*sizeof(double));
            if (tmp != val[i][j] && VERBOSE)
                __PRINTF("[RSRC] Realloc moved val[%d][%d].\n", i, j);
        }
    if(VERBOSE)
    {
        gettimeofday(&toc, NULL);
        time_spent = DELTA(tic,toc);
        __PRINTF("[RSRC] Realloc %d->%d took %.3f seconds.\n", maxel, numel, time_spent);
    }

    //============================================
    // SORT RESULTS WITH COUNTING + QUICK SORT
    // Counting sort on columns, then quicksort on rows
    // in each column
    // (Turns out this is counting sort rather than bucket sort,
    // which I initially thought, hence the buck_* naming.)
    gettimeofday(&tic, NULL);
    buck_size = __MALLOC(N*sizeof(int));
    idx_in_array = __MALLOC(numel*sizeof(int));
    int* restrict buck_count = __MALLOC(N*sizeof(int));
    int* restrict buck_pos = __MALLOC(N*sizeof(int));
    int buck_idx,new_idx;
    // Init lists
    for(i=0;i<N;i++)
    {
        buck_size[i]=0;
        buck_count[i]=0;
    }
    // Count number of elements in each bucket (column)
    for(i=0;i<numel;i++)
    {
        buck_idx = col[i];
        buck_size[buck_idx]++;
    }
    // Cumulative addition to get locations of each bucket after sort,
    // + save largest bucket size for later.
    buck_pos[0] = 0;
    for(i=1;i<N;i++)
    {
        buck_pos[i] = buck_pos[i-1]+buck_size[i-1];
    }
    // Assign each element to a bucket, store permutations in idx_in_array
    int* restrict rowtmp = __MALLOC(numel*sizeof(int));
    for(i=0;i<numel;i++)
    {
        buck_idx = col[i];
        new_idx = buck_pos[buck_idx] + buck_count[buck_idx];
        idx_in_array[ new_idx ] = i;
        buck_count[buck_idx]++;
    }
    __FREE(buck_count); // Free counter

    // Sort rows using permutations
    // (work-shared)
#ifdef _OPENMP
#pragma omp parallel for default(shared)
#endif
    for(i=0;i<numel;i++)
        rowtmp[i] = row[ idx_in_array[i] ];
    __FREE(row);
    row = rowtmp;

    if(nlhs==1)
    {
        __FREE(col); // Free column list if only returning matrix,
    }
    else
    { // else sort columns too.
        // Could be done faster with bucket info, but sorted columns are
        // not needed for real application.
        int* restrict coltmp = __MALLOC(numel*sizeof(int));
        for(i=0;i<numel;i++)
            coltmp[i] = col[ idx_in_array[i] ];
        __FREE(col);
        col = coltmp;
    }

    gettimeofday(&toc,NULL);
    time_spent = DELTA(tic,toc);
    if(VERBOSE)
        __PRINTF("[RSRC] Counting sort of cols finished in %.3f seconds.\n", time_spent);
    gettimeofday(&tic,NULL);

    // Quicksort on buckets
    // Each bucket contains a compressed column.
#ifdef _OPENMP
#pragma omp parallel for schedule(dynamic) default(none) shared(buck_pos,buck_size,idx_in_array,row)
#endif
    for(buck_idx=0;buck_idx<N;buck_idx++)
    {
        int begin = buck_pos[buck_idx];
        int size = buck_size[buck_idx];
        quicksort(row, idx_in_array, begin, begin+size-1) ;
    }
    __FREE(buck_pos); // Free bucket list

    gettimeofday(&toc,NULL);
    time_spent = DELTA(tic,toc);
    if(VERBOSE)
        __PRINTF("[RSRC] Quicksort of rows finished in %.3f seconds.\n", time_spent);

    // Set return pointers
    *row_p = row;
    *col_p = col;
    *buck_size_p = buck_size;
    *idx_in_array_p = idx_in_array;
    *numel_p = numel;
}

//============ QUICKSORT ROUTINE
// Applies quicksort on an interval (m,n) of *list,
// performs the same permutations on *slave.
// Uses a private stack instead of making recursive calls.
// Iterative quicksort of list[m..n] (inclusive), applying the identical
// permutation to slave[m..n].  Uses an explicit stack (beg/end) instead of
// recursion; the shorter partition is pushed last so it is processed first,
// bounding stack depth.  On stack overflow (MAX_LEVELS) it prints an error
// and returns with the range only partially sorted.
static void quicksort(int* restrict list, int* restrict slave, int m, int n)
{
#define MAX_LEVELS 64
    int beg[MAX_LEVELS], end[MAX_LEVELS]; // Stack
    int key,i,j,k,s,tmp;  // 'tmp' is required by the SWAP macro
    s=0;
    beg[0]=m;
    end[0]=n;
    while (s>=0)
    {
        // While work in stack, pop
        m=beg[s];
        n=end[s];
        if (m<n)
        {
            k = m+(n-m)/2; // Choose middle for pivot
            SWAP(list[m],list[k]); // Swap out pivot
            SWAP(slave[m],slave[k]);
            // Do quicksort
            key = list[m];
            i = m+1;
            j = n;
            while(i <= j)
            {
                while((i <= n) && (list[i] <= key)) i++;
                while((j >= m) && (list[j] > key)) j--;
                if( i < j)
                {
                    SWAP(list[i],list[j]);
                    SWAP(slave[i],slave[j]);
                }
            }
            // Swap in pivot at right place
            SWAP(list[m],list[j]);
            SWAP(slave[m],slave[j]);
            if(s == MAX_LEVELS-1) // Stack full
            {
                __PRINTF("ERROR. Quicksort reached MAX_LEVELS\n");
                return;
            }
            // Recursively sort the lesser list
            beg[s] = m;
            end[s] = j-1;
            beg[s+1]=j+1;
            end[s+1]=n;
            s += 1;
            // Do shortest interval first to limit stack use
            if (end[s]-beg[s]>end[s-1]-beg[s-1])
            {
                SWAP(beg[s],beg[s-1]);
                SWAP(end[s],end[s-1]);
            }
        }
        else
        {
            s--;
        }
    }
}

//============ Home-brewed barrier
// Reusable spin barrier built from OpenMP critical sections and flushes,
// used because some threads may have already left the enclosing worksharing
// loop (num_procs is decremented as threads exit, so the barrier only waits
// for threads still in flight).  bar_num selects one of the two barrier
// slots (arrive/depart counters).  No-op in serial builds.
static void barrier(int bar_num, int *barrier_in, int *barrier_out, int *num_procs)
{
#ifdef _OPENMP
    //int tnum = omp_get_thread_num();
    // Barrrier arrive
#pragma omp critical
    {
        barrier_in[bar_num]++; // Announce you arrived at barrier
        //__PRINTF("[%d] Reached barrier %d (%d,%d) \n", tnum, bar_num, barrier_in[bar_num], *num_procs);
    }
    // Barrier spin
    while(barrier_in[bar_num] < *num_procs)
    {
#pragma omp flush
    };
    // Barrier depart
#pragma omp critical
    {
        barrier_out[bar_num]++; // Anounce you passed barrier
        //__PRINTF("[%d] Passed barrier %d (%d,%d) \n", tnum, bar_num, barrier_out[bar_num], *num_procs);
    }
    // Barrier reset
    // (last thread through zeroes both counters so the slot can be reused)
#pragma omp critical
    {
        if (barrier_out[bar_num] == barrier_in[bar_num])
        {
            //__PRINTF("[%d] Everybody passed barrier %d. \n",tnum, bar_num);
            barrier_in[bar_num] = 0;
            barrier_out[bar_num] = 0;
        }
    }
#endif
}

// ******************************** compute_rsrc_direct ******************
// ***********************************************************************

// Transpose vector
// Converts the legacy N-major layout in[i + j*N] (j=0..2) to the
// xyz-major layout out[i*3 + j] used internally.
void transpose(const double* restrict in, double* restrict out, const int N)
{
    for(int i=0; i<N; i++)
    {
        for(int j=0; j<3; j++)
        {
            out[i*3+j] = in[i+j*N];
        }
    }
}

// Empty buffer used in direct computation
// Evaluates the operator for all buffered pairs (source idx_s, targets in
// buf_idx_t): accumulates the s-side contribution into phi_idx_s and the
// t-side contribution directly into phi[idx_t*3..+2].  C/D are scratch
// arrays filled by op_A_CD for the whole buffer in one call.
static void compute_buffer_direct( double* restrict C,
                                   double* restrict D,
                                   double* restrict buf_rsq,
                                   const int buf_cnt,
                                   const double xi,
                                   const int* restrict buf_idx_t,
                                   const double* restrict buf_xr,
                                   const double* restrict nvec,
                                   const double* restrict fvec,
                                   double* restrict ns,
                                   double* restrict fs,
                                   double* restrict phi,
                                   double* restrict phi_idx_s)
{
    int idx_t;
    double xr[3],nt[3],ft[3];
    // Do delayed calculations
    op_A_CD(C,D,buf_rsq,buf_cnt,xi);
    // Save interactions
    for(int idx_buf=0;idx_buf<buf_cnt;idx_buf++)
    {
        idx_t = buf_idx_t[idx_buf];
        for(int i=0;i<3;i++)
        {
            xr[i] = buf_xr[3*idx_buf+i];
            // Target point normal vector
            nt[i] = nvec[idx_t*3+i];
            // Target point distribution density
            ft[i] = fvec[idx_t*3+i];
        }
        // Calculate interactions t->s and s<-t
        double phi_idx_t[3] = {0.0,0.0,0.0};
        op_A_comp_symm_CD(xr,phi_idx_s,phi_idx_t,ns,nt,fs,ft,xi,C[idx_buf],D[idx_buf]);
        for(int i=0; i<3; i++)
            phi[idx_t*3+i] += phi_idx_t[i];
    }
}

// ==== Compute result directly
// Do not build sparse matrix
//
// Matrix-free evaluation of the same real-space sum as get_rs_triplets():
// for each point, applies the operator to the density fvec over all
// neighbours within rc (periodic box), returning the result in *phi_p
// (allocated here, legacy N-major layout, ownership passes to caller).
// Each thread accumulates into a private phi array; the arrays are then
// combined into phi_out with per-element atomic adds.
void compute_rsrc_direct (const double* restrict x_in,
                          const double* restrict nvec_in,
                          const double* restrict fvec_in,
                          const int N,
                          const double* restrict box,
                          const double xi,
                          const double rc,
                          double* restrict *phi_p )
{
    struct timeval tic, toc;
    gettimeofday(&tic, NULL);
    double time_spent;

    // Fix input (legacy format gives bad memory access)
    double* restrict x = __MALLOC(3*N*sizeof(double));
    double* restrict nvec = __MALLOC(3*N*sizeof(double));
    double* restrict fvec = __MALLOC(3*N*sizeof(double));
    transpose(x_in, x, N);
    transpose(fvec_in, fvec, N);
    transpose(nvec_in, nvec, N);
    gettimeofday(&toc, NULL);
    double time_tr = DELTA(tic,toc);

    // Setup output
    double* restrict phi_out = __MALLOC(3*N*sizeof(double));
    for(int i=0;i<3*N;i++)
        phi_out[i] = 0.0;

    // Setup variables
    int ncell[3];
    int* restrict cell_list;
    int* restrict cell_idx;
    double rn;

    // Offsets of the 27 neighbour cells (home cell included).
    int px[27] = {-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1};
    int py[27] = {-1,-1,-1, 0, 0, 0, 1, 1, 1,-1,-1,-1, 0, 0, 0, 1, 1, 1,-1,-1,-1, 0, 0, 0, 1, 1, 1};
    int pz[27] = {-1,-1,-1,-1,-1,-1,-1,-1,-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1};

    // Build cell list
    gettimeofday(&tic, NULL);
    build_cell_list(x, N, box, rc, &rn, ncell, &cell_list, &cell_idx);
    gettimeofday(&toc, NULL);
    time_spent = DELTA(tic,toc);

    if(VERBOSE)
    {
        __PRINTF("[RSRC] MATRIX-FREE\n");
        __PRINTF("[RSRC] %s, xi=%g\n", OP_TAG, xi);
        __PRINTF("[RSRC] rc=%.3f, rn=%.3f\n", rc, rn);
        __PRINTF("[RSRC] box=(%g,%g,%g), ncell=(%d,%d,%d)\n",
                 box[0],box[1],box[2],
                 ncell[0],ncell[1],ncell[2]);
        __PRINTF("[RSRC] Cell list built in %.3f seconds.\n", time_spent);
    }

    gettimeofday(&tic, NULL);
#ifdef _OPENMP
#pragma omp parallel \
    shared(phi_out,box,x,nvec,fvec,cell_list,cell_idx, \
    px,py,pz,ncell,rn) \
    default(none)
#endif
    { // Begin parallel section
        // Setup local output (per-thread accumulator)
        double* restrict phi;
        CRITICAL
        {
            phi = __MALLOC(3*N*sizeof(double));
        }
        for(int i=0;i<3*N;i++)
            phi[i] = 0.0;

        int i,j;
        int icell_idx;
        int icell[3], home_cell[3];
        int idx_s,idx_t,ip;
        double rsq;
        double pshift[3], xs[3], ns[3], fs[3], xr[3];
        const double rcsq = rc*rc;

        // Allocate a bufffer of interactions to be written
        // into triplet list
        const int buf_size = 256;
        int buf_cnt = 0;
        int buf_idx_t[buf_size];
        double buf_xr[3*buf_size];
        double buf_rsq[buf_size];
        double C[buf_size];
        double D[buf_size];

        int num_procs = 1;
#ifdef _OPENMP
        num_procs = omp_get_num_threads();
        if(VERBOSE)
        {
#pragma omp master
            __PRINTF("[RSRC] Running on %d threads.\n",num_procs);
        }
#pragma omp for schedule(dynamic) nowait
#endif
        // Loop over all points (work-shared)
        for(idx_s=0;idx_s<N;idx_s++)
        {
            double phi_idx_s[3] = {0.0, 0.0, 0.0};
            for(i=0; i<3; i++)
            {
                // Source point
                xs[i] = x[idx_s*3+i];
                // Source point normal vector
                ns[i] = nvec[idx_s*3+i];
                // Source point distribution density
                fs[i] = fvec[idx_s*3+i];
                // Determine home cell
                home_cell[i] = xs[i]/rn;
            }
            // Iterate through near cells (including home cell)
            for(ip=0; ip<27; ip++)
            {
                // Get neigh cell
                icell[0] = home_cell[0] + px[ip];
                icell[1] = home_cell[1] + py[ip];
                icell[2] = home_cell[2] + pz[ip];
                // Periodic wrap
                for(j=0; j<3; j++)
                {
                    // (Could do this with mod)
                    pshift[j] = 0;
                    if(icell[j] >= ncell[j])
                    {
                        icell[j] = 0;
                        pshift[j] = box[j];
                    }
                    else if(icell[j]<0)
                    {
                        icell[j] = ncell[j]-1;
                        pshift[j] = -box[j];
                    }
                }
                icell_idx = icell[0] + icell[1]*ncell[0] + icell[2]*ncell[1]*ncell[0];
                // Go through cell list
                int cell_a = cell_idx[icell_idx];
                int cell_b = cell_idx[icell_idx+1];
                for(int point_idx=cell_a; point_idx<cell_b; point_idx++)
                {
                    idx_t = cell_list[point_idx];
                    // Only process each pair once; both directions are
                    // accumulated by compute_buffer_direct.
                    if(idx_t > idx_s)
                    {
                        // r points from s to t
                        for(j=0; j<3; j++)
                            xr[j] = x[idx_t*3+j] + pshift[j] - xs[j];
                        // Check if we are within truncation radius
                        rsq = xr[0]*xr[0] + xr[1]*xr[1] + xr[2]*xr[2];
                        if(rsq <= rcsq)
                        {
                            // Yes, so put interaction in buffer
                            buf_idx_t[buf_cnt] = idx_t;
                            buf_rsq[buf_cnt] = rsq;
                            for(i=0;i<3;i++)
                                buf_xr[3*buf_cnt+i] = xr[i];
                            buf_cnt++;
                        }
                    }
                    // Empty buffer if full
                    if (buf_cnt==buf_size)
                    {
                        compute_buffer_direct(C,D,buf_rsq,buf_cnt,xi,buf_idx_t,buf_xr,nvec,fvec,ns,fs,phi,phi_idx_s);
                        buf_cnt = 0;
                    }
                } // End of neighbours in this cell
            } // End of cells
            // Empty buffer before writing phi_s
            compute_buffer_direct(C,D,buf_rsq,buf_cnt,xi,buf_idx_t,buf_xr,nvec,fvec,ns,fs,phi,phi_idx_s);
            buf_cnt = 0;
            // Save additions to point s
            for(int i=0; i<3; i++)
                phi[idx_s*3+i] += phi_idx_s[i];
        } // End of particles

#ifdef _OPENMP
        // Yes, this reduction is probably crap HPC-wise,
        // but it works well on my quad core right now.
        struct timeval tic_red, toc_red;
#pragma omp master
        gettimeofday(&tic_red, NULL);
        for(i=0; i<3*N; i++)
        {
#pragma omp atomic
            phi_out[i] += phi[i];
        }
#pragma omp master
        {
            gettimeofday(&toc_red, NULL);
            double time_spent = DELTA(tic_red,toc_red);
            if(VERBOSE)
                __PRINTF("[RSRC] Reduction took %.3f seconds.\n", time_spent);
        }
        // free/malloc not thread safe under MEX
        CRITICAL
        {
            __FREE(phi);
        }
#else
        // Serial build: the private accumulator IS the result; adopt it.
        __FREE(phi_out);
        phi_out = phi;
#endif
    } // End parallel section
    gettimeofday(&toc, NULL);
    time_spent = DELTA(tic,toc);

    gettimeofday(&tic, NULL);
    __FREE(cell_list);
    __FREE(cell_idx);
    __FREE(x);
    __FREE(nvec);
    __FREE(fvec);

    // Transpose result back to the legacy N-major layout for the caller.
    double* restrict phi_tr = __MALLOC(3*N*sizeof(double));
    for(int i=0; i<N; i++)
    {
        for(int j=0; j<3; j++)
        {
            phi_tr[i+j*N] = phi_out[i*3+j];
        }
    }
    __FREE(phi_out);
    gettimeofday(&toc, NULL);
    time_tr += DELTA(tic,toc);

    if(VERBOSE)
    {
        __PRINTF("[RSRC] Transpose time: %.3f seconds.\n", time_tr);
        __PRINTF("[RSRC] phi computed in %.3f seconds.\n", time_spent);
    }

    *phi_p = phi_tr;
}
GB_binop__isgt_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isgt_int64) // A.*B function (eWiseMult): GB (_AemultB_01__isgt_int64) // A.*B function (eWiseMult): GB (_AemultB_02__isgt_int64) // A.*B function (eWiseMult): GB (_AemultB_03__isgt_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isgt_int64) // A*D function (colscale): GB (_AxD__isgt_int64) // D*A function (rowscale): GB (_DxB__isgt_int64) // C+=B function (dense accum): GB (_Cdense_accumB__isgt_int64) // C+=b function (dense accum): GB (_Cdense_accumb__isgt_int64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isgt_int64) // C=scalar+B GB (_bind1st__isgt_int64) // C=scalar+B' GB (_bind1st_tran__isgt_int64) // C=A+scalar GB (_bind2nd__isgt_int64) // C=A'+scalar GB (_bind2nd_tran__isgt_int64) // C type: int64_t // A type: int64_t // B,b type: int64_t // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are 
identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int64_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int64_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x > y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGT || GxB_NO_INT64 || GxB_NO_ISGT_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__isgt_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isgt_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isgt_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isgt_int64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isgt_int64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isgt_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__isgt_int64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict 
C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isgt_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
//------------------------------------------------------------------------------
// Generated kernels for the ISGT operator on int64: Cx [p] = (x > y), with the
// boolean result stored as int64_t.  Kernel bodies are expanded from shared
// template files via #include; the #if GB_DISABLE guard compiles each kernel
// out when this operator/type pairing is disabled at build time.
// NOTE(review): the first few lines below are the tail of a kernel whose head
// lies before this chunk; they are kept byte-identical.
//------------------------------------------------------------------------------

#undef  GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__isgt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body supplied by the shared template (uses the macros defined above)
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isgt_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isgt_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries GBB reports as not present (Bb bitmap of B)
        if (!GBB (Bb, p)) continue ;
        int64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x > bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isgt_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries GBB reports as not present (Ab bitmap of A)
        if (!GBB (Ab, p)) continue ;
        int64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij > y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x > aij) ;                       \
}

GrB_Info GB (_bind1st_tran__isgt_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template expansion
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij > y) ;                       \
}

GrB_Info GB (_bind2nd_tran__isgt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* profile.c */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP RRRR OOO FFFFF IIIII L EEEEE % % P P R R O O F I L E % % PPPP RRRR O O FFF I L EEE % % P R R O O F I L E % % P R R OOO F IIIII LLLLL EEEEE % % % % % % MagickCore Image Profile Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/configure.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/option-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/profile-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#if defined(MAGICKCORE_LCMS_DELEGATE)
#if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H)
#include <wchar.h>
#include <lcms/lcms2.h>
#else
#include <wchar.h>
#include "lcms2.h"
#endif
#endif

/*
  Definitions.

  LCMSType is the pixel sample type handed to LittleCMS, chosen by quantum
  depth and HDRI support:
    - non-HDRI Q8 builds: samples travel as unsigned short, scaled up via
      ScaleQuantumToShort() and back via ScaleShortToQuantum();
    - non-HDRI Q16 builds: samples travel as unsigned short unchanged;
    - all other builds (LCMSHDRI remains defined): samples travel as double,
      normalized with QuantumScale/QuantumRange and the source_scale /
      target_scale factors local to the color-transform loop in
      ProfileImage().
*/
#define LCMSHDRI
#if !defined(MAGICKCORE_HDRI_SUPPORT)
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
#undef LCMSHDRI
#define LCMSScaleSource(pixel) ScaleQuantumToShort(pixel)
#define LCMSScaleTarget(pixel) ScaleShortToQuantum(pixel)
typedef unsigned short LCMSType;
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
#undef LCMSHDRI
#define LCMSScaleSource(pixel) (pixel)
#define LCMSScaleTarget(pixel) (pixel)
typedef unsigned short LCMSType;
#endif
#endif
#if defined(LCMSHDRI)
#define LCMSScaleSource(pixel) (source_scale*QuantumScale*(pixel))
#define LCMSScaleTarget(pixel) ClampToQuantum(target_scale*QuantumRange*(pixel))
typedef double LCMSType;
#endif

/*
  Forward declarations.
*/
static MagickBooleanType
  SetImageProfileInternal(Image *,const char *,const StringInfo *,
    const MagickBooleanType,ExceptionInfo *);

static void
  WriteTo8BimProfile(Image *,const char*,const
StringInfo *);

/*
  Typedef declarations.
*/
struct _ProfileInfo
{
  char
    *name;              /* profile key, presumably e.g. "icc" — confirm */

  size_t
    length;             /* byte length of info */

  unsigned char
    *info;              /* raw profile payload */

  size_t
    signature;          /* structure validity marker */
};

/* Context handed to the LittleCMS error handler (see CMSExceptionHandler). */
typedef struct _CMSExceptionInfo
{
  Image
    *image;

  ExceptionInfo
    *exception;
} CMSExceptionInfo;

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   C l o n e I m a g e P r o f i l e s                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneImageProfiles() clones one or more image profiles.
%
%  The format of the CloneImageProfiles method is:
%
%      MagickBooleanType CloneImageProfiles(Image *image,
%        const Image *clone_image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o clone_image: the clone image.
%
*/
MagickExport MagickBooleanType CloneImageProfiles(Image *image,
  const Image *clone_image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clone_image != (const Image *) NULL);
  assert(clone_image->signature == MagickCoreSignature);
  if (clone_image->profiles != (void *) NULL)
    {
      /* replace any existing profile map before deep-copying the source's */
      if (image->profiles != (void *) NULL)
        DestroyImageProfiles(image);
      image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles,
        (void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo);
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D e l e t e I m a g e P r o f i l e                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DeleteImageProfile() deletes a profile from the image by its name.
%
%  The format of the DeleteImageProfile method is:
%
%      MagickBooleanType DeleteImageProfile(Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
%
*/
MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return(MagickFalse);
  /* passing a NULL StringInfo presumably removes the matching 8BIM entry —
     confirm against WriteTo8BimProfile */
  WriteTo8BimProfile(image,name,(StringInfo *) NULL);
  return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D e s t r o y I m a g e P r o f i l e s                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImageProfiles() releases memory associated with an image profile
%  map.
%
%  The format of the DestroyProfiles method is:
%
%      void DestroyImageProfiles(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void DestroyImageProfiles(Image *image)
{
  /* safe to call when no profile map exists */
  if (image->profiles != (SplayTreeInfo *) NULL)
    image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e P r o f i l e                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageProfile() gets a profile associated with an image by name.
%
%  The format of the GetImageProfile method is:
%
%      const StringInfo *GetImageProfile(const Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
%
*/
MagickExport const StringInfo *GetImageProfile(const Image *image,
  const char *name)
{
  const StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  /* NULL when no profile with this name is registered */
  profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
    image->profiles,name);
  return(profile);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t N e x t I m a g e P r o f i l e                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetNextImageProfile() gets the next profile name for an image.
%
%  The format of the GetNextImageProfile method is:
%
%      char *GetNextImageProfile(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport char *GetNextImageProfile(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((char *) NULL);
  /* advances the profile map's internal iterator; NULL at end */
  return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   P r o f i l e I m a g e                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ProfileImage() associates, applies, or removes an ICM, IPTC, or generic
%  profile with / to / from an image.  If the profile is NULL, it is removed
%  from the image otherwise added or applied.  Use a name of '*' and a profile
%  of NULL to remove all profiles from the image.
% % ICC and ICM profiles are handled as follows: If the image does not have % an associated color profile, the one you provide is associated with the % image and the image pixels are not transformed. Otherwise, the colorspace % transform defined by the existing and new profile are applied to the image % pixels and the new profile is associated with the image. % % The format of the ProfileImage method is: % % MagickBooleanType ProfileImage(Image *image,const char *name, % const void *datum,const size_t length,const MagickBooleanType clone) % % A description of each parameter follows: % % o image: the image. % % o name: Name of profile to add or remove: ICC, IPTC, or generic profile. % % o datum: the profile data. % % o length: the length of the profile. % % o clone: should be MagickFalse. % */ #if defined(MAGICKCORE_LCMS_DELEGATE) static LCMSType **DestroyPixelThreadSet(LCMSType **pixels) { register ssize_t i; if (pixels != (LCMSType **) NULL) return((LCMSType **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixels[i] != (LCMSType *) NULL) pixels[i]=(LCMSType *) RelinquishMagickMemory(pixels[i]); pixels=(LCMSType **) RelinquishMagickMemory(pixels); return(pixels); } static LCMSType **AcquirePixelThreadSet(const size_t columns, const size_t channels) { LCMSType **pixels; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixels=(LCMSType **) AcquireQuantumMemory(number_threads,sizeof(*pixels)); if (pixels == (LCMSType **) NULL) return((LCMSType **) NULL); (void) memset(pixels,0,number_threads*sizeof(*pixels)); for (i=0; i < (ssize_t) number_threads; i++) { pixels[i]=(LCMSType *) AcquireQuantumMemory(columns,channels* sizeof(**pixels)); if (pixels[i] == (LCMSType *) NULL) return(DestroyPixelThreadSet(pixels)); } return(pixels); } static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform) { register ssize_t i; assert(transform != (cmsHTRANSFORM *) NULL); for (i=0; 
i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (transform[i] != (cmsHTRANSFORM) NULL)
      cmsDeleteTransform(transform[i]);
  transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform);
  return(transform);
}

/*
  Create one LittleCMS color transform per potential worker thread
  (ThreadResource limit), so each thread converts pixels through its own
  handle.  Returns NULL on failure, after deleting any transforms already
  created.
*/
static cmsHTRANSFORM *AcquireTransformThreadSet(Image *image,
  const cmsHPROFILE source_profile,const cmsUInt32Number source_type,
  const cmsHPROFILE target_profile,const cmsUInt32Number target_type,
  const int intent,const cmsUInt32Number flags)
{
  cmsHTRANSFORM
    *transform;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads,
    sizeof(*transform));
  if (transform == (cmsHTRANSFORM *) NULL)
    return((cmsHTRANSFORM *) NULL);
  (void) memset(transform,0,number_threads*sizeof(*transform));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    /* the image pointer doubles as the cmsContext user data */
    transform[i]=cmsCreateTransformTHR((cmsContext) image,source_profile,
      source_type,target_profile,target_type,intent,flags);
    if (transform[i] == (cmsHTRANSFORM) NULL)
      return(DestroyTransformThreadSet(transform));
  }
  return(transform);
}
#endif

#if defined(MAGICKCORE_LCMS_DELEGATE)
/*
  LittleCMS error callback: forwards lcms diagnostics into the ImageMagick
  exception attached to the CMSExceptionInfo context.  Tolerates a missing
  context, exception, or image.
*/
static void CMSExceptionHandler(cmsContext context,cmsUInt32Number severity,
  const char *message)
{
  CMSExceptionInfo
    *cms_exception;

  ExceptionInfo
    *exception;

  Image
    *image;

  cms_exception=(CMSExceptionInfo *) context;
  if (cms_exception == (CMSExceptionInfo *) NULL)
    return;
  exception=cms_exception->exception;
  if (exception == (ExceptionInfo *) NULL)
    return;
  image=cms_exception->image;
  if (image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
        "UnableToTransformColorspace","`%s'","unknown context");
      return;
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s",
      severity,message != (char *) NULL ?
message : "no message"); (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning, "UnableToTransformColorspace","`%s'",image->filename); } #endif static MagickBooleanType SetsRGBImageProfile(Image *image, ExceptionInfo *exception) { static unsigned char sRGBProfile[] = { 0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00, 0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20, 0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 0x00, 0x0f, 0x00, 0x3a, 0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99, 0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67, 0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70, 0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88, 0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c, 0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x24, 0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24, 0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14, 0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14, 0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14, 0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 
0x00, 0x00, 0x14, 0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14, 0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d, 0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57, 0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65, 0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e, 0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 
0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00, 0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c, 0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2, 0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d, 0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa0, 0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87, 0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19, 0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37, 0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54, 0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72, 0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90, 0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae, 0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb, 0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb, 0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d, 0x01, 0x13, 0x01, 0x19, 0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32, 
0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59, 0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83, 0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1, 0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1, 0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14, 0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b, 0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84, 0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1, 0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00, 0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43, 0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a, 0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3, 0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20, 0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71, 0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4, 0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c, 0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77, 0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5, 0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37, 0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d, 0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07, 0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74, 0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5, 0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a, 0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2, 0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f, 0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf, 0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 
0x3d, 0x0a, 0x54, 0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc, 0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69, 0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9, 0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e, 0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26, 0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3, 0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 0x49, 0x0e, 0x64, 0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09, 0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3, 0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61, 0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13, 0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9, 0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84, 0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43, 0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06, 0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce, 0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b, 0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c, 0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41, 0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b, 0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa, 0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd, 0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5, 0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2, 0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3, 0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99, 0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 0x6a, 0x1e, 0x94, 0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 
0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94, 0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98, 0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1, 0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf, 0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2, 0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda, 0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7, 0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18, 0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f, 0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b, 0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b, 0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1, 0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c, 0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c, 0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91, 0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb, 0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a, 0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f, 0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8, 0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37, 0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c, 0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05, 0x39, 0x42, 0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74, 0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8, 0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61, 0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0, 0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64, 0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 0x41, 0x6a, 0x41, 0xac, 0x41, 0xee, 0x42, 0x30, 0x42, 
0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d, 0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12, 0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab, 0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b, 0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0, 0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a, 0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a, 0x4d, 0x93, 0x4d, 0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00, 0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb, 0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c, 0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42, 0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f, 0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0, 0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8, 0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95, 0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78, 0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61, 0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f, 0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43, 0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d, 0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d, 0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43, 0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f, 0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60, 0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78, 0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95, 0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8, 0x74, 0x14, 0x74, 0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1, 
0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11, 0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46, 0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81, 0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2, 0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a, 0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57, 0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab, 0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04, 0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64, 0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca, 0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36, 0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8, 0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20, 0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f, 0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24, 0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf, 0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40, 0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8, 0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76, 0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a, 0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4, 0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75, 0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d, 0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea, 0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae, 0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79, 0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a, 0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 
0xa7, 0xbc, 0x21, 0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff, 0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3, 0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce, 0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf, 0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7, 0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5, 0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 0x39, 0xd0, 0xba, 0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6, 0xd4, 0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8, 0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1, 0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10, 0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36, 0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63, 0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96, 0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0, 0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11, 0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58, 0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7, 0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb, 0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57, 0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba, 0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff }; StringInfo *profile; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (GetImageProfile(image,"icc") != (const StringInfo *) NULL) return(MagickFalse); profile=AcquireStringInfo(sizeof(sRGBProfile)); SetStringInfoDatum(profile,sRGBProfile); status=SetImageProfile(image,"icc",profile,exception); profile=DestroyStringInfo(profile); return(status); } 
/*
  ProfileImage() associates, applies, or removes an ICM, IPTC, or generic
  profile with / from an image.  If `datum' is NULL (or `length' is 0) every
  profile whose name matches `name' is deleted; otherwise the profile is
  attached, and for "icc"/"icm" profiles the pixels are additionally color
  transformed with LCMS when the delegate is built in.

  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType ProfileImage(Image *image,const char *name,
  const void *datum,const size_t length,ExceptionInfo *exception)
{
#define ProfileImageTag  "Profile/Image"
/*
  Close any open LCMS profile handles before throwing.  The macro relies on
  source_profile/target_profile being in scope at the expansion site.
*/
#define ThrowProfileException(severity,tag,context) \
{ \
  if (source_profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(source_profile); \
  if (target_profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(target_profile); \
  ThrowBinaryException(severity,tag,context); \
}

  MagickBooleanType
    status;

  StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(name != (const char *) NULL);
  if ((datum == (const void *) NULL) || (length == 0))
    {
      char
        *next;

      /*
        Delete image profile(s) whose name matches `name'.
      */
      ResetImageProfileIterator(image);
      for (next=GetNextImageProfile(image); next != (const char *) NULL; )
      {
        if (IsOptionMember(next,name) != MagickFalse)
          {
            (void) DeleteImageProfile(image,next);
            /* deleting invalidates the iterator; restart the walk */
            ResetImageProfileIterator(image);
          }
        next=GetNextImageProfile(image);
      }
      return(MagickTrue);
    }
  /*
    Add a ICC, IPTC, or generic profile to the image.
  */
  status=MagickTrue;
  profile=AcquireStringInfo((size_t) length);
  SetStringInfoDatum(profile,(unsigned char *) datum);
  if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
    status=SetImageProfile(image,name,profile,exception);
  else
    {
      const StringInfo
        *icc_profile;

      icc_profile=GetImageProfile(image,"icc");
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          const char
            *value;

          /*
            Same ICC profile is already attached: synthesize an sRGB profile
            when the EXIF metadata says the image is sRGB.
            NOTE(review): value may be NULL here; this assumes LocaleCompare()
            tolerates a NULL argument -- confirm against string_.c.
          */
          value=GetImageProperty(image,"exif:ColorSpace",exception);
          (void) value;
          if (LocaleCompare(value,"1") != 0)
            (void) SetsRGBImageProfile(image,exception);
          value=GetImageProperty(image,"exif:InteroperabilityIndex",exception);
          if (LocaleCompare(value,"R98.") != 0)
            (void) SetsRGBImageProfile(image,exception);
          /*
            Future.
              value=GetImageProperty(image,"exif:InteroperabilityIndex",exception);
              if (LocaleCompare(value,"R03.") != 0)
                (void) SetAdobeRGB1998ImageProfile(image,exception);
          */
          icc_profile=GetImageProfile(image,"icc");
        }
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          /* identical profile already present: nothing to do */
          profile=DestroyStringInfo(profile);
          return(MagickTrue);
        }
#if !defined(MAGICKCORE_LCMS_DELEGATE)
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (LCMS)",image->filename);
#else
      {
        cmsHPROFILE
          source_profile;

        CMSExceptionInfo
          cms_exception;

        /*
          Transform pixel colors as defined by the color profiles.
        */
        cmsSetLogErrorHandler(CMSExceptionHandler);
        cms_exception.image=image;
        cms_exception.exception=exception;
        (void) cms_exception;
        source_profile=cmsOpenProfileFromMemTHR((cmsContext) &cms_exception,
          GetStringInfoDatum(profile),(cmsUInt32Number)
          GetStringInfoLength(profile));
        if (source_profile == (cmsHPROFILE) NULL)
          ThrowBinaryException(ResourceLimitError,
            "ColorspaceColorProfileMismatch",name);
        if ((cmsGetDeviceClass(source_profile) != cmsSigLinkClass) &&
            (icc_profile == (StringInfo *) NULL))
          status=SetImageProfile(image,name,profile,exception);
        else
          {
            CacheView
              *image_view;

            ColorspaceType
              source_colorspace,
              target_colorspace;

            cmsColorSpaceSignature
              signature;

            cmsHPROFILE
              target_profile;

            cmsHTRANSFORM
              *magick_restrict transform;

            cmsUInt32Number
              flags,
              source_type,
              target_type;

            int
              intent;

            LCMSType
              **magick_restrict source_pixels,
              **magick_restrict target_pixels;

#if defined(LCMSHDRI)
            LCMSType
              source_scale,
              target_scale;
#endif

            MagickOffsetType
              progress;

            size_t
              source_channels,
              target_channels;

            ssize_t
              y;

            /*
              If an ICC profile is already attached, it becomes the source of
              the transform and the incoming profile the target.
            */
            target_profile=(cmsHPROFILE) NULL;
            if (icc_profile != (StringInfo *) NULL)
              {
                target_profile=source_profile;
                source_profile=cmsOpenProfileFromMemTHR((cmsContext)
                  &cms_exception,GetStringInfoDatum(icc_profile),
                  (cmsUInt32Number) GetStringInfoLength(icc_profile));
                if (source_profile == (cmsHPROFILE) NULL)
                  ThrowProfileException(ResourceLimitError,
                    "ColorspaceColorProfileMismatch",name);
              }
            /*
              Map the LCMS source color space onto an ImageMagick colorspace,
              pixel layout (source_type) and channel count.
            */
#if defined(LCMSHDRI)
            source_scale=1.0;
#endif
            source_channels=3;
            switch (cmsGetColorSpace(source_profile))
            {
              case cmsSigCmykData:
              {
                source_colorspace=CMYKColorspace;
                source_channels=4;
#if defined(LCMSHDRI)
                source_type=(cmsUInt32Number) TYPE_CMYK_DBL;
                source_scale=100.0;
#else
                source_type=(cmsUInt32Number) TYPE_CMYK_16;
#endif
                break;
              }
              case cmsSigGrayData:
              {
                source_colorspace=GRAYColorspace;
                source_channels=1;
#if defined(LCMSHDRI)
                source_type=(cmsUInt32Number) TYPE_GRAY_DBL;
#else
                source_type=(cmsUInt32Number) TYPE_GRAY_16;
#endif
                break;
              }
              case cmsSigLabData:
              {
                source_colorspace=LabColorspace;
#if defined(LCMSHDRI)
                source_type=(cmsUInt32Number) TYPE_Lab_DBL;
                source_scale=100.0;
#else
                source_type=(cmsUInt32Number) TYPE_Lab_16;
#endif
                break;
              }
#if !defined(LCMSHDRI)
              case cmsSigLuvData:
              {
                source_colorspace=YUVColorspace;
                source_type=(cmsUInt32Number) TYPE_YUV_16;
                break;
              }
#endif
              case cmsSigRgbData:
              {
                source_colorspace=sRGBColorspace;
#if defined(LCMSHDRI)
                source_type=(cmsUInt32Number) TYPE_RGB_DBL;
#else
                source_type=(cmsUInt32Number) TYPE_RGB_16;
#endif
                break;
              }
              case cmsSigXYZData:
              {
                source_colorspace=XYZColorspace;
#if defined(LCMSHDRI)
                source_type=(cmsUInt32Number) TYPE_XYZ_DBL;
#else
                source_type=(cmsUInt32Number) TYPE_XYZ_16;
#endif
                break;
              }
#if !defined(LCMSHDRI)
              case cmsSigYCbCrData:
              {
                source_colorspace=YUVColorspace;
                source_type=(cmsUInt32Number) TYPE_YCbCr_16;
                break;
              }
#endif
              default:
                ThrowProfileException(ImageError,
                  "ColorspaceColorProfileMismatch",name);
            }
            (void) source_colorspace;
            /*
              The target space is the source's PCS unless an explicit target
              profile was supplied.
            */
            signature=cmsGetPCS(source_profile);
            if (target_profile != (cmsHPROFILE) NULL)
              signature=cmsGetColorSpace(target_profile);
#if defined(LCMSHDRI)
            target_scale=1.0;
#endif
            target_channels=3;
            switch (signature)
            {
              case cmsSigCmykData:
              {
                target_colorspace=CMYKColorspace;
                target_channels=4;
#if defined(LCMSHDRI)
                target_type=(cmsUInt32Number) TYPE_CMYK_DBL;
                target_scale=0.01;
#else
                target_type=(cmsUInt32Number) TYPE_CMYK_16;
#endif
                break;
              }
              case cmsSigGrayData:
              {
                target_colorspace=GRAYColorspace;
                target_channels=1;
#if defined(LCMSHDRI)
                target_type=(cmsUInt32Number) TYPE_GRAY_DBL;
#else
                target_type=(cmsUInt32Number) TYPE_GRAY_16;
#endif
                break;
              }
              case cmsSigLabData:
              {
                target_colorspace=LabColorspace;
#if defined(LCMSHDRI)
                target_type=(cmsUInt32Number) TYPE_Lab_DBL;
                target_scale=0.01;
#else
                target_type=(cmsUInt32Number) TYPE_Lab_16;
#endif
                break;
              }
#if !defined(LCMSHDRI)
              case cmsSigLuvData:
              {
                target_colorspace=YUVColorspace;
                target_type=(cmsUInt32Number) TYPE_YUV_16;
                break;
              }
#endif
              case cmsSigRgbData:
              {
                target_colorspace=sRGBColorspace;
#if defined(LCMSHDRI)
                target_type=(cmsUInt32Number) TYPE_RGB_DBL;
#else
                target_type=(cmsUInt32Number) TYPE_RGB_16;
#endif
                break;
              }
              case cmsSigXYZData:
              {
                target_colorspace=XYZColorspace;
#if defined(LCMSHDRI)
                target_type=(cmsUInt32Number) TYPE_XYZ_DBL;
#else
                target_type=(cmsUInt32Number) TYPE_XYZ_16;
#endif
                break;
              }
#if !defined(LCMSHDRI)
              case cmsSigYCbCrData:
              {
                target_colorspace=YUVColorspace;
                target_type=(cmsUInt32Number) TYPE_YCbCr_16;
                break;
              }
#endif
              default:
                ThrowProfileException(ImageError,
                  "ColorspaceColorProfileMismatch",name);
            }
            /*
              Map the image rendering intent onto the LCMS intent constants.
            */
            switch (image->rendering_intent)
            {
              case AbsoluteIntent:
              {
                intent=INTENT_ABSOLUTE_COLORIMETRIC;
                break;
              }
              case PerceptualIntent:
              {
                intent=INTENT_PERCEPTUAL;
                break;
              }
              case RelativeIntent:
              {
                intent=INTENT_RELATIVE_COLORIMETRIC;
                break;
              }
              case SaturationIntent:
              {
                intent=INTENT_SATURATION;
                break;
              }
              default:
              {
                intent=INTENT_PERCEPTUAL;
                break;
              }
            }
            flags=cmsFLAGS_HIGHRESPRECALC;
#if defined(cmsFLAGS_BLACKPOINTCOMPENSATION)
            if (image->black_point_compensation != MagickFalse)
              flags|=cmsFLAGS_BLACKPOINTCOMPENSATION;
#endif
            /* one transform handle per OpenMP thread */
            transform=AcquireTransformThreadSet(image,source_profile,
              source_type,target_profile,target_type,intent,flags);
            if (transform == (cmsHTRANSFORM *) NULL)
              ThrowProfileException(ImageError,"UnableToCreateColorTransform",
                name);
            /*
              Transform image as dictated by the source & target image
              profiles.
            */
            source_pixels=AcquirePixelThreadSet(image->columns,
              source_channels);
            target_pixels=AcquirePixelThreadSet(image->columns,
              target_channels);
            if ((source_pixels == (LCMSType **) NULL) ||
                (target_pixels == (LCMSType **) NULL))
              {
                target_pixels=DestroyPixelThreadSet(target_pixels);
                source_pixels=DestroyPixelThreadSet(source_pixels);
                transform=DestroyTransformThreadSet(transform);
                ThrowProfileException(ResourceLimitError,
                  "MemoryAllocationFailed",image->filename);
              }
            if (SetImageStorageClass(image,DirectClass,exception) ==
                MagickFalse)
              {
                target_pixels=DestroyPixelThreadSet(target_pixels);
                source_pixels=DestroyPixelThreadSet(source_pixels);
                transform=DestroyTransformThreadSet(transform);
                if (source_profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(source_profile);
                if (target_profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(target_profile);
                return(MagickFalse);
              }
            if (target_colorspace == CMYKColorspace)
              (void) SetImageColorspace(image,target_colorspace,exception);
            progress=0;
            image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp parallel for schedule(static) shared(status) \
              magick_number_threads(image,image,image->rows,1)
#endif
            for (y=0; y < (ssize_t) image->rows; y++)
            {
              const int
                id = GetOpenMPThreadId();

              MagickBooleanType
                sync;

              register LCMSType
                *p;

              register Quantum
                *magick_restrict q;

              register ssize_t
                x;

              if (status == MagickFalse)
                continue;
              q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
                exception);
              if (q == (Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              /* gather one row into the thread-local source buffer */
              p=source_pixels[id];
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                *p++=LCMSScaleSource(GetPixelRed(image,q));
                if (source_channels > 1)
                  {
                    *p++=LCMSScaleSource(GetPixelGreen(image,q));
                    *p++=LCMSScaleSource(GetPixelBlue(image,q));
                  }
                if (source_channels > 3)
                  *p++=LCMSScaleSource(GetPixelBlack(image,q));
                q+=GetPixelChannels(image);
              }
              cmsDoTransform(transform[id],source_pixels[id],
                target_pixels[id],(unsigned int) image->columns);
              /* scatter the transformed row back into the pixel cache */
              p=target_pixels[id];
              q-=GetPixelChannels(image)*image->columns;
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                if (target_channels == 1)
                  SetPixelGray(image,LCMSScaleTarget(*p),q);
                else
                  SetPixelRed(image,LCMSScaleTarget(*p),q);
                p++;
                if (target_channels > 1)
                  {
                    SetPixelGreen(image,LCMSScaleTarget(*p),q);
                    p++;
                    SetPixelBlue(image,LCMSScaleTarget(*p),q);
                    p++;
                  }
                if (target_channels > 3)
                  {
                    SetPixelBlack(image,LCMSScaleTarget(*p),q);
                    p++;
                  }
                q+=GetPixelChannels(image);
              }
              sync=SyncCacheViewAuthenticPixels(image_view,exception);
              if (sync == MagickFalse)
                status=MagickFalse;
              if (image->progress_monitor != (MagickProgressMonitor) NULL)
                {
                  MagickBooleanType
                    proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
                  #pragma omp critical (MagickCore_ProfileImage)
#endif
                  proceed=SetImageProgress(image,ProfileImageTag,progress++,
                    image->rows);
                  if (proceed == MagickFalse)
                    status=MagickFalse;
                }
            }
            image_view=DestroyCacheView(image_view);
            (void) SetImageColorspace(image,target_colorspace,exception);
            /* pick an image type that matches the target signature */
            switch (signature)
            {
              case cmsSigRgbData:
              {
                image->type=image->alpha_trait == UndefinedPixelTrait ?
                  TrueColorType : TrueColorAlphaType;
                break;
              }
              case cmsSigCmykData:
              {
                image->type=image->alpha_trait == UndefinedPixelTrait ?
                  ColorSeparationType : ColorSeparationAlphaType;
                break;
              }
              case cmsSigGrayData:
              {
                image->type=image->alpha_trait == UndefinedPixelTrait ?
                  GrayscaleType : GrayscaleAlphaType;
                break;
              }
              default:
                break;
            }
            target_pixels=DestroyPixelThreadSet(target_pixels);
            source_pixels=DestroyPixelThreadSet(source_pixels);
            transform=DestroyTransformThreadSet(transform);
            if ((status != MagickFalse) &&
                (cmsGetDeviceClass(source_profile) != cmsSigLinkClass))
              status=SetImageProfile(image,name,profile,exception);
            if (target_profile != (cmsHPROFILE) NULL)
              (void) cmsCloseProfile(target_profile);
          }
        (void) cmsCloseProfile(source_profile);
      }
#endif
    }
  profile=DestroyStringInfo(profile);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e m o v e I m a g e P r o f i l e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RemoveImageProfile() removes a named profile from the image and returns its
%  value.
%
%  The format of the RemoveImageProfile method is:
%
%      void *RemoveImageProfile(Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
%
*/
MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name)
{
  StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  /* scrub the matching entry out of any 8BIM wrapper profile as well */
  WriteTo8BimProfile(image,name,(StringInfo *) NULL);
  profile=(StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *)
    image->profiles,name);
  return(profile);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s e t P r o f i l e I t e r a t o r                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetImageProfileIterator() resets the image profile iterator.
Use it in
%  conjunction with GetNextImageProfile() to iterate over all the profiles
%  associated with an image.
%
%  The format of the ResetImageProfileIterator method is:
%
%      ResetImageProfileIterator(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void ResetImageProfileIterator(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* no profile tree yet means there is nothing to iterate over */
  if (image->profiles == (SplayTreeInfo *) NULL)
    return;
  ResetSplayTreeIterator((SplayTreeInfo *) image->profiles);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e P r o f i l e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageProfile() adds a named profile to the image.  If a profile with the
%  same name already exists, it is replaced.  This method differs from the
%  ProfileImage() method in that it does not apply CMS color profiles.
%
%  The format of the SetImageProfile method is:
%
%      MagickBooleanType SetImageProfile(Image *image,const char *name,
%        const StringInfo *profile)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name, for example icc, exif, and 8bim (8bim is the
%      Photoshop wrapper for iptc profiles).
%
%    o profile: A StringInfo structure that contains the named profile.
%
*/

/* Splay-tree value destructor for the image profile tree. */
static void *DestroyProfile(void *profile)
{
  return((void *) DestroyStringInfo((StringInfo *) profile));
}

/* Read one byte from a Photoshop resource block; advances and returns p. */
static inline const unsigned char *ReadResourceByte(const unsigned char *p,
  unsigned char *quantum)
{
  *quantum=(*p++);
  return(p);
}

/* Read a big-endian 32-bit value from a resource block; advances p. */
static inline const unsigned char *ReadResourceLong(const unsigned char *p,
  unsigned int *quantum)
{
  *quantum=(unsigned int) (*p++) << 24;
  *quantum|=(unsigned int) (*p++) << 16;
  *quantum|=(unsigned int) (*p++) << 8;
  *quantum|=(unsigned int) (*p++);
  return(p);
}

/* Read a big-endian 16-bit value from a resource block; advances p. */
static inline const unsigned char *ReadResourceShort(const unsigned char *p,
  unsigned short *quantum)
{
  *quantum=(unsigned short) (*p++) << 8;
  *quantum|=(unsigned short) (*p++);
  return(p);
}

/* Write a big-endian 32-bit value at p. */
static inline void WriteResourceLong(unsigned char *p,
  const unsigned int quantum)
{
  unsigned char
    buffer[4];

  buffer[0]=(unsigned char) (quantum >> 24);
  buffer[1]=(unsigned char) (quantum >> 16);
  buffer[2]=(unsigned char) (quantum >> 8);
  buffer[3]=(unsigned char) quantum;
  (void) memcpy(p,buffer,4);
}

/*
  Mirror a named profile (icc/iptc/xmp) into the image's 8BIM wrapper
  profile: the matching 8BIM resource is replaced with `profile', or removed
  when `profile' is NULL.  Any other profile name is ignored.
*/
static void WriteTo8BimProfile(Image *image,const char *name,
  const StringInfo *profile)
{
  const unsigned char
    *datum,
    *q;

  register const unsigned char
    *p;

  size_t
    length;

  StringInfo
    *profile_8bim;

  ssize_t
    count;

  unsigned char
    length_byte;

  unsigned int
    value;

  unsigned short
    id,
    profile_id;

  /* map the profile name onto its Photoshop resource id */
  if (LocaleCompare(name,"icc") == 0)
    profile_id=0x040f;
  else
    if (LocaleCompare(name,"iptc") == 0)
      profile_id=0x0404;
    else
      if (LocaleCompare(name,"xmp") == 0)
        profile_id=0x0424;
      else
        return;
  profile_8bim=(StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
    image->profiles,"8bim");
  if (profile_8bim == (StringInfo *) NULL)
    return;
  datum=GetStringInfoDatum(profile_8bim);
  length=GetStringInfoLength(profile_8bim);
  for (p=datum; p < (datum+length-16); )
  {
    q=p;  /* start of this resource record */
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    /* Pascal-style resource name, padded to an even total length */
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    if (((length_byte+1) & 0x01) != 0)
      p++;
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&value);
    count=(ssize_t) value;
    if ((count & 0x01) != 0)
      count++;  /* resource data is padded to even length */
    if ((count < 0) || (p > (datum+length-count)) ||
        (count > (ssize_t) length))
      break;
    if (id != profile_id)
      p+=count;
    else
      {
        size_t
          extent,
          offset;

        ssize_t
          extract_extent;

        StringInfo
          *extract_profile;

        extract_extent=0;
        extent=(datum+length)-(p+count);
        if (profile == (StringInfo *) NULL)
          {
            /* delete: copy everything before the record, then the tail */
            offset=(q-datum);
            extract_profile=AcquireStringInfo(offset+extent);
            (void) memcpy(extract_profile->datum,datum,offset);
          }
        else
          {
            /* replace: keep the header, rewrite the length, splice data */
            offset=(p-datum);
            extract_extent=profile->length;
            if ((extract_extent & 0x01) != 0)
              extract_extent++;
            extract_profile=AcquireStringInfo(offset+extract_extent+extent);
            (void) memcpy(extract_profile->datum,datum,offset-4);
            WriteResourceLong(extract_profile->datum+offset-4,(unsigned int)
              profile->length);
            (void) memcpy(extract_profile->datum+offset,
              profile->datum,profile->length);
          }
        (void) memcpy(extract_profile->datum+offset+extract_extent,
          p+count,extent);
        (void) AddValueToSplayTree((SplayTreeInfo *) image->profiles,
          ConstantString("8bim"),CloneStringInfo(extract_profile));
        extract_profile=DestroyStringInfo(extract_profile);
        break;
      }
  }
}

/*
  Walk a Photoshop 8BIM resource block and register the embedded
  resolution, IPTC, ICC, EXIF, and XMP resources on the image.
*/
static void GetProfilesFromResourceBlock(Image *image,
  const StringInfo *resource_block,ExceptionInfo *exception)
{
  const unsigned char
    *datum;

  register const unsigned char
    *p;

  size_t
    length;

  ssize_t
    count;

  StringInfo
    *profile;

  unsigned char
    length_byte;

  unsigned int
    value;

  unsigned short
    id;

  datum=GetStringInfoDatum(resource_block);
  length=GetStringInfoLength(resource_block);
  for (p=datum; p < (datum+length-16); )
  {
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    if (((length_byte+1) & 0x01) != 0)
      p++;
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&value);
    count=(ssize_t) value;
    if ((p > (datum+length-count)) || (count > (ssize_t) length) ||
        (count < 0))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        unsigned int
          resolution;

        unsigned short
          units;

        /*
          Resolution resource: 32-bit fixed-point (16.16) DPI values.
        */
        if (count < 10)
          break;
        p=ReadResourceLong(p,&resolution);
        image->resolution.x=((double) resolution)/65536.0;
        p=ReadResourceShort(p,&units)+2;
        p=ReadResourceLong(p,&resolution)+4;
        image->resolution.y=((double) resolution)/65536.0;
        /*
          Values are always stored as pixels per inch.
        */
        if ((ResolutionType) units != PixelsPerCentimeterResolution)
          image->units=PixelsPerInchResolution;
        else
          {
            image->units=PixelsPerCentimeterResolution;
            image->resolution.x/=2.54;
            image->resolution.y/=2.54;
          }
        break;
      }
      case 0x0404:
      {
        /*
          IPTC Profile
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        /* recursive=MagickTrue prevents writing back into this 8BIM block */
        (void) SetImageProfileInternal(image,"iptc",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x040c:
      {
        /*
          Thumbnail.
        */
        p+=count;
        break;
      }
      case 0x040f:
      {
        /*
          ICC Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"icc",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0422:
      {
        /*
          EXIF Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"exif",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0424:
      {
        /*
          XMP Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"xmp",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      default:
      {
        p+=count;
        break;
      }
    }
    if ((count & 0x01) != 0)
      p++;  /* resource data is padded to even length */
  }
}

/*
  Store `profile' under the lowercased `name' in the image profile tree.
  When the profile is an 8BIM block, its embedded resources are unpacked;
  otherwise (unless `recursive') the profile is mirrored into the 8BIM
  wrapper.  Returns the splay-tree insertion status.
*/
static MagickBooleanType SetImageProfileInternal(Image *image,const char *name,
  const StringInfo *profile,const MagickBooleanType recursive,
  ExceptionInfo *exception)
{
  char
    key[MagickPathExtent],
    property[MagickPathExtent];

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
      DestroyProfile);
  (void) CopyMagickString(key,name,MagickPathExtent);
  LocaleLower(key);
  status=AddValueToSplayTree((SplayTreeInfo *) image->profiles,
    ConstantString(key),CloneStringInfo(profile));
  if (status != MagickFalse)
    {
      if (LocaleCompare(name,"8bim") == 0)
        GetProfilesFromResourceBlock(image,profile,exception);
      else
        if (recursive == MagickFalse)
          WriteTo8BimProfile(image,name,profile);
    }
  /*
    Inject profile into image properties.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%s:*",name);
  (void) GetImageProperty(image,property,exception);
  return(status);
}

MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name,
  const StringInfo *profile,ExceptionInfo *exception)
{
  return(SetImageProfileInternal(image,name,profile,MagickFalse,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S y n c I m a g e P r o f i l e s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImageProfiles() synchronizes image properties with the image profiles.
%  Currently we only support updating the EXIF resolution and orientation.
%
%  The format of the SyncImageProfiles method is:
%
%      MagickBooleanType SyncImageProfiles(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/

/* Consume one byte from a bounded buffer; returns EOF when exhausted. */
static inline int ReadProfileByte(unsigned char **p,size_t *length)
{
  int
    c;

  if (*length < 1)
    return(EOF);
  c=(int) (*(*p)++);
  (*length)--;
  return(c);
}

/*
  Read a 16-bit value with the given endianness.  The unsigned->signed union
  round-trip avoids implementation-defined conversion of out-of-range values.
*/
static inline signed short ReadProfileShort(const EndianType endian,
  unsigned char *buffer)
{
  union
  {
    unsigned int
      unsigned_value;

    signed int
      signed_value;
  } quantum;

  unsigned short
    value;

  if (endian == LSBEndian)
    {
      value=(unsigned short) buffer[1] << 8;
      value|=(unsigned short) buffer[0];
      quantum.unsigned_value=value & 0xffff;
      return(quantum.signed_value);
    }
  value=(unsigned short) buffer[0] << 8;
  value|=(unsigned short) buffer[1];
  quantum.unsigned_value=value & 0xffff;
  return(quantum.signed_value);
}

/* Read a 32-bit value with the given endianness (see ReadProfileShort). */
static inline signed int ReadProfileLong(const EndianType endian,
  unsigned char *buffer)
{
  union
  {
    unsigned int
      unsigned_value;

    signed int
      signed_value;
  } quantum;

  unsigned int
    value;

  if (endian == LSBEndian)
    {
      value=(unsigned int) buffer[3] << 24;
      value|=(unsigned int) buffer[2] << 16;
      value|=(unsigned int) buffer[1] << 8;
      value|=(unsigned int) buffer[0];
      quantum.unsigned_value=value & 0xffffffff;
      return(quantum.signed_value);
    }
  value=(unsigned int) buffer[0] << 24;
  value|=(unsigned int) buffer[1] << 16;
  value|=(unsigned int) buffer[2] << 8;
  value|=(unsigned int) buffer[3];
  quantum.unsigned_value=value & 0xffffffff;
  return(quantum.signed_value);
}

/* Bounded big-endian 32-bit read; advances the cursor, 0 on underflow. */
static inline signed int ReadProfileMSBLong(unsigned char **p,size_t *length)
{
  signed int
    value;

  if (*length < 4)
    return(0);
  value=ReadProfileLong(MSBEndian,*p);
  (*length)-=4;
  *p+=4;
  return(value);
}

/* Bounded big-endian 16-bit read; advances the cursor, 0 on underflow. */
static inline signed short ReadProfileMSBShort(unsigned char **p,
  size_t *length)
{
  signed short
    value;

  if (*length < 2)
    return(0);
  value=ReadProfileShort(MSBEndian,*p);
  (*length)-=2;
  *p+=2;
  return(value);
}

/* Write the low 32 bits of `value' at p with the given endianness. */
static inline void WriteProfileLong(const EndianType endian,
  const size_t value,unsigned char *p)
{
  unsigned char
    buffer[4];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
      buffer[2]=(unsigned char) (value >> 16);
      buffer[3]=(unsigned char) (value >> 24);
      (void) memcpy(p,buffer,4);
      return;
    }
  buffer[0]=(unsigned char) (value >> 24);
  buffer[1]=(unsigned char) (value >> 16);
  buffer[2]=(unsigned char) (value >> 8);
  buffer[3]=(unsigned char) value;
  (void) memcpy(p,buffer,4);
}

/* Write a 16-bit value at p with the given endianness. */
static void WriteProfileShort(const EndianType endian,
  const unsigned short value,unsigned char *p)
{
  unsigned char
    buffer[2];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
      (void) memcpy(p,buffer,2);
      return;
    }
  buffer[0]=(unsigned char) (value >> 8);
  buffer[1]=(unsigned char) value;
  (void) memcpy(p,buffer,2);
}

/*
  Rewrite the resolution resource (id 0x03ED) inside an 8BIM profile so it
  matches the image's current resolution and units.
*/
static MagickBooleanType Sync8BimProfile(Image *image,StringInfo *profile)
{
  size_t
    length;

  ssize_t
    count;

  unsigned char
    *p;

  unsigned short
    id;

  length=GetStringInfoLength(profile);
  p=GetStringInfoDatum(profile);
  while (length != 0)
  {
    /* scan byte-wise for the "8BIM" signature */
    if (ReadProfileByte(&p,&length) != 0x38)
      continue;
    if (ReadProfileByte(&p,&length) != 0x42)
      continue;
    if (ReadProfileByte(&p,&length) != 0x49)
      continue;
    if (ReadProfileByte(&p,&length) != 0x4D)
      continue;
    if (length < 7)
      return(MagickFalse);
    id=ReadProfileMSBShort(&p,&length);
    /* skip the Pascal-style resource name (padded to even length) */
    count=(ssize_t) ReadProfileByte(&p,&length);
    if ((count >= (ssize_t) length) || (count < 0))
      return(MagickFalse);
    p+=count;
    length-=count;
    if ((*p & 0x01) == 0)
      (void) ReadProfileByte(&p,&length);
    count=(ssize_t) ReadProfileMSBLong(&p,&length);
    if ((count > (ssize_t) length) || (count < 0))
      return(MagickFalse);
    if ((id == 0x3ED) && (count == 16))
      {
        /* resolution is stored as 16.16 fixed point, pixels per inch */
        if (image->units == PixelsPerCentimeterResolution)
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x*2.54*
            65536.0),p);
        else
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x*
            65536.0),p);
        WriteProfileShort(MSBEndian,(unsigned short) image->units,p+4);
        if (image->units == PixelsPerCentimeterResolution)
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y*2.54*
            65536.0),p+8);
        else
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y*
            65536.0),p+8);
        WriteProfileShort(MSBEndian,(unsigned short) image->units,p+12);
      }
    p+=count;
    length-=count;
  }
  return(MagickTrue);
}

/*
  Walk the TIFF/EXIF IFD chain inside `profile' and rewrite the resolution
  (0x011a/0x011b), orientation (0x0112), and resolution-unit (0x0128) tags
  from the image properties.  The splay tree guards against IFD offset
  loops in corrupt profiles.
*/
MagickBooleanType SyncExifProfile(Image *image,StringInfo *profile)
{
#define MaxDirectoryStack  16
#define EXIF_DELIMITER  "\n"
#define EXIF_NUM_FORMATS  12
#define TAG_EXIF_OFFSET  0x8769
#define TAG_INTEROP_OFFSET  0xa005

  typedef struct _DirectoryInfo
  {
    unsigned char
      *directory;

    size_t
      entry;
  } DirectoryInfo;

  DirectoryInfo
    directory_stack[MaxDirectoryStack];

  EndianType
    endian;

  size_t
    entry,
    length,
    number_entries;

  SplayTreeInfo
    *exif_resources;

  ssize_t
    id,
    level,
    offset;

  /* bytes per EXIF format code 1..12; index 0 is unused */
  static int
    format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};

  unsigned char
    *directory,
    *exif;

  /*
    Set EXIF resolution tag.
  */
  length=GetStringInfoLength(profile);
  exif=GetStringInfoDatum(profile);
  if (length < 16)
    return(MagickFalse);
  id=(ssize_t) ReadProfileShort(LSBEndian,exif);
  if ((id != 0x4949) && (id != 0x4D4D))
    {
      /* no TIFF header yet: skip a leading "Exif\0\0" APP1 marker */
      while (length != 0)
      {
        if (ReadProfileByte(&exif,&length) != 0x45)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x78)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x69)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x66)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x00)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x00)
          continue;
        break;
      }
      if (length < 16)
        return(MagickFalse);
      id=(ssize_t) ReadProfileShort(LSBEndian,exif);
    }
  endian=LSBEndian;
  if (id == 0x4949)
    endian=LSBEndian;
  else
    if (id == 0x4D4D)
      endian=MSBEndian;
    else
      return(MagickFalse);
  if (ReadProfileShort(endian,exif+2) != 0x002a)
    return(MagickFalse);
  /*
    This is the offset to the first IFD.
  */
  offset=(ssize_t) ReadProfileLong(endian,exif+4);
  if ((offset < 0) || ((size_t) offset >= length))
    return(MagickFalse);
  directory=exif+offset;
  level=0;
  entry=0;
  exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
    (void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
  do
  {
    if (level > 0)
      {
        level--;
        directory=directory_stack[level].directory;
        entry=directory_stack[level].entry;
      }
    if ((directory < exif) || (directory > (exif+length-2)))
      break;
    /*
      Determine how many entries there are in the current IFD.
    */
    number_entries=ReadProfileShort(endian,directory);
    for ( ; entry < number_entries; entry++)
    {
      int
        components;

      register unsigned char
        *p,
        *q;

      size_t
        number_bytes;

      ssize_t
        format,
        tag_value;

      q=(unsigned char *) (directory+2+(12*entry));
      if (q > (exif+length-12))
        break;  /* corrupt EXIF */
      /* an already-visited entry means an offset loop: bail out */
      if (GetValueFromSplayTree(exif_resources,q) == q)
        break;
      (void) AddValueToSplayTree(exif_resources,q,q);
      tag_value=(ssize_t) ReadProfileShort(endian,q);
      format=(ssize_t) ReadProfileShort(endian,q+2);
      if ((format < 0) || ((format-1) >= EXIF_NUM_FORMATS))
        break;
      components=(int) ReadProfileLong(endian,q+4);
      if (components < 0)
        break;  /* corrupt EXIF */
      number_bytes=(size_t) components*format_bytes[format];
      if ((ssize_t) number_bytes < components)
        break;  /* prevent overflow */
      if (number_bytes <= 4)
        p=q+8;  /* value is stored inline in the entry */
      else
        {
          /*
            The directory entry contains an offset.
          */
          offset=(ssize_t) ReadProfileLong(endian,q+8);
          if ((offset < 0) || ((size_t) (offset+number_bytes) > length))
            continue;
          if (~length < number_bytes)
            continue;  /* prevent overflow */
          p=(unsigned char *) (exif+offset);
        }
      switch (tag_value)
      {
        case 0x011a:
        {
          /* XResolution: rational numerator/denominator */
          (void) WriteProfileLong(endian,(size_t) (image->resolution.x+0.5),
            p);
          (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x011b:
        {
          /* YResolution */
          (void) WriteProfileLong(endian,(size_t) (image->resolution.y+0.5),
            p);
          (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x0112:
        {
          /* Orientation */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) image->orientation,p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) image->orientation,
            p);
          break;
        }
        case 0x0128:
        {
          /* ResolutionUnit: EXIF units are ImageMagick units + 1 */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) (image->units+1),p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) (image->units+1),
            p);
          break;
        }
        default:
          break;
      }
      if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET))
        {
          /* descend into the sub-IFD, remembering where to resume */
          offset=(ssize_t) ReadProfileLong(endian,p);
          if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
            {
              directory_stack[level].directory=directory;
              entry++;
              directory_stack[level].entry=entry;
              level++;
              directory_stack[level].directory=exif+offset;
              directory_stack[level].entry=0;
              level++;
              if ((directory+2+(12*number_entries)) > (exif+length))
                break;
              /* chained IFD offset stored after the last entry */
              offset=(ssize_t) ReadProfileLong(endian,directory+2+(12*
                number_entries));
              if ((offset != 0) && ((size_t) offset < length) &&
                  (level < (MaxDirectoryStack-2)))
                {
                  directory_stack[level].directory=exif+offset;
                  directory_stack[level].entry=0;
                  level++;
                }
            }
          break;
        }
    }
  } while (level > 0);
  exif_resources=DestroySplayTree(exif_resources);
  return(MagickTrue);
}

/*
  Synchronize the 8BIM and EXIF profiles with the current image properties;
  returns MagickFalse if either profile could not be updated.
*/
MagickPrivate MagickBooleanType SyncImageProfiles(Image *image)
{
  MagickBooleanType
    status;

  StringInfo
    *profile;

  status=MagickTrue;
  profile=(StringInfo *) GetImageProfile(image,"8BIM");
  if (profile != (StringInfo *) NULL)
    if (Sync8BimProfile(image,profile) == MagickFalse)
      status=MagickFalse;
  profile=(StringInfo *) GetImageProfile(image,"EXIF");
  if (profile != (StringInfo *) NULL)
    if (SyncExifProfile(image,profile) == MagickFalse)
      status=MagickFalse;
  return(status);
}
edges.c
#include <stddef.h> #ifdef __cplusplus extern "C" { #endif extern void CXX_Walk_Int(char *l, const char *h, const size_t sz, unsigned int *b); extern void CXX_Walk_Double(char *l, const char *h, const size_t sz, double *b); #ifdef __cplusplus } #endif #include <stdio.h> #include <stdint.h> #include <omp.h> #include "allocator.h" #include "geometry.h" //#include "fio.h" #include "mesh.h" #include "index.h" /* Allocate the edges */ size_t emalloc(char *fbuf, struct etbl *e) { const size_t sz = e->sz; const size_t ndsz = sz * 2; uint32_t *buf0 = (uint32_t *) fun3d_malloc(ndsz, sizeof(uint32_t)); size_t bytes0 = ndsz * sizeof(uint32_t); //struct wtbl w0; //{ // w0.l = fbuf; // w0.h = fbuf + bytes0; // w0.t = UINT; // w0.sz = ndsz; //} //walkfbuf(&w0, buf0); CXX_Walk_Int(fbuf, fbuf + bytes0, ndsz, buf0); const size_t nrsz = sz * 4; double *buf1 = (double *) fun3d_malloc(nrsz, sizeof(double)); size_t bytes1 = nrsz * sizeof(double); //struct wtbl w1; //{ // w1.l = w0.h; // w1.h = w0.h + bytes1; // w1.t = DOUBLE; // w1.sz = nrsz; //} //walkfbuf(&w1, buf1); CXX_Walk_Double(fbuf + bytes0, fbuf + bytes0 + bytes1, nrsz, buf1); // Find the permutation array of a sorted sequence to reorder the // edges and their normals uint32_t *p = (uint32_t *) fun3d_malloc(sz, sizeof(uint32_t)); imain(sz, buf0, p); // Reorder the edge endpoints and their normals uint32_t i; #pragma omp parallel for for(i = 0; i < sz; i++) { // Edge endpoints e->eptr->n0[i] = buf0[p[i]] - 1; // From Fortran to C e->eptr->n1[i] = buf0[p[i] + sz] - 1; // From Fortran to C // Unit normals of dual faces and area of the dual mesh face e->xyzn->x0[i] = buf1[p[i]]; e->xyzn->x1[i] = buf1[p[i] + sz]; e->xyzn->x2[i] = buf1[p[i] + sz + sz]; e->xyzn->x3[i] = buf1[p[i] + sz + sz + sz]; } fun3d_free(buf0); fun3d_free(buf1); fun3d_free(p); return (bytes0 + bytes1); }
implicit_task_data.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// This test checks that values stored in task_data in a barrier_begin event
// are still present in the corresponding barrier_end event.
// Therefore, callback implementations different from the ones in callback.h are necessary.
// This is a test for an issue reported in
// https://github.com/OpenMPToolsInterface/LLVM-openmp/issues/39

// _BSD_SOURCE is needed for sleep() from <unistd.h>.
#define _BSD_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <inttypes.h>
#include <omp.h>
#include <ompt.h>

// Printable names indexed by ompt_thread_type_t (index 0 is unused).
static const char* ompt_thread_type_t_values[] = {
  NULL,
  "ompt_thread_initial",
  "ompt_thread_worker",
  "ompt_thread_other"
};

// Runtime entry points resolved in ompt_initialize().
static ompt_get_unique_id_t ompt_get_unique_id;
static ompt_get_thread_data_t ompt_get_thread_data;

int main()
{
  // The master sleeps so the workers reach the implicit barrier first and
  // wait there, guaranteeing barrier events on every thread.
  #pragma omp parallel num_threads(4)
  {
    #pragma omp master
    {
      sleep(1);
    }
  }

  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region_wait'

  // CHECK: 0: NULL_POINTER=[[NULL:.*$]]

  // master thread implicit barrier at parallel end
  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_barrier_begin: parallel_id=0, task_id=[[TASK_ID:[0-9]+]], codeptr_ra={{0x[0-f]*}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_begin: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra={{0x[0-f]*}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_end: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra={{0x[0-f]*}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra={{0x[0-f]*}}

  // worker thread implicit barrier at parallel end
  // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_barrier_begin: parallel_id=0, task_id=[[TASK_ID:[0-9]+]], codeptr_ra=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_begin: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_end: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra=[[NULL]]

  return 0;
}

// Stamp a fresh unique id into thread_data when a thread starts, and verify
// the runtime handed us zero-initialized storage.
static void
on_ompt_callback_thread_begin(
  ompt_thread_type_t thread_type,
  ompt_data_t *thread_data)
{
  if(thread_data->ptr)
    printf("%s\n", "0: thread_data initially not null");
  thread_data->value = ompt_get_unique_id();
  printf("%" PRIu64 ": ompt_event_thread_begin: thread_type=%s=%d, thread_id=%" PRIu64 "\n",
    ompt_get_thread_data()->value, ompt_thread_type_t_values[thread_type],
    thread_type, thread_data->value);
}

// At barrier begin a fresh unique id is written into task_data; the CHECK
// lines above demand that barrier_end reports the very same id, which is
// the property under test.
static void
on_ompt_callback_sync_region(
  ompt_sync_region_kind_t kind,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      task_data->value = ompt_get_unique_id();
      if(kind == ompt_sync_region_barrier)
        printf("%" PRIu64 ": ompt_event_barrier_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
          ompt_get_thread_data()->value, parallel_data->value,
          task_data->value, codeptr_ra);
      break;
    case ompt_scope_end:
      // parallel_data may already be NULL at scope end (parallel region
      // being torn down), hence the guard.
      if(kind == ompt_sync_region_barrier)
        printf("%" PRIu64 ": ompt_event_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
          ompt_get_thread_data()->value,
          (parallel_data)?parallel_data->value:0, task_data->value,
          codeptr_ra);
      break;
  }
}

// Same shape as on_ompt_callback_sync_region but reports the wait_* events;
// it must NOT reassign task_data so it observes the id set at barrier begin.
static void
on_ompt_callback_sync_region_wait(
  ompt_sync_region_kind_t kind,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      if(kind == ompt_sync_region_barrier)
        printf("%" PRIu64 ": ompt_event_wait_barrier_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
          ompt_get_thread_data()->value, parallel_data->value,
          task_data->value, codeptr_ra);
      break;
    case ompt_scope_end:
      if(kind == ompt_sync_region_barrier)
        printf("%" PRIu64 ": ompt_event_wait_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
          ompt_get_thread_data()->value,
          (parallel_data)?parallel_data->value:0, task_data->value,
          codeptr_ra);
      break;
  }
}

// Register on_<name> for callback <name>; prints the "Could not register"
// marker the CHECK-NOT lines scan for when registration is refused.
#define register_callback_t(name, type)                       \
do{                                                           \
  type f_##name = &on_##name;                                 \
  if (ompt_set_callback(name, (ompt_callback_t)f_##name) ==   \
      ompt_set_never)                                         \
    printf("0: Could not register callback '" #name "'\n");   \
}while(0)

#define register_callback(name) register_callback_t(name, name##_t)

// Tool initializer: resolve runtime entry points via lookup() and register
// the three callbacks this test observes.
int ompt_initialize(
  ompt_function_lookup_t lookup,
  ompt_data_t *tool_data)
{
  ompt_set_callback_t ompt_set_callback;
  ompt_set_callback = (ompt_set_callback_t) lookup("ompt_set_callback");
  ompt_get_unique_id = (ompt_get_unique_id_t) lookup("ompt_get_unique_id");
  ompt_get_thread_data = (ompt_get_thread_data_t) lookup("ompt_get_thread_data");
  register_callback(ompt_callback_sync_region);
  // sync_region_wait shares the sync_region callback signature.
  register_callback_t(ompt_callback_sync_region_wait, ompt_callback_sync_region_t);
  register_callback(ompt_callback_thread_begin);
  printf("0: NULL_POINTER=%p\n", (void*)NULL);
  return 1; //success
}

void ompt_finalize(ompt_data_t *tool_data)
{
  printf("0: ompt_event_runtime_shutdown\n");
}

// Entry point through which the OpenMP runtime discovers this tool.
ompt_start_tool_result_t* ompt_start_tool(
  unsigned int omp_version,
  const char *runtime_version)
{
  static ompt_start_tool_result_t ompt_start_tool_result = {&ompt_initialize,&ompt_finalize, 0};
  return &ompt_start_tool_result;
}
parser.c
/* C++ Parser. Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc. Written by Mark Mitchell <mark@codesourcery.com>. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "timevar.h" #include "cpplib.h" #include "tree.h" #include "cp-tree.h" #include "intl.h" #include "c-family/c-pragma.h" #include "decl.h" #include "flags.h" #include "diagnostic-core.h" #include "output.h" #include "target.h" #include "cgraph.h" #include "c-family/c-common.h" #include "c-family/c-objc.h" #include "plugin.h" #include "tree-pretty-print.h" #include "parser.h" /* The lexer. */ /* The cp_lexer_* routines mediate between the lexer proper (in libcpp and c-lex.c) and the C++ parser. */ static cp_token eof_token = { CPP_EOF, RID_MAX, 0, PRAGMA_NONE, false, false, false, 0, { NULL } }; /* The various kinds of non integral constant we encounter. 
*/ typedef enum non_integral_constant { NIC_NONE, /* floating-point literal */ NIC_FLOAT, /* %<this%> */ NIC_THIS, /* %<__FUNCTION__%> */ NIC_FUNC_NAME, /* %<__PRETTY_FUNCTION__%> */ NIC_PRETTY_FUNC, /* %<__func__%> */ NIC_C99_FUNC, /* "%<va_arg%> */ NIC_VA_ARG, /* a cast */ NIC_CAST, /* %<typeid%> operator */ NIC_TYPEID, /* non-constant compound literals */ NIC_NCC, /* a function call */ NIC_FUNC_CALL, /* an increment */ NIC_INC, /* an decrement */ NIC_DEC, /* an array reference */ NIC_ARRAY_REF, /* %<->%> */ NIC_ARROW, /* %<.%> */ NIC_POINT, /* the address of a label */ NIC_ADDR_LABEL, /* %<*%> */ NIC_STAR, /* %<&%> */ NIC_ADDR, /* %<++%> */ NIC_PREINCREMENT, /* %<--%> */ NIC_PREDECREMENT, /* %<new%> */ NIC_NEW, /* %<delete%> */ NIC_DEL, /* calls to overloaded operators */ NIC_OVERLOADED, /* an assignment */ NIC_ASSIGNMENT, /* a comma operator */ NIC_COMMA, /* a call to a constructor */ NIC_CONSTRUCTOR, /* a transaction expression */ NIC_TRANSACTION } non_integral_constant; /* The various kinds of errors about name-lookup failing. */ typedef enum name_lookup_error { /* NULL */ NLE_NULL, /* is not a type */ NLE_TYPE, /* is not a class or namespace */ NLE_CXX98, /* is not a class, namespace, or enumeration */ NLE_NOT_CXX98 } name_lookup_error; /* The various kinds of required token */ typedef enum required_token { RT_NONE, RT_SEMICOLON, /* ';' */ RT_OPEN_PAREN, /* '(' */ RT_CLOSE_BRACE, /* '}' */ RT_OPEN_BRACE, /* '{' */ RT_CLOSE_SQUARE, /* ']' */ RT_OPEN_SQUARE, /* '[' */ RT_COMMA, /* ',' */ RT_SCOPE, /* '::' */ RT_LESS, /* '<' */ RT_GREATER, /* '>' */ RT_EQ, /* '=' */ RT_ELLIPSIS, /* '...' 
*/ RT_MULT, /* '*' */ RT_COMPL, /* '~' */ RT_COLON, /* ':' */ RT_COLON_SCOPE, /* ':' or '::' */ RT_CLOSE_PAREN, /* ')' */ RT_COMMA_CLOSE_PAREN, /* ',' or ')' */ RT_PRAGMA_EOL, /* end of line */ RT_NAME, /* identifier */ /* The type is CPP_KEYWORD */ RT_NEW, /* new */ RT_DELETE, /* delete */ RT_RETURN, /* return */ RT_WHILE, /* while */ RT_EXTERN, /* extern */ RT_STATIC_ASSERT, /* static_assert */ RT_DECLTYPE, /* decltype */ RT_OPERATOR, /* operator */ RT_CLASS, /* class */ RT_TEMPLATE, /* template */ RT_NAMESPACE, /* namespace */ RT_USING, /* using */ RT_ASM, /* asm */ RT_TRY, /* try */ RT_CATCH, /* catch */ RT_THROW, /* throw */ RT_LABEL, /* __label__ */ RT_AT_TRY, /* @try */ RT_AT_SYNCHRONIZED, /* @synchronized */ RT_AT_THROW, /* @throw */ RT_SELECT, /* selection-statement */ RT_INTERATION, /* iteration-statement */ RT_JUMP, /* jump-statement */ RT_CLASS_KEY, /* class-key */ RT_CLASS_TYPENAME_TEMPLATE, /* class, typename, or template */ RT_TRANSACTION_ATOMIC, /* __transaction_atomic */ RT_TRANSACTION_RELAXED, /* __transaction_relaxed */ RT_TRANSACTION_CANCEL /* __transaction_cancel */ } required_token; /* Prototypes. 
*/ static cp_lexer *cp_lexer_new_main (void); static cp_lexer *cp_lexer_new_from_tokens (cp_token_cache *tokens); static void cp_lexer_destroy (cp_lexer *); static int cp_lexer_saving_tokens (const cp_lexer *); static cp_token *cp_lexer_token_at (cp_lexer *, cp_token_position); static void cp_lexer_get_preprocessor_token (cp_lexer *, cp_token *); static inline cp_token *cp_lexer_peek_token (cp_lexer *); static cp_token *cp_lexer_peek_nth_token (cp_lexer *, size_t); static inline bool cp_lexer_next_token_is (cp_lexer *, enum cpp_ttype); static bool cp_lexer_next_token_is_not (cp_lexer *, enum cpp_ttype); static bool cp_lexer_next_token_is_keyword (cp_lexer *, enum rid); static cp_token *cp_lexer_consume_token (cp_lexer *); static void cp_lexer_purge_token (cp_lexer *); static void cp_lexer_purge_tokens_after (cp_lexer *, cp_token_position); static void cp_lexer_save_tokens (cp_lexer *); static void cp_lexer_commit_tokens (cp_lexer *); static void cp_lexer_rollback_tokens (cp_lexer *); static void cp_lexer_print_token (FILE *, cp_token *); static inline bool cp_lexer_debugging_p (cp_lexer *); static void cp_lexer_start_debugging (cp_lexer *) ATTRIBUTE_UNUSED; static void cp_lexer_stop_debugging (cp_lexer *) ATTRIBUTE_UNUSED; static cp_token_cache *cp_token_cache_new (cp_token *, cp_token *); static void cp_parser_initial_pragma (cp_token *); static tree cp_literal_operator_id (const char *); /* Manifest constants. */ #define CP_LEXER_BUFFER_SIZE ((256 * 1024) / sizeof (cp_token)) #define CP_SAVED_TOKEN_STACK 5 /* Variables. */ /* The stream to which debugging output should be written. */ static FILE *cp_lexer_debug_stream; /* Nonzero if we are parsing an unevaluated operand: an operand to sizeof, typeof, or alignof. */ int cp_unevaluated_operand; /* Dump up to NUM tokens in BUFFER to FILE starting with token START_TOKEN. If START_TOKEN is NULL, the dump starts with the first token in BUFFER. If NUM is 0, dump all the tokens. 
If CURR_TOKEN is set and it is one of the tokens in BUFFER, it
   will be highlighted by surrounding it in [[ ]].  */

static void
cp_lexer_dump_tokens (FILE *file, VEC(cp_token,gc) *buffer,
                      cp_token *start_token, unsigned num,
                      cp_token *curr_token)
{
  unsigned i, nprinted;
  cp_token *token;
  bool do_print;

  fprintf (file, "%u tokens\n", VEC_length (cp_token, buffer));

  if (buffer == NULL)
    return;

  /* NUM == 0 means "dump everything".  */
  if (num == 0)
    num = VEC_length (cp_token, buffer);

  if (start_token == NULL)
    start_token = VEC_address (cp_token, buffer);

  /* When the dump does not begin at the buffer head, show the first
     token followed by an ellipsis.  */
  if (start_token > VEC_address (cp_token, buffer))
    {
      cp_lexer_print_token (file, VEC_index (cp_token, buffer, 0));
      fprintf (file, " ... ");
    }

  /* Walk the whole buffer, but only start printing once START_TOKEN
     has been reached; stop after NUM tokens have been printed.  */
  do_print = false;
  nprinted = 0;
  for (i = 0; VEC_iterate (cp_token, buffer, i, token) && nprinted < num; i++)
    {
      if (token == start_token)
        do_print = true;

      if (!do_print)
        continue;

      nprinted++;
      if (token == curr_token)
        fprintf (file, "[[");

      cp_lexer_print_token (file, token);

      if (token == curr_token)
        fprintf (file, "]]");

      /* Break lines at tokens that usually end a statement or scope.  */
      switch (token->type)
        {
          case CPP_SEMICOLON:
          case CPP_OPEN_BRACE:
          case CPP_CLOSE_BRACE:
          case CPP_EOF:
            fputc ('\n', file);
            break;
          default:
            fputc (' ', file);
        }
    }

  /* If the window stopped short of the end, show a trailing ellipsis
     and the final token.  */
  if (i == num && i < VEC_length (cp_token, buffer))
    {
      fprintf (file, " ... ");
      cp_lexer_print_token (file, VEC_index (cp_token, buffer,
                            VEC_length (cp_token, buffer) - 1));
    }

  fprintf (file, "\n");
}


/* Dump all tokens in BUFFER to stderr.  */

void
cp_lexer_debug_tokens (VEC(cp_token,gc) *buffer)
{
  cp_lexer_dump_tokens (stderr, buffer, NULL, 0, NULL);
}

/* Dump the cp_parser tree field T to FILE if T is non-NULL.  DESC is the
   description for T.  */

static void
cp_debug_print_tree_if_set (FILE *file, const char *desc, tree t)
{
  if (t)
    {
      fprintf (file, "%s: ", desc);
      print_node_brief (file, "", t, 0);
    }
}


/* Dump parser context C to FILE.
 */

static void
cp_debug_print_context (FILE *file, cp_parser_context *c)
{
  /* Indexed by the context's status enumerator.  */
  const char *status_s[] = { "OK", "ERROR", "COMMITTED" };
  fprintf (file, "{ status = %s, scope = ", status_s[c->status]);
  print_node_brief (file, "", c->object_type, 0);
  fprintf (file, "}\n");
}

/* Print the stack of parsing contexts to FILE starting with FIRST.  */

static void
cp_debug_print_context_stack (FILE *file, cp_parser_context *first)
{
  unsigned i;
  cp_parser_context *c;

  fprintf (file, "Parsing context stack:\n");
  /* Contexts form a singly linked list through ->next.  */
  for (i = 0, c = first; c; c = c->next, i++)
    {
      fprintf (file, "\t#%u: ", i);
      cp_debug_print_context (file, c);
    }
}

/* Print the value of FLAG to FILE.  DESC is a string describing the flag.
   Flags that are false are omitted entirely.  */

static void
cp_debug_print_flag (FILE *file, const char *desc, bool flag)
{
  if (flag)
    fprintf (file, "%s: true\n", desc);
}

/* Print an unparsed function entry UF to FILE.  */

static void
cp_debug_print_unparsed_function (FILE *file, cp_unparsed_functions_entry *uf)
{
  unsigned i;
  cp_default_arg_entry *default_arg_fn;
  tree fn;

  fprintf (file, "\tFunctions with default args:\n");
  for (i = 0;
       VEC_iterate (cp_default_arg_entry, uf->funs_with_default_args, i,
                    default_arg_fn);
       i++)
    {
      fprintf (file, "\t\tClass type: ");
      print_node_brief (file, "", default_arg_fn->class_type, 0);
      fprintf (file, "\t\tDeclaration: ");
      print_node_brief (file, "", default_arg_fn->decl, 0);
      fprintf (file, "\n");
    }

  fprintf (file, "\n\tFunctions with definitions that require "
           "post-processing\n\t\t");
  for (i = 0; VEC_iterate (tree, uf->funs_with_definitions, i, fn); i++)
    {
      print_node_brief (file, "", fn, 0);
      fprintf (file, " ");
    }
  fprintf (file, "\n");

  fprintf (file, "\n\tNon-static data members with initializers that require "
           "post-processing\n\t\t");
  for (i = 0; VEC_iterate (tree, uf->nsdmis, i, fn); i++)
    {
      print_node_brief (file, "", fn, 0);
      fprintf (file, " ");
    }
  fprintf (file, "\n");
}


/* Print the stack of unparsed member functions S to FILE.
 */

static void
cp_debug_print_unparsed_queues (FILE *file,
                                VEC(cp_unparsed_functions_entry, gc) *s)
{
  unsigned i;
  cp_unparsed_functions_entry *uf;

  fprintf (file, "Unparsed functions\n");
  for (i = 0; VEC_iterate (cp_unparsed_functions_entry, s, i, uf); i++)
    {
      fprintf (file, "#%u:\n", i);
      cp_debug_print_unparsed_function (file, uf);
    }
}

/* Dump the tokens in a window of size WINDOW_SIZE around the next_token for
   the given PARSER.  If FILE is NULL, the output is printed on stderr. */

static void
cp_debug_parser_tokens (FILE *file, cp_parser *parser, int window_size)
{
  cp_token *next_token, *first_token, *start_token;

  if (file == NULL)
    file = stderr;

  /* Center the window on next_token, clamping at the buffer head.  */
  next_token = parser->lexer->next_token;
  first_token = VEC_address (cp_token, parser->lexer->buffer);
  start_token = (next_token > first_token + window_size / 2)
                ? next_token - window_size / 2
                : first_token;
  cp_lexer_dump_tokens (file, parser->lexer->buffer, start_token, window_size,
                        next_token);
}


/* Dump debugging information for the given PARSER.  If FILE is NULL,
   the output is printed on stderr.
*/ void cp_debug_parser (FILE *file, cp_parser *parser) { const size_t window_size = 20; cp_token *token; expanded_location eloc; if (file == NULL) file = stderr; fprintf (file, "Parser state\n\n"); fprintf (file, "Number of tokens: %u\n", VEC_length (cp_token, parser->lexer->buffer)); cp_debug_print_tree_if_set (file, "Lookup scope", parser->scope); cp_debug_print_tree_if_set (file, "Object scope", parser->object_scope); cp_debug_print_tree_if_set (file, "Qualifying scope", parser->qualifying_scope); cp_debug_print_context_stack (file, parser->context); cp_debug_print_flag (file, "Allow GNU extensions", parser->allow_gnu_extensions_p); cp_debug_print_flag (file, "'>' token is greater-than", parser->greater_than_is_operator_p); cp_debug_print_flag (file, "Default args allowed in current " "parameter list", parser->default_arg_ok_p); cp_debug_print_flag (file, "Parsing integral constant-expression", parser->integral_constant_expression_p); cp_debug_print_flag (file, "Allow non-constant expression in current " "constant-expression", parser->allow_non_integral_constant_expression_p); cp_debug_print_flag (file, "Seen non-constant expression", parser->non_integral_constant_expression_p); cp_debug_print_flag (file, "Local names and 'this' forbidden in " "current context", parser->local_variables_forbidden_p); cp_debug_print_flag (file, "In unbraced linkage specification", parser->in_unbraced_linkage_specification_p); cp_debug_print_flag (file, "Parsing a declarator", parser->in_declarator_p); cp_debug_print_flag (file, "In template argument list", parser->in_template_argument_list_p); cp_debug_print_flag (file, "Parsing an iteration statement", parser->in_statement & IN_ITERATION_STMT); cp_debug_print_flag (file, "Parsing a switch statement", parser->in_statement & IN_SWITCH_STMT); cp_debug_print_flag (file, "Parsing a structured OpenMP block", parser->in_statement & IN_OMP_BLOCK); cp_debug_print_flag (file, "Parsing a an OpenMP loop", parser->in_statement & IN_OMP_FOR); 
cp_debug_print_flag (file, "Parsing an if statement", parser->in_statement & IN_IF_STMT); cp_debug_print_flag (file, "Parsing a type-id in an expression " "context", parser->in_type_id_in_expr_p); cp_debug_print_flag (file, "Declarations are implicitly extern \"C\"", parser->implicit_extern_c); cp_debug_print_flag (file, "String expressions should be translated " "to execution character set", parser->translate_strings_p); cp_debug_print_flag (file, "Parsing function body outside of a " "local class", parser->in_function_body); cp_debug_print_flag (file, "Auto correct a colon to a scope operator", parser->colon_corrects_to_scope_p); if (parser->type_definition_forbidden_message) fprintf (file, "Error message for forbidden type definitions: %s\n", parser->type_definition_forbidden_message); cp_debug_print_unparsed_queues (file, parser->unparsed_queues); fprintf (file, "Number of class definitions in progress: %u\n", parser->num_classes_being_defined); fprintf (file, "Number of template parameter lists for the current " "declaration: %u\n", parser->num_template_parameter_lists); cp_debug_parser_tokens (file, parser, window_size); token = parser->lexer->next_token; fprintf (file, "Next token to parse:\n"); fprintf (file, "\tToken: "); cp_lexer_print_token (file, token); eloc = expand_location (token->location); fprintf (file, "\n\tFile: %s\n", eloc.file); fprintf (file, "\tLine: %d\n", eloc.line); fprintf (file, "\tColumn: %d\n", eloc.column); } /* Allocate memory for a new lexer object and return it. */ static cp_lexer * cp_lexer_alloc (void) { cp_lexer *lexer; c_common_no_more_pch (); /* Allocate the memory. */ lexer = ggc_alloc_cleared_cp_lexer (); /* Initially we are not debugging. */ lexer->debugging_p = false; lexer->saved_tokens = VEC_alloc (cp_token_position, heap, CP_SAVED_TOKEN_STACK); /* Create the buffer. 
 */
  lexer->buffer = VEC_alloc (cp_token, gc, CP_LEXER_BUFFER_SIZE);

  return lexer;
}


/* Create a new main C++ lexer, the lexer that gets tokens from the
   preprocessor.  */

static cp_lexer *
cp_lexer_new_main (void)
{
  cp_lexer *lexer;
  cp_token token;

  /* It's possible that parsing the first pragma will load a PCH file,
     which is a GC collection point.  So we have to do that before
     allocating any memory.  */
  cp_parser_initial_pragma (&token);

  lexer = cp_lexer_alloc ();

  /* Put the first token in the buffer.  */
  VEC_quick_push (cp_token, lexer->buffer, &token);

  /* Get the remaining tokens from the preprocessor, up to and
     including the EOF token.  */
  while (token.type != CPP_EOF)
    {
      cp_lexer_get_preprocessor_token (lexer, &token);
      VEC_safe_push (cp_token, gc, lexer->buffer, &token);
    }

  /* last_token points at the final (EOF) token; next_token at the
     head, or the shared eof_token sentinel for an empty buffer.  */
  lexer->last_token = VEC_address (cp_token, lexer->buffer)
                      + VEC_length (cp_token, lexer->buffer)
                      - 1;
  lexer->next_token = VEC_length (cp_token, lexer->buffer)
                      ? VEC_address (cp_token, lexer->buffer)
                      : &eof_token;

  /* Subsequent preprocessor diagnostics should use compiler
     diagnostic functions to get the compiler source location.  */
  done_lexing = true;

  gcc_assert (!lexer->next_token->purged_p);
  return lexer;
}

/* Create a new lexer whose token stream is primed with the tokens in
   CACHE.  When these tokens are exhausted, no new tokens will be
   read.  */

static cp_lexer *
cp_lexer_new_from_tokens (cp_token_cache *cache)
{
  cp_token *first = cache->first;
  cp_token *last = cache->last;
  cp_lexer *lexer = ggc_alloc_cleared_cp_lexer ();

  /* We do not own the buffer.  */
  lexer->buffer = NULL;

  /* An empty cache degenerates to the shared EOF sentinel.  */
  lexer->next_token = first == last ? &eof_token : first;
  lexer->last_token = last;

  lexer->saved_tokens = VEC_alloc (cp_token_position, heap,
                                   CP_SAVED_TOKEN_STACK);

  /* Initially we are not debugging.  */
  lexer->debugging_p = false;

  gcc_assert (!lexer->next_token->purged_p);
  return lexer;
}


/* Frees all resources associated with LEXER.
 */

static void
cp_lexer_destroy (cp_lexer *lexer)
{
  /* The buffer may be NULL for a from-tokens lexer (it does not own
     its buffer); VEC_free handles that.  */
  VEC_free (cp_token, gc, lexer->buffer);
  VEC_free (cp_token_position, heap, lexer->saved_tokens);
  ggc_free (lexer);
}

/* Returns nonzero if debugging information should be output.  */

static inline bool
cp_lexer_debugging_p (cp_lexer *lexer)
{
  return lexer->debugging_p;
}


static inline cp_token_position
cp_lexer_token_position (cp_lexer *lexer, bool previous_p)
{
  gcc_assert (!previous_p || lexer->next_token != &eof_token);

  /* bool arithmetic: subtracts 1 when PREVIOUS_P, 0 otherwise.  */
  return lexer->next_token - previous_p;
}

static inline cp_token *
cp_lexer_token_at (cp_lexer *lexer ATTRIBUTE_UNUSED, cp_token_position pos)
{
  return pos;
}

static inline void
cp_lexer_set_token_position (cp_lexer *lexer, cp_token_position pos)
{
  lexer->next_token = cp_lexer_token_at (lexer, pos);
}

static inline cp_token_position
cp_lexer_previous_token_position (cp_lexer *lexer)
{
  /* At EOF, next_token is the shared sentinel, so the previous token
     must be computed from last_token instead.  */
  if (lexer->next_token == &eof_token)
    return lexer->last_token - 1;
  else
    return cp_lexer_token_position (lexer, true);
}

static inline cp_token *
cp_lexer_previous_token (cp_lexer *lexer)
{
  cp_token_position tp = cp_lexer_previous_token_position (lexer);

  return cp_lexer_token_at (lexer, tp);
}

/* nonzero if we are presently saving tokens.  */

static inline int
cp_lexer_saving_tokens (const cp_lexer* lexer)
{
  return VEC_length (cp_token_position, lexer->saved_tokens) != 0;
}

/* Store the next token from the preprocessor in *TOKEN.  Return true
   if we reach EOF.  If LEXER is NULL, assume we are handling an
   initial #pragma pch_preprocess, and thus want the lexer to return
   processed strings.  */

static void
cp_lexer_get_preprocessor_token (cp_lexer *lexer, cp_token *token)
{
  static int is_extern_c = 0;

  /* Get a new token from the preprocessor.  */
  token->type
    = c_lex_with_flags (&token->u.value, &token->location, &token->flags,
                        lexer == NULL ?
                        0 : C_LEX_STRING_NO_JOIN);
  token->keyword = RID_MAX;
  token->pragma_kind = PRAGMA_NONE;
  token->purged_p = false;

  /* On some systems, some header files are surrounded by an
     implicit extern "C" block.  Set a flag in the token if it
     comes from such a header.  */
  is_extern_c += pending_lang_change;
  pending_lang_change = 0;
  token->implicit_extern_c = is_extern_c > 0;

  /* Check to see if this token is a keyword.  */
  if (token->type == CPP_NAME)
    {
      if (C_IS_RESERVED_WORD (token->u.value))
        {
          /* Mark this token as a keyword.  */
          token->type = CPP_KEYWORD;
          /* Record which keyword.  */
          token->keyword = C_RID_CODE (token->u.value);
        }
      else
        {
          if (warn_cxx0x_compat
              && C_RID_CODE (token->u.value) >= RID_FIRST_CXX0X
              && C_RID_CODE (token->u.value) <= RID_LAST_CXX0X)
            {
              /* Warn about the C++0x keyword (but still treat it as
                 an identifier).  */
              warning (OPT_Wc__0x_compat,
                       "identifier %qE is a keyword in C++11",
                       token->u.value);

              /* Clear out the C_RID_CODE so we don't warn about this
                 particular identifier-turned-keyword again.  */
              C_SET_RID_CODE (token->u.value, RID_MAX);
            }

          token->ambiguous_p = false;
          token->keyword = RID_MAX;
        }
    }
  else if (token->type == CPP_AT_NAME)
    {
      /* This only happens in Objective-C++; it must be a keyword.  */
      token->type = CPP_KEYWORD;
      switch (C_RID_CODE (token->u.value))
        {
        /* Replace 'class' with '@class', 'private' with '@private',
           etc.  This prevents confusion with the C++ keyword
           'class', and makes the tokens consistent with other
           Objective-C 'AT' keywords.  For example '@class' is
           reported as RID_AT_CLASS which is consistent with
           '@synchronized', which is reported as RID_AT_SYNCHRONIZED.
        */
        case RID_CLASS:     token->keyword = RID_AT_CLASS; break;
        case RID_PRIVATE:   token->keyword = RID_AT_PRIVATE; break;
        case RID_PROTECTED: token->keyword = RID_AT_PROTECTED; break;
        case RID_PUBLIC:    token->keyword = RID_AT_PUBLIC; break;
        case RID_THROW:     token->keyword = RID_AT_THROW; break;
        case RID_TRY:       token->keyword = RID_AT_TRY; break;
        case RID_CATCH:     token->keyword = RID_AT_CATCH; break;
        default:            token->keyword = C_RID_CODE (token->u.value);
        }
    }
  else if (token->type == CPP_PRAGMA)
    {
      /* We smuggled the cpp_token->u.pragma value in an INTEGER_CST.  */
      token->pragma_kind = ((enum pragma_kind)
                            TREE_INT_CST_LOW (token->u.value));
      token->u.value = NULL_TREE;
    }
}

/* Update the globals input_location and the input file stack from TOKEN.  */

static inline void
cp_lexer_set_source_position_from_token (cp_token *token)
{
  if (token->type != CPP_EOF)
    {
      input_location = token->location;
    }
}

/* Return a pointer to the next token in the token stream, but do not
   consume it.  */

static inline cp_token *
cp_lexer_peek_token (cp_lexer *lexer)
{
  if (cp_lexer_debugging_p (lexer))
    {
      fputs ("cp_lexer: peeking at token: ", cp_lexer_debug_stream);
      cp_lexer_print_token (cp_lexer_debug_stream, lexer->next_token);
      putc ('\n', cp_lexer_debug_stream);
    }
  return lexer->next_token;
}

/* Return true if the next token has the indicated TYPE.  */

static inline bool
cp_lexer_next_token_is (cp_lexer* lexer, enum cpp_ttype type)
{
  return cp_lexer_peek_token (lexer)->type == type;
}

/* Return true if the next token does not have the indicated TYPE.  */

static inline bool
cp_lexer_next_token_is_not (cp_lexer* lexer, enum cpp_ttype type)
{
  return !cp_lexer_next_token_is (lexer, type);
}

/* Return true if the next token is the indicated KEYWORD.  */

static inline bool
cp_lexer_next_token_is_keyword (cp_lexer* lexer, enum rid keyword)
{
  return cp_lexer_peek_token (lexer)->keyword == keyword;
}

/* Return true if the next token is not the indicated KEYWORD.
 */

static inline bool
cp_lexer_next_token_is_not_keyword (cp_lexer* lexer, enum rid keyword)
{
  return cp_lexer_peek_token (lexer)->keyword != keyword;
}

/* Return true if the next token is a keyword for a decl-specifier.  */

static bool
cp_lexer_next_token_is_decl_specifier_keyword (cp_lexer *lexer)
{
  cp_token *token;

  token = cp_lexer_peek_token (lexer);
  switch (token->keyword)
    {
      /* auto specifier: storage-class-specifier in C++,
         simple-type-specifier in C++0x.  */
    case RID_AUTO:
      /* Storage classes.  */
    case RID_REGISTER:
    case RID_STATIC:
    case RID_EXTERN:
    case RID_MUTABLE:
    case RID_THREAD:
      /* Elaborated type specifiers.  */
    case RID_ENUM:
    case RID_CLASS:
    case RID_STRUCT:
    case RID_UNION:
    case RID_TYPENAME:
      /* Simple type specifiers.  */
    case RID_CHAR:
    case RID_CHAR16:
    case RID_CHAR32:
    case RID_WCHAR:
    case RID_BOOL:
    case RID_SHORT:
    case RID_INT:
    case RID_LONG:
    case RID_INT128:
    case RID_SIGNED:
    case RID_UNSIGNED:
    case RID_FLOAT:
    case RID_DOUBLE:
    case RID_VOID:
      /* GNU extensions.  */
    case RID_ATTRIBUTE:
    case RID_TYPEOF:
      /* C++0x extensions.  */
    case RID_DECLTYPE:
    case RID_UNDERLYING_TYPE:
      return true;

    default:
      return false;
    }
}

/* Returns TRUE iff the token T begins a decltype type.  */

static bool
token_is_decltype (cp_token *t)
{
  /* Either the decltype keyword itself, or an already-parsed
     CPP_DECLTYPE token.  */
  return (t->keyword == RID_DECLTYPE
          || t->type == CPP_DECLTYPE);
}

/* Returns TRUE iff the next token begins a decltype type.  */

static bool
cp_lexer_next_token_is_decltype (cp_lexer *lexer)
{
  cp_token *t = cp_lexer_peek_token (lexer);
  return token_is_decltype (t);
}

/* Return a pointer to the Nth token in the token stream.  If N is 1,
   then this is precisely equivalent to cp_lexer_peek_token (except
   that it is not inline).  One would like to disallow that case, but
   there is one case (cp_parser_nth_token_starts_template_id) where
   the caller passes a variable for N and it might be 1.  */

static cp_token *
cp_lexer_peek_nth_token (cp_lexer* lexer, size_t n)
{
  cp_token *token;

  /* N is 1-based, not zero-based.
 */
  gcc_assert (n > 0);

  if (cp_lexer_debugging_p (lexer))
    fprintf (cp_lexer_debug_stream,
             "cp_lexer: peeking ahead %ld at token: ", (long)n);

  --n;
  token = lexer->next_token;
  gcc_assert (!n || token != &eof_token);
  /* Advance N non-purged tokens, stopping at the EOF sentinel if the
     stream runs out first.  */
  while (n != 0)
    {
      ++token;
      if (token == lexer->last_token)
        {
          token = &eof_token;
          break;
        }

      if (!token->purged_p)
        --n;
    }

  if (cp_lexer_debugging_p (lexer))
    {
      cp_lexer_print_token (cp_lexer_debug_stream, token);
      putc ('\n', cp_lexer_debug_stream);
    }

  return token;
}

/* Return the next token, and advance the lexer's next_token pointer
   to point to the next non-purged token.  */

static cp_token *
cp_lexer_consume_token (cp_lexer* lexer)
{
  cp_token *token = lexer->next_token;

  gcc_assert (token != &eof_token);
  gcc_assert (!lexer->in_pragma || token->type != CPP_PRAGMA_EOL);

  /* Skip over any purged tokens; land on the EOF sentinel if the end
     of the buffer is reached.  */
  do
    {
      lexer->next_token++;
      if (lexer->next_token == lexer->last_token)
        {
          lexer->next_token = &eof_token;
          break;
        }

    }
  while (lexer->next_token->purged_p);

  cp_lexer_set_source_position_from_token (token);

  /* Provide debugging output.  */
  if (cp_lexer_debugging_p (lexer))
    {
      fputs ("cp_lexer: consuming token: ", cp_lexer_debug_stream);
      cp_lexer_print_token (cp_lexer_debug_stream, token);
      putc ('\n', cp_lexer_debug_stream);
    }

  return token;
}

/* Permanently remove the next token from the token stream, and
   advance the next_token pointer to refer to the next non-purged
   token.  */

static void
cp_lexer_purge_token (cp_lexer *lexer)
{
  cp_token *tok = lexer->next_token;

  gcc_assert (tok != &eof_token);
  /* Neutralize the token in place rather than shifting the buffer.  */
  tok->purged_p = true;
  tok->location = UNKNOWN_LOCATION;
  tok->u.value = NULL_TREE;
  tok->keyword = RID_MAX;

  do
    {
      tok++;
      if (tok == lexer->last_token)
        {
          tok = &eof_token;
          break;
        }
    }
  while (tok->purged_p);

  lexer->next_token = tok;
}

/* Permanently remove all tokens after TOK, up to, but not
   including, the token that will be returned next by
   cp_lexer_peek_token.
*/

static void
cp_lexer_purge_tokens_after (cp_lexer *lexer, cp_token *tok)
{
  cp_token *peek = lexer->next_token;

  if (peek == &eof_token)
    peek = lexer->last_token;

  gcc_assert (tok < peek);

  /* Purge the half-open range (TOK, PEEK): TOK itself survives.  */
  for ( tok += 1; tok != peek; tok += 1)
    {
      tok->purged_p = true;
      tok->location = UNKNOWN_LOCATION;
      tok->u.value = NULL_TREE;
      tok->keyword = RID_MAX;
    }
}

/* Begin saving tokens.  All tokens consumed after this point will be
   preserved.  */

static void
cp_lexer_save_tokens (cp_lexer* lexer)
{
  /* Provide debugging output.  */
  if (cp_lexer_debugging_p (lexer))
    fprintf (cp_lexer_debug_stream, "cp_lexer: saving tokens\n");

  /* Remember the current position so a later rollback can restore it.  */
  VEC_safe_push (cp_token_position, heap,
		 lexer->saved_tokens, lexer->next_token);
}

/* Commit to the portion of the token stream most recently saved.  */

static void
cp_lexer_commit_tokens (cp_lexer* lexer)
{
  /* Provide debugging output.  */
  if (cp_lexer_debugging_p (lexer))
    fprintf (cp_lexer_debug_stream, "cp_lexer: committing tokens\n");

  /* Discard the saved position; next_token stays where it is.  */
  VEC_pop (cp_token_position, lexer->saved_tokens);
}

/* Return all tokens saved since the last call to cp_lexer_save_tokens
   to the token stream.  Stop saving tokens.  */

static void
cp_lexer_rollback_tokens (cp_lexer* lexer)
{
  /* Provide debugging output.  */
  if (cp_lexer_debugging_p (lexer))
    fprintf (cp_lexer_debug_stream, "cp_lexer: restoring tokens\n");

  lexer->next_token = VEC_pop (cp_token_position, lexer->saved_tokens);
}

/* Print a representation of the TOKEN on the STREAM.  */

static void
cp_lexer_print_token (FILE * stream, cp_token *token)
{
  /* We don't use cpp_type2name here because the parser defines
     a few tokens of its own.  */
  static const char *const token_names[] = {
    /* cpplib-defined token types */
#define OP(e, s) #e,
#define TK(e, s) #e,
    TTYPE_TABLE
#undef OP
#undef TK
    /* C++ parser token types - see "Manifest constants", above.  */
    "KEYWORD",
    "TEMPLATE_ID",
    "NESTED_NAME_SPECIFIER",
  };

  /* For some tokens, print the associated data.  */
  switch (token->type)
    {
    case CPP_KEYWORD:
      /* Some keywords have a value that is not an IDENTIFIER_NODE.
	 For example, `struct' is mapped to an INTEGER_CST.  */
      if (TREE_CODE (token->u.value) != IDENTIFIER_NODE)
	break;
      /* else fall through */
    case CPP_NAME:
      fputs (IDENTIFIER_POINTER (token->u.value), stream);
      break;

    case CPP_STRING:
    case CPP_STRING16:
    case CPP_STRING32:
    case CPP_WSTRING:
    case CPP_UTF8STRING:
      fprintf (stream, " \"%s\"", TREE_STRING_POINTER (token->u.value));
      break;

    case CPP_NUMBER:
      print_generic_expr (stream, token->u.value, 0);
      break;

    default:
      /* If we have a name for the token, print it out.  Otherwise, we
	 simply give the numeric code.  */
      if (token->type < ARRAY_SIZE(token_names))
	fputs (token_names[token->type], stream);
      else
	fprintf (stream, "[%d]", token->type);
      break;
    }
}

/* Start emitting debugging information.  */

static void
cp_lexer_start_debugging (cp_lexer* lexer)
{
  lexer->debugging_p = true;
  cp_lexer_debug_stream = stderr;
}

/* Stop emitting debugging information.  */

static void
cp_lexer_stop_debugging (cp_lexer* lexer)
{
  lexer->debugging_p = false;
  cp_lexer_debug_stream = NULL;
}

/* Create a new cp_token_cache, representing a range of tokens.  */

static cp_token_cache *
cp_token_cache_new (cp_token *first, cp_token *last)
{
  cp_token_cache *cache = ggc_alloc_cp_token_cache ();
  cache->first = first;
  cache->last = last;
  return cache;
}

/* Decl-specifiers.  */

/* Set *DECL_SPECS to represent an empty decl-specifier-seq.  */

static void
clear_decl_specs (cp_decl_specifier_seq *decl_specs)
{
  memset (decl_specs, 0, sizeof (cp_decl_specifier_seq));
}

/* Declarators.  */

/* Nothing other than the parser should be creating declarators;
   declarators are a semi-syntactic representation of C++ entities.
   Other parts of the front end that need to create entities (like
   VAR_DECLs or FUNCTION_DECLs) should do that directly.
*/

static cp_declarator *make_call_declarator
  (cp_declarator *, tree, cp_cv_quals, cp_virt_specifiers, tree, tree);
static cp_declarator *make_array_declarator
  (cp_declarator *, tree);
static cp_declarator *make_pointer_declarator
  (cp_cv_quals, cp_declarator *);
static cp_declarator *make_reference_declarator
  (cp_cv_quals, cp_declarator *, bool);
static cp_parameter_declarator *make_parameter_declarator
  (cp_decl_specifier_seq *, cp_declarator *, tree);
static cp_declarator *make_ptrmem_declarator
  (cp_cv_quals, tree, cp_declarator *);

/* An erroneous declarator.  */
static cp_declarator *cp_error_declarator;

/* The obstack on which declarators and related data structures are
   allocated.  */
static struct obstack declarator_obstack;

/* Alloc BYTES from the declarator memory pool.  */

static inline void *
alloc_declarator (size_t bytes)
{
  return obstack_alloc (&declarator_obstack, bytes);
}

/* Allocate a declarator of the indicated KIND.  Clear fields that are
   common to all declarators.  */

static cp_declarator *
make_declarator (cp_declarator_kind kind)
{
  cp_declarator *declarator;

  declarator = (cp_declarator *) alloc_declarator (sizeof (cp_declarator));
  declarator->kind = kind;
  declarator->attributes = NULL_TREE;
  declarator->declarator = NULL;
  declarator->parameter_pack_p = false;
  declarator->id_loc = UNKNOWN_LOCATION;

  return declarator;
}

/* Make a declarator for a generalized identifier.  If
   QUALIFYING_SCOPE is non-NULL, the identifier is
   QUALIFYING_SCOPE::UNQUALIFIED_NAME; otherwise, it is just
   UNQUALIFIED_NAME.  SFK indicates the kind of special function this
   is, if any.  */

static cp_declarator *
make_id_declarator (tree qualifying_scope, tree unqualified_name,
		    special_function_kind sfk)
{
  cp_declarator *declarator;

  /* It is valid to write:

       class C { void f(); };
       typedef C D;
       void D::f();

     The standard is not clear about whether `typedef const C D' is
     legal; as of 2002-09-15 the committee is considering that
     question.  EDG 3.0 allows that syntax.  Therefore, we do as
     well.  */
  if (qualifying_scope && TYPE_P (qualifying_scope))
    qualifying_scope = TYPE_MAIN_VARIANT (qualifying_scope);

  gcc_assert (TREE_CODE (unqualified_name) == IDENTIFIER_NODE
	      || TREE_CODE (unqualified_name) == BIT_NOT_EXPR
	      || TREE_CODE (unqualified_name) == TEMPLATE_ID_EXPR);

  declarator = make_declarator (cdk_id);
  declarator->u.id.qualifying_scope = qualifying_scope;
  declarator->u.id.unqualified_name = unqualified_name;
  declarator->u.id.sfk = sfk;

  return declarator;
}

/* Make a declarator for a pointer to TARGET.  CV_QUALIFIERS is a list
   of modifiers such as const or volatile to apply to the pointer
   type, represented as identifiers.  */

cp_declarator *
make_pointer_declarator (cp_cv_quals cv_qualifiers, cp_declarator *target)
{
  cp_declarator *declarator;

  declarator = make_declarator (cdk_pointer);
  declarator->declarator = target;
  declarator->u.pointer.qualifiers = cv_qualifiers;
  declarator->u.pointer.class_type = NULL_TREE;
  if (target)
    {
      declarator->id_loc = target->id_loc;
      /* A parameter-pack marking belongs to the outermost declarator,
	 so steal it from TARGET.  */
      declarator->parameter_pack_p = target->parameter_pack_p;
      target->parameter_pack_p = false;
    }
  else
    declarator->parameter_pack_p = false;

  return declarator;
}

/* Like make_pointer_declarator -- but for references.  */

cp_declarator *
make_reference_declarator (cp_cv_quals cv_qualifiers, cp_declarator *target,
			   bool rvalue_ref)
{
  cp_declarator *declarator;

  declarator = make_declarator (cdk_reference);
  declarator->declarator = target;
  declarator->u.reference.qualifiers = cv_qualifiers;
  declarator->u.reference.rvalue_ref = rvalue_ref;
  if (target)
    {
      declarator->id_loc = target->id_loc;
      /* As in make_pointer_declarator, hoist the pack marking.  */
      declarator->parameter_pack_p = target->parameter_pack_p;
      target->parameter_pack_p = false;
    }
  else
    declarator->parameter_pack_p = false;

  return declarator;
}

/* Like make_pointer_declarator -- but for a pointer to a non-static
   member of CLASS_TYPE.
*/

cp_declarator *
make_ptrmem_declarator (cp_cv_quals cv_qualifiers, tree class_type,
			cp_declarator *pointee)
{
  cp_declarator *declarator;

  declarator = make_declarator (cdk_ptrmem);
  declarator->declarator = pointee;
  declarator->u.pointer.qualifiers = cv_qualifiers;
  declarator->u.pointer.class_type = class_type;

  if (pointee)
    {
      /* NOTE(review): unlike the pointer/reference constructors,
	 id_loc is not propagated from POINTEE here -- presumably
	 intentional, but worth confirming against callers.  */
      declarator->parameter_pack_p = pointee->parameter_pack_p;
      pointee->parameter_pack_p = false;
    }
  else
    declarator->parameter_pack_p = false;

  return declarator;
}

/* Make a declarator for the function given by TARGET, with the
   indicated PARMS.  The CV_QUALIFIERS apply to the function, as in
   "const"-qualified member function.  The EXCEPTION_SPECIFICATION
   indicates what exceptions can be thrown.  */

cp_declarator *
make_call_declarator (cp_declarator *target,
		      tree parms,
		      cp_cv_quals cv_qualifiers,
		      cp_virt_specifiers virt_specifiers,
		      tree exception_specification,
		      tree late_return_type)
{
  cp_declarator *declarator;

  declarator = make_declarator (cdk_function);
  declarator->declarator = target;
  declarator->u.function.parameters = parms;
  declarator->u.function.qualifiers = cv_qualifiers;
  declarator->u.function.virt_specifiers = virt_specifiers;
  declarator->u.function.exception_specification = exception_specification;
  /* Trailing return type, for `auto f(...) -> T' (C++0x).  */
  declarator->u.function.late_return_type = late_return_type;
  if (target)
    {
      declarator->id_loc = target->id_loc;
      declarator->parameter_pack_p = target->parameter_pack_p;
      target->parameter_pack_p = false;
    }
  else
    declarator->parameter_pack_p = false;

  return declarator;
}

/* Make a declarator for an array of BOUNDS elements, each of which is
   defined by ELEMENT.  */

cp_declarator *
make_array_declarator (cp_declarator *element, tree bounds)
{
  cp_declarator *declarator;

  declarator = make_declarator (cdk_array);
  declarator->declarator = element;
  declarator->u.array.bounds = bounds;
  if (element)
    {
      declarator->id_loc = element->id_loc;
      declarator->parameter_pack_p = element->parameter_pack_p;
      element->parameter_pack_p = false;
    }
  else
    declarator->parameter_pack_p = false;

  return declarator;
}

/* Determine whether the declarator we've seen so far can be a
   parameter pack, when followed by an ellipsis.  */
static bool
declarator_can_be_parameter_pack (cp_declarator *declarator)
{
  /* Search for a declarator name, or any other declarator that goes
     after the point where the ellipsis could appear in a parameter
     pack.  If we find any of these, then this declarator cannot be
     made into a parameter pack.  */
  bool found = false;
  while (declarator && !found)
    {
      /* The (int) cast presumably silences -Wswitch about unhandled
	 cp_declarator_kind values; the default case covers them.  */
      switch ((int)declarator->kind)
	{
	case cdk_id:
	case cdk_array:
	  found = true;
	  break;

	case cdk_error:
	  return true;

	default:
	  declarator = declarator->declarator;
	  break;
	}
    }

  return !found;
}

cp_parameter_declarator *no_parameters;

/* Create a parameter declarator with the indicated DECL_SPECIFIERS,
   DECLARATOR and DEFAULT_ARGUMENT.  */

cp_parameter_declarator *
make_parameter_declarator (cp_decl_specifier_seq *decl_specifiers,
			   cp_declarator *declarator,
			   tree default_argument)
{
  cp_parameter_declarator *parameter;

  parameter = ((cp_parameter_declarator *)
	       alloc_declarator (sizeof (cp_parameter_declarator)));
  parameter->next = NULL;
  if (decl_specifiers)
    parameter->decl_specifiers = *decl_specifiers;
  else
    clear_decl_specs (&parameter->decl_specifiers);
  parameter->declarator = declarator;
  parameter->default_argument = default_argument;
  parameter->ellipsis_p = false;

  return parameter;
}

/* Returns true iff DECLARATOR  is a declaration for a function.
*/

static bool
function_declarator_p (const cp_declarator *declarator)
{
  /* Walk inward through the declarator chain looking for a function
     declarator whose immediate inner declarator is the name.  */
  while (declarator)
    {
      if (declarator->kind == cdk_function
	  && declarator->declarator->kind == cdk_id)
	return true;
      /* Reaching the name (or an error) without having seen such a
	 function declarator means this declares a non-function.  */
      if (declarator->kind == cdk_id
	  || declarator->kind == cdk_error)
	return false;
      declarator = declarator->declarator;
    }
  return false;
}

/* The parser.  */

/* Overview
   --------

   A cp_parser parses the token stream as specified by the C++
   grammar.  Its job is purely parsing, not semantic analysis.  For
   example, the parser breaks the token stream into declarators,
   expressions, statements, and other similar syntactic constructs.
   It does not check that the types of the expressions on either side
   of an assignment-statement are compatible, or that a function is
   not declared with a parameter of type `void'.

   The parser invokes routines elsewhere in the compiler to perform
   semantic analysis and to build up the abstract syntax tree for the
   code processed.

   The parser (and the template instantiation code, which is, in a
   way, a close relative of parsing) are the only parts of the
   compiler that should be calling push_scope and pop_scope, or
   related functions.  The parser (and template instantiation code)
   keeps track of what scope is presently active; everything else
   should simply honor that.  (The code that generates static
   initializers may also need to set the scope, in order to check
   access control correctly when emitting the initializers.)

   Methodology
   -----------

   The parser is of the standard recursive-descent variety.  Upcoming
   tokens in the token stream are examined in order to determine which
   production to use when parsing a non-terminal.  Some C++ constructs
   require arbitrary look ahead to disambiguate.  For example, it is
   impossible, in the general case, to tell whether a statement is an
   expression or declaration without scanning the entire statement.
   Therefore, the parser is capable of "parsing tentatively."  When
   the parser is not sure what construct comes next, it enters this
   mode.
   Then, while we attempt to parse the construct, the parser queues up
   error messages, rather than issuing them immediately, and saves the
   tokens it consumes.  If the construct is parsed successfully, the
   parser "commits", i.e., it issues any queued error messages and
   the tokens that were being preserved are permanently discarded.
   If, however, the construct is not parsed successfully, the parser
   rolls back its state completely so that it can resume parsing using
   a different alternative.

   Future Improvements
   -------------------

   The performance of the parser could probably be improved
   substantially.  We could often eliminate the need to parse
   tentatively by looking ahead a little bit.  In some places, this
   approach might not entirely eliminate the need to parse tentatively,
   but it might still speed up the average case.  */

/* Flags that are passed to some parsing functions.  These values can
   be bitwise-ored together.  */

enum
{
  /* No flags.  */
  CP_PARSER_FLAGS_NONE = 0x0,
  /* The construct is optional.  If it is not present, then no error
     should be issued.  */
  CP_PARSER_FLAGS_OPTIONAL = 0x1,
  /* When parsing a type-specifier, treat user-defined type-names
     as non-type identifiers.  */
  CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES = 0x2,
  /* When parsing a type-specifier, do not try to parse a class-specifier
     or enum-specifier.  */
  CP_PARSER_FLAGS_NO_TYPE_DEFINITIONS = 0x4,
  /* When parsing a decl-specifier-seq, only allow type-specifier or
     constexpr.  */
  CP_PARSER_FLAGS_ONLY_TYPE_OR_CONSTEXPR = 0x8
};

/* This type is used for parameters and variables which hold
   combinations of the above flags.  */
typedef int cp_parser_flags;

/* The different kinds of declarators we want to parse.  */

typedef enum cp_parser_declarator_kind
{
  /* We want an abstract declarator.  */
  CP_PARSER_DECLARATOR_ABSTRACT,
  /* We want a named declarator.  */
  CP_PARSER_DECLARATOR_NAMED,
  /* We don't mind, but the name must be an unqualified-id.  */
  CP_PARSER_DECLARATOR_EITHER
} cp_parser_declarator_kind;

/* The precedence values used to parse binary expressions.  The
   minimum value of PREC must be 1, because zero is reserved to
   quickly discriminate binary operators from other tokens.  */

enum cp_parser_prec
{
  PREC_NOT_OPERATOR,
  PREC_LOGICAL_OR_EXPRESSION,
  PREC_LOGICAL_AND_EXPRESSION,
  PREC_INCLUSIVE_OR_EXPRESSION,
  PREC_EXCLUSIVE_OR_EXPRESSION,
  PREC_AND_EXPRESSION,
  PREC_EQUALITY_EXPRESSION,
  PREC_RELATIONAL_EXPRESSION,
  PREC_SHIFT_EXPRESSION,
  PREC_ADDITIVE_EXPRESSION,
  PREC_MULTIPLICATIVE_EXPRESSION,
  PREC_PM_EXPRESSION,
  NUM_PREC_VALUES = PREC_PM_EXPRESSION
};

/* A mapping from a token type to a corresponding tree node type, with a
   precedence value.  */

typedef struct cp_parser_binary_operations_map_node
{
  /* The token type.  */
  enum cpp_ttype token_type;
  /* The corresponding tree code.  */
  enum tree_code tree_type;
  /* The precedence of this operator.  */
  enum cp_parser_prec prec;
} cp_parser_binary_operations_map_node;

typedef struct cp_parser_expression_stack_entry
{
  /* Left hand side of the binary operation we are currently
     parsing.  */
  tree lhs;
  /* Original tree code for left hand side, if it was a binary
     expression itself (used for -Wparentheses).  */
  enum tree_code lhs_type;
  /* Tree code for the binary operation we are parsing.  */
  enum tree_code tree_type;
  /* Precedence of the binary operation we are parsing.  */
  enum cp_parser_prec prec;
} cp_parser_expression_stack_entry;

/* The stack for storing partial expressions.  We only need NUM_PREC_VALUES
   entries because precedence levels on the stack are monotonically
   increasing.  */
typedef struct cp_parser_expression_stack_entry
  cp_parser_expression_stack[NUM_PREC_VALUES];

/* Prototypes.  */

/* Constructors and destructors.  */

static cp_parser_context *cp_parser_context_new
  (cp_parser_context *);

/* Class variables.  */

static GTY((deletable)) cp_parser_context* cp_parser_context_free_list;

/* The operator-precedence table used by cp_parser_binary_expression.
   Transformed into an associative array (binops_by_token) by
   cp_parser_new.  */

static const cp_parser_binary_operations_map_node binops[] = {
  { CPP_DEREF_STAR, MEMBER_REF, PREC_PM_EXPRESSION },
  { CPP_DOT_STAR, DOTSTAR_EXPR, PREC_PM_EXPRESSION },

  { CPP_MULT, MULT_EXPR, PREC_MULTIPLICATIVE_EXPRESSION },
  { CPP_DIV, TRUNC_DIV_EXPR, PREC_MULTIPLICATIVE_EXPRESSION },
  { CPP_MOD, TRUNC_MOD_EXPR, PREC_MULTIPLICATIVE_EXPRESSION },

  { CPP_PLUS, PLUS_EXPR, PREC_ADDITIVE_EXPRESSION },
  { CPP_MINUS, MINUS_EXPR, PREC_ADDITIVE_EXPRESSION },

  { CPP_LSHIFT, LSHIFT_EXPR, PREC_SHIFT_EXPRESSION },
  { CPP_RSHIFT, RSHIFT_EXPR, PREC_SHIFT_EXPRESSION },

  { CPP_LESS, LT_EXPR, PREC_RELATIONAL_EXPRESSION },
  { CPP_GREATER, GT_EXPR, PREC_RELATIONAL_EXPRESSION },
  { CPP_LESS_EQ, LE_EXPR, PREC_RELATIONAL_EXPRESSION },
  { CPP_GREATER_EQ, GE_EXPR, PREC_RELATIONAL_EXPRESSION },

  { CPP_EQ_EQ, EQ_EXPR, PREC_EQUALITY_EXPRESSION },
  { CPP_NOT_EQ, NE_EXPR, PREC_EQUALITY_EXPRESSION },

  { CPP_AND, BIT_AND_EXPR, PREC_AND_EXPRESSION },
  { CPP_XOR, BIT_XOR_EXPR, PREC_EXCLUSIVE_OR_EXPRESSION },
  { CPP_OR, BIT_IOR_EXPR, PREC_INCLUSIVE_OR_EXPRESSION },

  { CPP_AND_AND, TRUTH_ANDIF_EXPR, PREC_LOGICAL_AND_EXPRESSION },
  { CPP_OR_OR, TRUTH_ORIF_EXPR, PREC_LOGICAL_OR_EXPRESSION }
};

/* The same as binops, but initialized by cp_parser_new so that
   binops_by_token[N].token_type == N.  Used in cp_parser_binary_expression
   for speed.  */
static cp_parser_binary_operations_map_node binops_by_token[N_CP_TTYPES];

/* Constructors and destructors.  */

/* Construct a new context.  The context below this one on the stack
   is given by NEXT.  */

static cp_parser_context *
cp_parser_context_new (cp_parser_context* next)
{
  cp_parser_context *context;

  /* Allocate the storage.  */
  if (cp_parser_context_free_list != NULL)
    {
      /* Pull the first entry from the free list.  */
      context = cp_parser_context_free_list;
      cp_parser_context_free_list = context->next;
      memset (context, 0, sizeof (*context));
    }
  else
    context = ggc_alloc_cleared_cp_parser_context ();

  /* No errors have occurred yet in this context.  */
  context->status = CP_PARSER_STATUS_KIND_NO_ERROR;
  /* If this is not the bottommost context, copy information that we
     need from the previous context.  */
  if (next)
    {
      /* If, in the NEXT context, we are parsing an `x->' or `x.'
	 expression, then we are parsing one in this context, too.  */
      context->object_type = next->object_type;

      /* Thread the stack.  */
      context->next = next;
    }

  return context;
}

/* Managing the unparsed function queues.  */

#define unparsed_funs_with_default_args \
  VEC_last (cp_unparsed_functions_entry, parser->unparsed_queues)->funs_with_default_args
#define unparsed_funs_with_definitions \
  VEC_last (cp_unparsed_functions_entry, parser->unparsed_queues)->funs_with_definitions
#define unparsed_nsdmis \
  VEC_last (cp_unparsed_functions_entry, parser->unparsed_queues)->nsdmis

/* Push a fresh (empty) entry on the unparsed-function queue stack,
   used when entering a new class-definition scope.  */

static void
push_unparsed_function_queues (cp_parser *parser)
{
  VEC_safe_push (cp_unparsed_functions_entry, gc,
		 parser->unparsed_queues, NULL);
  unparsed_funs_with_default_args = NULL;
  unparsed_funs_with_definitions = make_tree_vector ();
  unparsed_nsdmis = NULL;
}

/* Pop the topmost entry from the unparsed-function queue stack,
   releasing its definitions vector.  */

static void
pop_unparsed_function_queues (cp_parser *parser)
{
  release_tree_vector (unparsed_funs_with_definitions);
  VEC_pop (cp_unparsed_functions_entry, parser->unparsed_queues);
}

/* Prototypes.  */

/* Constructors and destructors.  */

static cp_parser *cp_parser_new
  (void);

/* Routines to parse various constructs.

   Those that return `tree' will return the error_mark_node (rather
   than NULL_TREE) if a parse error occurs, unless otherwise noted.
   Sometimes, they will return an ordinary node if error-recovery was
   attempted, even though a parse error occurred.  So, to check
   whether or not a parse error occurred, you should always use
   cp_parser_error_occurred.
If the construct is optional (indicated either by an `_opt' in the name of the function that does the parsing or via a FLAGS parameter), then NULL_TREE is returned if the construct is not present. */ /* Lexical conventions [gram.lex] */ static tree cp_parser_identifier (cp_parser *); static tree cp_parser_string_literal (cp_parser *, bool, bool); static tree cp_parser_userdef_char_literal (cp_parser *); static tree cp_parser_userdef_string_literal (cp_token *); static tree cp_parser_userdef_numeric_literal (cp_parser *); /* Basic concepts [gram.basic] */ static bool cp_parser_translation_unit (cp_parser *); /* Expressions [gram.expr] */ static tree cp_parser_primary_expression (cp_parser *, bool, bool, bool, cp_id_kind *); static tree cp_parser_id_expression (cp_parser *, bool, bool, bool *, bool, bool); static tree cp_parser_unqualified_id (cp_parser *, bool, bool, bool, bool); static tree cp_parser_nested_name_specifier_opt (cp_parser *, bool, bool, bool, bool); static tree cp_parser_nested_name_specifier (cp_parser *, bool, bool, bool, bool); static tree cp_parser_qualifying_entity (cp_parser *, bool, bool, bool, bool, bool); static tree cp_parser_postfix_expression (cp_parser *, bool, bool, bool, cp_id_kind *); static tree cp_parser_postfix_open_square_expression (cp_parser *, tree, bool); static tree cp_parser_postfix_dot_deref_expression (cp_parser *, enum cpp_ttype, tree, bool, cp_id_kind *, location_t); static VEC(tree,gc) *cp_parser_parenthesized_expression_list (cp_parser *, int, bool, bool, bool *); /* Values for the second parameter of cp_parser_parenthesized_expression_list. 
*/ enum { non_attr = 0, normal_attr = 1, id_attr = 2 }; static void cp_parser_pseudo_destructor_name (cp_parser *, tree *, tree *); static tree cp_parser_unary_expression (cp_parser *, bool, bool, cp_id_kind *); static enum tree_code cp_parser_unary_operator (cp_token *); static tree cp_parser_new_expression (cp_parser *); static VEC(tree,gc) *cp_parser_new_placement (cp_parser *); static tree cp_parser_new_type_id (cp_parser *, tree *); static cp_declarator *cp_parser_new_declarator_opt (cp_parser *); static cp_declarator *cp_parser_direct_new_declarator (cp_parser *); static VEC(tree,gc) *cp_parser_new_initializer (cp_parser *); static tree cp_parser_delete_expression (cp_parser *); static tree cp_parser_cast_expression (cp_parser *, bool, bool, cp_id_kind *); static tree cp_parser_binary_expression (cp_parser *, bool, bool, enum cp_parser_prec, cp_id_kind *); static tree cp_parser_question_colon_clause (cp_parser *, tree); static tree cp_parser_assignment_expression (cp_parser *, bool, cp_id_kind *); static enum tree_code cp_parser_assignment_operator_opt (cp_parser *); static tree cp_parser_expression (cp_parser *, bool, cp_id_kind *); static tree cp_parser_constant_expression (cp_parser *, bool, bool *); static tree cp_parser_builtin_offsetof (cp_parser *); static tree cp_parser_lambda_expression (cp_parser *); static void cp_parser_lambda_introducer (cp_parser *, tree); static bool cp_parser_lambda_declarator_opt (cp_parser *, tree); static void cp_parser_lambda_body (cp_parser *, tree); /* Statements [gram.stmt.stmt] */ static void cp_parser_statement (cp_parser *, tree, bool, bool *); static void cp_parser_label_for_labeled_statement (cp_parser *); static tree cp_parser_expression_statement (cp_parser *, tree); static tree cp_parser_compound_statement (cp_parser *, tree, bool, bool); static void cp_parser_statement_seq_opt (cp_parser *, tree); static tree cp_parser_selection_statement (cp_parser *, bool *); static tree cp_parser_condition (cp_parser *); 
static tree cp_parser_iteration_statement (cp_parser *); static bool cp_parser_for_init_statement (cp_parser *, tree *decl); static tree cp_parser_for (cp_parser *); static tree cp_parser_c_for (cp_parser *, tree, tree); static tree cp_parser_range_for (cp_parser *, tree, tree, tree); static void do_range_for_auto_deduction (tree, tree); static tree cp_parser_perform_range_for_lookup (tree, tree *, tree *); static tree cp_parser_range_for_member_function (tree, tree); static tree cp_parser_jump_statement (cp_parser *); static void cp_parser_declaration_statement (cp_parser *); static tree cp_parser_implicitly_scoped_statement (cp_parser *, bool *); static void cp_parser_already_scoped_statement (cp_parser *); /* Declarations [gram.dcl.dcl] */ static void cp_parser_declaration_seq_opt (cp_parser *); static void cp_parser_declaration (cp_parser *); static void cp_parser_block_declaration (cp_parser *, bool); static void cp_parser_simple_declaration (cp_parser *, bool, tree *); static void cp_parser_decl_specifier_seq (cp_parser *, cp_parser_flags, cp_decl_specifier_seq *, int *); static tree cp_parser_storage_class_specifier_opt (cp_parser *); static tree cp_parser_function_specifier_opt (cp_parser *, cp_decl_specifier_seq *); static tree cp_parser_type_specifier (cp_parser *, cp_parser_flags, cp_decl_specifier_seq *, bool, int *, bool *); static tree cp_parser_simple_type_specifier (cp_parser *, cp_decl_specifier_seq *, cp_parser_flags); static tree cp_parser_type_name (cp_parser *); static tree cp_parser_nonclass_name (cp_parser* parser); static tree cp_parser_elaborated_type_specifier (cp_parser *, bool, bool); static tree cp_parser_enum_specifier (cp_parser *); static void cp_parser_enumerator_list (cp_parser *, tree); static void cp_parser_enumerator_definition (cp_parser *, tree); static tree cp_parser_namespace_name (cp_parser *); static void cp_parser_namespace_definition (cp_parser *); static void cp_parser_namespace_body (cp_parser *); static tree 
cp_parser_qualified_namespace_specifier (cp_parser *); static void cp_parser_namespace_alias_definition (cp_parser *); static bool cp_parser_using_declaration (cp_parser *, bool); static void cp_parser_using_directive (cp_parser *); static tree cp_parser_alias_declaration (cp_parser *); static void cp_parser_asm_definition (cp_parser *); static void cp_parser_linkage_specification (cp_parser *); static void cp_parser_static_assert (cp_parser *, bool); static tree cp_parser_decltype (cp_parser *); /* Declarators [gram.dcl.decl] */ static tree cp_parser_init_declarator (cp_parser *, cp_decl_specifier_seq *, VEC (deferred_access_check,gc)*, bool, bool, int, bool *, tree *); static cp_declarator *cp_parser_declarator (cp_parser *, cp_parser_declarator_kind, int *, bool *, bool); static cp_declarator *cp_parser_direct_declarator (cp_parser *, cp_parser_declarator_kind, int *, bool); static enum tree_code cp_parser_ptr_operator (cp_parser *, tree *, cp_cv_quals *); static cp_cv_quals cp_parser_cv_qualifier_seq_opt (cp_parser *); static cp_virt_specifiers cp_parser_virt_specifier_seq_opt (cp_parser *); static tree cp_parser_late_return_type_opt (cp_parser *, cp_cv_quals); static tree cp_parser_declarator_id (cp_parser *, bool); static tree cp_parser_type_id (cp_parser *); static tree cp_parser_template_type_arg (cp_parser *); static tree cp_parser_trailing_type_id (cp_parser *); static tree cp_parser_type_id_1 (cp_parser *, bool, bool); static void cp_parser_type_specifier_seq (cp_parser *, bool, bool, cp_decl_specifier_seq *); static tree cp_parser_parameter_declaration_clause (cp_parser *); static tree cp_parser_parameter_declaration_list (cp_parser *, bool *); static cp_parameter_declarator *cp_parser_parameter_declaration (cp_parser *, bool, bool *); static tree cp_parser_default_argument (cp_parser *, bool); static void cp_parser_function_body (cp_parser *); static tree cp_parser_initializer (cp_parser *, bool *, bool *); static tree cp_parser_initializer_clause 
(cp_parser *, bool *); static tree cp_parser_braced_list (cp_parser*, bool*); static VEC(constructor_elt,gc) *cp_parser_initializer_list (cp_parser *, bool *); static bool cp_parser_ctor_initializer_opt_and_function_body (cp_parser *); /* Classes [gram.class] */ static tree cp_parser_class_name (cp_parser *, bool, bool, enum tag_types, bool, bool, bool); static tree cp_parser_class_specifier (cp_parser *); static tree cp_parser_class_head (cp_parser *, bool *); static enum tag_types cp_parser_class_key (cp_parser *); static void cp_parser_member_specification_opt (cp_parser *); static void cp_parser_member_declaration (cp_parser *); static tree cp_parser_pure_specifier (cp_parser *); static tree cp_parser_constant_initializer (cp_parser *); /* Derived classes [gram.class.derived] */ static tree cp_parser_base_clause (cp_parser *); static tree cp_parser_base_specifier (cp_parser *); /* Special member functions [gram.special] */ static tree cp_parser_conversion_function_id (cp_parser *); static tree cp_parser_conversion_type_id (cp_parser *); static cp_declarator *cp_parser_conversion_declarator_opt (cp_parser *); static bool cp_parser_ctor_initializer_opt (cp_parser *); static void cp_parser_mem_initializer_list (cp_parser *); static tree cp_parser_mem_initializer (cp_parser *); static tree cp_parser_mem_initializer_id (cp_parser *); /* Overloading [gram.over] */ static tree cp_parser_operator_function_id (cp_parser *); static tree cp_parser_operator (cp_parser *); /* Templates [gram.temp] */ static void cp_parser_template_declaration (cp_parser *, bool); static tree cp_parser_template_parameter_list (cp_parser *); static tree cp_parser_template_parameter (cp_parser *, bool *, bool *); static tree cp_parser_type_parameter (cp_parser *, bool *); static tree cp_parser_template_id (cp_parser *, bool, bool, bool); static tree cp_parser_template_name (cp_parser *, bool, bool, bool, bool *); static tree cp_parser_template_argument_list (cp_parser *); static tree 
cp_parser_template_argument (cp_parser *); static void cp_parser_explicit_instantiation (cp_parser *); static void cp_parser_explicit_specialization (cp_parser *); /* Exception handling [gram.exception] */ static tree cp_parser_try_block (cp_parser *); static bool cp_parser_function_try_block (cp_parser *); static void cp_parser_handler_seq (cp_parser *); static void cp_parser_handler (cp_parser *); static tree cp_parser_exception_declaration (cp_parser *); static tree cp_parser_throw_expression (cp_parser *); static tree cp_parser_exception_specification_opt (cp_parser *); static tree cp_parser_type_id_list (cp_parser *); /* GNU Extensions */ static tree cp_parser_asm_specification_opt (cp_parser *); static tree cp_parser_asm_operand_list (cp_parser *); static tree cp_parser_asm_clobber_list (cp_parser *); static tree cp_parser_asm_label_list (cp_parser *); static tree cp_parser_attributes_opt (cp_parser *); static tree cp_parser_attribute_list (cp_parser *); static bool cp_parser_extension_opt (cp_parser *, int *); static void cp_parser_label_declaration (cp_parser *); /* Transactional Memory Extensions */ static tree cp_parser_transaction (cp_parser *, enum rid); static tree cp_parser_transaction_expression (cp_parser *, enum rid); static bool cp_parser_function_transaction (cp_parser *, enum rid); static tree cp_parser_transaction_cancel (cp_parser *); enum pragma_context { pragma_external, pragma_stmt, pragma_compound }; static bool cp_parser_pragma (cp_parser *, enum pragma_context); /* Objective-C++ Productions */ static tree cp_parser_objc_message_receiver (cp_parser *); static tree cp_parser_objc_message_args (cp_parser *); static tree cp_parser_objc_message_expression (cp_parser *); static tree cp_parser_objc_encode_expression (cp_parser *); static tree cp_parser_objc_defs_expression (cp_parser *); static tree cp_parser_objc_protocol_expression (cp_parser *); static tree cp_parser_objc_selector_expression (cp_parser *); static tree 
cp_parser_objc_expression (cp_parser *); static bool cp_parser_objc_selector_p (enum cpp_ttype); static tree cp_parser_objc_selector (cp_parser *); static tree cp_parser_objc_protocol_refs_opt (cp_parser *); static void cp_parser_objc_declaration (cp_parser *, tree); static tree cp_parser_objc_statement (cp_parser *); static bool cp_parser_objc_valid_prefix_attributes (cp_parser *, tree *); static void cp_parser_objc_at_property_declaration (cp_parser *) ; static void cp_parser_objc_at_synthesize_declaration (cp_parser *) ; static void cp_parser_objc_at_dynamic_declaration (cp_parser *) ; static tree cp_parser_objc_struct_declaration (cp_parser *) ; /* Utility Routines */ static tree cp_parser_lookup_name (cp_parser *, tree, enum tag_types, bool, bool, bool, tree *, location_t); static tree cp_parser_lookup_name_simple (cp_parser *, tree, location_t); static tree cp_parser_maybe_treat_template_as_class (tree, bool); static bool cp_parser_check_declarator_template_parameters (cp_parser *, cp_declarator *, location_t); static bool cp_parser_check_template_parameters (cp_parser *, unsigned, location_t, cp_declarator *); static tree cp_parser_simple_cast_expression (cp_parser *); static tree cp_parser_global_scope_opt (cp_parser *, bool); static bool cp_parser_constructor_declarator_p (cp_parser *, bool); static tree cp_parser_function_definition_from_specifiers_and_declarator (cp_parser *, cp_decl_specifier_seq *, tree, const cp_declarator *); static tree cp_parser_function_definition_after_declarator (cp_parser *, bool); static void cp_parser_template_declaration_after_export (cp_parser *, bool); static void cp_parser_perform_template_parameter_access_checks (VEC (deferred_access_check,gc)*); static tree cp_parser_single_declaration (cp_parser *, VEC (deferred_access_check,gc)*, bool, bool, bool *); static tree cp_parser_functional_cast (cp_parser *, tree); static tree cp_parser_save_member_function_body (cp_parser *, cp_decl_specifier_seq *, cp_declarator *, tree); 
static tree cp_parser_save_nsdmi (cp_parser *); static tree cp_parser_enclosed_template_argument_list (cp_parser *); static void cp_parser_save_default_args (cp_parser *, tree); static void cp_parser_late_parsing_for_member (cp_parser *, tree); static tree cp_parser_late_parse_one_default_arg (cp_parser *, tree, tree, tree); static void cp_parser_late_parsing_nsdmi (cp_parser *, tree); static void cp_parser_late_parsing_default_args (cp_parser *, tree); static tree cp_parser_sizeof_operand (cp_parser *, enum rid); static tree cp_parser_trait_expr (cp_parser *, enum rid); static bool cp_parser_declares_only_class_p (cp_parser *); static void cp_parser_set_storage_class (cp_parser *, cp_decl_specifier_seq *, enum rid, location_t); static void cp_parser_set_decl_spec_type (cp_decl_specifier_seq *, tree, location_t, bool); static bool cp_parser_friend_p (const cp_decl_specifier_seq *); static void cp_parser_required_error (cp_parser *, required_token, bool); static cp_token *cp_parser_require (cp_parser *, enum cpp_ttype, required_token); static cp_token *cp_parser_require_keyword (cp_parser *, enum rid, required_token); static bool cp_parser_token_starts_function_definition_p (cp_token *); static bool cp_parser_next_token_starts_class_definition_p (cp_parser *); static bool cp_parser_next_token_ends_template_argument_p (cp_parser *); static bool cp_parser_nth_token_starts_template_argument_list_p (cp_parser *, size_t); static enum tag_types cp_parser_token_is_class_key (cp_token *); static void cp_parser_check_class_key (enum tag_types, tree type); static void cp_parser_check_access_in_redeclaration (tree type, location_t location); static bool cp_parser_optional_template_keyword (cp_parser *); static void cp_parser_pre_parsed_nested_name_specifier (cp_parser *); static bool cp_parser_cache_group (cp_parser *, enum cpp_ttype, unsigned); static tree cp_parser_cache_defarg (cp_parser *parser, bool nsdmi); static void cp_parser_parse_tentatively (cp_parser *); static 
void cp_parser_commit_to_tentative_parse (cp_parser *); static void cp_parser_abort_tentative_parse (cp_parser *); static bool cp_parser_parse_definitely (cp_parser *); static inline bool cp_parser_parsing_tentatively (cp_parser *); static bool cp_parser_uncommitted_to_tentative_parse_p (cp_parser *); static void cp_parser_error (cp_parser *, const char *); static void cp_parser_name_lookup_error (cp_parser *, tree, tree, name_lookup_error, location_t); static bool cp_parser_simulate_error (cp_parser *); static bool cp_parser_check_type_definition (cp_parser *); static void cp_parser_check_for_definition_in_return_type (cp_declarator *, tree, location_t type_location); static void cp_parser_check_for_invalid_template_id (cp_parser *, tree, location_t location); static bool cp_parser_non_integral_constant_expression (cp_parser *, non_integral_constant); static void cp_parser_diagnose_invalid_type_name (cp_parser *, tree, tree, location_t); static bool cp_parser_parse_and_diagnose_invalid_type_name (cp_parser *); static int cp_parser_skip_to_closing_parenthesis (cp_parser *, bool, bool, bool); static void cp_parser_skip_to_end_of_statement (cp_parser *); static void cp_parser_consume_semicolon_at_end_of_statement (cp_parser *); static void cp_parser_skip_to_end_of_block_or_statement (cp_parser *); static bool cp_parser_skip_to_closing_brace (cp_parser *); static void cp_parser_skip_to_end_of_template_parameter_list (cp_parser *); static void cp_parser_skip_to_pragma_eol (cp_parser*, cp_token *); static bool cp_parser_error_occurred (cp_parser *); static bool cp_parser_allow_gnu_extensions_p (cp_parser *); static bool cp_parser_is_pure_string_literal (cp_token *); static bool cp_parser_is_string_literal (cp_token *); static bool cp_parser_is_keyword (cp_token *, enum rid); static tree cp_parser_make_typename_type (cp_parser *, tree, tree, location_t location); static cp_declarator * cp_parser_make_indirect_declarator (enum tree_code, tree, cp_cv_quals, cp_declarator 
*); /* Returns nonzero if we are parsing tentatively. */ static inline bool cp_parser_parsing_tentatively (cp_parser* parser) { return parser->context->next != NULL; } /* Returns nonzero if TOKEN is a string literal. */ static bool cp_parser_is_pure_string_literal (cp_token* token) { return (token->type == CPP_STRING || token->type == CPP_STRING16 || token->type == CPP_STRING32 || token->type == CPP_WSTRING || token->type == CPP_UTF8STRING); } /* Returns nonzero if TOKEN is a string literal of a user-defined string literal. */ static bool cp_parser_is_string_literal (cp_token* token) { return (cp_parser_is_pure_string_literal (token) || token->type == CPP_STRING_USERDEF || token->type == CPP_STRING16_USERDEF || token->type == CPP_STRING32_USERDEF || token->type == CPP_WSTRING_USERDEF || token->type == CPP_UTF8STRING_USERDEF); } /* Returns nonzero if TOKEN is the indicated KEYWORD. */ static bool cp_parser_is_keyword (cp_token* token, enum rid keyword) { return token->keyword == keyword; } /* If not parsing tentatively, issue a diagnostic of the form FILE:LINE: MESSAGE before TOKEN where TOKEN is the next token in the input stream. MESSAGE (specified by the caller) is usually of the form "expected OTHER-TOKEN". */ static void cp_parser_error (cp_parser* parser, const char* gmsgid) { if (!cp_parser_simulate_error (parser)) { cp_token *token = cp_lexer_peek_token (parser->lexer); /* This diagnostic makes more sense if it is tagged to the line of the token we just peeked at. */ cp_lexer_set_source_position_from_token (token); if (token->type == CPP_PRAGMA) { error_at (token->location, "%<#pragma%> is not allowed here"); cp_parser_skip_to_pragma_eol (parser, token); return; } c_parse_error (gmsgid, /* Because c_parser_error does not understand CPP_KEYWORD, keywords are treated like identifiers. */ (token->type == CPP_KEYWORD ? CPP_NAME : token->type), token->u.value, token->flags); } } /* Issue an error about name-lookup failing. 
NAME is the IDENTIFIER_NODE DECL is the result of the lookup (as returned from cp_parser_lookup_name). DESIRED is the thing that we hoped to find. */ static void cp_parser_name_lookup_error (cp_parser* parser, tree name, tree decl, name_lookup_error desired, location_t location) { /* If name lookup completely failed, tell the user that NAME was not declared. */ if (decl == error_mark_node) { if (parser->scope && parser->scope != global_namespace) error_at (location, "%<%E::%E%> has not been declared", parser->scope, name); else if (parser->scope == global_namespace) error_at (location, "%<::%E%> has not been declared", name); else if (parser->object_scope && !CLASS_TYPE_P (parser->object_scope)) error_at (location, "request for member %qE in non-class type %qT", name, parser->object_scope); else if (parser->object_scope) error_at (location, "%<%T::%E%> has not been declared", parser->object_scope, name); else error_at (location, "%qE has not been declared", name); } else if (parser->scope && parser->scope != global_namespace) { switch (desired) { case NLE_TYPE: error_at (location, "%<%E::%E%> is not a type", parser->scope, name); break; case NLE_CXX98: error_at (location, "%<%E::%E%> is not a class or namespace", parser->scope, name); break; case NLE_NOT_CXX98: error_at (location, "%<%E::%E%> is not a class, namespace, or enumeration", parser->scope, name); break; default: gcc_unreachable (); } } else if (parser->scope == global_namespace) { switch (desired) { case NLE_TYPE: error_at (location, "%<::%E%> is not a type", name); break; case NLE_CXX98: error_at (location, "%<::%E%> is not a class or namespace", name); break; case NLE_NOT_CXX98: error_at (location, "%<::%E%> is not a class, namespace, or enumeration", name); break; default: gcc_unreachable (); } } else { switch (desired) { case NLE_TYPE: error_at (location, "%qE is not a type", name); break; case NLE_CXX98: error_at (location, "%qE is not a class or namespace", name); break; case NLE_NOT_CXX98: 
error_at (location, "%qE is not a class, namespace, or enumeration", name); break; default: gcc_unreachable (); } } } /* If we are parsing tentatively, remember that an error has occurred during this tentative parse. Returns true if the error was simulated; false if a message should be issued by the caller. */ static bool cp_parser_simulate_error (cp_parser* parser) { if (cp_parser_uncommitted_to_tentative_parse_p (parser)) { parser->context->status = CP_PARSER_STATUS_KIND_ERROR; return true; } return false; } /* Check for repeated decl-specifiers. */ static void cp_parser_check_decl_spec (cp_decl_specifier_seq *decl_specs, location_t location) { int ds; for (ds = ds_first; ds != ds_last; ++ds) { unsigned count = decl_specs->specs[ds]; if (count < 2) continue; /* The "long" specifier is a special case because of "long long". */ if (ds == ds_long) { if (count > 2) error_at (location, "%<long long long%> is too long for GCC"); else pedwarn_cxx98 (location, OPT_Wlong_long, "ISO C++ 1998 does not support %<long long%>"); } else if (count > 1) { static const char *const decl_spec_names[] = { "signed", "unsigned", "short", "long", "const", "volatile", "restrict", "inline", "virtual", "explicit", "friend", "typedef", "using", "constexpr", "__complex", "__thread" }; error_at (location, "duplicate %qs", decl_spec_names[ds]); } } } /* This function is called when a type is defined. If type definitions are forbidden at this point, an error message is issued. */ static bool cp_parser_check_type_definition (cp_parser* parser) { /* If types are forbidden here, issue a message. */ if (parser->type_definition_forbidden_message) { /* Don't use `%s' to print the string, because quotations (`%<', `%>') in the message need to be interpreted. */ error (parser->type_definition_forbidden_message); return false; } return true; } /* This function is called when the DECLARATOR is processed. The TYPE was a type defined in the decl-specifiers. 
If it is invalid to define a type in the decl-specifiers for DECLARATOR, an error is issued. TYPE_LOCATION is the location of TYPE and is used for error reporting. */ static void cp_parser_check_for_definition_in_return_type (cp_declarator *declarator, tree type, location_t type_location) { /* [dcl.fct] forbids type definitions in return types. Unfortunately, it's not easy to know whether or not we are processing a return type until after the fact. */ while (declarator && (declarator->kind == cdk_pointer || declarator->kind == cdk_reference || declarator->kind == cdk_ptrmem)) declarator = declarator->declarator; if (declarator && declarator->kind == cdk_function) { error_at (type_location, "new types may not be defined in a return type"); inform (type_location, "(perhaps a semicolon is missing after the definition of %qT)", type); } } /* A type-specifier (TYPE) has been parsed which cannot be followed by "<" in any valid C++ program. If the next token is indeed "<", issue a message warning the user about what appears to be an invalid attempt to form a template-id. LOCATION is the location of the type-specifier (TYPE) */ static void cp_parser_check_for_invalid_template_id (cp_parser* parser, tree type, location_t location) { cp_token_position start = 0; if (cp_lexer_next_token_is (parser->lexer, CPP_LESS)) { if (TYPE_P (type)) error_at (location, "%qT is not a template", type); else if (TREE_CODE (type) == IDENTIFIER_NODE) error_at (location, "%qE is not a template", type); else error_at (location, "invalid template-id"); /* Remember the location of the invalid "<". */ if (cp_parser_uncommitted_to_tentative_parse_p (parser)) start = cp_lexer_token_position (parser->lexer, true); /* Consume the "<". */ cp_lexer_consume_token (parser->lexer); /* Parse the template arguments. */ cp_parser_enclosed_template_argument_list (parser); /* Permanently remove the invalid template arguments so that this error message is not issued again. 
*/ if (start) cp_lexer_purge_tokens_after (parser->lexer, start); } } /* If parsing an integral constant-expression, issue an error message about the fact that THING appeared and return true. Otherwise, return false. In either case, set PARSER->NON_INTEGRAL_CONSTANT_EXPRESSION_P. */ static bool cp_parser_non_integral_constant_expression (cp_parser *parser, non_integral_constant thing) { parser->non_integral_constant_expression_p = true; if (parser->integral_constant_expression_p) { if (!parser->allow_non_integral_constant_expression_p) { const char *msg = NULL; switch (thing) { case NIC_FLOAT: error ("floating-point literal " "cannot appear in a constant-expression"); return true; case NIC_CAST: error ("a cast to a type other than an integral or " "enumeration type cannot appear in a " "constant-expression"); return true; case NIC_TYPEID: error ("%<typeid%> operator " "cannot appear in a constant-expression"); return true; case NIC_NCC: error ("non-constant compound literals " "cannot appear in a constant-expression"); return true; case NIC_FUNC_CALL: error ("a function call " "cannot appear in a constant-expression"); return true; case NIC_INC: error ("an increment " "cannot appear in a constant-expression"); return true; case NIC_DEC: error ("an decrement " "cannot appear in a constant-expression"); return true; case NIC_ARRAY_REF: error ("an array reference " "cannot appear in a constant-expression"); return true; case NIC_ADDR_LABEL: error ("the address of a label " "cannot appear in a constant-expression"); return true; case NIC_OVERLOADED: error ("calls to overloaded operators " "cannot appear in a constant-expression"); return true; case NIC_ASSIGNMENT: error ("an assignment cannot appear in a constant-expression"); return true; case NIC_COMMA: error ("a comma operator " "cannot appear in a constant-expression"); return true; case NIC_CONSTRUCTOR: error ("a call to a constructor " "cannot appear in a constant-expression"); return true; case NIC_TRANSACTION: 
error ("a transaction expression " "cannot appear in a constant-expression"); return true; case NIC_THIS: msg = "this"; break; case NIC_FUNC_NAME: msg = "__FUNCTION__"; break; case NIC_PRETTY_FUNC: msg = "__PRETTY_FUNCTION__"; break; case NIC_C99_FUNC: msg = "__func__"; break; case NIC_VA_ARG: msg = "va_arg"; break; case NIC_ARROW: msg = "->"; break; case NIC_POINT: msg = "."; break; case NIC_STAR: msg = "*"; break; case NIC_ADDR: msg = "&"; break; case NIC_PREINCREMENT: msg = "++"; break; case NIC_PREDECREMENT: msg = "--"; break; case NIC_NEW: msg = "new"; break; case NIC_DEL: msg = "delete"; break; default: gcc_unreachable (); } if (msg) error ("%qs cannot appear in a constant-expression", msg); return true; } } return false; } /* Emit a diagnostic for an invalid type name. SCOPE is the qualifying scope (or NULL, if none) for ID. This function commits to the current active tentative parse, if any. (Otherwise, the problematic construct might be encountered again later, resulting in duplicate error messages.) LOCATION is the location of ID. */ static void cp_parser_diagnose_invalid_type_name (cp_parser *parser, tree scope, tree id, location_t location) { tree decl, old_scope; cp_parser_commit_to_tentative_parse (parser); /* Try to lookup the identifier. */ old_scope = parser->scope; parser->scope = scope; decl = cp_parser_lookup_name_simple (parser, id, location); parser->scope = old_scope; /* If the lookup found a template-name, it means that the user forgot to specify an argument list. Emit a useful error message. */ if (TREE_CODE (decl) == TEMPLATE_DECL) error_at (location, "invalid use of template-name %qE without an argument list", decl); else if (TREE_CODE (id) == BIT_NOT_EXPR) error_at (location, "invalid use of destructor %qD as a type", id); else if (TREE_CODE (decl) == TYPE_DECL) /* Something like 'unsigned A a;' */ error_at (location, "invalid combination of multiple type-specifiers"); else if (!parser->scope) { /* Issue an error message. 
*/ error_at (location, "%qE does not name a type", id); /* If we're in a template class, it's possible that the user was referring to a type from a base class. For example: template <typename T> struct A { typedef T X; }; template <typename T> struct B : public A<T> { X x; }; The user should have said "typename A<T>::X". */ if (cxx_dialect < cxx0x && id == ridpointers[(int)RID_CONSTEXPR]) inform (location, "C++11 %<constexpr%> only available with " "-std=c++11 or -std=gnu++11"); else if (processing_template_decl && current_class_type && TYPE_BINFO (current_class_type)) { tree b; for (b = TREE_CHAIN (TYPE_BINFO (current_class_type)); b; b = TREE_CHAIN (b)) { tree base_type = BINFO_TYPE (b); if (CLASS_TYPE_P (base_type) && dependent_type_p (base_type)) { tree field; /* Go from a particular instantiation of the template (which will have an empty TYPE_FIELDs), to the main version. */ base_type = CLASSTYPE_PRIMARY_TEMPLATE_TYPE (base_type); for (field = TYPE_FIELDS (base_type); field; field = DECL_CHAIN (field)) if (TREE_CODE (field) == TYPE_DECL && DECL_NAME (field) == id) { inform (location, "(perhaps %<typename %T::%E%> was intended)", BINFO_TYPE (b), id); break; } if (field) break; } } } } /* Here we diagnose qualified-ids where the scope is actually correct, but the identifier does not resolve to a valid type name. 
*/ else if (parser->scope != error_mark_node) { if (TREE_CODE (parser->scope) == NAMESPACE_DECL) error_at (location, "%qE in namespace %qE does not name a type", id, parser->scope); else if (CLASS_TYPE_P (parser->scope) && constructor_name_p (id, parser->scope)) { /* A<T>::A<T>() */ error_at (location, "%<%T::%E%> names the constructor, not" " the type", parser->scope, id); if (cp_lexer_next_token_is (parser->lexer, CPP_LESS)) error_at (location, "and %qT has no template constructors", parser->scope); } else if (TYPE_P (parser->scope) && dependent_scope_p (parser->scope)) error_at (location, "need %<typename%> before %<%T::%E%> because " "%qT is a dependent scope", parser->scope, id, parser->scope); else if (TYPE_P (parser->scope)) error_at (location, "%qE in %q#T does not name a type", id, parser->scope); else gcc_unreachable (); } } /* Check for a common situation where a type-name should be present, but is not, and issue a sensible error message. Returns true if an invalid type-name was detected. The situation handled by this function are variable declarations of the form `ID a', where `ID' is an id-expression and `a' is a plain identifier. Usually, `ID' should name a type, but if we got here it means that it does not. We try to emit the best possible error message depending on how exactly the id-expression looks like. */ static bool cp_parser_parse_and_diagnose_invalid_type_name (cp_parser *parser) { tree id; cp_token *token = cp_lexer_peek_token (parser->lexer); /* Avoid duplicate error about ambiguous lookup. */ if (token->type == CPP_NESTED_NAME_SPECIFIER) { cp_token *next = cp_lexer_peek_nth_token (parser->lexer, 2); if (next->type == CPP_NAME && next->ambiguous_p) goto out; } cp_parser_parse_tentatively (parser); id = cp_parser_id_expression (parser, /*template_keyword_p=*/false, /*check_dependency_p=*/true, /*template_p=*/NULL, /*declarator_p=*/true, /*optional_p=*/false); /* If the next token is a (, this is a function with no explicit return type, i.e. 
constructor, destructor or conversion op. */ if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN) || TREE_CODE (id) == TYPE_DECL) { cp_parser_abort_tentative_parse (parser); return false; } if (!cp_parser_parse_definitely (parser)) return false; /* Emit a diagnostic for the invalid type. */ cp_parser_diagnose_invalid_type_name (parser, parser->scope, id, token->location); out: /* If we aren't in the middle of a declarator (i.e. in a parameter-declaration-clause), skip to the end of the declaration; there's no point in trying to process it. */ if (!parser->in_declarator_p) cp_parser_skip_to_end_of_block_or_statement (parser); return true; } /* Consume tokens up to, and including, the next non-nested closing `)'. Returns 1 iff we found a closing `)'. RECOVERING is true, if we are doing error recovery. Returns -1 if OR_COMMA is true and we found an unnested comma. */ static int cp_parser_skip_to_closing_parenthesis (cp_parser *parser, bool recovering, bool or_comma, bool consume_paren) { unsigned paren_depth = 0; unsigned brace_depth = 0; unsigned square_depth = 0; if (recovering && !or_comma && cp_parser_uncommitted_to_tentative_parse_p (parser)) return 0; while (true) { cp_token * token = cp_lexer_peek_token (parser->lexer); switch (token->type) { case CPP_EOF: case CPP_PRAGMA_EOL: /* If we've run out of tokens, then there is no closing `)'. */ return 0; /* This is good for lambda expression capture-lists. */ case CPP_OPEN_SQUARE: ++square_depth; break; case CPP_CLOSE_SQUARE: if (!square_depth--) return 0; break; case CPP_SEMICOLON: /* This matches the processing in skip_to_end_of_statement. 
*/ if (!brace_depth) return 0; break; case CPP_OPEN_BRACE: ++brace_depth; break; case CPP_CLOSE_BRACE: if (!brace_depth--) return 0; break; case CPP_COMMA: if (recovering && or_comma && !brace_depth && !paren_depth && !square_depth) return -1; break; case CPP_OPEN_PAREN: if (!brace_depth) ++paren_depth; break; case CPP_CLOSE_PAREN: if (!brace_depth && !paren_depth--) { if (consume_paren) cp_lexer_consume_token (parser->lexer); return 1; } break; default: break; } /* Consume the token. */ cp_lexer_consume_token (parser->lexer); } } /* Consume tokens until we reach the end of the current statement. Normally, that will be just before consuming a `;'. However, if a non-nested `}' comes first, then we stop before consuming that. */ static void cp_parser_skip_to_end_of_statement (cp_parser* parser) { unsigned nesting_depth = 0; while (true) { cp_token *token = cp_lexer_peek_token (parser->lexer); switch (token->type) { case CPP_EOF: case CPP_PRAGMA_EOL: /* If we've run out of tokens, stop. */ return; case CPP_SEMICOLON: /* If the next token is a `;', we have reached the end of the statement. */ if (!nesting_depth) return; break; case CPP_CLOSE_BRACE: /* If this is a non-nested '}', stop before consuming it. That way, when confronted with something like: { 3 + } we stop before consuming the closing '}', even though we have not yet reached a `;'. */ if (nesting_depth == 0) return; /* If it is the closing '}' for a block that we have scanned, stop -- but only after consuming the token. That way given: void f g () { ... } typedef int I; we will stop after the body of the erroneously declared function, but before consuming the following `typedef' declaration. */ if (--nesting_depth == 0) { cp_lexer_consume_token (parser->lexer); return; } case CPP_OPEN_BRACE: ++nesting_depth; break; default: break; } /* Consume the token. */ cp_lexer_consume_token (parser->lexer); } } /* This function is called at the end of a statement or declaration. 
If the next token is a semicolon, it is consumed; otherwise, error recovery is attempted. */ static void cp_parser_consume_semicolon_at_end_of_statement (cp_parser *parser) { /* Look for the trailing `;'. */ if (!cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON)) { /* If there is additional (erroneous) input, skip to the end of the statement. */ cp_parser_skip_to_end_of_statement (parser); /* If the next token is now a `;', consume it. */ if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)) cp_lexer_consume_token (parser->lexer); } } /* Skip tokens until we have consumed an entire block, or until we have consumed a non-nested `;'. */ static void cp_parser_skip_to_end_of_block_or_statement (cp_parser* parser) { int nesting_depth = 0; while (nesting_depth >= 0) { cp_token *token = cp_lexer_peek_token (parser->lexer); switch (token->type) { case CPP_EOF: case CPP_PRAGMA_EOL: /* If we've run out of tokens, stop. */ return; case CPP_SEMICOLON: /* Stop if this is an unnested ';'. */ if (!nesting_depth) nesting_depth = -1; break; case CPP_CLOSE_BRACE: /* Stop if this is an unnested '}', or closes the outermost nesting level. */ nesting_depth--; if (nesting_depth < 0) return; if (!nesting_depth) nesting_depth = -1; break; case CPP_OPEN_BRACE: /* Nest. */ nesting_depth++; break; default: break; } /* Consume the token. */ cp_lexer_consume_token (parser->lexer); } } /* Skip tokens until a non-nested closing curly brace is the next token, or there are no more tokens. Return true in the first case, false otherwise. */ static bool cp_parser_skip_to_closing_brace (cp_parser *parser) { unsigned nesting_depth = 0; while (true) { cp_token *token = cp_lexer_peek_token (parser->lexer); switch (token->type) { case CPP_EOF: case CPP_PRAGMA_EOL: /* If we've run out of tokens, stop. */ return false; case CPP_CLOSE_BRACE: /* If the next token is a non-nested `}', then we have reached the end of the current block. 
*/ if (nesting_depth-- == 0) return true; break; case CPP_OPEN_BRACE: /* If it the next token is a `{', then we are entering a new block. Consume the entire block. */ ++nesting_depth; break; default: break; } /* Consume the token. */ cp_lexer_consume_token (parser->lexer); } } /* Consume tokens until we reach the end of the pragma. The PRAGMA_TOK parameter is the PRAGMA token, allowing us to purge the entire pragma sequence. */ static void cp_parser_skip_to_pragma_eol (cp_parser* parser, cp_token *pragma_tok) { cp_token *token; parser->lexer->in_pragma = false; do token = cp_lexer_consume_token (parser->lexer); while (token->type != CPP_PRAGMA_EOL && token->type != CPP_EOF); /* Ensure that the pragma is not parsed again. */ cp_lexer_purge_tokens_after (parser->lexer, pragma_tok); } /* Require pragma end of line, resyncing with it as necessary. The arguments are as for cp_parser_skip_to_pragma_eol. */ static void cp_parser_require_pragma_eol (cp_parser *parser, cp_token *pragma_tok) { parser->lexer->in_pragma = false; if (!cp_parser_require (parser, CPP_PRAGMA_EOL, RT_PRAGMA_EOL)) cp_parser_skip_to_pragma_eol (parser, pragma_tok); } /* This is a simple wrapper around make_typename_type. When the id is an unresolved identifier node, we can provide a superior diagnostic using cp_parser_diagnose_invalid_type_name. */ static tree cp_parser_make_typename_type (cp_parser *parser, tree scope, tree id, location_t id_location) { tree result; if (TREE_CODE (id) == IDENTIFIER_NODE) { result = make_typename_type (scope, id, typename_type, /*complain=*/tf_none); if (result == error_mark_node) cp_parser_diagnose_invalid_type_name (parser, scope, id, id_location); return result; } return make_typename_type (scope, id, typename_type, tf_error); } /* This is a wrapper around the make_{pointer,ptrmem,reference}_declarator functions that decides which one to call based on the CODE and CLASS_TYPE arguments. 
The CODE argument should be one of the values returned by cp_parser_ptr_operator. */ static cp_declarator * cp_parser_make_indirect_declarator (enum tree_code code, tree class_type, cp_cv_quals cv_qualifiers, cp_declarator *target) { if (code == ERROR_MARK) return cp_error_declarator; if (code == INDIRECT_REF) if (class_type == NULL_TREE) return make_pointer_declarator (cv_qualifiers, target); else return make_ptrmem_declarator (cv_qualifiers, class_type, target); else if (code == ADDR_EXPR && class_type == NULL_TREE) return make_reference_declarator (cv_qualifiers, target, false); else if (code == NON_LVALUE_EXPR && class_type == NULL_TREE) return make_reference_declarator (cv_qualifiers, target, true); gcc_unreachable (); } /* Create a new C++ parser. */ static cp_parser * cp_parser_new (void) { cp_parser *parser; cp_lexer *lexer; unsigned i; /* cp_lexer_new_main is called before doing GC allocation because cp_lexer_new_main might load a PCH file. */ lexer = cp_lexer_new_main (); /* Initialize the binops_by_token so that we can get the tree directly from the token. */ for (i = 0; i < sizeof (binops) / sizeof (binops[0]); i++) binops_by_token[binops[i].token_type] = binops[i]; parser = ggc_alloc_cleared_cp_parser (); parser->lexer = lexer; parser->context = cp_parser_context_new (NULL); /* For now, we always accept GNU extensions. */ parser->allow_gnu_extensions_p = 1; /* The `>' token is a greater-than operator, not the end of a template-id. */ parser->greater_than_is_operator_p = true; parser->default_arg_ok_p = true; /* We are not parsing a constant-expression. */ parser->integral_constant_expression_p = false; parser->allow_non_integral_constant_expression_p = false; parser->non_integral_constant_expression_p = false; /* Local variable names are not forbidden. */ parser->local_variables_forbidden_p = false; /* We are not processing an `extern "C"' declaration. */ parser->in_unbraced_linkage_specification_p = false; /* We are not processing a declarator. 
*/ parser->in_declarator_p = false; /* We are not processing a template-argument-list. */ parser->in_template_argument_list_p = false; /* We are not in an iteration statement. */ parser->in_statement = 0; /* We are not in a switch statement. */ parser->in_switch_statement_p = false; /* We are not parsing a type-id inside an expression. */ parser->in_type_id_in_expr_p = false; /* Declarations aren't implicitly extern "C". */ parser->implicit_extern_c = false; /* String literals should be translated to the execution character set. */ parser->translate_strings_p = true; /* We are not parsing a function body. */ parser->in_function_body = false; /* We can correct until told otherwise. */ parser->colon_corrects_to_scope_p = true; /* The unparsed function queue is empty. */ push_unparsed_function_queues (parser); /* There are no classes being defined. */ parser->num_classes_being_defined = 0; /* No template parameters apply. */ parser->num_template_parameter_lists = 0; return parser; } /* Create a cp_lexer structure which will emit the tokens in CACHE and push it onto the parser's lexer stack. This is used for delayed parsing of in-class method bodies and default arguments, and should not be confused with tentative parsing. */ static void cp_parser_push_lexer_for_tokens (cp_parser *parser, cp_token_cache *cache) { cp_lexer *lexer = cp_lexer_new_from_tokens (cache); lexer->next = parser->lexer; parser->lexer = lexer; /* Move the current source position to that of the first token in the new lexer. */ cp_lexer_set_source_position_from_token (lexer->next_token); } /* Pop the top lexer off the parser stack. This is never used for the "main" lexer, only for those pushed by cp_parser_push_lexer_for_tokens. */ static void cp_parser_pop_lexer (cp_parser *parser) { cp_lexer *lexer = parser->lexer; parser->lexer = lexer->next; cp_lexer_destroy (lexer); /* Put the current source position back where it was before this lexer was pushed. 
*/ cp_lexer_set_source_position_from_token (parser->lexer->next_token); } /* Lexical conventions [gram.lex] */ /* Parse an identifier. Returns an IDENTIFIER_NODE representing the identifier. */ static tree cp_parser_identifier (cp_parser* parser) { cp_token *token; /* Look for the identifier. */ token = cp_parser_require (parser, CPP_NAME, RT_NAME); /* Return the value. */ return token ? token->u.value : error_mark_node; } /* Parse a sequence of adjacent string constants. Returns a TREE_STRING representing the combined, nul-terminated string constant. If TRANSLATE is true, translate the string to the execution character set. If WIDE_OK is true, a wide string is invalid here. C++98 [lex.string] says that if a narrow string literal token is adjacent to a wide string literal token, the behavior is undefined. However, C99 6.4.5p4 says that this results in a wide string literal. We follow C99 here, for consistency with the C front end. This code is largely lifted from lex_string() in c-lex.c. FUTURE: ObjC++ will need to handle @-strings here. */ static tree cp_parser_string_literal (cp_parser *parser, bool translate, bool wide_ok) { tree value; size_t count; struct obstack str_ob; cpp_string str, istr, *strs; cp_token *tok; enum cpp_ttype type, curr_type; int have_suffix_p = 0; tree string_tree; tree suffix_id = NULL_TREE; bool curr_tok_is_userdef_p = false; tok = cp_lexer_peek_token (parser->lexer); if (!cp_parser_is_string_literal (tok)) { cp_parser_error (parser, "expected string-literal"); return error_mark_node; } if (cpp_userdef_string_p (tok->type)) { string_tree = USERDEF_LITERAL_VALUE (tok->u.value); curr_type = cpp_userdef_string_remove_type (tok->type); curr_tok_is_userdef_p = true; } else { string_tree = tok->u.value; curr_type = tok->type; } type = curr_type; /* Try to avoid the overhead of creating and destroying an obstack for the common case of just one string. 
     */
  if (!cp_parser_is_string_literal
      (cp_lexer_peek_nth_token (parser->lexer, 2)))
    {
      /* Single-string case: no obstack, point STRS at the one local
	 cpp_string.  */
      cp_lexer_consume_token (parser->lexer);
      str.text = (const unsigned char *)TREE_STRING_POINTER (string_tree);
      str.len = TREE_STRING_LENGTH (string_tree);
      count = 1;
      if (curr_tok_is_userdef_p)
	{
	  suffix_id = USERDEF_LITERAL_SUFFIX_ID (tok->u.value);
	  have_suffix_p = 1;
	  curr_type = cpp_userdef_string_remove_type (tok->type);
	}
      else
	curr_type = tok->type;
      strs = &str;
    }
  else
    {
      /* Concatenation case: collect every adjacent string into the
	 obstack.  */
      gcc_obstack_init (&str_ob);
      count = 0;

      do
	{
	  cp_lexer_consume_token (parser->lexer);
	  count++;
	  str.text = (const unsigned char *)TREE_STRING_POINTER (string_tree);
	  str.len = TREE_STRING_LENGTH (string_tree);

	  if (curr_tok_is_userdef_p)
	    {
	      tree curr_suffix_id = USERDEF_LITERAL_SUFFIX_ID (tok->u.value);
	      if (have_suffix_p == 0)
		{
		  suffix_id = curr_suffix_id;
		  have_suffix_p = 1;
		}
	      else if (have_suffix_p == 1
		       && curr_suffix_id != suffix_id)
		{
		  error ("inconsistent user-defined literal suffixes"
			 " %qD and %qD in string literal",
			 suffix_id, curr_suffix_id);
		  /* Set to -1 so the error is reported only once per
		     concatenation.  */
		  have_suffix_p = -1;
		}
	      curr_type = cpp_userdef_string_remove_type (tok->type);
	    }
	  else
	    curr_type = tok->type;

	  /* A narrow string may be widened by an adjacent non-narrow
	     one; two different non-narrow kinds cannot be mixed.  */
	  if (type != curr_type)
	    {
	      if (type == CPP_STRING)
		type = curr_type;
	      else if (curr_type != CPP_STRING)
		error_at (tok->location,
			  "unsupported non-standard concatenation "
			  "of string literals");
	    }

	  obstack_grow (&str_ob, &str, sizeof (cpp_string));

	  tok = cp_lexer_peek_token (parser->lexer);
	  if (cpp_userdef_string_p (tok->type))
	    {
	      string_tree = USERDEF_LITERAL_VALUE (tok->u.value);
	      curr_type = cpp_userdef_string_remove_type (tok->type);
	      curr_tok_is_userdef_p = true;
	    }
	  else
	    {
	      string_tree = tok->u.value;
	      curr_type = tok->type;
	      curr_tok_is_userdef_p = false;
	    }
	}
      while (cp_parser_is_string_literal (tok));

      strs = (cpp_string *) obstack_finish (&str_ob);
    }

  if (type != CPP_STRING && !wide_ok)
    {
      cp_parser_error (parser, "a wide string is invalid in this context");
      type = CPP_STRING;
    }

  if ((translate
       ? cpp_interpret_string : cpp_interpret_string_notranslate)
      (parse_in, strs, count, &istr, type))
    {
      value = build_string (istr.len, (const char *)istr.text);
      free (CONST_CAST (unsigned char *, istr.text));

      switch (type)
	{
	default:
	case CPP_STRING:
	case CPP_UTF8STRING:
	  TREE_TYPE (value) = char_array_type_node;
	  break;
	case CPP_STRING16:
	  TREE_TYPE (value) = char16_array_type_node;
	  break;
	case CPP_STRING32:
	  TREE_TYPE (value) = char32_array_type_node;
	  break;
	case CPP_WSTRING:
	  TREE_TYPE (value) = wchar_array_type_node;
	  break;
	}

      value = fix_string_type (value);

      /* A user-defined suffix turns the whole concatenation into a
	 call to the matching literal operator.  */
      if (have_suffix_p)
	{
	  tree literal = build_userdef_literal (suffix_id, value, NULL_TREE);
	  tok->u.value = literal;
	  return cp_parser_userdef_string_literal (tok);
	}
    }
  else
    /* cpp_interpret_string has issued an error.  */
    value = error_mark_node;

  /* The obstack was only initialized in the multi-string branch.  */
  if (count > 1)
    obstack_free (&str_ob, 0);

  return value;
}

/* Look up a literal operator with the name and the exact arguments.  */

static tree
lookup_literal_operator (tree name, VEC(tree,gc) *args)
{
  tree decl, fns;
  decl = lookup_name (name);
  if (!decl || !is_overloaded_fn (decl))
    return error_mark_node;

  /* Walk the overload set looking for a candidate whose parameter
     types match ARGS exactly (with a pointer/array allowance).  */
  for (fns = decl; fns; fns = OVL_NEXT (fns))
    {
      unsigned int ix;
      bool found = true;
      tree fn = OVL_CURRENT (fns);
      tree argtypes = NULL_TREE;
      argtypes = TYPE_ARG_TYPES (TREE_TYPE (fn));
      if (argtypes != NULL_TREE)
	{
	  for (ix = 0; ix < VEC_length (tree, args) && argtypes != NULL_TREE;
	       ++ix, argtypes = TREE_CHAIN (argtypes))
	    {
	      tree targ = TREE_VALUE (argtypes);
	      tree tparm = TREE_TYPE (VEC_index (tree, args, ix));
	      bool ptr = TREE_CODE (targ) == POINTER_TYPE;
	      bool arr = TREE_CODE (tparm) == ARRAY_TYPE;
	      /* Either the types match exactly, or the parameter is a
		 pointer whose target type matches the array's element
		 type.  */
	      if ((ptr || arr || !same_type_p (targ, tparm))
		  && (!ptr || !arr
		      || !same_type_p (TREE_TYPE (targ),
				       TREE_TYPE (tparm))))
		found = false;
	    }
	  if (found
	      && ix == VEC_length (tree, args)
	      /* May be this should be sufficient_parms_p instead,
		 depending on how exactly should user-defined literals
		 work in presence of default arguments on the literal
		 operator parameters.
*/ && argtypes == void_list_node) return fn; } } return error_mark_node; } /* Parse a user-defined char constant. Returns a call to a user-defined literal operator taking the character as an argument. */ static tree cp_parser_userdef_char_literal (cp_parser *parser) { cp_token *token = cp_lexer_consume_token (parser->lexer); tree literal = token->u.value; tree suffix_id = USERDEF_LITERAL_SUFFIX_ID (literal); tree value = USERDEF_LITERAL_VALUE (literal); tree name = cp_literal_operator_id (IDENTIFIER_POINTER (suffix_id)); tree decl, result; /* Build up a call to the user-defined operator */ /* Lookup the name we got back from the id-expression. */ VEC(tree,gc) *args = make_tree_vector (); VEC_safe_push (tree, gc, args, value); decl = lookup_literal_operator (name, args); if (!decl || decl == error_mark_node) { error ("unable to find character literal operator %qD with %qT argument", name, TREE_TYPE (value)); release_tree_vector (args); return error_mark_node; } result = finish_call_expr (decl, &args, false, true, tf_warning_or_error); release_tree_vector (args); if (result != error_mark_node) return result; error ("unable to find character literal operator %qD with %qT argument", name, TREE_TYPE (value)); return error_mark_node; } /* A subroutine of cp_parser_userdef_numeric_literal to create a char... template parameter pack from a string node. */ static tree make_char_string_pack (tree value) { tree charvec; tree argpack = make_node (NONTYPE_ARGUMENT_PACK); const char *str = TREE_STRING_POINTER (value); int i, len = TREE_STRING_LENGTH (value) - 1; tree argvec = make_tree_vec (1); /* Fill in CHARVEC with all of the parameters. */ charvec = make_tree_vec (len); for (i = 0; i < len; ++i) TREE_VEC_ELT (charvec, i) = build_int_cst (char_type_node, str[i]); /* Build the argument packs. */ SET_ARGUMENT_PACK_ARGS (argpack, charvec); TREE_TYPE (argpack) = char_type_node; TREE_VEC_ELT (argvec, 0) = argpack; return argvec; } /* Parse a user-defined numeric constant. 
   returns a call to a user-defined literal operator.  */

static tree
cp_parser_userdef_numeric_literal (cp_parser *parser)
{
  cp_token *token = cp_lexer_consume_token (parser->lexer);
  tree literal = token->u.value;
  tree suffix_id = USERDEF_LITERAL_SUFFIX_ID (literal);
  tree value = USERDEF_LITERAL_VALUE (literal);
  tree num_string = USERDEF_LITERAL_NUM_STRING (literal);
  tree name = cp_literal_operator_id (IDENTIFIER_POINTER (suffix_id));
  tree decl, result;
  VEC(tree,gc) *args;

  /* Three lookup strategies are tried in order: (1) an operator
     taking the cooked numeric value, (2) a raw operator taking the
     spelling as a const char*, (3) a template operator taking the
     spelling as a char... parameter pack.  */

  /* Look for a literal operator taking the exact type of numeric argument
     as the literal value.  */
  args = make_tree_vector ();
  VEC_safe_push (tree, gc, args, value);
  decl = lookup_literal_operator (name, args);
  if (decl && decl != error_mark_node)
    {
      result = finish_call_expr (decl, &args, false, true, tf_none);
      if (result != error_mark_node)
	{
	  release_tree_vector (args);
	  return result;
	}
    }
  release_tree_vector (args);

  /* If the numeric argument didn't work, look for a raw literal
     operator taking a const char* argument consisting of the number
     in string format.  */
  args = make_tree_vector ();
  VEC_safe_push (tree, gc, args, num_string);
  decl = lookup_literal_operator (name, args);
  if (decl && decl != error_mark_node)
    {
      result = finish_call_expr (decl, &args, false, true, tf_none);
      if (result != error_mark_node)
	{
	  release_tree_vector (args);
	  return result;
	}
    }
  release_tree_vector (args);

  /* If the raw literal didn't work, look for a non-type template
     function with parameter pack char....  Call the function with
     template parameter characters representing the number.  */
  args = make_tree_vector ();
  decl = lookup_literal_operator (name, args);
  if (decl && decl != error_mark_node)
    {
      tree tmpl_args = make_char_string_pack (num_string);
      decl = lookup_template_function (decl, tmpl_args);
      result = finish_call_expr (decl, &args, false, true, tf_none);
      if (result != error_mark_node)
	{
	  release_tree_vector (args);
	  return result;
	}
    }
  release_tree_vector (args);

  error ("unable to find numeric literal operator %qD", name);
  return error_mark_node;
}

/* Parse a user-defined string constant.  Returns a call to a
   user-defined literal operator taking a character pointer and the
   length of the string as arguments.  */

static tree
cp_parser_userdef_string_literal (cp_token *token)
{
  tree literal = token->u.value;
  tree suffix_id = USERDEF_LITERAL_SUFFIX_ID (literal);
  tree name = cp_literal_operator_id (IDENTIFIER_POINTER (suffix_id));
  tree value = USERDEF_LITERAL_VALUE (literal);
  /* Character count: total size divided by the element size, minus
     one for the terminating NUL.  */
  int len = TREE_STRING_LENGTH (value)
	/ TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (value)))) - 1;
  tree decl, result;

  /* Build up a call to the user-defined operator  */
  /* Lookup the name we got back from the id-expression.  */
  /* NOTE(review): unlike the char/numeric cases above, this uses plain
     lookup_name rather than lookup_literal_operator.  */
  VEC(tree,gc) *args = make_tree_vector ();
  VEC_safe_push (tree, gc, args, value);
  VEC_safe_push (tree, gc, args, build_int_cst (size_type_node, len));
  decl = lookup_name (name);
  if (!decl || decl == error_mark_node)
    {
      error ("unable to find string literal operator %qD", name);
      release_tree_vector (args);
      return error_mark_node;
    }
  result = finish_call_expr (decl, &args, false, true, tf_none);
  release_tree_vector (args);
  if (result != error_mark_node)
    return result;

  error ("unable to find string literal operator %qD with %qT, %qT arguments",
	 name, TREE_TYPE (value), size_type_node);
  return error_mark_node;
}

/* Basic concepts [gram.basic]  */

/* Parse a translation-unit.

   translation-unit:
     declaration-seq [opt]

   Returns TRUE if all went well.
*/ static bool cp_parser_translation_unit (cp_parser* parser) { /* The address of the first non-permanent object on the declarator obstack. */ static void *declarator_obstack_base; bool success; /* Create the declarator obstack, if necessary. */ if (!cp_error_declarator) { gcc_obstack_init (&declarator_obstack); /* Create the error declarator. */ cp_error_declarator = make_declarator (cdk_error); /* Create the empty parameter list. */ no_parameters = make_parameter_declarator (NULL, NULL, NULL_TREE); /* Remember where the base of the declarator obstack lies. */ declarator_obstack_base = obstack_next_free (&declarator_obstack); } cp_parser_declaration_seq_opt (parser); /* If there are no tokens left then all went well. */ if (cp_lexer_next_token_is (parser->lexer, CPP_EOF)) { /* Get rid of the token array; we don't need it any more. */ cp_lexer_destroy (parser->lexer); parser->lexer = NULL; /* This file might have been a context that's implicitly extern "C". If so, pop the lang context. (Only relevant for PCH.) */ if (parser->implicit_extern_c) { pop_lang_context (); parser->implicit_extern_c = false; } /* Finish up. */ finish_translation_unit (); success = true; } else { cp_parser_error (parser, "expected declaration"); success = false; } /* Make sure the declarator obstack was fully cleaned up. */ gcc_assert (obstack_next_free (&declarator_obstack) == declarator_obstack_base); /* All went well. */ return success; } /* Expressions [gram.expr] */ /* Parse a primary-expression. 
   primary-expression:
     literal
     this
     ( expression )
     id-expression

   GNU Extensions:

   primary-expression:
     ( compound-statement )
     __builtin_va_arg ( assignment-expression , type-id )
     __builtin_offsetof ( type-id , offsetof-expression )

   C++ Extensions:
     __has_nothrow_assign ( type-id )
     __has_nothrow_constructor ( type-id )
     __has_nothrow_copy ( type-id )
     __has_trivial_assign ( type-id )
     __has_trivial_constructor ( type-id )
     __has_trivial_copy ( type-id )
     __has_trivial_destructor ( type-id )
     __has_virtual_destructor ( type-id )
     __is_abstract ( type-id )
     __is_base_of ( type-id , type-id )
     __is_class ( type-id )
     __is_convertible_to ( type-id , type-id )
     __is_empty ( type-id )
     __is_enum ( type-id )
     __is_final ( type-id )
     __is_literal_type ( type-id )
     __is_pod ( type-id )
     __is_polymorphic ( type-id )
     __is_std_layout ( type-id )
     __is_trivial ( type-id )
     __is_union ( type-id )

   Objective-C++ Extension:

   primary-expression:
     objc-expression

   literal:
     __null

   ADDRESS_P is true iff this expression was immediately preceded by
   "&" and therefore might denote a pointer-to-member.  CAST_P is true
   iff this expression is the target of a cast.  TEMPLATE_ARG_P is
   true iff this expression is a template argument.

   Returns a representation of the expression.  Upon return, *IDK
   indicates what kind of id-expression (if any) was present.  */

static tree
cp_parser_primary_expression (cp_parser *parser,
			      bool address_p,
			      bool cast_p,
			      bool template_arg_p,
			      cp_id_kind *idk)
{
  cp_token *token = NULL;

  /* Assume the primary expression is not an id-expression.  */
  *idk = CP_ID_KIND_NONE;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  switch (token->type)
    {
      /* literal:
	   integer-literal
	   character-literal
	   floating-literal
	   string-literal
	   boolean-literal
	   pointer-literal
	   user-defined-literal  */
    case CPP_CHAR:
    case CPP_CHAR16:
    case CPP_CHAR32:
    case CPP_WCHAR:
    case CPP_NUMBER:
      /* A user-defined numeric literal is handled before the token is
	 consumed; cp_parser_userdef_numeric_literal consumes it
	 itself.  */
      if (TREE_CODE (token->u.value) == USERDEF_LITERAL)
	return cp_parser_userdef_numeric_literal (parser);
      token = cp_lexer_consume_token (parser->lexer);
      if (TREE_CODE (token->u.value) == FIXED_CST)
	{
	  error_at (token->location,
		    "fixed-point types not supported in C++");
	  return error_mark_node;
	}
      /* Floating-point literals are only allowed in an integral
	 constant expression if they are cast to an integral or
	 enumeration type.  */
      if (TREE_CODE (token->u.value) == REAL_CST
	  && parser->integral_constant_expression_p
	  && pedantic)
	{
	  /* CAST_P will be set even in invalid code like "int(2.7 +
	     ...)".   Therefore, we have to check that the next token
	     is sure to end the cast.  */
	  if (cast_p)
	    {
	      cp_token *next_token;

	      next_token = cp_lexer_peek_token (parser->lexer);
	      if (/* The comma at the end of an
		     enumerator-definition.  */
		  next_token->type != CPP_COMMA
		  /* The curly brace at the end of an enum-specifier.  */
		  && next_token->type != CPP_CLOSE_BRACE
		  /* The end of a statement.  */
		  && next_token->type != CPP_SEMICOLON
		  /* The end of the cast-expression.  */
		  && next_token->type != CPP_CLOSE_PAREN
		  /* The end of an array bound.  */
		  && next_token->type != CPP_CLOSE_SQUARE
		  /* The closing ">" in a template-argument-list.  */
		  && (next_token->type != CPP_GREATER
		      || parser->greater_than_is_operator_p)
		  /* C++0x only: A ">>" treated like two ">" tokens,
		     in a template-argument-list.  */
		  && (next_token->type != CPP_RSHIFT
		      || (cxx_dialect == cxx98)
		      || parser->greater_than_is_operator_p))
		cast_p = false;
	    }

	  /* If we are within a cast, then the constraint that the
	     cast is to an integral or enumeration type will be
	     checked at that point.  If we are not within a cast, then
	     this code is invalid.  */
	  if (!cast_p)
	    cp_parser_non_integral_constant_expression (parser, NIC_FLOAT);
	}
      return token->u.value;

    case CPP_CHAR_USERDEF:
    case CPP_CHAR16_USERDEF:
    case CPP_CHAR32_USERDEF:
    case CPP_WCHAR_USERDEF:
      return cp_parser_userdef_char_literal (parser);

    case CPP_STRING:
    case CPP_STRING16:
    case CPP_STRING32:
    case CPP_WSTRING:
    case CPP_UTF8STRING:
    case CPP_STRING_USERDEF:
    case CPP_STRING16_USERDEF:
    case CPP_STRING32_USERDEF:
    case CPP_WSTRING_USERDEF:
    case CPP_UTF8STRING_USERDEF:
      /* ??? Should wide strings be allowed when parser->translate_strings_p
	 is false (i.e. in attributes)?  If not, we can kill the third
	 argument to cp_parser_string_literal.  */
      return cp_parser_string_literal (parser,
				       parser->translate_strings_p,
				       true);

    case CPP_OPEN_PAREN:
      {
	tree expr;
	bool saved_greater_than_is_operator_p;

	/* Consume the `('.  */
	cp_lexer_consume_token (parser->lexer);
	/* Within a parenthesized expression, a `>' token is always
	   the greater-than operator.  */
	saved_greater_than_is_operator_p
	  = parser->greater_than_is_operator_p;
	parser->greater_than_is_operator_p = true;
	/* If we see `( { ' then we are looking at the beginning of
	   a GNU statement-expression.  */
	if (cp_parser_allow_gnu_extensions_p (parser)
	    && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	  {
	    /* Statement-expressions are not allowed by the standard.  */
	    pedwarn (token->location, OPT_pedantic,
		     "ISO C++ forbids braced-groups within expressions");

	    /* And they're not allowed outside of a function-body; you
	       cannot, for example, write:

		 int i = ({ int j = 3; j + 1; });

	       at class or namespace scope.  */
	    if (!parser->in_function_body
		|| parser->in_template_argument_list_p)
	      {
		error_at (token->location,
			  "statement-expressions are not allowed outside "
			  "functions nor in template-argument lists");
		cp_parser_skip_to_end_of_block_or_statement (parser);
		expr = error_mark_node;
	      }
	    else
	      {
		/* Start the statement-expression.  */
		expr = begin_stmt_expr ();
		/* Parse the compound-statement.  */
		cp_parser_compound_statement (parser, expr, false, false);
		/* Finish up.  */
		expr = finish_stmt_expr (expr, false);
	      }
	  }
	else
	  {
	    /* Parse the parenthesized expression.  */
	    expr = cp_parser_expression (parser, cast_p, idk);
	    /* Let the front end know that this expression was
	       enclosed in parentheses.  This matters in case, for
	       example, the expression is of the form `A::B', since
	       `&A::B' might be a pointer-to-member, but `&(A::B)' is
	       not.  */
	    finish_parenthesized_expr (expr);
	    /* DR 705: Wrapping an unqualified name in parentheses
	       suppresses arg-dependent lookup.  We want to pass back
	       CP_ID_KIND_QUALIFIED for suppressing vtable lookup
	       (c++/37862), but none of the others.  */
	    if (*idk != CP_ID_KIND_QUALIFIED)
	      *idk = CP_ID_KIND_NONE;
	  }
	/* The `>' token might be the end of a template-id or
	   template-parameter-list now.  */
	parser->greater_than_is_operator_p
	  = saved_greater_than_is_operator_p;
	/* Consume the `)'.  */
	if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
	  cp_parser_skip_to_end_of_statement (parser);

	return expr;
      }

    case CPP_OPEN_SQUARE:
      if (c_dialect_objc ())
	/* We have an Objective-C++ message.  */
	return cp_parser_objc_expression (parser);
      /* Otherwise, `[' begins a lambda-expression.  */
      {
	tree lam = cp_parser_lambda_expression (parser);
	/* Don't warn about a failed tentative parse.  */
	if (cp_parser_error_occurred (parser))
	  return error_mark_node;
	maybe_warn_cpp0x (CPP0X_LAMBDA_EXPR);
	return lam;
      }

    case CPP_OBJC_STRING:
      if (c_dialect_objc ())
	/* We have an Objective-C++ string literal.  */
	return cp_parser_objc_expression (parser);
      cp_parser_error (parser, "expected primary-expression");
      return error_mark_node;

    case CPP_KEYWORD:
      switch (token->keyword)
	{
	  /* These two are the boolean literals.  */
	case RID_TRUE:
	  cp_lexer_consume_token (parser->lexer);
	  return boolean_true_node;
	case RID_FALSE:
	  cp_lexer_consume_token (parser->lexer);
	  return boolean_false_node;

	  /* The `__null' literal.  */
	case RID_NULL:
	  cp_lexer_consume_token (parser->lexer);
	  return null_node;

	  /* The `nullptr' literal.  */
	case RID_NULLPTR:
	  cp_lexer_consume_token (parser->lexer);
	  return nullptr_node;

	  /* Recognize the `this' keyword.  */
	case RID_THIS:
	  cp_lexer_consume_token (parser->lexer);
	  if (parser->local_variables_forbidden_p)
	    {
	      error_at (token->location,
			"%<this%> may not be used in this context");
	      return error_mark_node;
	    }
	  /* Pointers cannot appear in constant-expressions.  */
	  if (cp_parser_non_integral_constant_expression (parser, NIC_THIS))
	    return error_mark_node;
	  return finish_this_expr ();

	  /* The `operator' keyword can be the beginning of an
	     id-expression.  */
	case RID_OPERATOR:
	  goto id_expression;

	case RID_FUNCTION_NAME:
	case RID_PRETTY_FUNCTION_NAME:
	case RID_C99_FUNCTION_NAME:
	  {
	    non_integral_constant name;

	    /* The symbols __FUNCTION__, __PRETTY_FUNCTION__, and
	       __func__ are the names of variables -- but they are
	       treated specially.  Therefore, they are handled here,
	       rather than relying on the generic id-expression logic
	       below.  Grammatically, these names are id-expressions.

	       Consume the token.  */
	    token = cp_lexer_consume_token (parser->lexer);

	    switch (token->keyword)
	      {
	      case RID_FUNCTION_NAME:
		name = NIC_FUNC_NAME;
		break;
	      case RID_PRETTY_FUNCTION_NAME:
		name = NIC_PRETTY_FUNC;
		break;
	      case RID_C99_FUNCTION_NAME:
		name = NIC_C99_FUNC;
		break;
	      default:
		gcc_unreachable ();
	      }

	    if (cp_parser_non_integral_constant_expression (parser, name))
	      return error_mark_node;

	    /* Look up the name.  */
	    return finish_fname (token->u.value);
	  }

	case RID_VA_ARG:
	  {
	    tree expression;
	    tree type;

	    /* The `__builtin_va_arg' construct is used to handle
	       `va_arg'.  Consume the `__builtin_va_arg' token.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Look for the opening `('.  */
	    cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
	    /* Now, parse the assignment-expression.  */
	    expression = cp_parser_assignment_expression (parser,
							  /*cast_p=*/false,
							  NULL);
	    /* Look for the `,'.  */
	    cp_parser_require (parser, CPP_COMMA, RT_COMMA);
	    /* Parse the type-id.  */
	    type = cp_parser_type_id (parser);
	    /* Look for the closing `)'.  */
	    cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	    /* Using `va_arg' in a constant-expression is not
	       allowed.  */
	    if (cp_parser_non_integral_constant_expression (parser,
							    NIC_VA_ARG))
	      return error_mark_node;
	    return build_x_va_arg (expression, type);
	  }

	case RID_OFFSETOF:
	  return cp_parser_builtin_offsetof (parser);

	case RID_HAS_NOTHROW_ASSIGN:
	case RID_HAS_NOTHROW_CONSTRUCTOR:
	case RID_HAS_NOTHROW_COPY:
	case RID_HAS_TRIVIAL_ASSIGN:
	case RID_HAS_TRIVIAL_CONSTRUCTOR:
	case RID_HAS_TRIVIAL_COPY:
	case RID_HAS_TRIVIAL_DESTRUCTOR:
	case RID_HAS_VIRTUAL_DESTRUCTOR:
	case RID_IS_ABSTRACT:
	case RID_IS_BASE_OF:
	case RID_IS_CLASS:
	case RID_IS_CONVERTIBLE_TO:
	case RID_IS_EMPTY:
	case RID_IS_ENUM:
	case RID_IS_FINAL:
	case RID_IS_LITERAL_TYPE:
	case RID_IS_POD:
	case RID_IS_POLYMORPHIC:
	case RID_IS_STD_LAYOUT:
	case RID_IS_TRIVIAL:
	case RID_IS_UNION:
	  return cp_parser_trait_expr (parser, token->keyword);

	/* Objective-C++ expressions.  */
	case RID_AT_ENCODE:
	case RID_AT_PROTOCOL:
	case RID_AT_SELECTOR:
	  return cp_parser_objc_expression (parser);

	case RID_TEMPLATE:
	  if (parser->in_function_body
	      && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
		  == CPP_LESS))
	    {
	      error_at (token->location,
			"a template declaration cannot appear at block scope");
	      cp_parser_skip_to_end_of_block_or_statement (parser);
	      return error_mark_node;
	    }
	  /* Otherwise, fall through to the error below.  */
	default:
	  cp_parser_error (parser, "expected primary-expression");
	  return error_mark_node;
	}

      /* An id-expression can start with either an identifier, a
	 `::' as the beginning of a qualified-id, or the "operator"
	 keyword.  */
    case CPP_NAME:
    case CPP_SCOPE:
    case CPP_TEMPLATE_ID:
    case CPP_NESTED_NAME_SPECIFIER:
      {
	tree id_expression;
	tree decl;
	const char *error_msg;
	bool template_p;
	bool done;
	cp_token *id_expr_token;

      id_expression:
	/* Parse the id-expression.  */
	id_expression
	  = cp_parser_id_expression (parser,
				     /*template_keyword_p=*/false,
				     /*check_dependency_p=*/true,
				     &template_p,
				     /*declarator_p=*/false,
				     /*optional_p=*/false);
	if (id_expression == error_mark_node)
	  return error_mark_node;
	/* TOKEN still points at the first token of the id-expression;
	   remember it for diagnostics.  */
	id_expr_token = token;
	token = cp_lexer_peek_token (parser->lexer);
	done = (token->type != CPP_OPEN_SQUARE
		&& token->type != CPP_OPEN_PAREN
		&& token->type != CPP_DOT
		&& token->type != CPP_DEREF
		&& token->type != CPP_PLUS_PLUS
		&& token->type != CPP_MINUS_MINUS);
	/* If we have a template-id, then no further lookup is
	   required.  If the template-id was for a template-class, we
	   will sometimes have a TYPE_DECL at this point.  */
	if (TREE_CODE (id_expression) == TEMPLATE_ID_EXPR
	    || TREE_CODE (id_expression) == TYPE_DECL)
	  decl = id_expression;
	/* Look up the name.  */
	else
	  {
	    tree ambiguous_decls;

	    /* If we already know that this lookup is ambiguous, then
	       we've already issued an error message; there's no reason
	       to check again.  */
	    if (id_expr_token->type == CPP_NAME
		&& id_expr_token->ambiguous_p)
	      {
		cp_parser_simulate_error (parser);
		return error_mark_node;
	      }

	    decl = cp_parser_lookup_name (parser, id_expression,
					  none_type,
					  template_p,
					  /*is_namespace=*/false,
					  /*check_dependency=*/true,
					  &ambiguous_decls,
					  id_expr_token->location);
	    /* If the lookup was ambiguous, an error will already have
	       been issued.  */
	    if (ambiguous_decls)
	      return error_mark_node;

	    /* In Objective-C++, we may have an Objective-C 2.0
	       dot-syntax for classes here.  */
	    if (c_dialect_objc ()
		&& cp_lexer_peek_token (parser->lexer)->type == CPP_DOT
		&& TREE_CODE (decl) == TYPE_DECL
		&& objc_is_class_name (decl))
	      {
		tree component;
		cp_lexer_consume_token (parser->lexer);
		component = cp_parser_identifier (parser);
		if (component == error_mark_node)
		  return error_mark_node;

		return objc_build_class_component_ref (id_expression,
						       component);
	      }

	    /* In Objective-C++, an instance variable (ivar) may be
	       preferred to whatever cp_parser_lookup_name() found.  */
	    decl = objc_lookup_ivar (decl, id_expression);

	    /* If name lookup gives us a SCOPE_REF, then the
	       qualifying scope was dependent.  */
	    if (TREE_CODE (decl) == SCOPE_REF)
	      {
		/* At this point, we do not know if DECL is a valid
		   integral constant expression.  We assume that it is
		   in fact such an expression, so that code like:

		     template <int N> struct A {
		       int a[B<N>::i];
		     };

		   is accepted.  At template-instantiation time, we
		   will check that B<N>::i is actually a constant.  */
		return decl;
	      }
	    /* Check to see if DECL is a local variable in a context
	       where that is forbidden.  */
	    if (parser->local_variables_forbidden_p
		&& local_variable_p (decl))
	      {
		/* It might be that we only found DECL because we are
		   trying to be generous with pre-ISO scoping rules.
		   For example, consider:

		     int i;
		     void g() {
		       for (int i = 0; i < 10; ++i) {}
		       extern void f(int j = i);
		     }

		   Here, name look up will originally find the out
		   of scope `i'.  We need to issue a warning message,
		   but then use the global `i'.  */
		decl = check_for_out_of_scope_variable (decl);
		if (local_variable_p (decl))
		  {
		    error_at (id_expr_token->location,
			      "local variable %qD may not appear in this context",
			      decl);
		    return error_mark_node;
		  }
	      }
	  }

	decl = (finish_id_expression
		(id_expression, decl, parser->scope,
		 idk,
		 parser->integral_constant_expression_p,
		 parser->allow_non_integral_constant_expression_p,
		 &parser->non_integral_constant_expression_p,
		 template_p, done, address_p,
		 template_arg_p,
		 &error_msg,
		 id_expr_token->location));
	if (error_msg)
	  cp_parser_error (parser, error_msg);
	return decl;
      }

      /* Anything else is an error.  */
    default:
      cp_parser_error (parser, "expected primary-expression");
      return error_mark_node;
    }
}

/* Parse an id-expression.

   id-expression:
     unqualified-id
     qualified-id

   qualified-id:
     :: [opt] nested-name-specifier template [opt] unqualified-id
     :: identifier
     :: operator-function-id
     :: template-id

   Return a representation of the unqualified portion of the
   identifier.
   Sets PARSER->SCOPE to the qualifying scope if there is a
   `::' or nested-name-specifier.

   Often, if the id-expression was a qualified-id, the caller will
   want to make a SCOPE_REF to represent the qualified-id.  This
   function does not do this in order to avoid wastefully creating
   SCOPE_REFs when they are not required.

   If TEMPLATE_KEYWORD_P is true, then we have just seen the
   `template' keyword.

   If CHECK_DEPENDENCY_P is false, then names are looked up inside
   uninstantiated templates.

   If *TEMPLATE_P is non-NULL, it is set to true iff the
   `template' keyword is used to explicitly indicate that the entity
   named is a template.

   If DECLARATOR_P is true, the id-expression is appearing as part of
   a declarator, rather than as part of an expression.  */

static tree
cp_parser_id_expression (cp_parser *parser,
			 bool template_keyword_p,
			 bool check_dependency_p,
			 bool *template_p,
			 bool declarator_p,
			 bool optional_p)
{
  bool global_scope_p;
  bool nested_name_specifier_p;

  /* Assume the `template' keyword was not used.  */
  if (template_p)
    *template_p = template_keyword_p;

  /* Look for the optional `::' operator.  */
  global_scope_p
    = (cp_parser_global_scope_opt (parser,
				   /*current_scope_valid_p=*/false)
       != NULL_TREE);
  /* Look for the optional nested-name-specifier.  */
  nested_name_specifier_p
    = (cp_parser_nested_name_specifier_opt (parser,
					    /*typename_keyword_p=*/false,
					    check_dependency_p,
					    /*type_p=*/false,
					    declarator_p)
       != NULL_TREE);
  /* If there is a nested-name-specifier, then we are looking at
     the first qualified-id production.  */
  if (nested_name_specifier_p)
    {
      tree saved_scope;
      tree saved_object_scope;
      tree saved_qualifying_scope;
      tree unqualified_id;
      bool is_template;

      /* See if the next token is the `template' keyword.  */
      if (!template_p)
	template_p = &is_template;
      *template_p = cp_parser_optional_template_keyword (parser);
      /* Name lookup we do during the processing of the
	 unqualified-id might obliterate SCOPE.  */
      saved_scope = parser->scope;
      saved_object_scope = parser->object_scope;
      saved_qualifying_scope = parser->qualifying_scope;
      /* Process the final unqualified-id.  */
      unqualified_id = cp_parser_unqualified_id (parser, *template_p,
						 check_dependency_p,
						 declarator_p,
						 /*optional_p=*/false);
      /* Restore the SAVED_SCOPE for our caller.  */
      parser->scope = saved_scope;
      parser->object_scope = saved_object_scope;
      parser->qualifying_scope = saved_qualifying_scope;

      return unqualified_id;
    }
  /* Otherwise, if we are in global scope, then we are looking at one
     of the other qualified-id productions.  */
  else if (global_scope_p)
    {
      cp_token *token;
      tree id;

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);

      /* If it's an identifier, and the next token is not a "<", then
	 we can avoid the template-id case.  This is an optimization
	 for this common case.  */
      if (token->type == CPP_NAME
	  && !cp_parser_nth_token_starts_template_argument_list_p
	       (parser, 2))
	return cp_parser_identifier (parser);

      cp_parser_parse_tentatively (parser);
      /* Try a template-id.  */
      id = cp_parser_template_id (parser,
				  /*template_keyword_p=*/false,
				  /*check_dependency_p=*/true,
				  declarator_p);
      /* If that worked, we're done.  */
      if (cp_parser_parse_definitely (parser))
	return id;

      /* Peek at the next token.  (Changes in the token buffer may
	 have invalidated the pointer obtained above.)  */
      token = cp_lexer_peek_token (parser->lexer);

      switch (token->type)
	{
	case CPP_NAME:
	  return cp_parser_identifier (parser);

	case CPP_KEYWORD:
	  if (token->keyword == RID_OPERATOR)
	    return cp_parser_operator_function_id (parser);
	  /* Fall through.  */

	default:
	  cp_parser_error (parser, "expected id-expression");
	  return error_mark_node;
	}
    }
  else
    /* No `::' and no nested-name-specifier: the id-expression must be
       an unqualified-id.  */
    return cp_parser_unqualified_id (parser, template_keyword_p,
				     /*check_dependency_p=*/true,
				     declarator_p,
				     optional_p);
}

/* Parse an unqualified-id.
unqualified-id: identifier operator-function-id conversion-function-id ~ class-name template-id If TEMPLATE_KEYWORD_P is TRUE, we have just seen the `template' keyword, in a construct like `A::template ...'. Returns a representation of unqualified-id. For the `identifier' production, an IDENTIFIER_NODE is returned. For the `~ class-name' production a BIT_NOT_EXPR is returned; the operand of the BIT_NOT_EXPR is an IDENTIFIER_NODE for the class-name. For the other productions, see the documentation accompanying the corresponding parsing functions. If CHECK_DEPENDENCY_P is false, names are looked up in uninstantiated templates. If DECLARATOR_P is true, the unqualified-id is appearing as part of a declarator, rather than as part of an expression. */ static tree cp_parser_unqualified_id (cp_parser* parser, bool template_keyword_p, bool check_dependency_p, bool declarator_p, bool optional_p) { cp_token *token; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); switch (token->type) { case CPP_NAME: { tree id; /* We don't know yet whether or not this will be a template-id. */ cp_parser_parse_tentatively (parser); /* Try a template-id. */ id = cp_parser_template_id (parser, template_keyword_p, check_dependency_p, declarator_p); /* If it worked, we're done. */ if (cp_parser_parse_definitely (parser)) return id; /* Otherwise, it's an ordinary identifier. */ return cp_parser_identifier (parser); } case CPP_TEMPLATE_ID: return cp_parser_template_id (parser, template_keyword_p, check_dependency_p, declarator_p); case CPP_COMPL: { tree type_decl; tree qualifying_scope; tree object_scope; tree scope; bool done; /* Consume the `~' token. */ cp_lexer_consume_token (parser->lexer); /* Parse the class-name. The standard, as written, seems to say that: template <typename T> struct S { ~S (); }; template <typename T> S<T>::~S() {} is invalid, since `~' must be followed by a class-name, but `S<T>' is dependent, and so not known to be a class. 
That's not right; we need to look in uninstantiated templates. A further complication arises from: template <typename T> void f(T t) { t.T::~T(); } Here, it is not possible to look up `T' in the scope of `T' itself. We must look in both the current scope, and the scope of the containing complete expression. Yet another issue is: struct S { int S; ~S(); }; S::~S() {} The standard does not seem to say that the `S' in `~S' should refer to the type `S' and not the data member `S::S'. */ /* DR 244 says that we look up the name after the "~" in the same scope as we looked up the qualifying name. That idea isn't fully worked out; it's more complicated than that. */ scope = parser->scope; object_scope = parser->object_scope; qualifying_scope = parser->qualifying_scope; /* Check for invalid scopes. */ if (scope == error_mark_node) { if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) cp_lexer_consume_token (parser->lexer); return error_mark_node; } if (scope && TREE_CODE (scope) == NAMESPACE_DECL) { if (!cp_parser_uncommitted_to_tentative_parse_p (parser)) error_at (token->location, "scope %qT before %<~%> is not a class-name", scope); cp_parser_simulate_error (parser); if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) cp_lexer_consume_token (parser->lexer); return error_mark_node; } gcc_assert (!scope || TYPE_P (scope)); /* If the name is of the form "X::~X" it's OK even if X is a typedef. */ token = cp_lexer_peek_token (parser->lexer); if (scope && token->type == CPP_NAME && (cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_LESS) && (token->u.value == TYPE_IDENTIFIER (scope) || (CLASS_TYPE_P (scope) && constructor_name_p (token->u.value, scope)))) { cp_lexer_consume_token (parser->lexer); return build_nt (BIT_NOT_EXPR, scope); } /* If there was an explicit qualification (S::~T), first look in the scope given by the qualification (i.e., S). 
Note: in the calls to cp_parser_class_name below we pass typename_type so that lookup finds the injected-class-name rather than the constructor. */ done = false; type_decl = NULL_TREE; if (scope) { cp_parser_parse_tentatively (parser); type_decl = cp_parser_class_name (parser, /*typename_keyword_p=*/false, /*template_keyword_p=*/false, typename_type, /*check_dependency=*/false, /*class_head_p=*/false, declarator_p); if (cp_parser_parse_definitely (parser)) done = true; } /* In "N::S::~S", look in "N" as well. */ if (!done && scope && qualifying_scope) { cp_parser_parse_tentatively (parser); parser->scope = qualifying_scope; parser->object_scope = NULL_TREE; parser->qualifying_scope = NULL_TREE; type_decl = cp_parser_class_name (parser, /*typename_keyword_p=*/false, /*template_keyword_p=*/false, typename_type, /*check_dependency=*/false, /*class_head_p=*/false, declarator_p); if (cp_parser_parse_definitely (parser)) done = true; } /* In "p->S::~T", look in the scope given by "*p" as well. */ else if (!done && object_scope) { cp_parser_parse_tentatively (parser); parser->scope = object_scope; parser->object_scope = NULL_TREE; parser->qualifying_scope = NULL_TREE; type_decl = cp_parser_class_name (parser, /*typename_keyword_p=*/false, /*template_keyword_p=*/false, typename_type, /*check_dependency=*/false, /*class_head_p=*/false, declarator_p); if (cp_parser_parse_definitely (parser)) done = true; } /* Look in the surrounding context. */ if (!done) { parser->scope = NULL_TREE; parser->object_scope = NULL_TREE; parser->qualifying_scope = NULL_TREE; if (processing_template_decl) cp_parser_parse_tentatively (parser); type_decl = cp_parser_class_name (parser, /*typename_keyword_p=*/false, /*template_keyword_p=*/false, typename_type, /*check_dependency=*/false, /*class_head_p=*/false, declarator_p); if (processing_template_decl && ! 
cp_parser_parse_definitely (parser)) { /* We couldn't find a type with this name, so just accept it and check for a match at instantiation time. */ type_decl = cp_parser_identifier (parser); if (type_decl != error_mark_node) type_decl = build_nt (BIT_NOT_EXPR, type_decl); return type_decl; } } /* If an error occurred, assume that the name of the destructor is the same as the name of the qualifying class. That allows us to keep parsing after running into ill-formed destructor names. */ if (type_decl == error_mark_node && scope) return build_nt (BIT_NOT_EXPR, scope); else if (type_decl == error_mark_node) return error_mark_node; /* Check that destructor name and scope match. */ if (declarator_p && scope && !check_dtor_name (scope, type_decl)) { if (!cp_parser_uncommitted_to_tentative_parse_p (parser)) error_at (token->location, "declaration of %<~%T%> as member of %qT", type_decl, scope); cp_parser_simulate_error (parser); return error_mark_node; } /* [class.dtor] A typedef-name that names a class shall not be used as the identifier in the declarator for a destructor declaration. */ if (declarator_p && !DECL_IMPLICIT_TYPEDEF_P (type_decl) && !DECL_SELF_REFERENCE_P (type_decl) && !cp_parser_uncommitted_to_tentative_parse_p (parser)) error_at (token->location, "typedef-name %qD used as destructor declarator", type_decl); return build_nt (BIT_NOT_EXPR, TREE_TYPE (type_decl)); } case CPP_KEYWORD: if (token->keyword == RID_OPERATOR) { tree id; /* This could be a template-id, so we try that first. */ cp_parser_parse_tentatively (parser); /* Try a template-id. */ id = cp_parser_template_id (parser, template_keyword_p, /*check_dependency_p=*/true, declarator_p); /* If that worked, we're done. */ if (cp_parser_parse_definitely (parser)) return id; /* We still don't know whether we're looking at an operator-function-id or a conversion-function-id. */ cp_parser_parse_tentatively (parser); /* Try an operator-function-id. 
*/ id = cp_parser_operator_function_id (parser); /* If that didn't work, try a conversion-function-id. */ if (!cp_parser_parse_definitely (parser)) id = cp_parser_conversion_function_id (parser); else if (UDLIT_OPER_P (id)) { /* 17.6.3.3.5 */ const char *name = UDLIT_OP_SUFFIX (id); if (name[0] != '_' && !in_system_header) warning (0, "literal operator suffixes not preceded by %<_%>" " are reserved for future standardization"); } return id; } /* Fall through. */ default: if (optional_p) return NULL_TREE; cp_parser_error (parser, "expected unqualified-id"); return error_mark_node; } } /* Parse an (optional) nested-name-specifier. nested-name-specifier: [C++98] class-or-namespace-name :: nested-name-specifier [opt] class-or-namespace-name :: template nested-name-specifier [opt] nested-name-specifier: [C++0x] type-name :: namespace-name :: nested-name-specifier identifier :: nested-name-specifier template [opt] simple-template-id :: PARSER->SCOPE should be set appropriately before this function is called. TYPENAME_KEYWORD_P is TRUE if the `typename' keyword is in effect. TYPE_P is TRUE if we non-type bindings should be ignored in name lookups. Sets PARSER->SCOPE to the class (TYPE) or namespace (NAMESPACE_DECL) specified by the nested-name-specifier, or leaves it unchanged if there is no nested-name-specifier. Returns the new scope iff there is a nested-name-specifier, or NULL_TREE otherwise. If IS_DECLARATION is TRUE, the nested-name-specifier is known to be part of a declaration and/or decl-specifier. */ static tree cp_parser_nested_name_specifier_opt (cp_parser *parser, bool typename_keyword_p, bool check_dependency_p, bool type_p, bool is_declaration) { bool success = false; cp_token_position start = 0; cp_token *token; /* Remember where the nested-name-specifier starts. 
*/ if (cp_parser_uncommitted_to_tentative_parse_p (parser)) { start = cp_lexer_token_position (parser->lexer, false); push_deferring_access_checks (dk_deferred); } while (true) { tree new_scope; tree old_scope; tree saved_qualifying_scope; bool template_keyword_p; /* Spot cases that cannot be the beginning of a nested-name-specifier. */ token = cp_lexer_peek_token (parser->lexer); /* If the next token is CPP_NESTED_NAME_SPECIFIER, just process the already parsed nested-name-specifier. */ if (token->type == CPP_NESTED_NAME_SPECIFIER) { /* Grab the nested-name-specifier and continue the loop. */ cp_parser_pre_parsed_nested_name_specifier (parser); /* If we originally encountered this nested-name-specifier with IS_DECLARATION set to false, we will not have resolved TYPENAME_TYPEs, so we must do so here. */ if (is_declaration && TREE_CODE (parser->scope) == TYPENAME_TYPE) { new_scope = resolve_typename_type (parser->scope, /*only_current_p=*/false); if (TREE_CODE (new_scope) != TYPENAME_TYPE) parser->scope = new_scope; } success = true; continue; } /* Spot cases that cannot be the beginning of a nested-name-specifier. On the second and subsequent times through the loop, we look for the `template' keyword. */ if (success && token->keyword == RID_TEMPLATE) ; /* A template-id can start a nested-name-specifier. */ else if (token->type == CPP_TEMPLATE_ID) ; /* DR 743: decltype can be used in a nested-name-specifier. */ else if (token_is_decltype (token)) ; else { /* If the next token is not an identifier, then it is definitely not a type-name or namespace-name. */ if (token->type != CPP_NAME) break; /* If the following token is neither a `<' (to begin a template-id), nor a `::', then we are not looking at a nested-name-specifier. 
*/ token = cp_lexer_peek_nth_token (parser->lexer, 2); if (token->type == CPP_COLON && parser->colon_corrects_to_scope_p && cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_NAME) { error_at (token->location, "found %<:%> in nested-name-specifier, expected %<::%>"); token->type = CPP_SCOPE; } if (token->type != CPP_SCOPE && !cp_parser_nth_token_starts_template_argument_list_p (parser, 2)) break; } /* The nested-name-specifier is optional, so we parse tentatively. */ cp_parser_parse_tentatively (parser); /* Look for the optional `template' keyword, if this isn't the first time through the loop. */ if (success) template_keyword_p = cp_parser_optional_template_keyword (parser); else template_keyword_p = false; /* Save the old scope since the name lookup we are about to do might destroy it. */ old_scope = parser->scope; saved_qualifying_scope = parser->qualifying_scope; /* In a declarator-id like "X<T>::I::Y<T>" we must be able to look up names in "X<T>::I" in order to determine that "Y" is a template. So, if we have a typename at this point, we make an effort to look through it. */ if (is_declaration && !typename_keyword_p && parser->scope && TREE_CODE (parser->scope) == TYPENAME_TYPE) parser->scope = resolve_typename_type (parser->scope, /*only_current_p=*/false); /* Parse the qualifying entity. */ new_scope = cp_parser_qualifying_entity (parser, typename_keyword_p, template_keyword_p, check_dependency_p, type_p, is_declaration); /* Look for the `::' token. */ cp_parser_require (parser, CPP_SCOPE, RT_SCOPE); /* If we found what we wanted, we keep going; otherwise, we're done. */ if (!cp_parser_parse_definitely (parser)) { bool error_p = false; /* Restore the OLD_SCOPE since it was valid before the failed attempt at finding the last class-or-namespace-name. 
*/ parser->scope = old_scope; parser->qualifying_scope = saved_qualifying_scope; /* If the next token is a decltype, and the one after that is a `::', then the decltype has failed to resolve to a class or enumeration type. Give this error even when parsing tentatively since it can't possibly be valid--and we're going to replace it with a CPP_NESTED_NAME_SPECIFIER below, so we won't get another chance.*/ if (cp_lexer_next_token_is (parser->lexer, CPP_DECLTYPE) && (cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_SCOPE)) { token = cp_lexer_consume_token (parser->lexer); error_at (token->location, "decltype evaluates to %qT, " "which is not a class or enumeration type", token->u.value); parser->scope = error_mark_node; error_p = true; /* As below. */ success = true; cp_lexer_consume_token (parser->lexer); } if (cp_parser_uncommitted_to_tentative_parse_p (parser)) break; /* If the next token is an identifier, and the one after that is a `::', then any valid interpretation would have found a class-or-namespace-name. 
*/ while (cp_lexer_next_token_is (parser->lexer, CPP_NAME) && (cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_SCOPE) && (cp_lexer_peek_nth_token (parser->lexer, 3)->type != CPP_COMPL)) { token = cp_lexer_consume_token (parser->lexer); if (!error_p) { if (!token->ambiguous_p) { tree decl; tree ambiguous_decls; decl = cp_parser_lookup_name (parser, token->u.value, none_type, /*is_template=*/false, /*is_namespace=*/false, /*check_dependency=*/true, &ambiguous_decls, token->location); if (TREE_CODE (decl) == TEMPLATE_DECL) error_at (token->location, "%qD used without template parameters", decl); else if (ambiguous_decls) { error_at (token->location, "reference to %qD is ambiguous", token->u.value); print_candidates (ambiguous_decls); decl = error_mark_node; } else { if (cxx_dialect != cxx98) cp_parser_name_lookup_error (parser, token->u.value, decl, NLE_NOT_CXX98, token->location); else cp_parser_name_lookup_error (parser, token->u.value, decl, NLE_CXX98, token->location); } } parser->scope = error_mark_node; error_p = true; /* Treat this as a successful nested-name-specifier due to: [basic.lookup.qual] If the name found is not a class-name (clause _class_) or namespace-name (_namespace.def_), the program is ill-formed. */ success = true; } cp_lexer_consume_token (parser->lexer); } break; } /* We've found one valid nested-name-specifier. */ success = true; /* Name lookup always gives us a DECL. */ if (TREE_CODE (new_scope) == TYPE_DECL) new_scope = TREE_TYPE (new_scope); /* Uses of "template" must be followed by actual templates. */ if (template_keyword_p && !(CLASS_TYPE_P (new_scope) && ((CLASSTYPE_USE_TEMPLATE (new_scope) && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (new_scope))) || CLASSTYPE_IS_TEMPLATE (new_scope))) && !(TREE_CODE (new_scope) == TYPENAME_TYPE && (TREE_CODE (TYPENAME_TYPE_FULLNAME (new_scope)) == TEMPLATE_ID_EXPR))) permerror (input_location, TYPE_P (new_scope) ? 
G_("%qT is not a template") : G_("%qD is not a template"), new_scope); /* If it is a class scope, try to complete it; we are about to be looking up names inside the class. */ if (TYPE_P (new_scope) /* Since checking types for dependency can be expensive, avoid doing it if the type is already complete. */ && !COMPLETE_TYPE_P (new_scope) /* Do not try to complete dependent types. */ && !dependent_type_p (new_scope)) { new_scope = complete_type (new_scope); /* If it is a typedef to current class, use the current class instead, as the typedef won't have any names inside it yet. */ if (!COMPLETE_TYPE_P (new_scope) && currently_open_class (new_scope)) new_scope = TYPE_MAIN_VARIANT (new_scope); } /* Make sure we look in the right scope the next time through the loop. */ parser->scope = new_scope; } /* If parsing tentatively, replace the sequence of tokens that makes up the nested-name-specifier with a CPP_NESTED_NAME_SPECIFIER token. That way, should we re-parse the token stream, we will not have to repeat the effort required to do the parse, nor will we issue duplicate error messages. */ if (success && start) { cp_token *token; token = cp_lexer_token_at (parser->lexer, start); /* Reset the contents of the START token. */ token->type = CPP_NESTED_NAME_SPECIFIER; /* Retrieve any deferred checks. Do not pop this access checks yet so the memory will not be reclaimed during token replacing below. */ token->u.tree_check_value = ggc_alloc_cleared_tree_check (); token->u.tree_check_value->value = parser->scope; token->u.tree_check_value->checks = get_deferred_access_checks (); token->u.tree_check_value->qualifying_scope = parser->qualifying_scope; token->keyword = RID_MAX; /* Purge all subsequent tokens. */ cp_lexer_purge_tokens_after (parser->lexer, start); } if (start) pop_to_parent_deferring_access_checks (); return success ? parser->scope : NULL_TREE; } /* Parse a nested-name-specifier. See cp_parser_nested_name_specifier_opt for details. 
   This function behaves identically, except that it will issue an
   error if no nested-name-specifier is present.  */

static tree
cp_parser_nested_name_specifier (cp_parser *parser,
				 bool typename_keyword_p,
				 bool check_dependency_p,
				 bool type_p,
				 bool is_declaration)
{
  tree scope;

  /* Look for the nested-name-specifier.  */
  scope = cp_parser_nested_name_specifier_opt (parser,
					       typename_keyword_p,
					       check_dependency_p,
					       type_p,
					       is_declaration);
  /* If it was not present, issue an error message, and clear
     PARSER->SCOPE so callers do not act on a stale scope.  */
  if (!scope)
    {
      cp_parser_error (parser, "expected nested-name-specifier");
      parser->scope = NULL_TREE;
    }

  return scope;
}

/* Parse the qualifying entity in a nested-name-specifier.  For C++98,
   this is either a class-name or a namespace-name (which corresponds
   to the class-or-namespace-name production in the grammar).  For
   C++0x, it can also be a type-name that refers to an enumeration
   type or a simple-template-id.

   TYPENAME_KEYWORD_P is TRUE iff the `typename' keyword is in effect.
   TEMPLATE_KEYWORD_P is TRUE iff the `template' keyword is in effect.
   CHECK_DEPENDENCY_P is FALSE iff dependent names should be looked up.
   TYPE_P is TRUE iff the next name should be taken as a class-name,
   even if the same name is declared to be another entity in the same
   scope.

   Returns the class (TYPE_DECL) or namespace (NAMESPACE_DECL)
   specified by the class-or-namespace-name.  If neither is found the
   ERROR_MARK_NODE is returned.  */

static tree
cp_parser_qualifying_entity (cp_parser *parser,
			     bool typename_keyword_p,
			     bool template_keyword_p,
			     bool check_dependency_p,
			     bool type_p,
			     bool is_declaration)
{
  /* Parser state saved before the tentative parses below, and
     restored between each attempt.  */
  tree saved_scope;
  tree saved_qualifying_scope;
  tree saved_object_scope;
  tree scope;
  bool only_class_p;
  bool successful_parse_p;

  /* DR 743: decltype can appear in a nested-name-specifier.  */
  if (cp_lexer_next_token_is_decltype (parser->lexer))
    {
      scope = cp_parser_decltype (parser);
      /* Only a class or enumeration type can serve as a qualifier;
	 otherwise simulate an error so an enclosing tentative parse
	 can back out.  */
      if (TREE_CODE (scope) != ENUMERAL_TYPE
	  && !MAYBE_CLASS_TYPE_P (scope))
	{
	  cp_parser_simulate_error (parser);
	  return error_mark_node;
	}
      /* Prefer the TYPE_DECL for the type when there is one.  */
      if (TYPE_NAME (scope))
	scope = TYPE_NAME (scope);
      return scope;
    }

  /* Before we try to parse the class-name, we must save away the
     current PARSER->SCOPE since cp_parser_class_name will destroy
     it.  */
  saved_scope = parser->scope;
  saved_qualifying_scope = parser->qualifying_scope;
  saved_object_scope = parser->object_scope;
  /* Try for a class-name first.  If the SAVED_SCOPE is a type, then
     there is no need to look for a namespace-name (so the parse need
     not be tentative in that case).  */
  only_class_p = template_keyword_p
    || (saved_scope && TYPE_P (saved_scope) && cxx_dialect == cxx98);
  if (!only_class_p)
    cp_parser_parse_tentatively (parser);
  scope = cp_parser_class_name (parser,
				typename_keyword_p,
				template_keyword_p,
				type_p ? class_type : none_type,
				check_dependency_p,
				/*class_head_p=*/false,
				is_declaration);
  successful_parse_p = only_class_p || cp_parser_parse_definitely (parser);
  /* If that didn't work and we're in C++0x mode, try for a
     type-name.  */
  if (!only_class_p && cxx_dialect != cxx98 && !successful_parse_p)
    {
      /* Restore the saved scope.  */
      parser->scope = saved_scope;
      parser->qualifying_scope = saved_qualifying_scope;
      parser->object_scope = saved_object_scope;

      /* Parse tentatively.  */
      cp_parser_parse_tentatively (parser);

      /* Parse a type-name */
      scope = cp_parser_type_name (parser);

      /* "If the name found does not designate a namespace or a class,
	 enumeration, or dependent type, the program is ill-formed."

	 We cover classes and dependent types above and namespaces
	 below, so this code is only looking for enums.  */
      if (!scope || TREE_CODE (scope) != TYPE_DECL
	  || TREE_CODE (TREE_TYPE (scope)) != ENUMERAL_TYPE)
	cp_parser_simulate_error (parser);

      successful_parse_p = cp_parser_parse_definitely (parser);
    }
  /* If that didn't work, try for a namespace-name.  */
  if (!only_class_p && !successful_parse_p)
    {
      /* Restore the saved scope.  */
      parser->scope = saved_scope;
      parser->qualifying_scope = saved_qualifying_scope;
      parser->object_scope = saved_object_scope;
      /* If we are not looking at an identifier followed by the scope
	 resolution operator, then this is not part of a
	 nested-name-specifier.  (Note that this function is only used
	 to parse the components of a nested-name-specifier.)  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_NAME)
	  || cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_SCOPE)
	return error_mark_node;
      scope = cp_parser_namespace_name (parser);
    }

  return scope;
}

/* Parse a postfix-expression.

   postfix-expression:
     primary-expression
     postfix-expression [ expression ]
     postfix-expression ( expression-list [opt] )
     simple-type-specifier ( expression-list [opt] )
     typename :: [opt] nested-name-specifier identifier
       ( expression-list [opt] )
     typename :: [opt] nested-name-specifier template [opt] template-id
       ( expression-list [opt] )
     postfix-expression . template [opt] id-expression
     postfix-expression -> template [opt] id-expression
     postfix-expression . pseudo-destructor-name
     postfix-expression -> pseudo-destructor-name
     postfix-expression ++
     postfix-expression --
     dynamic_cast < type-id > ( expression )
     static_cast < type-id > ( expression )
     reinterpret_cast < type-id > ( expression )
     const_cast < type-id > ( expression )
     typeid ( expression )
     typeid ( type-id )

   GNU Extension:

   postfix-expression:
     ( type-id ) { initializer-list , [opt] }

   This extension is a GNU version of the C99 compound-literal
   construct.  (The C99 grammar uses `type-name' instead of `type-id',
   but they are essentially the same concept.)

   If ADDRESS_P is true, the postfix expression is the operand of the
   `&' operator.  CAST_P is true if this expression is the target of a
   cast.

   If MEMBER_ACCESS_ONLY_P, we only allow postfix expressions that are
   class member access expressions [expr.ref].

   Returns a representation of the expression.
*/ static tree cp_parser_postfix_expression (cp_parser *parser, bool address_p, bool cast_p, bool member_access_only_p, cp_id_kind * pidk_return) { cp_token *token; enum rid keyword; cp_id_kind idk = CP_ID_KIND_NONE; tree postfix_expression = NULL_TREE; bool is_member_access = false; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* Some of the productions are determined by keywords. */ keyword = token->keyword; switch (keyword) { case RID_DYNCAST: case RID_STATCAST: case RID_REINTCAST: case RID_CONSTCAST: { tree type; tree expression; const char *saved_message; /* All of these can be handled in the same way from the point of view of parsing. Begin by consuming the token identifying the cast. */ cp_lexer_consume_token (parser->lexer); /* New types cannot be defined in the cast. */ saved_message = parser->type_definition_forbidden_message; parser->type_definition_forbidden_message = G_("types may not be defined in casts"); /* Look for the opening `<'. */ cp_parser_require (parser, CPP_LESS, RT_LESS); /* Parse the type to which we are casting. */ type = cp_parser_type_id (parser); /* Look for the closing `>'. */ cp_parser_require (parser, CPP_GREATER, RT_GREATER); /* Restore the old message. */ parser->type_definition_forbidden_message = saved_message; /* And the expression which is being cast. */ cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN); expression = cp_parser_expression (parser, /*cast_p=*/true, & idk); cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); /* Only type conversions to integral or enumeration types can be used in constant-expressions. 
*/ if (!cast_valid_in_integral_constant_expression_p (type) && cp_parser_non_integral_constant_expression (parser, NIC_CAST)) return error_mark_node; switch (keyword) { case RID_DYNCAST: postfix_expression = build_dynamic_cast (type, expression, tf_warning_or_error); break; case RID_STATCAST: postfix_expression = build_static_cast (type, expression, tf_warning_or_error); break; case RID_REINTCAST: postfix_expression = build_reinterpret_cast (type, expression, tf_warning_or_error); break; case RID_CONSTCAST: postfix_expression = build_const_cast (type, expression, tf_warning_or_error); break; default: gcc_unreachable (); } } break; case RID_TYPEID: { tree type; const char *saved_message; bool saved_in_type_id_in_expr_p; /* Consume the `typeid' token. */ cp_lexer_consume_token (parser->lexer); /* Look for the `(' token. */ cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN); /* Types cannot be defined in a `typeid' expression. */ saved_message = parser->type_definition_forbidden_message; parser->type_definition_forbidden_message = G_("types may not be defined in a %<typeid%> expression"); /* We can't be sure yet whether we're looking at a type-id or an expression. */ cp_parser_parse_tentatively (parser); /* Try a type-id first. */ saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p; parser->in_type_id_in_expr_p = true; type = cp_parser_type_id (parser); parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p; /* Look for the `)' token. Otherwise, we can't be sure that we're not looking at an expression: consider `typeid (int (3))', for example. */ cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); /* If all went well, simply lookup the type-id. */ if (cp_parser_parse_definitely (parser)) postfix_expression = get_typeid (type); /* Otherwise, fall back to the expression variant. */ else { tree expression; /* Look for an expression. */ expression = cp_parser_expression (parser, /*cast_p=*/false, & idk); /* Compute its typeid. 
*/ postfix_expression = build_typeid (expression); /* Look for the `)' token. */ cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); } /* Restore the saved message. */ parser->type_definition_forbidden_message = saved_message; /* `typeid' may not appear in an integral constant expression. */ if (cp_parser_non_integral_constant_expression (parser, NIC_TYPEID)) return error_mark_node; } break; case RID_TYPENAME: { tree type; /* The syntax permitted here is the same permitted for an elaborated-type-specifier. */ type = cp_parser_elaborated_type_specifier (parser, /*is_friend=*/false, /*is_declaration=*/false); postfix_expression = cp_parser_functional_cast (parser, type); } break; default: { tree type; /* If the next thing is a simple-type-specifier, we may be looking at a functional cast. We could also be looking at an id-expression. So, we try the functional cast, and if that doesn't work we fall back to the primary-expression. */ cp_parser_parse_tentatively (parser); /* Look for the simple-type-specifier. */ type = cp_parser_simple_type_specifier (parser, /*decl_specs=*/NULL, CP_PARSER_FLAGS_NONE); /* Parse the cast itself. */ if (!cp_parser_error_occurred (parser)) postfix_expression = cp_parser_functional_cast (parser, type); /* If that worked, we're done. */ if (cp_parser_parse_definitely (parser)) break; /* If the functional-cast didn't work out, try a compound-literal. */ if (cp_parser_allow_gnu_extensions_p (parser) && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)) { VEC(constructor_elt,gc) *initializer_list = NULL; bool saved_in_type_id_in_expr_p; cp_parser_parse_tentatively (parser); /* Consume the `('. */ cp_lexer_consume_token (parser->lexer); /* Parse the type. */ saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p; parser->in_type_id_in_expr_p = true; type = cp_parser_type_id (parser); parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p; /* Look for the `)'. 
*/ cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); /* Look for the `{'. */ cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE); /* If things aren't going well, there's no need to keep going. */ if (!cp_parser_error_occurred (parser)) { bool non_constant_p; /* Parse the initializer-list. */ initializer_list = cp_parser_initializer_list (parser, &non_constant_p); /* Allow a trailing `,'. */ if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) cp_lexer_consume_token (parser->lexer); /* Look for the final `}'. */ cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE); } /* If that worked, we're definitely looking at a compound-literal expression. */ if (cp_parser_parse_definitely (parser)) { /* Warn the user that a compound literal is not allowed in standard C++. */ pedwarn (input_location, OPT_pedantic, "ISO C++ forbids compound-literals"); /* For simplicity, we disallow compound literals in constant-expressions. We could allow compound literals of integer type, whose initializer was a constant, in constant expressions. Permitting that usage, as a further extension, would not change the meaning of any currently accepted programs. (Of course, as compound literals are not part of ISO C++, the standard has nothing to say.) */ if (cp_parser_non_integral_constant_expression (parser, NIC_NCC)) { postfix_expression = error_mark_node; break; } /* Form the representation of the compound-literal. */ postfix_expression = (finish_compound_literal (type, build_constructor (init_list_type_node, initializer_list), tf_warning_or_error)); break; } } /* It must be a primary-expression. */ postfix_expression = cp_parser_primary_expression (parser, address_p, cast_p, /*template_arg_p=*/false, &idk); } break; } /* Keep looping until the postfix-expression is complete. 
*/ while (true) { if (idk == CP_ID_KIND_UNQUALIFIED && TREE_CODE (postfix_expression) == IDENTIFIER_NODE && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_PAREN)) /* It is not a Koenig lookup function call. */ postfix_expression = unqualified_name_lookup_error (postfix_expression); /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); switch (token->type) { case CPP_OPEN_SQUARE: postfix_expression = cp_parser_postfix_open_square_expression (parser, postfix_expression, false); idk = CP_ID_KIND_NONE; is_member_access = false; break; case CPP_OPEN_PAREN: /* postfix-expression ( expression-list [opt] ) */ { bool koenig_p; bool is_builtin_constant_p; bool saved_integral_constant_expression_p = false; bool saved_non_integral_constant_expression_p = false; VEC(tree,gc) *args; is_member_access = false; is_builtin_constant_p = DECL_IS_BUILTIN_CONSTANT_P (postfix_expression); if (is_builtin_constant_p) { /* The whole point of __builtin_constant_p is to allow non-constant expressions to appear as arguments. */ saved_integral_constant_expression_p = parser->integral_constant_expression_p; saved_non_integral_constant_expression_p = parser->non_integral_constant_expression_p; parser->integral_constant_expression_p = false; } args = (cp_parser_parenthesized_expression_list (parser, non_attr, /*cast_p=*/false, /*allow_expansion_p=*/true, /*non_constant_p=*/NULL)); if (is_builtin_constant_p) { parser->integral_constant_expression_p = saved_integral_constant_expression_p; parser->non_integral_constant_expression_p = saved_non_integral_constant_expression_p; } if (args == NULL) { postfix_expression = error_mark_node; break; } /* Function calls are not permitted in constant-expressions. */ if (! 
builtin_valid_in_constant_expr_p (postfix_expression) && cp_parser_non_integral_constant_expression (parser, NIC_FUNC_CALL)) { postfix_expression = error_mark_node; release_tree_vector (args); break; } koenig_p = false; if (idk == CP_ID_KIND_UNQUALIFIED || idk == CP_ID_KIND_TEMPLATE_ID) { if (TREE_CODE (postfix_expression) == IDENTIFIER_NODE) { if (!VEC_empty (tree, args)) { koenig_p = true; if (!any_type_dependent_arguments_p (args)) postfix_expression = perform_koenig_lookup (postfix_expression, args, /*include_std=*/false, tf_warning_or_error); } else postfix_expression = unqualified_fn_lookup_error (postfix_expression); } /* We do not perform argument-dependent lookup if normal lookup finds a non-function, in accordance with the expected resolution of DR 218. */ else if (!VEC_empty (tree, args) && is_overloaded_fn (postfix_expression)) { tree fn = get_first_fn (postfix_expression); fn = STRIP_TEMPLATE (fn); /* Do not do argument dependent lookup if regular lookup finds a member function or a block-scope function declaration. [basic.lookup.argdep]/3 */ if (!DECL_FUNCTION_MEMBER_P (fn) && !DECL_LOCAL_FUNCTION_P (fn)) { koenig_p = true; if (!any_type_dependent_arguments_p (args)) postfix_expression = perform_koenig_lookup (postfix_expression, args, /*include_std=*/false, tf_warning_or_error); } } } if (TREE_CODE (postfix_expression) == COMPONENT_REF) { tree instance = TREE_OPERAND (postfix_expression, 0); tree fn = TREE_OPERAND (postfix_expression, 1); if (processing_template_decl && (type_dependent_expression_p (instance) || (!BASELINK_P (fn) && TREE_CODE (fn) != FIELD_DECL) || type_dependent_expression_p (fn) || any_type_dependent_arguments_p (args))) { postfix_expression = build_nt_call_vec (postfix_expression, args); release_tree_vector (args); break; } if (BASELINK_P (fn)) { postfix_expression = (build_new_method_call (instance, fn, &args, NULL_TREE, (idk == CP_ID_KIND_QUALIFIED ? 
LOOKUP_NORMAL|LOOKUP_NONVIRTUAL : LOOKUP_NORMAL), /*fn_p=*/NULL, tf_warning_or_error)); } else postfix_expression = finish_call_expr (postfix_expression, &args, /*disallow_virtual=*/false, /*koenig_p=*/false, tf_warning_or_error); } else if (TREE_CODE (postfix_expression) == OFFSET_REF || TREE_CODE (postfix_expression) == MEMBER_REF || TREE_CODE (postfix_expression) == DOTSTAR_EXPR) postfix_expression = (build_offset_ref_call_from_tree (postfix_expression, &args)); else if (idk == CP_ID_KIND_QUALIFIED) /* A call to a static class member, or a namespace-scope function. */ postfix_expression = finish_call_expr (postfix_expression, &args, /*disallow_virtual=*/true, koenig_p, tf_warning_or_error); else /* All other function calls. */ postfix_expression = finish_call_expr (postfix_expression, &args, /*disallow_virtual=*/false, koenig_p, tf_warning_or_error); /* The POSTFIX_EXPRESSION is certainly no longer an id. */ idk = CP_ID_KIND_NONE; release_tree_vector (args); } break; case CPP_DOT: case CPP_DEREF: /* postfix-expression . template [opt] id-expression postfix-expression . pseudo-destructor-name postfix-expression -> template [opt] id-expression postfix-expression -> pseudo-destructor-name */ /* Consume the `.' or `->' operator. */ cp_lexer_consume_token (parser->lexer); postfix_expression = cp_parser_postfix_dot_deref_expression (parser, token->type, postfix_expression, false, &idk, token->location); is_member_access = true; break; case CPP_PLUS_PLUS: /* postfix-expression ++ */ /* Consume the `++' token. */ cp_lexer_consume_token (parser->lexer); /* Generate a representation for the complete expression. */ postfix_expression = finish_increment_expr (postfix_expression, POSTINCREMENT_EXPR); /* Increments may not appear in constant-expressions. 
   */
	  if (cp_parser_non_integral_constant_expression (parser, NIC_INC))
	    postfix_expression = error_mark_node;
	  idk = CP_ID_KIND_NONE;
	  is_member_access = false;
	  break;

	case CPP_MINUS_MINUS:
	  /* postfix-expression -- */
	  /* Consume the `--' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Generate a representation for the complete expression.  */
	  postfix_expression
	    = finish_increment_expr (postfix_expression,
				     POSTDECREMENT_EXPR);
	  /* Decrements may not appear in constant-expressions.  */
	  if (cp_parser_non_integral_constant_expression (parser, NIC_DEC))
	    postfix_expression = error_mark_node;
	  idk = CP_ID_KIND_NONE;
	  is_member_access = false;
	  break;

	default:
	  /* Not a postfix operator: the expression is complete, so
	     return what has been accumulated so far.  */
	  if (pidk_return != NULL)
	    * pidk_return = idk;
	  if (member_access_only_p)
	    return is_member_access? postfix_expression
				   : error_mark_node;
	  else
	    return postfix_expression;
	}
    }

  /* We should never get here.  */
  gcc_unreachable ();
  return error_mark_node;
}

/* A subroutine of cp_parser_postfix_expression that also gets hijacked
   by cp_parser_builtin_offsetof.  We're looking for

     postfix-expression [ expression ]
     postfix-expression [ braced-init-list ] (C++11)

   FOR_OFFSETOF is set if we're being called in that context, which
   changes how we deal with integer constant expressions.

   Returns the ARRAY_REF (or error_mark_node); the `[' and `]' are
   consumed from the token stream.  */

static tree
cp_parser_postfix_open_square_expression (cp_parser *parser,
					  tree postfix_expression,
					  bool for_offsetof)
{
  tree index;

  /* Consume the `[' token.  */
  cp_lexer_consume_token (parser->lexer);

  /* Parse the index expression.  */
  /* ??? For offsetof, there is a question of what to allow here.  If
     offsetof is not being used in an integral constant expression context,
     then we *could* get the right answer by computing the value at runtime.
     If we are in an integral constant expression context, then we might
     could accept any constant expression; hard to say without analysis.
     Rather than open the barn door too wide right away, allow only integer
     constant expressions here.
   */
  if (for_offsetof)
    index = cp_parser_constant_expression (parser, false, NULL);
  else
    {
      /* C++11: `a[{...}]' — a braced-init-list as the subscript.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	{
	  bool expr_nonconst_p;
	  maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
	  index = cp_parser_braced_list (parser, &expr_nonconst_p);
	}
      else
	index = cp_parser_expression (parser, /*cast_p=*/false, NULL);
    }

  /* Look for the closing `]'.  */
  cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);

  /* Build the ARRAY_REF.  */
  postfix_expression = grok_array_decl (postfix_expression, index);

  /* When not doing offsetof, array references are not permitted in
     constant-expressions.  */
  if (!for_offsetof
      && (cp_parser_non_integral_constant_expression (parser, NIC_ARRAY_REF)))
    postfix_expression = error_mark_node;

  return postfix_expression;
}

/* A subroutine of cp_parser_postfix_expression that also gets hijacked
   by cp_parser_builtin_offsetof.  We're looking for

     postfix-expression . template [opt] id-expression
     postfix-expression . pseudo-destructor-name
     postfix-expression -> template [opt] id-expression
     postfix-expression -> pseudo-destructor-name

   FOR_OFFSETOF is set if we're being called in that context.  That sorta
   limits what of the above we'll actually accept, but nevermind.
   TOKEN_TYPE is the "." or "->" token, which will already have been
   removed from the stream.  */

static tree
cp_parser_postfix_dot_deref_expression (cp_parser *parser,
					enum cpp_ttype token_type,
					tree postfix_expression,
					bool for_offsetof, cp_id_kind *idk,
					location_t location)
{
  tree name;
  bool dependent_p;
  bool pseudo_destructor_p;
  tree scope = NULL_TREE;

  /* If this is a `->' operator, dereference the pointer.  */
  if (token_type == CPP_DEREF)
    postfix_expression = build_x_arrow (postfix_expression);
  /* Check to see whether or not the expression is type-dependent.  */
  dependent_p = type_dependent_expression_p (postfix_expression);
  /* The identifier following the `->' or `.' is not qualified.
   */
  parser->scope = NULL_TREE;
  parser->qualifying_scope = NULL_TREE;
  parser->object_scope = NULL_TREE;
  *idk = CP_ID_KIND_NONE;

  /* Enter the scope corresponding to the type of the object
     given by the POSTFIX_EXPRESSION.  */
  if (!dependent_p && TREE_TYPE (postfix_expression) != NULL_TREE)
    {
      scope = TREE_TYPE (postfix_expression);
      /* According to the standard, no expression should ever have
	 reference type.  Unfortunately, we do not currently match
	 the standard in this respect in that our internal representation
	 of an expression may have reference type even when the standard
	 says it does not.  Therefore, we have to manually obtain the
	 underlying type here.  */
      scope = non_reference (scope);
      /* The type of the POSTFIX_EXPRESSION must be complete.  */
      if (scope == unknown_type_node)
	{
	  error_at (location, "%qE does not have class type",
		    postfix_expression);
	  scope = NULL_TREE;
	}
      /* Unlike the object expression in other contexts, *this is not
	 required to be of complete type for purposes of class member
	 access (5.2.5) outside the member function body.  */
      else if (scope != current_class_ref
	       && !(processing_template_decl && scope == current_class_type))
	scope = complete_type_or_else (scope, NULL_TREE);
      /* Let the name lookup machinery know that we are processing a
	 class member access expression.  */
      parser->context->object_type = scope;
      /* If something went wrong, we want to be able to discern that case,
	 as opposed to the case where there was no SCOPE due to the type
	 of expression being dependent.  */
      if (!scope)
	scope = error_mark_node;
      /* If the SCOPE was erroneous, make the various semantic analysis
	 functions exit quickly -- and without issuing additional error
	 messages.  */
      if (scope == error_mark_node)
	postfix_expression = error_mark_node;
    }
  /* From here on, SCOPE is the (non-reference) object type, error_mark_node
     on failure, or NULL_TREE when POSTFIX_EXPRESSION was type-dependent.  */

  /* Assume this expression is not a pseudo-destructor access.  */
  pseudo_destructor_p = false;

  /* If the SCOPE is a scalar type, then, if this is a valid program,
     we must be looking at a pseudo-destructor-name.  If POSTFIX_EXPRESSION
     is type dependent, it can be pseudo-destructor-name or something else.
     Try to parse it as pseudo-destructor-name first.  */
  if ((scope && SCALAR_TYPE_P (scope)) || dependent_p)
    {
      tree s;
      tree type;

      cp_parser_parse_tentatively (parser);
      /* Parse the pseudo-destructor-name.  */
      s = NULL_TREE;
      cp_parser_pseudo_destructor_name (parser, &s, &type);
      if (dependent_p
	  && (cp_parser_error_occurred (parser)
	      || TREE_CODE (type) != TYPE_DECL
	      || !SCALAR_TYPE_P (TREE_TYPE (type))))
	cp_parser_abort_tentative_parse (parser);
      else if (cp_parser_parse_definitely (parser))
	{
	  pseudo_destructor_p = true;
	  postfix_expression
	    = finish_pseudo_destructor_expr (postfix_expression,
					     s, TREE_TYPE (type));
	}
    }

  if (!pseudo_destructor_p)
    {
      /* If the SCOPE is not a scalar type, we are looking at an
	 ordinary class member access expression, rather than a
	 pseudo-destructor-name.  */
      bool template_p;
      cp_token *token = cp_lexer_peek_token (parser->lexer);
      /* Parse the id-expression.  */
      name = (cp_parser_id_expression
	      (parser,
	       cp_parser_optional_template_keyword (parser),
	       /*check_dependency_p=*/true,
	       &template_p,
	       /*declarator_p=*/false,
	       /*optional_p=*/false));
      /* In general, build a SCOPE_REF if the member name is qualified.
	 However, if the name was not dependent and has already been
	 resolved; there is no need to build the SCOPE_REF.  For example;

	   struct X { void f(); };
	   template <typename T> void f(T* t) { t->X::f(); }

	 Even though "t" is dependent, "X::f" is not and has been resolved
	 to a BASELINK; there is no need to include scope information.  */

      /* But we do need to remember that there was an explicit scope for
	 virtual function calls.  */
      if (parser->scope)
	*idk = CP_ID_KIND_QUALIFIED;

      /* If the name is a template-id that names a type, we will get a
	 TYPE_DECL here.  That is invalid code.
       */
      if (TREE_CODE (name) == TYPE_DECL)
	{
	  error_at (token->location, "invalid use of %qD", name);
	  postfix_expression = error_mark_node;
	}
      else
	{
	  if (name != error_mark_node && !BASELINK_P (name) && parser->scope)
	    {
	      if (TREE_CODE (parser->scope) == NAMESPACE_DECL)
		{
		  error_at (token->location, "%<%D::%D%> is not a class member",
			    parser->scope, name);
		  postfix_expression = error_mark_node;
		}
	      else
		name = build_qualified_name (/*type=*/NULL_TREE,
					     parser->scope,
					     name,
					     template_p);
	      parser->scope = NULL_TREE;
	      parser->qualifying_scope = NULL_TREE;
	      parser->object_scope = NULL_TREE;
	    }
	  if (parser->scope && name && BASELINK_P (name))
	    adjust_result_of_qualified_name_lookup (name,
						    parser->scope,
						    scope);
	  postfix_expression
	    = finish_class_member_access_expr (postfix_expression, name,
					       template_p,
					       tf_warning_or_error);
	}
    }

  /* We no longer need to look up names in the scope of the object on
     the left-hand side of the `.' or `->' operator.  */
  parser->context->object_type = NULL_TREE;

  /* Outside of offsetof, these operators may not appear in
     constant-expressions.  */
  if (!for_offsetof
      && (cp_parser_non_integral_constant_expression
	  (parser, token_type == CPP_DEREF ? NIC_ARROW : NIC_POINT)))
    postfix_expression = error_mark_node;

  return postfix_expression;
}

/* Parse a parenthesized expression-list.

   expression-list:
     assignment-expression
     expression-list, assignment-expression

   attribute-list:
     expression-list
     identifier
     identifier, expression-list

   CAST_P is true if this expression is the target of a cast.

   ALLOW_EXPANSION_P is true if this expression allows expansion of an
   argument pack.

   Returns a vector of trees.  Each element is a representation of an
   assignment-expression.  NULL is returned if the ( and or ) are
   missing.  An empty, but allocated, vector is returned on no
   expressions.  The parentheses are eaten.
   IS_ATTRIBUTE_LIST is id_attr if we are parsing an attribute list for
   an attribute that wants a plain identifier argument, normal_attr for
   an attribute that wants an expression, or non_attr if we aren't
   parsing an attribute list.  If NON_CONSTANT_P is non-NULL,
   *NON_CONSTANT_P indicates whether or not all of the expressions in
   the list were constant.  */

static VEC(tree,gc) *
cp_parser_parenthesized_expression_list (cp_parser* parser,
					 int is_attribute_list,
					 bool cast_p,
					 bool allow_expansion_p,
					 bool *non_constant_p)
{
  VEC(tree,gc) *expression_list;
  bool fold_expr_p = is_attribute_list != non_attr;
  tree identifier = NULL_TREE;
  bool saved_greater_than_is_operator_p;

  /* Assume all the expressions will be constant.  */
  if (non_constant_p)
    *non_constant_p = false;

  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return NULL;

  expression_list = make_tree_vector ();

  /* Within a parenthesized expression, a `>' token is always
     the greater-than operator.  */
  saved_greater_than_is_operator_p
    = parser->greater_than_is_operator_p;
  parser->greater_than_is_operator_p = true;

  /* Consume expressions until there are no more.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN))
    while (true)
      {
	tree expr;

	/* At the beginning of attribute lists, check to see if the
	   next token is an identifier.  */
	if (is_attribute_list == id_attr
	    && cp_lexer_peek_token (parser->lexer)->type == CPP_NAME)
	  {
	    cp_token *token;

	    /* Consume the identifier.  */
	    token = cp_lexer_consume_token (parser->lexer);
	    /* Save the identifier.  */
	    identifier = token->u.value;
	  }
	else
	  {
	    bool expr_non_constant_p;

	    /* Parse the next assignment-expression.  */
	    if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	      {
		/* A braced-init-list.
		 */
		maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
		expr = cp_parser_braced_list (parser, &expr_non_constant_p);
		if (non_constant_p && expr_non_constant_p)
		  *non_constant_p = true;
	      }
	    else if (non_constant_p)
	      {
		expr = (cp_parser_constant_expression
			(parser, /*allow_non_constant_p=*/true,
			 &expr_non_constant_p));
		if (expr_non_constant_p)
		  *non_constant_p = true;
	      }
	    else
	      expr = cp_parser_assignment_expression (parser, cast_p, NULL);

	    if (fold_expr_p)
	      expr = fold_non_dependent_expr (expr);

	    /* If we have an ellipsis, then this is an expression
	       expansion.  */
	    if (allow_expansion_p
		&& cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	      {
		/* Consume the `...'.  */
		cp_lexer_consume_token (parser->lexer);

		/* Build the argument pack.  */
		expr = make_pack_expansion (expr);
	      }

	    /* Add it to the list.  We add error_mark_node
	       expressions to the list, so that we can still tell if
	       the correct form for a parenthesized expression-list
	       is found.  That gives better errors.  */
	    VEC_safe_push (tree, gc, expression_list, expr);

	    if (expr == error_mark_node)
	      goto skip_comma;
	  }

	/* After the first item, attribute lists look the same as
	   expression lists.  */
	is_attribute_list = non_attr;

      get_comma:;
	/* Error-recovery re-entry point: resume here after skipping to
	   an unnested comma.  */
	/* If the next token isn't a `,', then we are done.  */
	if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	  break;

	/* Otherwise, consume the `,' and keep going.  */
	cp_lexer_consume_token (parser->lexer);
      }

  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    {
      int ending;

    skip_comma:;
      /* We try and resync to an unnested comma, as that will give the
	 user better diagnostics.
       */
      ending = cp_parser_skip_to_closing_parenthesis (parser,
						      /*recovering=*/true,
						      /*or_comma=*/true,
						      /*consume_paren=*/true);
      if (ending < 0)
	goto get_comma;
      if (!ending)
	{
	  parser->greater_than_is_operator_p
	    = saved_greater_than_is_operator_p;
	  return NULL;
	}
    }

  parser->greater_than_is_operator_p
    = saved_greater_than_is_operator_p;

  if (identifier)
    VEC_safe_insert (tree, gc, expression_list, 0, identifier);

  return expression_list;
}

/* Parse a pseudo-destructor-name.

   pseudo-destructor-name:
     :: [opt] nested-name-specifier [opt] type-name :: ~ type-name
     :: [opt] nested-name-specifier template template-id :: ~ type-name
     :: [opt] nested-name-specifier [opt] ~ type-name

   If either of the first two productions is used, sets *SCOPE to the
   TYPE specified before the final `::'.  Otherwise, *SCOPE is set to
   NULL_TREE.  *TYPE is set to the TYPE_DECL for the final type-name,
   or ERROR_MARK_NODE if the parse fails.  */

static void
cp_parser_pseudo_destructor_name (cp_parser* parser,
				  tree* scope,
				  tree* type)
{
  bool nested_name_specifier_p;

  /* Assume that things will not work out.  */
  *type = error_mark_node;

  /* Look for the optional `::' operator.  */
  cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/true);
  /* Look for the optional nested-name-specifier.  */
  nested_name_specifier_p
    = (cp_parser_nested_name_specifier_opt (parser,
					    /*typename_keyword_p=*/false,
					    /*check_dependency_p=*/true,
					    /*type_p=*/false,
					    /*is_declaration=*/false)
       != NULL_TREE);
  /* Now, if we saw a nested-name-specifier, we might be doing the
     second production.  */
  if (nested_name_specifier_p
      && cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE))
    {
      /* Consume the `template' keyword.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the template-id.  */
      cp_parser_template_id (parser,
			     /*template_keyword_p=*/true,
			     /*check_dependency_p=*/false,
			     /*is_declaration=*/true);
      /* Look for the `::' token.
*/ cp_parser_require (parser, CPP_SCOPE, RT_SCOPE); } /* If the next token is not a `~', then there might be some additional qualification. */ else if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMPL)) { /* At this point, we're looking for "type-name :: ~". The type-name must not be a class-name, since this is a pseudo-destructor. So, it must be either an enum-name, or a typedef-name -- both of which are just identifiers. So, we peek ahead to check that the "::" and "~" tokens are present; if they are not, then we can avoid calling type_name. */ if (cp_lexer_peek_token (parser->lexer)->type != CPP_NAME || cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_SCOPE || cp_lexer_peek_nth_token (parser->lexer, 3)->type != CPP_COMPL) { cp_parser_error (parser, "non-scalar type"); return; } /* Look for the type-name. */ *scope = TREE_TYPE (cp_parser_nonclass_name (parser)); if (*scope == error_mark_node) return; /* Look for the `::' token. */ cp_parser_require (parser, CPP_SCOPE, RT_SCOPE); } else *scope = NULL_TREE; /* Look for the `~'. */ cp_parser_require (parser, CPP_COMPL, RT_COMPL); /* Once we see the ~, this has to be a pseudo-destructor. */ if (!processing_template_decl && !cp_parser_error_occurred (parser)) cp_parser_commit_to_tentative_parse (parser); /* Look for the type-name again. We are not responsible for checking that it matches the first type-name. */ *type = cp_parser_nonclass_name (parser); } /* Parse a unary-expression. unary-expression: postfix-expression ++ cast-expression -- cast-expression unary-operator cast-expression sizeof unary-expression sizeof ( type-id ) alignof ( type-id ) [C++0x] new-expression delete-expression GNU Extensions: unary-expression: __extension__ cast-expression __alignof__ unary-expression __alignof__ ( type-id ) alignof unary-expression [C++0x] __real__ cast-expression __imag__ cast-expression && identifier ADDRESS_P is true iff the unary-expression is appearing as the operand of the `&' operator. 
   CAST_P is true if this expression is the target of a cast.

   Returns a representation of the expression.  */

static tree
cp_parser_unary_expression (cp_parser *parser, bool address_p, bool cast_p,
			    cp_id_kind * pidk)
{
  cp_token *token;
  enum tree_code unary_operator;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Some keywords give away the kind of expression.  */
  if (token->type == CPP_KEYWORD)
    {
      enum rid keyword = token->keyword;

      switch (keyword)
	{
	case RID_ALIGNOF:
	case RID_SIZEOF:
	  {
	    tree operand;
	    enum tree_code op;

	    op = keyword == RID_ALIGNOF ? ALIGNOF_EXPR : SIZEOF_EXPR;
	    /* Consume the token.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Parse the operand.  */
	    operand = cp_parser_sizeof_operand (parser, keyword);

	    if (TYPE_P (operand))
	      return cxx_sizeof_or_alignof_type (operand, op, true);
	    else
	      {
		/* ISO C++ defines alignof only with types, not with
		   expressions. So pedwarn if alignof is used with a non-
		   type expression. However, __alignof__ is ok.  */
		if (!strcmp (IDENTIFIER_POINTER (token->u.value), "alignof"))
		  pedwarn (token->location, OPT_pedantic,
			   "ISO C++ does not allow %<alignof%> "
			   "with a non-type");

		return cxx_sizeof_or_alignof_expr (operand, op, true);
	      }
	  }

	case RID_NEW:
	  return cp_parser_new_expression (parser);

	case RID_DELETE:
	  return cp_parser_delete_expression (parser);

	case RID_EXTENSION:
	  {
	    /* The saved value of the PEDANTIC flag.  */
	    int saved_pedantic;
	    tree expr;

	    /* Save away the PEDANTIC flag.  */
	    cp_parser_extension_opt (parser, &saved_pedantic);
	    /* Parse the cast-expression.  */
	    expr = cp_parser_simple_cast_expression (parser);
	    /* Restore the PEDANTIC flag.  */
	    pedantic = saved_pedantic;

	    return expr;
	  }

	case RID_REALPART:
	case RID_IMAGPART:
	  {
	    tree expression;

	    /* Consume the `__real__' or `__imag__' token.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Parse the cast-expression.  */
	    expression = cp_parser_simple_cast_expression (parser);
	    /* Create the complete representation.  */
	    return build_x_unary_op ((keyword == RID_REALPART
				      ? REALPART_EXPR : IMAGPART_EXPR),
				     expression,
				     tf_warning_or_error);
	  }
	  break;

	case RID_TRANSACTION_ATOMIC:
	case RID_TRANSACTION_RELAXED:
	  return cp_parser_transaction_expression (parser, keyword);

	case RID_NOEXCEPT:
	  {
	    tree expr;
	    const char *saved_message;
	    bool saved_integral_constant_expression_p;
	    bool saved_non_integral_constant_expression_p;
	    bool saved_greater_than_is_operator_p;

	    cp_lexer_consume_token (parser->lexer);
	    cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);

	    saved_message = parser->type_definition_forbidden_message;
	    parser->type_definition_forbidden_message
	      = G_("types may not be defined in %<noexcept%> expressions");

	    saved_integral_constant_expression_p
	      = parser->integral_constant_expression_p;
	    saved_non_integral_constant_expression_p
	      = parser->non_integral_constant_expression_p;
	    parser->integral_constant_expression_p = false;

	    saved_greater_than_is_operator_p
	      = parser->greater_than_is_operator_p;
	    parser->greater_than_is_operator_p = true;

	    /* The operand of noexcept is an unevaluated operand; suppress
	       evaluation-only warnings while parsing it.  */
	    ++cp_unevaluated_operand;
	    ++c_inhibit_evaluation_warnings;
	    expr = cp_parser_expression (parser, false, NULL);
	    --c_inhibit_evaluation_warnings;
	    --cp_unevaluated_operand;

	    parser->greater_than_is_operator_p
	      = saved_greater_than_is_operator_p;

	    parser->integral_constant_expression_p
	      = saved_integral_constant_expression_p;
	    parser->non_integral_constant_expression_p
	      = saved_non_integral_constant_expression_p;

	    parser->type_definition_forbidden_message = saved_message;

	    cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	    return finish_noexcept_expr (expr, tf_warning_or_error);
	  }

	default:
	  break;
	}
    }

  /* Look for the `:: new' and `:: delete', which also signal the
     beginning of a new-expression, or delete-expression,
     respectively.  If the next token is `::', then it might be one of
     these.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
    {
      enum rid keyword;

      /* See if the token after the `::' is one of the keywords in
	 which we're interested.
       */
      keyword = cp_lexer_peek_nth_token (parser->lexer, 2)->keyword;
      /* If it's `new', we have a new-expression.  */
      if (keyword == RID_NEW)
	return cp_parser_new_expression (parser);
      /* Similarly, for `delete'.  */
      else if (keyword == RID_DELETE)
	return cp_parser_delete_expression (parser);
    }

  /* Look for a unary operator.  */
  unary_operator = cp_parser_unary_operator (token);
  /* The `++' and `--' operators can be handled similarly, even though
     they are not technically unary-operators in the grammar.  */
  if (unary_operator == ERROR_MARK)
    {
      if (token->type == CPP_PLUS_PLUS)
	unary_operator = PREINCREMENT_EXPR;
      else if (token->type == CPP_MINUS_MINUS)
	unary_operator = PREDECREMENT_EXPR;
      /* Handle the GNU address-of-label extension.  */
      else if (cp_parser_allow_gnu_extensions_p (parser)
	       && token->type == CPP_AND_AND)
	{
	  tree identifier;
	  tree expression;
	  location_t loc = cp_lexer_peek_token (parser->lexer)->location;

	  /* Consume the '&&' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Look for the identifier.  */
	  identifier = cp_parser_identifier (parser);
	  /* Create an expression representing the address.  */
	  expression = finish_label_address_expr (identifier, loc);
	  if (cp_parser_non_integral_constant_expression (parser,
							  NIC_ADDR_LABEL))
	    expression = error_mark_node;
	  return expression;
	}
    }
  if (unary_operator != ERROR_MARK)
    {
      tree cast_expression;
      tree expression = error_mark_node;
      non_integral_constant non_constant_p = NIC_NONE;

      /* Consume the operator token.  */
      token = cp_lexer_consume_token (parser->lexer);
      /* Parse the cast-expression.  */
      cast_expression
	= cp_parser_cast_expression (parser,
				     unary_operator == ADDR_EXPR,
				     /*cast_p=*/false, pidk);
      /* Now, build an appropriate representation.  */
      switch (unary_operator)
	{
	case INDIRECT_REF:
	  non_constant_p = NIC_STAR;
	  expression = build_x_indirect_ref (cast_expression, RO_UNARY_STAR,
					     tf_warning_or_error);
	  break;

	case ADDR_EXPR:
	  non_constant_p = NIC_ADDR;
	  /* Fall through.
	   */
	case BIT_NOT_EXPR:
	  expression = build_x_unary_op (unary_operator, cast_expression,
					 tf_warning_or_error);
	  break;

	case PREINCREMENT_EXPR:
	case PREDECREMENT_EXPR:
	  non_constant_p = unary_operator == PREINCREMENT_EXPR
			   ? NIC_PREINCREMENT : NIC_PREDECREMENT;
	  /* Fall through.  */
	case UNARY_PLUS_EXPR:
	case NEGATE_EXPR:
	case TRUTH_NOT_EXPR:
	  expression = finish_unary_op_expr (unary_operator, cast_expression);
	  break;

	default:
	  gcc_unreachable ();
	}

      if (non_constant_p != NIC_NONE
	  && cp_parser_non_integral_constant_expression (parser,
							 non_constant_p))
	expression = error_mark_node;

      return expression;
    }

  return cp_parser_postfix_expression (parser, address_p, cast_p,
				       /*member_access_only_p=*/false,
				       pidk);
}

/* Returns ERROR_MARK if TOKEN is not a unary-operator.  If TOKEN is a
   unary-operator, the corresponding tree code is returned.  */

static enum tree_code
cp_parser_unary_operator (cp_token* token)
{
  switch (token->type)
    {
    case CPP_MULT:
      return INDIRECT_REF;

    case CPP_AND:
      return ADDR_EXPR;

    case CPP_PLUS:
      return UNARY_PLUS_EXPR;

    case CPP_MINUS:
      return NEGATE_EXPR;

    case CPP_NOT:
      return TRUTH_NOT_EXPR;

    case CPP_COMPL:
      return BIT_NOT_EXPR;

    default:
      return ERROR_MARK;
    }
}

/* Parse a new-expression.

   new-expression:
     :: [opt] new new-placement [opt] new-type-id new-initializer [opt]
     :: [opt] new new-placement [opt] ( type-id ) new-initializer [opt]

   Returns a representation of the expression.  */

static tree
cp_parser_new_expression (cp_parser* parser)
{
  bool global_scope_p;
  VEC(tree,gc) *placement;
  tree type;
  VEC(tree,gc) *initializer;
  tree nelts;
  tree ret;

  /* Look for the optional `::' operator.  */
  global_scope_p
    = (cp_parser_global_scope_opt (parser,
				   /*current_scope_valid_p=*/false)
       != NULL_TREE);
  /* Look for the `new' operator.  */
  cp_parser_require_keyword (parser, RID_NEW, RT_NEW);
  /* There's no easy way to tell a new-placement from the
     `( type-id )' construct.  */
  cp_parser_parse_tentatively (parser);
  /* Look for a new-placement.
   */
  placement = cp_parser_new_placement (parser);
  /* If that didn't work out, there's no new-placement.  */
  if (!cp_parser_parse_definitely (parser))
    {
      if (placement != NULL)
	release_tree_vector (placement);
      placement = NULL;
    }

  /* If the next token is a `(', then we have a parenthesized
     type-id.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      cp_token *token;
      const char *saved_message = parser->type_definition_forbidden_message;

      /* Consume the `('.  */
      cp_lexer_consume_token (parser->lexer);

      /* Parse the type-id.  */
      parser->type_definition_forbidden_message
	= G_("types may not be defined in a new-expression");
      type = cp_parser_type_id (parser);
      parser->type_definition_forbidden_message = saved_message;

      /* Look for the closing `)'.  */
      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      token = cp_lexer_peek_token (parser->lexer);
      /* There should not be a direct-new-declarator in this production,
	 but GCC used to allowed this, so we check and emit a sensible error
	 message for this case.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
	{
	  error_at (token->location,
		    "array bound forbidden after parenthesized type-id");
	  inform (token->location,
		  "try removing the parentheses around the type-id");
	  cp_parser_direct_new_declarator (parser);
	}
      nelts = NULL_TREE;
    }
  /* Otherwise, there must be a new-type-id.  */
  else
    type = cp_parser_new_type_id (parser, &nelts);

  /* If the next token is a `(' or '{', then we have a new-initializer.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)
      || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    initializer = cp_parser_new_initializer (parser);
  else
    initializer = NULL;

  /* A new-expression may not appear in an integral constant
     expression.  */
  if (cp_parser_non_integral_constant_expression (parser, NIC_NEW))
    ret = error_mark_node;
  else
    {
      /* Create a representation of the new-expression.
       */
      ret = build_new (&placement, type, nelts, &initializer, global_scope_p,
		       tf_warning_or_error);
    }

  if (placement != NULL)
    release_tree_vector (placement);
  if (initializer != NULL)
    release_tree_vector (initializer);

  return ret;
}

/* Parse a new-placement.

   new-placement:
     ( expression-list )

   Returns the same representation as for an expression-list.  */

static VEC(tree,gc) *
cp_parser_new_placement (cp_parser* parser)
{
  VEC(tree,gc) *expression_list;

  /* Parse the expression-list.  */
  expression_list = (cp_parser_parenthesized_expression_list
		     (parser, non_attr, /*cast_p=*/false,
		      /*allow_expansion_p=*/true,
		      /*non_constant_p=*/NULL));

  return expression_list;
}

/* Parse a new-type-id.

   new-type-id:
     type-specifier-seq new-declarator [opt]

   Returns the TYPE allocated.  If the new-type-id indicates an array
   type, *NELTS is set to the number of elements in the last array
   bound; the TYPE will not include the last array bound.  */

static tree
cp_parser_new_type_id (cp_parser* parser, tree *nelts)
{
  cp_decl_specifier_seq type_specifier_seq;
  cp_declarator *new_declarator;
  cp_declarator *declarator;
  cp_declarator *outer_declarator;
  const char *saved_message;
  tree type;

  /* The type-specifier sequence must not contain type definitions.
     (It cannot contain declarations of new types either, but if they
     are not definitions we will catch that because they are not
     complete.)  */
  saved_message = parser->type_definition_forbidden_message;
  parser->type_definition_forbidden_message
    = G_("types may not be defined in a new-type-id");
  /* Parse the type-specifier-seq.  */
  cp_parser_type_specifier_seq (parser, /*is_declaration=*/false,
				/*is_trailing_return=*/false,
				&type_specifier_seq);
  /* Restore the old message.  */
  parser->type_definition_forbidden_message = saved_message;
  /* Parse the new-declarator.  */
  new_declarator = cp_parser_new_declarator_opt (parser);

  /* Determine the number of elements in the last array dimension, if
     any.  */
  *nelts = NULL_TREE;
  /* Skip down to the last array dimension.
   */
  declarator = new_declarator;
  outer_declarator = NULL;
  while (declarator && (declarator->kind == cdk_pointer
			|| declarator->kind == cdk_ptrmem))
    {
      outer_declarator = declarator;
      declarator = declarator->declarator;
    }
  while (declarator
	 && declarator->kind == cdk_array
	 && declarator->declarator
	 && declarator->declarator->kind == cdk_array)
    {
      outer_declarator = declarator;
      declarator = declarator->declarator;
    }

  if (declarator && declarator->kind == cdk_array)
    {
      *nelts = declarator->u.array.bounds;
      /* An erroneous bound is replaced by 1 so parsing can continue.  */
      if (*nelts == error_mark_node)
	*nelts = integer_one_node;

      /* Detach the last array bound from the declarator chain; callers
	 receive it separately via *NELTS.  */
      if (outer_declarator)
	outer_declarator->declarator = declarator->declarator;
      else
	new_declarator = NULL;
    }

  type = groktypename (&type_specifier_seq, new_declarator, false);
  return type;
}

/* Parse an (optional) new-declarator.

   new-declarator:
     ptr-operator new-declarator [opt]
     direct-new-declarator

   Returns the declarator.  */

static cp_declarator *
cp_parser_new_declarator_opt (cp_parser* parser)
{
  enum tree_code code;
  tree type;
  cp_cv_quals cv_quals;

  /* We don't know if there's a ptr-operator next, or not.  */
  cp_parser_parse_tentatively (parser);
  /* Look for a ptr-operator.  */
  code = cp_parser_ptr_operator (parser, &type, &cv_quals);
  /* If that worked, look for more new-declarators.  */
  if (cp_parser_parse_definitely (parser))
    {
      cp_declarator *declarator;

      /* Parse another optional declarator.  */
      declarator = cp_parser_new_declarator_opt (parser);

      return cp_parser_make_indirect_declarator (code, type, cv_quals,
						 declarator);
    }

  /* If the next token is a `[', there is a direct-new-declarator.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
    return cp_parser_direct_new_declarator (parser);

  return NULL;
}

/* Parse a direct-new-declarator.

   direct-new-declarator:
     [ expression ]
     direct-new-declarator [constant-expression]

 */

static cp_declarator *
cp_parser_direct_new_declarator (cp_parser* parser)
{
  cp_declarator *declarator = NULL;

  while (true)
    {
      tree expression;

      /* Look for the opening `['.
*/ cp_parser_require (parser, CPP_OPEN_SQUARE, RT_OPEN_SQUARE); /* The first expression is not required to be constant. */ if (!declarator) { cp_token *token = cp_lexer_peek_token (parser->lexer); expression = cp_parser_expression (parser, /*cast_p=*/false, NULL); /* The standard requires that the expression have integral type. DR 74 adds enumeration types. We believe that the real intent is that these expressions be handled like the expression in a `switch' condition, which also allows classes with a single conversion to integral or enumeration type. */ if (!processing_template_decl) { expression = build_expr_type_conversion (WANT_INT | WANT_ENUM, expression, /*complain=*/true); if (!expression) { error_at (token->location, "expression in new-declarator must have integral " "or enumeration type"); expression = error_mark_node; } } } /* But all the other expressions must be. */ else expression = cp_parser_constant_expression (parser, /*allow_non_constant=*/false, NULL); /* Look for the closing `]'. */ cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE); /* Add this bound to the declarator. */ declarator = make_array_declarator (declarator, expression); /* If the next token is not a `[', then there are no more bounds. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_SQUARE)) break; } return declarator; } /* Parse a new-initializer. new-initializer: ( expression-list [opt] ) braced-init-list Returns a representation of the expression-list. 
*/ static VEC(tree,gc) * cp_parser_new_initializer (cp_parser* parser) { VEC(tree,gc) *expression_list; if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) { tree t; bool expr_non_constant_p; maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS); t = cp_parser_braced_list (parser, &expr_non_constant_p); CONSTRUCTOR_IS_DIRECT_INIT (t) = 1; expression_list = make_tree_vector_single (t); } else expression_list = (cp_parser_parenthesized_expression_list (parser, non_attr, /*cast_p=*/false, /*allow_expansion_p=*/true, /*non_constant_p=*/NULL)); return expression_list; } /* Parse a delete-expression. delete-expression: :: [opt] delete cast-expression :: [opt] delete [ ] cast-expression Returns a representation of the expression. */ static tree cp_parser_delete_expression (cp_parser* parser) { bool global_scope_p; bool array_p; tree expression; /* Look for the optional `::' operator. */ global_scope_p = (cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false) != NULL_TREE); /* Look for the `delete' keyword. */ cp_parser_require_keyword (parser, RID_DELETE, RT_DELETE); /* See if the array syntax is in use. */ if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE)) { /* Consume the `[' token. */ cp_lexer_consume_token (parser->lexer); /* Look for the `]' token. */ cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE); /* Remember that this is the `[]' construct. */ array_p = true; } else array_p = false; /* Parse the cast-expression. */ expression = cp_parser_simple_cast_expression (parser); /* A delete-expression may not appear in an integral constant expression. */ if (cp_parser_non_integral_constant_expression (parser, NIC_DEL)) return error_mark_node; return delete_sanity (expression, NULL_TREE, array_p, global_scope_p, tf_warning_or_error); } /* Returns true if TOKEN may start a cast-expression and false otherwise. 
*/

static bool
cp_parser_token_starts_cast_expression (cp_token *token)
{
  /* A token that can only continue (or terminate) an expression rules
     out the "( type-id ) cast-expression" interpretation; everything
     else is assumed to be able to begin a cast-expression.  */
  switch (token->type)
    {
    case CPP_COMMA:
    case CPP_SEMICOLON:
    case CPP_QUERY:
    case CPP_COLON:
    case CPP_CLOSE_SQUARE:
    case CPP_CLOSE_PAREN:
    case CPP_CLOSE_BRACE:
    case CPP_DOT:
    case CPP_DOT_STAR:
    case CPP_DEREF:
    case CPP_DEREF_STAR:
    case CPP_DIV:
    case CPP_MOD:
    case CPP_LSHIFT:
    case CPP_RSHIFT:
    case CPP_LESS:
    case CPP_GREATER:
    case CPP_LESS_EQ:
    case CPP_GREATER_EQ:
    case CPP_EQ_EQ:
    case CPP_NOT_EQ:
    case CPP_EQ:
    case CPP_MULT_EQ:
    case CPP_DIV_EQ:
    case CPP_MOD_EQ:
    case CPP_PLUS_EQ:
    case CPP_MINUS_EQ:
    case CPP_RSHIFT_EQ:
    case CPP_LSHIFT_EQ:
    case CPP_AND_EQ:
    case CPP_XOR_EQ:
    case CPP_OR_EQ:
    case CPP_XOR:
    case CPP_OR:
    case CPP_OR_OR:
    case CPP_EOF:
      return false;

      /* '[' may start a primary-expression in obj-c++.  */
    case CPP_OPEN_SQUARE:
      return c_dialect_objc ();

    default:
      return true;
    }
}

/* Parse a cast-expression.

   cast-expression:
     unary-expression
     ( type-id ) cast-expression

   ADDRESS_P is true iff the unary-expression is appearing as the
   operand of the `&' operator.   CAST_P is true if this expression is
   the target of a cast.

   Returns a representation of the expression.  */

static tree
cp_parser_cast_expression (cp_parser *parser, bool address_p, bool cast_p,
			   cp_id_kind * pidk)
{
  /* If it's a `(', then we might be looking at a cast.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      tree type = NULL_TREE;
      tree expr = NULL_TREE;
      bool compound_literal_p;
      const char *saved_message;

      /* There's no way to know yet whether or not this is a cast.
	 For example, `(int (3))' is a unary-expression, while `(int)
	 3' is a cast.  So, we resort to parsing tentatively.  */
      cp_parser_parse_tentatively (parser);
      /* Types may not be defined in a cast.  */
      saved_message = parser->type_definition_forbidden_message;
      parser->type_definition_forbidden_message
	= G_("types may not be defined in casts");
      /* Consume the `('.  */
      cp_lexer_consume_token (parser->lexer);
      /* A very tricky bit is that `(struct S) { 3 }' is a
	 compound-literal (which we permit in C++ as an extension).
	 But, that construct is not a cast-expression -- it is a
	 postfix-expression.  (The reason is that `(struct S) { 3 }.i'
	 is legal; if the compound-literal were a cast-expression,
	 you'd need an extra set of parentheses.)  But, if we parse
	 the type-id, and it happens to be a class-specifier, then we
	 will commit to the parse at that point, because we cannot
	 undo the action that is done when creating a new class.  So,
	 then we cannot back up and do a postfix-expression.

	 Therefore, we scan ahead to the closing `)', and check to see
	 if the token after the `)' is a `{'.  If so, we are not
	 looking at a cast-expression.

	 Save tokens so that we can put them back.  */
      cp_lexer_save_tokens (parser->lexer);
      /* Skip tokens until the next token is a closing parenthesis.
	 If we find the closing `)', and the next token is a `{', then
	 we are looking at a compound-literal.  */
      compound_literal_p
	= (cp_parser_skip_to_closing_parenthesis (parser, false, false,
						  /*consume_paren=*/true)
	   && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE));
      /* Roll back the tokens we skipped.  */
      cp_lexer_rollback_tokens (parser->lexer);
      /* If we were looking at a compound-literal, simulate an error
	 so that the call to cp_parser_parse_definitely below will
	 fail.  */
      if (compound_literal_p)
	cp_parser_simulate_error (parser);
      else
	{
	  bool saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
	  parser->in_type_id_in_expr_p = true;
	  /* Look for the type-id.  */
	  type = cp_parser_type_id (parser);
	  /* Look for the closing `)'.  */
	  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	  parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
	}

      /* Restore the saved message.  */
      parser->type_definition_forbidden_message = saved_message;

      /* At this point this can only be either a cast or a
	 parenthesized ctor such as `(T ())' that looks like a cast to
	 function returning T.  */
      if (!cp_parser_error_occurred (parser)
	  && cp_parser_token_starts_cast_expression (cp_lexer_peek_token
						     (parser->lexer)))
	{
	  /* Commit to the cast interpretation.  */
	  cp_parser_parse_definitely (parser);
	  expr = cp_parser_cast_expression (parser,
					    /*address_p=*/false,
					    /*cast_p=*/true,
					    pidk);

	  /* Warn about old-style casts, if so requested.  */
	  if (warn_old_style_cast
	      && !in_system_header
	      && !VOID_TYPE_P (type)
	      && current_lang_name != lang_name_c)
	    warning (OPT_Wold_style_cast, "use of old-style cast");

	  /* Only type conversions to integral or enumeration types
	     can be used in constant-expressions.  */
	  if (!cast_valid_in_integral_constant_expression_p (type)
	      && cp_parser_non_integral_constant_expression (parser,
							     NIC_CAST))
	    return error_mark_node;

	  /* Perform the cast.  */
	  expr = build_c_cast (input_location, type, expr);
	  return expr;
	}
      else
	cp_parser_abort_tentative_parse (parser);
    }

  /* If we get here, then it's not a cast, so it must be a
     unary-expression.  */
  return cp_parser_unary_expression (parser, address_p, cast_p, pidk);
}

/* Parse a binary expression of the general form:

   pm-expression:
     cast-expression
     pm-expression .* cast-expression
     pm-expression ->* cast-expression

   multiplicative-expression:
     pm-expression
     multiplicative-expression * pm-expression
     multiplicative-expression / pm-expression
     multiplicative-expression % pm-expression

   additive-expression:
     multiplicative-expression
     additive-expression + multiplicative-expression
     additive-expression - multiplicative-expression

   shift-expression:
     additive-expression
     shift-expression << additive-expression
     shift-expression >> additive-expression

   relational-expression:
     shift-expression
     relational-expression < shift-expression
     relational-expression > shift-expression
     relational-expression <= shift-expression
     relational-expression >= shift-expression

  GNU Extension:

   relational-expression:
     relational-expression <? shift-expression
     relational-expression >? shift-expression

   equality-expression:
     relational-expression
     equality-expression == relational-expression
     equality-expression != relational-expression

   and-expression:
     equality-expression
     and-expression & equality-expression

   exclusive-or-expression:
     and-expression
     exclusive-or-expression ^ and-expression

   inclusive-or-expression:
     exclusive-or-expression
     inclusive-or-expression | exclusive-or-expression

   logical-and-expression:
     inclusive-or-expression
     logical-and-expression && inclusive-or-expression

   logical-or-expression:
     logical-and-expression
     logical-or-expression || logical-and-expression

   All these are implemented with a single function like:

   binary-expression:
     simple-cast-expression
     binary-expression <token> binary-expression

   CAST_P is true if this expression is the target of a cast.

   The binops_by_token map is used to get the tree codes for each <token> type.
   binary-expressions are associated according to a precedence table.
*/

#define TOKEN_PRECEDENCE(token)				     \
(((token->type == CPP_GREATER				     \
   || ((cxx_dialect != cxx98) && token->type == CPP_RSHIFT)) \
  && !parser->greater_than_is_operator_p)		     \
 ? PREC_NOT_OPERATOR					     \
 : binops_by_token[token->type].prec)

static tree
cp_parser_binary_expression (cp_parser* parser, bool cast_p,
			     bool no_toplevel_fold_p,
			     enum cp_parser_prec prec,
			     cp_id_kind * pidk)
{
  /* Operator-precedence parser: pending (lhs, operator, precedence)
     triples are kept on an explicit stack rather than in the C call
     stack.  */
  cp_parser_expression_stack stack;
  cp_parser_expression_stack_entry *sp = &stack[0];
  tree lhs, rhs;
  cp_token *token;
  enum tree_code tree_type, lhs_type, rhs_type;
  enum cp_parser_prec new_prec, lookahead_prec;
  tree overload;

  /* Parse the first expression.  */
  lhs = cp_parser_cast_expression (parser, /*address_p=*/false, cast_p, pidk);
  lhs_type = ERROR_MARK;

  if (cp_parser_error_occurred (parser))
    return error_mark_node;

  for (;;)
    {
      /* Get an operator token.  */
      token = cp_lexer_peek_token (parser->lexer);

      if (warn_cxx0x_compat
          && token->type == CPP_RSHIFT
          && !parser->greater_than_is_operator_p)
        {
          if (warning_at (token->location, OPT_Wc__0x_compat,
			  "%<>>%> operator is treated"
			  " as two right angle brackets in C++11"))
	    inform (token->location,
		    "suggest parentheses around %<>>%> expression");
        }

      new_prec = TOKEN_PRECEDENCE (token);

      /* Popping an entry off the stack means we completed a subexpression:
	 - either we found a token which is not an operator (`>' where it is not
	   an operator, or prec == PREC_NOT_OPERATOR), in which case popping
	   will happen repeatedly;
	 - or, we found an operator which has lower priority.  This is the case
	   where the recursive descent *ascends*, as in `3 * 4 + 5' after
	   parsing `3 * 4'.  */
      if (new_prec <= prec)
	{
	  if (sp == stack)
	    break;
	  else
	    goto pop;
	}

     get_rhs:
      tree_type = binops_by_token[token->type].tree_type;

      /* We used the operator token.  */
      cp_lexer_consume_token (parser->lexer);

      /* For "false && x" or "true || x", x will never be executed;
	 disable warnings while evaluating it.  */
      if (tree_type == TRUTH_ANDIF_EXPR)
	c_inhibit_evaluation_warnings += lhs == truthvalue_false_node;
      else if (tree_type == TRUTH_ORIF_EXPR)
	c_inhibit_evaluation_warnings += lhs == truthvalue_true_node;

      /* Extract another operand.  It may be the RHS of this
	 expression or the LHS of a new, higher priority expression.  */
      rhs = cp_parser_simple_cast_expression (parser);
      rhs_type = ERROR_MARK;

      /* Get another operator token.  Look up its precedence to avoid
	 building a useless (immediately popped) stack entry for common
	 cases such as 3 + 4 + 5 or 3 * 4 + 5.  */
      token = cp_lexer_peek_token (parser->lexer);
      lookahead_prec = TOKEN_PRECEDENCE (token);
      if (lookahead_prec > new_prec)
	{
	  /* ... and prepare to parse the RHS of the new, higher priority
	     expression.  Since precedence levels on the stack are
	     monotonically increasing, we do not have to care about
	     stack overflows.  */
	  sp->prec = prec;
	  sp->tree_type = tree_type;
	  sp->lhs = lhs;
	  sp->lhs_type = lhs_type;
	  sp++;
	  lhs = rhs;
	  lhs_type = rhs_type;
	  prec = new_prec;
	  new_prec = lookahead_prec;
	  goto get_rhs;

	 pop:
	  lookahead_prec = new_prec;
	  /* If the stack is not empty, we have parsed into LHS the right side
	     (`4' in the example above) of an expression we had suspended.
	     We can use the information on the stack to recover the LHS (`3')
	     from the stack together with the tree code (`MULT_EXPR'), and
	     the precedence of the higher level subexpression
	     (`PREC_ADDITIVE_EXPRESSION').  TOKEN is the CPP_PLUS token,
	     which will be used to actually build the additive expression.  */
	  --sp;
	  prec = sp->prec;
	  tree_type = sp->tree_type;
	  rhs = lhs;
	  rhs_type = lhs_type;
	  lhs = sp->lhs;
	  lhs_type = sp->lhs_type;
	}

      /* Undo the disabling of warnings done above.  */
      if (tree_type == TRUTH_ANDIF_EXPR)
	c_inhibit_evaluation_warnings -= lhs == truthvalue_false_node;
      else if (tree_type == TRUTH_ORIF_EXPR)
	c_inhibit_evaluation_warnings -= lhs == truthvalue_true_node;

      overload = NULL;
      /* ??? Currently we pass lhs_type == ERROR_MARK and rhs_type ==
	 ERROR_MARK for everything that is not a binary expression.
	 This makes warn_about_parentheses miss some warnings that
	 involve unary operators.  For unary expressions we should
	 pass the correct tree_code unless the unary expression was
	 surrounded by parentheses.
      */
      if (no_toplevel_fold_p
	  && lookahead_prec <= prec
	  && sp == stack
	  && TREE_CODE_CLASS (tree_type) == tcc_comparison)
	lhs = build2 (tree_type, boolean_type_node, lhs, rhs);
      else
	lhs = build_x_binary_op (tree_type, lhs, lhs_type, rhs, rhs_type,
				 &overload, tf_warning_or_error);
      lhs_type = tree_type;

      /* If the binary operator required the use of an overloaded operator,
	 then this expression cannot be an integral constant-expression.
	 An overloaded operator can be used even if both operands are
	 otherwise permissible in an integral constant-expression if at
	 least one of the operands is of enumeration type.  */

      if (overload
	  && cp_parser_non_integral_constant_expression (parser,
							 NIC_OVERLOADED))
	return error_mark_node;
    }

  return lhs;
}

/* Parse the `? expression : assignment-expression' part of a
   conditional-expression.  The LOGICAL_OR_EXPR is the
   logical-or-expression that started the conditional-expression.
   Returns a representation of the entire conditional-expression.

   This routine is used by cp_parser_assignment_expression.

     ? expression : assignment-expression

   GNU Extensions:

     ? : assignment-expression */

static tree
cp_parser_question_colon_clause (cp_parser* parser, tree logical_or_expr)
{
  tree expr;
  tree assignment_expr;
  struct cp_token *token;

  /* Consume the `?' token.  */
  cp_lexer_consume_token (parser->lexer);
  token = cp_lexer_peek_token (parser->lexer);
  if (cp_parser_allow_gnu_extensions_p (parser)
      && token->type == CPP_COLON)
    {
      pedwarn (token->location, OPT_pedantic,
	       "ISO C++ does not allow ?: with omitted middle operand");
      /* Implicit true clause.  */
      expr = NULL_TREE;
      c_inhibit_evaluation_warnings +=
	logical_or_expr == truthvalue_true_node;
      warn_for_omitted_condop (token->location, logical_or_expr);
    }
  else
    {
      /* A `:' here always introduces the false branch, never a
	 label or scope; suppress the typo correction.  */
      bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;
      parser->colon_corrects_to_scope_p = false;
      /* Parse the expression.  */
      c_inhibit_evaluation_warnings +=
	logical_or_expr == truthvalue_false_node;
      expr = cp_parser_expression (parser, /*cast_p=*/false, NULL);
      c_inhibit_evaluation_warnings +=
	((logical_or_expr == truthvalue_true_node)
	 - (logical_or_expr == truthvalue_false_node));
      parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
    }

  /* The next token should be a `:'.  */
  cp_parser_require (parser, CPP_COLON, RT_COLON);
  /* Parse the assignment-expression.  */
  assignment_expr = cp_parser_assignment_expression (parser, /*cast_p=*/false, NULL);
  c_inhibit_evaluation_warnings -=
    logical_or_expr == truthvalue_true_node;

  /* Build the conditional-expression.  */
  return build_x_conditional_expr (logical_or_expr,
				   expr,
				   assignment_expr,
				   tf_warning_or_error);
}

/* Parse an assignment-expression.

   assignment-expression:
     conditional-expression
     logical-or-expression assignment-operator assignment_expression
     throw-expression

   CAST_P is true if this expression is the target of a cast.

   Returns a representation for the expression.  */

static tree
cp_parser_assignment_expression (cp_parser* parser, bool cast_p,
				 cp_id_kind * pidk)
{
  tree expr;

  /* If the next token is the `throw' keyword, then we're looking at
     a throw-expression.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_THROW))
    expr = cp_parser_throw_expression (parser);
  /* Otherwise, it must be that we are looking at a
     logical-or-expression.  */
  else
    {
      /* Parse the binary expressions (logical-or-expression).  */
      expr = cp_parser_binary_expression (parser, cast_p, false,
					  PREC_NOT_OPERATOR, pidk);
      /* If the next token is a `?' then we're actually looking at a
	 conditional-expression.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_QUERY))
	return cp_parser_question_colon_clause (parser, expr);
      else
	{
	  enum tree_code assignment_operator;

	  /* If it's an assignment-operator, we're using the second
	     production.  */
	  assignment_operator
	    = cp_parser_assignment_operator_opt (parser);
	  if (assignment_operator != ERROR_MARK)
	    {
	      bool non_constant_p;

	      /* Parse the right-hand side of the assignment.  */
	      tree rhs = cp_parser_initializer_clause (parser, &non_constant_p);

	      if (BRACE_ENCLOSED_INITIALIZER_P (rhs))
		maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);

	      /* An assignment may not appear in a
		 constant-expression.  */
	      if (cp_parser_non_integral_constant_expression (parser,
							      NIC_ASSIGNMENT))
		return error_mark_node;
	      /* Build the assignment expression.  */
	      expr = build_x_modify_expr (expr,
					  assignment_operator,
					  rhs,
					  tf_warning_or_error);
	    }
	}
    }

  return expr;
}

/* Parse an (optional) assignment-operator.

   assignment-operator: one of
     = *= /= %= += -= >>= <<= &= ^= |=

   GNU Extension:

   assignment-operator: one of
     <?= >?=

   If the next token is an assignment operator, the corresponding tree
   code is returned, and the token is consumed.  For example, for
   `+=', PLUS_EXPR is returned.  For `=' itself, the code returned is
   NOP_EXPR.  For `/', TRUNC_DIV_EXPR is returned; for `%',
   TRUNC_MOD_EXPR is returned.  If TOKEN is not an assignment
   operator, ERROR_MARK is returned.  */

static enum tree_code
cp_parser_assignment_operator_opt (cp_parser* parser)
{
  enum tree_code op;
  cp_token *token;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  switch (token->type)
    {
    case CPP_EQ:
      op = NOP_EXPR;
      break;

    case CPP_MULT_EQ:
      op = MULT_EXPR;
      break;

    case CPP_DIV_EQ:
      op = TRUNC_DIV_EXPR;
      break;

    case CPP_MOD_EQ:
      op = TRUNC_MOD_EXPR;
      break;

    case CPP_PLUS_EQ:
      op = PLUS_EXPR;
      break;

    case CPP_MINUS_EQ:
      op = MINUS_EXPR;
      break;

    case CPP_RSHIFT_EQ:
      op = RSHIFT_EXPR;
      break;

    case CPP_LSHIFT_EQ:
      op = LSHIFT_EXPR;
      break;

    case CPP_AND_EQ:
      op = BIT_AND_EXPR;
      break;

    case CPP_XOR_EQ:
      op = BIT_XOR_EXPR;
      break;

    case CPP_OR_EQ:
      op = BIT_IOR_EXPR;
      break;

    default:
      /* Nothing else is an assignment operator.  */
      op = ERROR_MARK;
    }

  /* If it was an assignment operator, consume it.  */
  if (op != ERROR_MARK)
    cp_lexer_consume_token (parser->lexer);

  return op;
}

/* Parse an expression.

   expression:
     assignment-expression
     expression , assignment-expression

   CAST_P is true if this expression is the target of a cast.

   Returns a representation of the expression.  */

static tree
cp_parser_expression (cp_parser* parser, bool cast_p, cp_id_kind * pidk)
{
  tree expression = NULL_TREE;

  while (true)
    {
      tree assignment_expression;

      /* Parse the next assignment-expression.  */
      assignment_expression
	= cp_parser_assignment_expression (parser, cast_p, pidk);
      /* If this is the first assignment-expression, we can just
	 save it away.  */
      if (!expression)
	expression = assignment_expression;
      else
	expression = build_x_compound_expr (expression,
					    assignment_expression,
					    tf_warning_or_error);
      /* If the next token is not a comma, then we are done with the
	 expression.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);
      /* A comma operator cannot appear in a constant-expression.  */
      if (cp_parser_non_integral_constant_expression (parser, NIC_COMMA))
	expression = error_mark_node;
    }

  return expression;
}

/* Parse a constant-expression.

   constant-expression:
     conditional-expression

  If ALLOW_NON_CONSTANT_P a non-constant expression is silently
  accepted.
  If ALLOW_NON_CONSTANT_P is true and the expression is not constant,
  *NON_CONSTANT_P is set to TRUE.  If ALLOW_NON_CONSTANT_P is false,
  NON_CONSTANT_P should be NULL.  */

static tree
cp_parser_constant_expression (cp_parser* parser,
			       bool allow_non_constant_p,
			       bool *non_constant_p)
{
  bool saved_integral_constant_expression_p;
  bool saved_allow_non_integral_constant_expression_p;
  bool saved_non_integral_constant_expression_p;
  tree expression;

  /* It might seem that we could simply parse the
     conditional-expression, and then check to see if it were
     TREE_CONSTANT.  However, an expression that is TREE_CONSTANT is
     one that the compiler can figure out is constant, possibly after
     doing some simplifications or optimizations.  The standard has a
     precise definition of constant-expression, and we must honor
     that, even though it is somewhat more restrictive.

     For example:

       int i[(2, 3)];

     is not a legal declaration, because `(2, 3)' is not a
     constant-expression.  The `,' operator is forbidden in a
     constant-expression.  However, GCC's constant-folding machinery
     will fold this operation to an INTEGER_CST for `3'.  */

  /* Save the old settings.  */
  saved_integral_constant_expression_p
    = parser->integral_constant_expression_p;
  saved_allow_non_integral_constant_expression_p
    = parser->allow_non_integral_constant_expression_p;
  saved_non_integral_constant_expression_p
    = parser->non_integral_constant_expression_p;
  /* We are now parsing a constant-expression.  */
  parser->integral_constant_expression_p = true;
  parser->allow_non_integral_constant_expression_p
    = (allow_non_constant_p || cxx_dialect >= cxx0x);
  parser->non_integral_constant_expression_p = false;
  /* Although the grammar says "conditional-expression", we parse an
     "assignment-expression", which also permits "throw-expression"
     and the use of assignment operators.  In the case that
     ALLOW_NON_CONSTANT_P is false, we get better errors than we would
     otherwise.  In the case that ALLOW_NON_CONSTANT_P is true, it is
     actually essential that we look for an assignment-expression.
     For example, cp_parser_initializer_clauses uses this function to
     determine whether a particular assignment-expression is in fact
     constant.  */
  expression = cp_parser_assignment_expression (parser, /*cast_p=*/false, NULL);
  /* Restore the old settings.  */
  parser->integral_constant_expression_p
    = saved_integral_constant_expression_p;
  parser->allow_non_integral_constant_expression_p
    = saved_allow_non_integral_constant_expression_p;
  if (cxx_dialect >= cxx0x)
    {
      /* Require an rvalue constant expression here; that's what our
	 callers expect.  Reference constant expressions are handled
	 separately in e.g. cp_parser_template_argument.  */
      bool is_const = potential_rvalue_constant_expression (expression);
      parser->non_integral_constant_expression_p = !is_const;
      if (!is_const && !allow_non_constant_p)
	require_potential_rvalue_constant_expression (expression);
    }
  if (allow_non_constant_p)
    *non_constant_p = parser->non_integral_constant_expression_p;
  parser->non_integral_constant_expression_p
    = saved_non_integral_constant_expression_p;

  return expression;
}

/* Parse __builtin_offsetof.

   offsetof-expression:
     "__builtin_offsetof" "(" type-id "," offsetof-member-designator ")"

   offsetof-member-designator:
     id-expression
     | offsetof-member-designator "." id-expression
     | offsetof-member-designator "[" expression "]"
     | offsetof-member-designator "->" id-expression  */

static tree
cp_parser_builtin_offsetof (cp_parser *parser)
{
  int save_ice_p, save_non_ice_p;
  tree type, expr;
  cp_id_kind dummy;
  cp_token *token;

  /* We're about to accept non-integral-constant things, but will
     definitely yield an integral constant expression.  Save and
     restore these values around our local parsing.  */
  save_ice_p = parser->integral_constant_expression_p;
  save_non_ice_p = parser->non_integral_constant_expression_p;

  /* Consume the "__builtin_offsetof" token.  */
  cp_lexer_consume_token (parser->lexer);
  /* Consume the opening `('.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  /* Parse the type-id.  */
  type = cp_parser_type_id (parser);
  /* Look for the `,'.  */
  cp_parser_require (parser, CPP_COMMA, RT_COMMA);
  token = cp_lexer_peek_token (parser->lexer);

  /* Build the (type *)null that begins the traditional offsetof macro.  */
  expr = build_static_cast (build_pointer_type (type), null_pointer_node,
			    tf_warning_or_error);

  /* Parse the offsetof-member-designator.  We begin as if we saw "expr->".  */
  expr = cp_parser_postfix_dot_deref_expression (parser, CPP_DEREF, expr,
						 true, &dummy, token->location);
  while (true)
    {
      token = cp_lexer_peek_token (parser->lexer);
      switch (token->type)
	{
	case CPP_OPEN_SQUARE:
	  /* offsetof-member-designator "[" expression "]" */
	  expr = cp_parser_postfix_open_square_expression (parser, expr, true);
	  break;

	case CPP_DEREF:
	  /* offsetof-member-designator "->" identifier */
	  expr = grok_array_decl (expr, integer_zero_node);
	  /* FALLTHRU */

	case CPP_DOT:
	  /* offsetof-member-designator "." identifier */
	  cp_lexer_consume_token (parser->lexer);
	  expr = cp_parser_postfix_dot_deref_expression (parser, CPP_DOT,
							 expr, true, &dummy,
							 token->location);
	  break;

	case CPP_CLOSE_PAREN:
	  /* Consume the ")" token.  */
	  cp_lexer_consume_token (parser->lexer);
	  goto success;

	default:
	  /* Error.  We know the following require will fail, but
	     that gives the proper error message.  */
	  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	  cp_parser_skip_to_closing_parenthesis (parser, true, false, true);
	  expr = error_mark_node;
	  goto failure;
	}
    }

 success:
  /* If we're processing a template, we can't finish the semantics yet.
     Otherwise we can fold the entire expression now.  */
  if (processing_template_decl)
    expr = build1 (OFFSETOF_EXPR, size_type_node, expr);
  else
    expr = finish_offsetof (expr);

 failure:
  parser->integral_constant_expression_p = save_ice_p;
  parser->non_integral_constant_expression_p = save_non_ice_p;

  return expr;
}

/* Parse a trait expression.

   Returns a representation of the expression, the underlying type
   of the type at issue when KEYWORD is RID_UNDERLYING_TYPE.  */

static tree
cp_parser_trait_expr (cp_parser* parser, enum rid keyword)
{
  cp_trait_kind kind;
  tree type1, type2 = NULL_TREE;
  /* BINARY is set for the traits that take two type operands.  */
  bool binary = false;
  cp_decl_specifier_seq decl_specs;

  switch (keyword)
    {
    case RID_HAS_NOTHROW_ASSIGN:
      kind = CPTK_HAS_NOTHROW_ASSIGN;
      break;
    case RID_HAS_NOTHROW_CONSTRUCTOR:
      kind = CPTK_HAS_NOTHROW_CONSTRUCTOR;
      break;
    case RID_HAS_NOTHROW_COPY:
      kind = CPTK_HAS_NOTHROW_COPY;
      break;
    case RID_HAS_TRIVIAL_ASSIGN:
      kind = CPTK_HAS_TRIVIAL_ASSIGN;
      break;
    case RID_HAS_TRIVIAL_CONSTRUCTOR:
      kind = CPTK_HAS_TRIVIAL_CONSTRUCTOR;
      break;
    case RID_HAS_TRIVIAL_COPY:
      kind = CPTK_HAS_TRIVIAL_COPY;
      break;
    case RID_HAS_TRIVIAL_DESTRUCTOR:
      kind = CPTK_HAS_TRIVIAL_DESTRUCTOR;
      break;
    case RID_HAS_VIRTUAL_DESTRUCTOR:
      kind = CPTK_HAS_VIRTUAL_DESTRUCTOR;
      break;
    case RID_IS_ABSTRACT:
      kind = CPTK_IS_ABSTRACT;
      break;
    case RID_IS_BASE_OF:
      kind = CPTK_IS_BASE_OF;
      binary = true;
      break;
    case RID_IS_CLASS:
      kind = CPTK_IS_CLASS;
      break;
    case RID_IS_CONVERTIBLE_TO:
      kind = CPTK_IS_CONVERTIBLE_TO;
      binary = true;
      break;
    case RID_IS_EMPTY:
      kind = CPTK_IS_EMPTY;
      break;
    case RID_IS_ENUM:
      kind = CPTK_IS_ENUM;
      break;
    case RID_IS_FINAL:
      kind = CPTK_IS_FINAL;
      break;
    case RID_IS_LITERAL_TYPE:
      kind = CPTK_IS_LITERAL_TYPE;
      break;
    case RID_IS_POD:
      kind = CPTK_IS_POD;
      break;
    case RID_IS_POLYMORPHIC:
      kind = CPTK_IS_POLYMORPHIC;
      break;
    case RID_IS_STD_LAYOUT:
      kind = CPTK_IS_STD_LAYOUT;
      break;
    case RID_IS_TRIVIAL:
      kind = CPTK_IS_TRIVIAL;
      break;
    case RID_IS_UNION:
      kind = CPTK_IS_UNION;
      break;
    case RID_UNDERLYING_TYPE:
      kind = CPTK_UNDERLYING_TYPE;
      break;
    case RID_BASES:
      kind = CPTK_BASES;
      break;
    case RID_DIRECT_BASES:
      kind = CPTK_DIRECT_BASES;
      break;
    default:
      gcc_unreachable ();
    }

  /* Consume the token.  */
  cp_lexer_consume_token (parser->lexer);

  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);

  type1 = cp_parser_type_id (parser);

  if (type1 == error_mark_node)
    return error_mark_node;

  /* Build a trivial decl-specifier-seq.  */
  clear_decl_specs (&decl_specs);
  decl_specs.type = type1;

  /* Call grokdeclarator to figure out what type this is.  */
  type1 = grokdeclarator (NULL, &decl_specs, TYPENAME,
			  /*initialized=*/0, /*attrlist=*/NULL);

  if (binary)
    {
      cp_parser_require (parser, CPP_COMMA, RT_COMMA);

      type2 = cp_parser_type_id (parser);

      if (type2 == error_mark_node)
	return error_mark_node;

      /* Build a trivial decl-specifier-seq.  */
      clear_decl_specs (&decl_specs);
      decl_specs.type = type2;

      /* Call grokdeclarator to figure out what type this is.  */
      type2 = grokdeclarator (NULL, &decl_specs, TYPENAME,
			      /*initialized=*/0, /*attrlist=*/NULL);
    }

  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  /* Complete the trait expression, which may mean either processing
     the trait expr now or saving it for template instantiation.  */
  switch(kind)
    {
    case CPTK_UNDERLYING_TYPE:
      return finish_underlying_type (type1);
    case CPTK_BASES:
      return finish_bases (type1, false);
    case CPTK_DIRECT_BASES:
      return finish_bases (type1, true);
    default:
      return finish_trait_expr (kind, type1, type2);
    }
}

/* Lambdas that appear in variable initializer or default argument scope
   get that in their mangling, so we need to record it.  We might as well
   use the count for function and namespace scopes as well.
*/ static GTY(()) tree lambda_scope; static GTY(()) int lambda_count; typedef struct GTY(()) tree_int { tree t; int i; } tree_int; DEF_VEC_O(tree_int); DEF_VEC_ALLOC_O(tree_int,gc); static GTY(()) VEC(tree_int,gc) *lambda_scope_stack; static void start_lambda_scope (tree decl) { tree_int ti; gcc_assert (decl); /* Once we're inside a function, we ignore other scopes and just push the function again so that popping works properly. */ if (current_function_decl && TREE_CODE (decl) != FUNCTION_DECL) decl = current_function_decl; ti.t = lambda_scope; ti.i = lambda_count; VEC_safe_push (tree_int, gc, lambda_scope_stack, &ti); if (lambda_scope != decl) { /* Don't reset the count if we're still in the same function. */ lambda_scope = decl; lambda_count = 0; } } static void record_lambda_scope (tree lambda) { LAMBDA_EXPR_EXTRA_SCOPE (lambda) = lambda_scope; LAMBDA_EXPR_DISCRIMINATOR (lambda) = lambda_count++; } static void finish_lambda_scope (void) { tree_int *p = VEC_last (tree_int, lambda_scope_stack); if (lambda_scope != p->t) { lambda_scope = p->t; lambda_count = p->i; } VEC_pop (tree_int, lambda_scope_stack); } /* Parse a lambda expression. lambda-expression: lambda-introducer lambda-declarator [opt] compound-statement Returns a representation of the expression. */ static tree cp_parser_lambda_expression (cp_parser* parser) { tree lambda_expr = build_lambda_expr (); tree type; bool ok; LAMBDA_EXPR_LOCATION (lambda_expr) = cp_lexer_peek_token (parser->lexer)->location; if (cp_unevaluated_operand) error_at (LAMBDA_EXPR_LOCATION (lambda_expr), "lambda-expression in unevaluated context"); /* We may be in the middle of deferred access check. Disable it now. */ push_deferring_access_checks (dk_no_deferred); cp_parser_lambda_introducer (parser, lambda_expr); type = begin_lambda_type (lambda_expr); if (type == error_mark_node) return error_mark_node; record_lambda_scope (lambda_expr); /* Do this again now that LAMBDA_EXPR_EXTRA_SCOPE is set. 
*/ determine_visibility (TYPE_NAME (type)); /* Now that we've started the type, add the capture fields for any explicit captures. */ register_capture_members (LAMBDA_EXPR_CAPTURE_LIST (lambda_expr)); { /* Inside the class, surrounding template-parameter-lists do not apply. */ unsigned int saved_num_template_parameter_lists = parser->num_template_parameter_lists; unsigned char in_statement = parser->in_statement; bool in_switch_statement_p = parser->in_switch_statement_p; parser->num_template_parameter_lists = 0; parser->in_statement = 0; parser->in_switch_statement_p = false; /* By virtue of defining a local class, a lambda expression has access to the private variables of enclosing classes. */ ok = cp_parser_lambda_declarator_opt (parser, lambda_expr); if (ok) cp_parser_lambda_body (parser, lambda_expr); else if (cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE)) cp_parser_skip_to_end_of_block_or_statement (parser); /* The capture list was built up in reverse order; fix that now. */ { tree newlist = NULL_TREE; tree elt, next; for (elt = LAMBDA_EXPR_CAPTURE_LIST (lambda_expr); elt; elt = next) { next = TREE_CHAIN (elt); TREE_CHAIN (elt) = newlist; newlist = elt; } LAMBDA_EXPR_CAPTURE_LIST (lambda_expr) = newlist; } if (ok) maybe_add_lambda_conv_op (type); type = finish_struct (type, /*attributes=*/NULL_TREE); parser->num_template_parameter_lists = saved_num_template_parameter_lists; parser->in_statement = in_statement; parser->in_switch_statement_p = in_switch_statement_p; } pop_deferring_access_checks (); /* This field is only used during parsing of the lambda. */ LAMBDA_EXPR_THIS_CAPTURE (lambda_expr) = NULL_TREE; /* This lambda shouldn't have any proxies left at this point. */ gcc_assert (LAMBDA_EXPR_PENDING_PROXIES (lambda_expr) == NULL); /* And now that we're done, push proxies for an enclosing lambda. 
   */
  insert_pending_capture_proxies ();

  if (ok)
    return build_lambda_object (lambda_expr);
  else
    return error_mark_node;
}

/* Parse the beginning of a lambda expression.

     lambda-introducer:
       [ lambda-capture [opt] ]

   LAMBDA_EXPR is the current representation of the lambda expression.
   Consumes tokens through the closing `]'; captures are recorded on
   LAMBDA_EXPR via add_capture.  */

static void
cp_parser_lambda_introducer (cp_parser* parser, tree lambda_expr)
{
  /* Need commas after the first capture.  */
  bool first = true;

  /* Eat the leading `['.  */
  cp_parser_require (parser, CPP_OPEN_SQUARE, RT_OPEN_SQUARE);

  /* Record default capture mode.  "[&" "[=" "[&," "[=,"
     Note the one-token lookahead: "[&" followed by a name is an explicit
     by-reference capture ("[&x"), not a capture-default.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_AND)
      && cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_NAME)
    LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) = CPLD_REFERENCE;
  else if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
    LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) = CPLD_COPY;

  if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) != CPLD_NONE)
    {
      cp_lexer_consume_token (parser->lexer);
      first = false;
    }

  /* Scan the capture list until the closing `]' (or EOF on error).  */
  while (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_SQUARE))
    {
      cp_token* capture_token;
      tree capture_id;
      tree capture_init_expr;
      cp_id_kind idk = CP_ID_KIND_NONE;
      bool explicit_init_p = false;

      enum capture_kind_type
      {
        BY_COPY,
        BY_REFERENCE
      };
      enum capture_kind_type capture_kind = BY_COPY;

      if (cp_lexer_next_token_is (parser->lexer, CPP_EOF))
        {
          error ("expected end of capture-list");
          return;
        }

      if (first)
        first = false;
      else
        cp_parser_require (parser, CPP_COMMA, RT_COMMA);

      /* Possibly capture `this'.  */
      if (cp_lexer_next_token_is_keyword (parser->lexer, RID_THIS))
        {
          location_t loc = cp_lexer_peek_token (parser->lexer)->location;
          if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) == CPLD_COPY)
            pedwarn (loc, 0, "explicit by-copy capture of %<this%> redundant "
                     "with by-copy capture default");
          cp_lexer_consume_token (parser->lexer);
          add_capture (lambda_expr,
                       /*id=*/this_identifier,
                       /*initializer=*/finish_this_expr(),
                       /*by_reference_p=*/false,
                       explicit_init_p);
          continue;
        }

      /* Remember whether we want to capture as a reference or not.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_AND))
        {
          capture_kind = BY_REFERENCE;
          cp_lexer_consume_token (parser->lexer);
        }

      /* Get the identifier.  */
      capture_token = cp_lexer_peek_token (parser->lexer);
      capture_id = cp_parser_identifier (parser);

      if (capture_id == error_mark_node)
        /* Would be nice to have a cp_parser_skip_to_closing_x for general
           delimiters, but I modified this to stop on unnested ']' as well.
           It was already changed to stop on unnested '}', so the
           "closing_parenthesis" name is no more misleading with my
           change.  */
        {
          cp_parser_skip_to_closing_parenthesis (parser,
                                                 /*recovering=*/true,
                                                 /*or_comma=*/true,
                                                 /*consume_paren=*/true);
          break;
        }

      /* Find the initializer for this capture.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
        {
          /* An explicit expression exists.  */
          cp_lexer_consume_token (parser->lexer);
          pedwarn (input_location, OPT_pedantic,
                   "ISO C++ does not allow initializers "
                   "in lambda expression capture lists");
          capture_init_expr = cp_parser_assignment_expression (parser,
                                                               /*cast_p=*/true,
                                                               &idk);
          explicit_init_p = true;
        }
      else
        {
          const char* error_msg;

          /* Turn the identifier into an id-expression.  */
          capture_init_expr
            = cp_parser_lookup_name (parser,
                                     capture_id,
                                     none_type,
                                     /*is_template=*/false,
                                     /*is_namespace=*/false,
                                     /*check_dependency=*/true,
                                     /*ambiguous_decls=*/NULL,
                                     capture_token->location);

          if (capture_init_expr == error_mark_node)
            {
              unqualified_name_lookup_error (capture_id);
              continue;
            }
          else if (DECL_P (capture_init_expr)
                   && (TREE_CODE (capture_init_expr) != VAR_DECL
                       && TREE_CODE (capture_init_expr) != PARM_DECL))
            {
              /* Only variables and parameters can be captured.  */
              error_at (capture_token->location,
                        "capture of non-variable %qD ",
                        capture_init_expr);
              inform (0, "%q+#D declared here", capture_init_expr);
              continue;
            }
          if (TREE_CODE (capture_init_expr) == VAR_DECL
              && decl_storage_duration (capture_init_expr) != dk_auto)
            {
              pedwarn (capture_token->location, 0, "capture of variable "
                       "%qD with non-automatic storage duration",
                       capture_init_expr);
              inform (0, "%q+#D declared here", capture_init_expr);
              continue;
            }

          capture_init_expr
            = finish_id_expression
                (capture_id,
                 capture_init_expr,
                 parser->scope,
                 &idk,
                 /*integral_constant_expression_p=*/false,
                 /*allow_non_integral_constant_expression_p=*/false,
                 /*non_integral_constant_expression_p=*/NULL,
                 /*template_p=*/false,
                 /*done=*/true,
                 /*address_p=*/false,
                 /*template_arg_p=*/false,
                 &error_msg,
                 capture_token->location);
        }

      /* Diagnose captures made redundant by the capture-default (only for
         captures without an explicit initializer).  */
      if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) != CPLD_NONE
          && !explicit_init_p)
        {
          if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) == CPLD_COPY
              && capture_kind == BY_COPY)
            pedwarn (capture_token->location, 0, "explicit by-copy capture "
                     "of %qD redundant with by-copy capture default",
                     capture_id);
          if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) == CPLD_REFERENCE
              && capture_kind == BY_REFERENCE)
            pedwarn (capture_token->location, 0, "explicit by-reference "
                     "capture of %qD redundant with by-reference capture "
                     "default", capture_id);
        }

      add_capture (lambda_expr,
                   capture_id,
                   capture_init_expr,
                   /*by_reference_p=*/capture_kind == BY_REFERENCE,
                   explicit_init_p);
    }

  cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
}

/* Parse the
   (optional) middle of a lambda expression.

     lambda-declarator:
       ( parameter-declaration-clause [opt] )
         attribute-specifier [opt]
         mutable [opt]
         exception-specification [opt]
         lambda-return-type-clause [opt]

   LAMBDA_EXPR is the current representation of the lambda expression.
   Returns false iff creating the function call operator failed.  */

static bool
cp_parser_lambda_declarator_opt (cp_parser* parser, tree lambda_expr)
{
  /* 5.1.1.4 of the standard says:
       If a lambda-expression does not include a lambda-declarator, it is as
       if the lambda-declarator were ().
     This means an empty parameter list, no attributes, and no exception
     specification.  */
  tree param_list = void_list_node;
  tree attributes = NULL_TREE;
  tree exception_spec = NULL_TREE;
  tree t;

  /* The lambda-declarator is optional, but must begin with an opening
     parenthesis if present.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      cp_lexer_consume_token (parser->lexer);

      begin_scope (sk_function_parms, /*entity=*/NULL_TREE);

      /* Parse parameters.  */
      param_list = cp_parser_parameter_declaration_clause (parser);

      /* Default arguments shall not be specified in the
         parameter-declaration-clause of a lambda-declarator.  */
      for (t = param_list; t; t = TREE_CHAIN (t))
        if (TREE_PURPOSE (t))
          pedwarn (DECL_SOURCE_LOCATION (TREE_VALUE (t)), OPT_pedantic,
                   "default argument specified for lambda parameter")
                   ;

      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

      attributes = cp_parser_attributes_opt (parser);

      /* Parse optional `mutable' keyword.  */
      if (cp_lexer_next_token_is_keyword (parser->lexer, RID_MUTABLE))
        {
          cp_lexer_consume_token (parser->lexer);
          LAMBDA_EXPR_MUTABLE_P (lambda_expr) = 1;
        }

      /* Parse optional exception specification.  */
      exception_spec = cp_parser_exception_specification_opt (parser);

      /* Parse optional trailing return type.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_DEREF))
        {
          cp_lexer_consume_token (parser->lexer);
          LAMBDA_EXPR_RETURN_TYPE (lambda_expr) = cp_parser_type_id (parser);
        }

      /* The function parameters must be in scope all the way until after the
         trailing-return-type in case of decltype.  */
      for (t = current_binding_level->names; t; t = DECL_CHAIN (t))
        pop_binding (DECL_NAME (t), t);

      leave_scope ();
    }

  /* Create the function call operator.

     Messing with declarators like this is no uglier than building up the
     FUNCTION_DECL by hand, and this is less likely to get out of sync with
     other code.  */
  {
    cp_decl_specifier_seq return_type_specs;
    cp_declarator* declarator;
    tree fco;
    int quals;
    void *p;

    clear_decl_specs (&return_type_specs);
    if (LAMBDA_EXPR_RETURN_TYPE (lambda_expr))
      return_type_specs.type = LAMBDA_EXPR_RETURN_TYPE (lambda_expr);
    else
      /* Maybe we will deduce the return type later, but we can use void as
         a placeholder return type anyways.  */
      return_type_specs.type = void_type_node;

    p = obstack_alloc (&declarator_obstack, 0);

    declarator = make_id_declarator (NULL_TREE, ansi_opname (CALL_EXPR),
                                     sfk_none);

    /* operator() is const unless the lambda is declared `mutable'.  */
    quals = (LAMBDA_EXPR_MUTABLE_P (lambda_expr)
             ? TYPE_UNQUALIFIED : TYPE_QUAL_CONST);
    declarator = make_call_declarator (declarator, param_list, quals,
                                       VIRT_SPEC_UNSPECIFIED,
                                       exception_spec,
                                       /*late_return_type=*/NULL_TREE);
    declarator->id_loc = LAMBDA_EXPR_LOCATION (lambda_expr);

    fco = grokmethod (&return_type_specs,
                      declarator,
                      attributes);
    if (fco != error_mark_node)
      {
        DECL_INITIALIZED_IN_CLASS_P (fco) = 1;
        DECL_ARTIFICIAL (fco) = 1;
        /* Give the object parameter a different name.  */
        DECL_NAME (DECL_ARGUMENTS (fco)) = get_identifier ("__closure");
      }

    finish_member_declaration (fco);

    obstack_free (&declarator_obstack, p);

    return (fco != error_mark_node);
  }
}

/* Parse the body of a lambda expression, which is simply

     compound-statement

   but which requires special handling.
   LAMBDA_EXPR is the current representation of the lambda expression.
   */

static void
cp_parser_lambda_body (cp_parser* parser, tree lambda_expr)
{
  bool nested = (current_function_decl != NULL_TREE);
  bool local_variables_forbidden_p = parser->local_variables_forbidden_p;

  if (nested)
    push_function_context ();
  else
    /* Still increment function_depth so that we don't GC in the middle of
       an expression.  */
    ++function_depth;

  /* Clear this in case we're in the middle of a default argument.  */
  parser->local_variables_forbidden_p = false;

  /* Finish the function call operator
     - class_specifier
     + late_parsing_for_member
     + function_definition_after_declarator
     + ctor_initializer_opt_and_function_body  */
  {
    tree fco = lambda_function (lambda_expr);
    tree body;
    bool done = false;
    tree compound_stmt;
    tree cap;

    /* Let the front end know that we are going to be defining this
       function.  */
    start_preparsed_function (fco,
                              NULL_TREE,
                              SF_PRE_PARSED | SF_INCLASS_INLINE);

    start_lambda_scope (fco);
    body = begin_function_body ();

    /* No `{' means no body to parse; still run the teardown below.  */
    if (!cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE))
      goto out;

    /* Push the proxies for any explicit captures.  */
    for (cap = LAMBDA_EXPR_CAPTURE_LIST (lambda_expr); cap;
         cap = TREE_CHAIN (cap))
      build_capture_proxy (TREE_PURPOSE (cap));

    compound_stmt = begin_compound_stmt (0);

    /* 5.1.1.4 of the standard says:
         If a lambda-expression does not include a trailing-return-type, it
         is as if the trailing-return-type denotes the following type:
          * if the compound-statement is of the form
               { return attribute-specifier [opt] expression ; }
             the type of the returned expression after lvalue-to-rvalue
             conversion (_conv.lval_ 4.1), array-to-pointer conversion
             (_conv.array_ 4.2), and function-to-pointer conversion
             (_conv.func_ 4.3);
          * otherwise, void.  */

    /* In a lambda that has neither a lambda-return-type-clause nor a
       deducible form, errors should be reported for return statements in
       the body.  Since we used void as the placeholder return type, parsing
       the body as usual will give such desired behavior.  */
    if (!LAMBDA_EXPR_RETURN_TYPE (lambda_expr)
        && cp_lexer_peek_nth_token (parser->lexer, 1)->keyword == RID_RETURN
        && cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_SEMICOLON)
      {
        tree expr = NULL_TREE;
        cp_id_kind idk = CP_ID_KIND_NONE;

        /* Parse tentatively in case there's more after the initial return
           statement.  */
        cp_parser_parse_tentatively (parser);

        cp_parser_require_keyword (parser, RID_RETURN, RT_RETURN);

        expr = cp_parser_expression (parser, /*cast_p=*/false, &idk);

        cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
        cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);

        if (cp_parser_parse_definitely (parser))
          {
            apply_lambda_return_type (lambda_expr, lambda_return_type (expr));

            /* Will get error here if type not deduced yet.  */
            finish_return_stmt (expr);

            done = true;
          }
      }

    /* Not the single-return deducible form: parse the body as an ordinary
       statement sequence.  */
    if (!done)
      {
        if (!LAMBDA_EXPR_RETURN_TYPE (lambda_expr))
          LAMBDA_EXPR_DEDUCE_RETURN_TYPE_P (lambda_expr) = true;
        while (cp_lexer_next_token_is_keyword (parser->lexer, RID_LABEL))
          cp_parser_label_declaration (parser);
        cp_parser_statement_seq_opt (parser, NULL_TREE);
        cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
        LAMBDA_EXPR_DEDUCE_RETURN_TYPE_P (lambda_expr) = false;
      }

    finish_compound_stmt (compound_stmt);
  out:
    finish_function_body (body);
    finish_lambda_scope ();

    /* Finish the function and generate code for it if necessary.  */
    expand_or_defer_fn (finish_function (/*inline*/2));
  }

  parser->local_variables_forbidden_p = local_variables_forbidden_p;
  if (nested)
    pop_function_context();
  else
    --function_depth;
}

/* Statements [gram.stmt.stmt]  */

/* Parse a statement.

   statement:
     labeled-statement
     expression-statement
     compound-statement
     selection-statement
     iteration-statement
     jump-statement
     declaration-statement
     try-block

  TM Extension:

   statement:
     atomic-statement

  IN_COMPOUND is true when the statement is nested inside a
  cp_parser_compound_statement; this matters for certain pragmas.
  If IF_P is not NULL, *IF_P is set to indicate whether the statement is a
  (possibly labeled) if statement which is not enclosed in braces and has an
  else clause.  This is used to implement -Wparentheses.  */

static void
cp_parser_statement (cp_parser* parser, tree in_statement_expr,
                     bool in_compound, bool *if_p)
{
  tree statement;
  cp_token *token;
  location_t statement_location;

  /* Target of tail "recursion": after parsing a label we loop back here to
     parse the statement proper.  */
 restart:
  if (if_p != NULL)
    *if_p = false;
  /* There is no statement yet.  */
  statement = NULL_TREE;
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Remember the location of the first token in the statement.  */
  statement_location = token->location;
  /* If this is a keyword, then that will often determine what kind of
     statement we have.  */
  if (token->type == CPP_KEYWORD)
    {
      enum rid keyword = token->keyword;

      switch (keyword)
        {
        case RID_CASE:
        case RID_DEFAULT:
          /* Looks like a labeled-statement with a case label.  Parse the
             label, and then use tail recursion to parse the statement.  */
          cp_parser_label_for_labeled_statement (parser);
          goto restart;

        case RID_IF:
        case RID_SWITCH:
          statement = cp_parser_selection_statement (parser, if_p);
          break;

        case RID_WHILE:
        case RID_DO:
        case RID_FOR:
          statement = cp_parser_iteration_statement (parser);
          break;

        case RID_BREAK:
        case RID_CONTINUE:
        case RID_RETURN:
        case RID_GOTO:
          statement = cp_parser_jump_statement (parser);
          break;

          /* Objective-C++ exception-handling constructs.  */
        case RID_AT_TRY:
        case RID_AT_CATCH:
        case RID_AT_FINALLY:
        case RID_AT_SYNCHRONIZED:
        case RID_AT_THROW:
          statement = cp_parser_objc_statement (parser);
          break;

        case RID_TRY:
          statement = cp_parser_try_block (parser);
          break;

        case RID_NAMESPACE:
          /* This must be a namespace alias definition.  */
          cp_parser_declaration_statement (parser);
          return;

        case RID_TRANSACTION_ATOMIC:
        case RID_TRANSACTION_RELAXED:
          statement = cp_parser_transaction (parser, keyword);
          break;
        case RID_TRANSACTION_CANCEL:
          statement = cp_parser_transaction_cancel (parser);
          break;

        default:
          /* It might be a keyword like `int' that can start a
             declaration-statement.  */
          break;
        }
    }
  else if (token->type == CPP_NAME)
    {
      /* If the next token is a `:', then we are looking at a
         labeled-statement.  */
      token = cp_lexer_peek_nth_token (parser->lexer, 2);
      if (token->type == CPP_COLON)
        {
          /* Looks like a labeled-statement with an ordinary label.  Parse
             the label, and then use tail recursion to parse the
             statement.  */
          cp_parser_label_for_labeled_statement (parser);
          goto restart;
        }
    }
  /* Anything that starts with a `{' must be a compound-statement.  */
  else if (token->type == CPP_OPEN_BRACE)
    statement = cp_parser_compound_statement (parser, NULL, false, false);
  /* CPP_PRAGMA is a #pragma inside a function body, which constitutes a
     statement all its own.  */
  else if (token->type == CPP_PRAGMA)
    {
      /* Only certain OpenMP pragmas are attached to statements, and thus
         are considered statements themselves.  All others are not.  In the
         context of a compound, accept the pragma as a "statement" and
         return so that we can check for a close brace.  Otherwise we
         require a real statement and must go back and read one.  */
      if (in_compound)
        cp_parser_pragma (parser, pragma_compound);
      else if (!cp_parser_pragma (parser, pragma_stmt))
        goto restart;
      return;
    }
  else if (token->type == CPP_EOF)
    {
      cp_parser_error (parser, "expected statement");
      return;
    }

  /* Everything else must be a declaration-statement or an
     expression-statement.  Try for the declaration-statement first, unless
     we are looking at a `;', in which case we know that we have an
     expression-statement.  */
  if (!statement)
    {
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
        {
          cp_parser_parse_tentatively (parser);
          /* Try to parse the declaration-statement.  */
          cp_parser_declaration_statement (parser);
          /* If that worked, we're done.  */
          if (cp_parser_parse_definitely (parser))
            return;
        }
      /* Look for an expression-statement instead.  */
      statement = cp_parser_expression_statement (parser, in_statement_expr);
    }

  /* Set the line number for the statement.  */
  if (statement && STATEMENT_CODE_P (TREE_CODE (statement)))
    SET_EXPR_LOCATION (statement, statement_location);
}

/* Parse the label for a labeled-statement, i.e.

   identifier :
   case constant-expression :
   default :

   GNU Extension:
   case constant-expression ... constant-expression : statement

   When a label is parsed without errors, the label is added to the parse
   tree by the finish_* functions, so this function doesn't have to return
   the label.  */

static void
cp_parser_label_for_labeled_statement (cp_parser* parser)
{
  cp_token *token;
  tree label = NULL_TREE;
  bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;

  /* The next token should be an identifier.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (token->type != CPP_NAME
      && token->type != CPP_KEYWORD)
    {
      cp_parser_error (parser, "expected labeled-statement");
      return;
    }

  /* A `:' here introduces a label, never a scope qualifier.  */
  parser->colon_corrects_to_scope_p = false;
  switch (token->keyword)
    {
    case RID_CASE:
      {
        tree expr, expr_hi;
        cp_token *ellipsis;

        /* Consume the `case' token.  */
        cp_lexer_consume_token (parser->lexer);
        /* Parse the constant-expression.  */
        expr = cp_parser_constant_expression (parser,
                                              /*allow_non_constant_p=*/false,
                                              NULL);

        ellipsis = cp_lexer_peek_token (parser->lexer);
        if (ellipsis->type == CPP_ELLIPSIS)
          {
            /* Consume the `...' token.  */
            cp_lexer_consume_token (parser->lexer);
            expr_hi =
              cp_parser_constant_expression (parser,
                                             /*allow_non_constant_p=*/false,
                                             NULL);
            /* We don't need to emit warnings here, as the common code will
               do this for us.
               */
          }
        else
          expr_hi = NULL_TREE;

        if (parser->in_switch_statement_p)
          finish_case_label (token->location, expr, expr_hi);
        else
          error_at (token->location,
                    "case label %qE not within a switch statement",
                    expr);
      }
      break;

    case RID_DEFAULT:
      /* Consume the `default' token.  */
      cp_lexer_consume_token (parser->lexer);

      if (parser->in_switch_statement_p)
        finish_case_label (token->location, NULL_TREE, NULL_TREE);
      else
        error_at (token->location, "case label not within a switch statement");
      break;

    default:
      /* Anything else must be an ordinary label.  */
      label = finish_label_stmt (cp_parser_identifier (parser));
      break;
    }

  /* Require the `:' token.  */
  cp_parser_require (parser, CPP_COLON, RT_COLON);

  /* An ordinary label may optionally be followed by attributes.  However,
     this is only permitted if the attributes are then followed by a
     semicolon.  This is because, for backward compatibility, when parsing
       lab: __attribute__ ((unused)) int i;
     we want the attribute to attach to "i", not "lab".  */
  if (label != NULL_TREE
      && cp_lexer_next_token_is_keyword (parser->lexer, RID_ATTRIBUTE))
    {
      tree attrs;

      cp_parser_parse_tentatively (parser);
      attrs = cp_parser_attributes_opt (parser);
      if (attrs == NULL_TREE
          || cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
        cp_parser_abort_tentative_parse (parser);
      else if (!cp_parser_parse_definitely (parser))
        ;
      else
        cplus_decl_attributes (&label, attrs, 0);
    }

  parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
}

/* Parse an expression-statement.

   expression-statement:
     expression [opt] ;

   Returns the new EXPR_STMT -- or NULL_TREE if the expression statement
   consists of nothing more than an `;'.

   IN_STATEMENT_EXPR_P indicates whether this expression-statement is part
   of an expression statement.  */

static tree
cp_parser_expression_statement (cp_parser* parser, tree in_statement_expr)
{
  tree statement = NULL_TREE;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* If the next token is a ';', then there is no expression statement.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    statement = cp_parser_expression (parser, /*cast_p=*/false, NULL);

  /* Give a helpful message for "A<T>::type t;" and the like.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)
      && !cp_parser_uncommitted_to_tentative_parse_p (parser))
    {
      if (TREE_CODE (statement) == SCOPE_REF)
        error_at (token->location, "need %<typename%> before %qE because "
                  "%qT is a dependent scope",
                  statement, TREE_OPERAND (statement, 0));
      else if (is_overloaded_fn (statement)
               && DECL_CONSTRUCTOR_P (get_first_fn (statement)))
        {
          /* A::A a; */
          tree fn = get_first_fn (statement);
          error_at (token->location,
                    "%<%T::%D%> names the constructor, not the type",
                    DECL_CONTEXT (fn), DECL_NAME (fn));
        }
    }

  /* Consume the final `;'.  */
  cp_parser_consume_semicolon_at_end_of_statement (parser);

  if (in_statement_expr
      && cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
    /* This is the final expression statement of a statement expression.  */
    statement = finish_stmt_expr_expr (statement, in_statement_expr);
  else if (statement)
    statement = finish_expr_stmt (statement);
  else
    finish_stmt ();

  return statement;
}

/* Parse a compound-statement.

   compound-statement:
     { statement-seq [opt] }

   GNU extension:

   compound-statement:
     { label-declaration-seq [opt] statement-seq [opt] }

   label-declaration-seq:
     label-declaration
     label-declaration-seq label-declaration

   Returns a tree representing the statement.  */

static tree
cp_parser_compound_statement (cp_parser *parser, tree in_statement_expr,
                              bool in_try, bool function_body)
{
  tree compound_stmt;

  /* Consume the `{'.  */
  if (!cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE))
    return error_mark_node;
  /* A nested compound-statement inside a constexpr function is only a
     pedantic error, not a hard one.  */
  if (DECL_DECLARED_CONSTEXPR_P (current_function_decl)
      && !function_body)
    pedwarn (input_location, OPT_pedantic,
             "compound-statement in constexpr function");
  /* Begin the compound-statement.  */
  compound_stmt = begin_compound_stmt (in_try ? BCS_TRY_BLOCK : 0);
  /* If the next keyword is `__label__' we have a label declaration.  */
  while (cp_lexer_next_token_is_keyword (parser->lexer, RID_LABEL))
    cp_parser_label_declaration (parser);
  /* Parse an (optional) statement-seq.  */
  cp_parser_statement_seq_opt (parser, in_statement_expr);
  /* Finish the compound-statement.  */
  finish_compound_stmt (compound_stmt);
  /* Consume the `}'.  */
  cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);

  return compound_stmt;
}

/* Parse an (optional) statement-seq.

   statement-seq:
     statement
     statement-seq [opt] statement  */

static void
cp_parser_statement_seq_opt (cp_parser* parser, tree in_statement_expr)
{
  /* Scan statements until there aren't any more.  */
  while (true)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);

      /* If we are looking at a `}', then we have run out of statements;
         the same is true if we have reached the end of file, or have
         stumbled upon a stray '@end'.  */
      if (token->type == CPP_CLOSE_BRACE
          || token->type == CPP_EOF
          || token->type == CPP_PRAGMA_EOL
          || (token->type == CPP_KEYWORD && token->keyword == RID_AT_END))
        break;

      /* If we are in a compound statement and find 'else' then something
         went wrong.  */
      else if (token->type == CPP_KEYWORD && token->keyword == RID_ELSE)
        {
          if (parser->in_statement & IN_IF_STMT)
            break;
          else
            {
              token = cp_lexer_consume_token (parser->lexer);
              error_at (token->location, "%<else%> without a previous %<if%>");
            }
        }

      /* Parse the statement.  */
      cp_parser_statement (parser, in_statement_expr, true, NULL);
    }
}

/* Parse a selection-statement.

   selection-statement:
     if ( condition ) statement
     if ( condition ) statement else statement
     switch ( condition ) statement

   Returns the new IF_STMT or SWITCH_STMT.

   If IF_P is not NULL, *IF_P is set to indicate whether the statement is a
   (possibly labeled) if statement which is not enclosed in braces and has
   an else clause.  This is used to implement -Wparentheses.
   */

static tree
cp_parser_selection_statement (cp_parser* parser, bool *if_p)
{
  cp_token *token;
  enum rid keyword;

  if (if_p != NULL)
    *if_p = false;

  /* Peek at the next token.  The caller dispatches here only when the next
     token is `if' or `switch' (see cp_parser_statement), so this require
     is expected to succeed.  */
  token = cp_parser_require (parser, CPP_KEYWORD, RT_SELECT);

  /* See what kind of keyword it is.  */
  keyword = token->keyword;
  switch (keyword)
    {
    case RID_IF:
    case RID_SWITCH:
      {
        tree statement;
        tree condition;

        /* Look for the `('.  */
        if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
          {
            cp_parser_skip_to_end_of_statement (parser);
            return error_mark_node;
          }

        /* Begin the selection-statement.  */
        if (keyword == RID_IF)
          statement = begin_if_stmt ();
        else
          statement = begin_switch_stmt ();

        /* Parse the condition.  */
        condition = cp_parser_condition (parser);
        /* Look for the `)'.  */
        if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
          cp_parser_skip_to_closing_parenthesis (parser, true, false,
                                                 /*consume_paren=*/true);

        if (keyword == RID_IF)
          {
            bool nested_if;
            unsigned char in_statement;

            /* Add the condition.  */
            finish_if_stmt_cond (condition, statement);

            /* Parse the then-clause.  */
            in_statement = parser->in_statement;
            parser->in_statement |= IN_IF_STMT;
            if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
              {
                /* An empty then-clause: `if (c);'.  */
                location_t loc = cp_lexer_peek_token (parser->lexer)->location;
                add_stmt (build_empty_stmt (loc));
                cp_lexer_consume_token (parser->lexer);
                if (!cp_lexer_next_token_is_keyword (parser->lexer, RID_ELSE))
                  warning_at (loc, OPT_Wempty_body, "suggest braces around "
                              "empty body in an %<if%> statement");
                nested_if = false;
              }
            else
              cp_parser_implicitly_scoped_statement (parser, &nested_if);
            parser->in_statement = in_statement;

            finish_then_clause (statement);

            /* If the next token is `else', parse the else-clause.  */
            if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ELSE))
              {
                /* Consume the `else' keyword.  */
                cp_lexer_consume_token (parser->lexer);
                begin_else_clause (statement);
                /* Parse the else-clause.  */
                if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
                  {
                    location_t loc;
                    loc = cp_lexer_peek_token (parser->lexer)->location;
                    warning_at (loc,
                                OPT_Wempty_body, "suggest braces around "
                                "empty body in an %<else%> statement");
                    add_stmt (build_empty_stmt (loc));
                    cp_lexer_consume_token (parser->lexer);
                  }
                else
                  cp_parser_implicitly_scoped_statement (parser, NULL);

                finish_else_clause (statement);

                /* If we are currently parsing a then-clause, then IF_P will
                   not be NULL.  We set it to true to indicate that this if
                   statement has an else clause.  This may trigger the
                   Wparentheses warning below when we get back up to the
                   parent if statement.  */
                if (if_p != NULL)
                  *if_p = true;
              }
            else
              {
                /* This if statement does not have an else clause.  If
                   NESTED_IF is true, then the then-clause is an if
                   statement which does have an else clause.  We warn about
                   the potential ambiguity.  */
                if (nested_if)
                  warning_at (EXPR_LOCATION (statement), OPT_Wparentheses,
                              "suggest explicit braces to avoid ambiguous"
                              " %<else%>");
              }

            /* Now we're all done with the if-statement.  */
            finish_if_stmt (statement);
          }
        else
          {
            bool in_switch_statement_p;
            unsigned char in_statement;

            /* Add the condition.  */
            finish_switch_cond (condition, statement);

            /* Parse the body of the switch-statement, saving and restoring
               the switch-context flags around it.  */
            in_switch_statement_p = parser->in_switch_statement_p;
            in_statement = parser->in_statement;
            parser->in_switch_statement_p = true;
            parser->in_statement |= IN_SWITCH_STMT;
            cp_parser_implicitly_scoped_statement (parser, NULL);
            parser->in_switch_statement_p = in_switch_statement_p;
            parser->in_statement = in_statement;

            /* Now we're all done with the switch-statement.  */
            finish_switch_stmt (statement);
          }

        return statement;
      }
      break;

    default:
      cp_parser_error (parser, "expected selection-statement");
      return error_mark_node;
    }
}

/* Parse a condition.
condition: expression type-specifier-seq declarator = initializer-clause type-specifier-seq declarator braced-init-list GNU Extension: condition: type-specifier-seq declarator asm-specification [opt] attributes [opt] = assignment-expression Returns the expression that should be tested. */ static tree cp_parser_condition (cp_parser* parser) { cp_decl_specifier_seq type_specifiers; const char *saved_message; int declares_class_or_enum; /* Try the declaration first. */ cp_parser_parse_tentatively (parser); /* New types are not allowed in the type-specifier-seq for a condition. */ saved_message = parser->type_definition_forbidden_message; parser->type_definition_forbidden_message = G_("types may not be defined in conditions"); /* Parse the type-specifier-seq. */ cp_parser_decl_specifier_seq (parser, CP_PARSER_FLAGS_ONLY_TYPE_OR_CONSTEXPR, &type_specifiers, &declares_class_or_enum); /* Restore the saved message. */ parser->type_definition_forbidden_message = saved_message; /* If all is well, we might be looking at a declaration. */ if (!cp_parser_error_occurred (parser)) { tree decl; tree asm_specification; tree attributes; cp_declarator *declarator; tree initializer = NULL_TREE; /* Parse the declarator. */ declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED, /*ctor_dtor_or_conv_p=*/NULL, /*parenthesized_p=*/NULL, /*member_p=*/false); /* Parse the attributes. */ attributes = cp_parser_attributes_opt (parser); /* Parse the asm-specification. */ asm_specification = cp_parser_asm_specification_opt (parser); /* If the next token is not an `=' or '{', then we might still be looking at an expression. For example: if (A(a).x) looks like a decl-specifier-seq and a declarator -- but then there is no `=', so this is an expression. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_EQ) && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE)) cp_parser_simulate_error (parser); /* If we did see an `=' or '{', then we are looking at a declaration for sure. 
*/ if (cp_parser_parse_definitely (parser)) { tree pushed_scope; bool non_constant_p; bool flags = LOOKUP_ONLYCONVERTING; /* Create the declaration. */ decl = start_decl (declarator, &type_specifiers, /*initialized_p=*/true, attributes, /*prefix_attributes=*/NULL_TREE, &pushed_scope); /* Parse the initializer. */ if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) { initializer = cp_parser_braced_list (parser, &non_constant_p); CONSTRUCTOR_IS_DIRECT_INIT (initializer) = 1; flags = 0; } else { /* Consume the `='. */ cp_parser_require (parser, CPP_EQ, RT_EQ); initializer = cp_parser_initializer_clause (parser, &non_constant_p); } if (BRACE_ENCLOSED_INITIALIZER_P (initializer)) maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS); /* Process the initializer. */ cp_finish_decl (decl, initializer, !non_constant_p, asm_specification, flags); if (pushed_scope) pop_scope (pushed_scope); return convert_from_reference (decl); } } /* If we didn't even get past the declarator successfully, we are definitely not looking at a declaration. */ else cp_parser_abort_tentative_parse (parser); /* Otherwise, we are looking at an expression. */ return cp_parser_expression (parser, /*cast_p=*/false, NULL); } /* Parses a for-statement or range-for-statement until the closing ')', not included. */ static tree cp_parser_for (cp_parser *parser) { tree init, scope, decl; bool is_range_for; /* Begin the for-statement. */ scope = begin_for_scope (&init); /* Parse the initialization. */ is_range_for = cp_parser_for_init_statement (parser, &decl); if (is_range_for) return cp_parser_range_for (parser, scope, init, decl); else return cp_parser_c_for (parser, scope, init); } static tree cp_parser_c_for (cp_parser *parser, tree scope, tree init) { /* Normal for loop */ tree condition = NULL_TREE; tree expression = NULL_TREE; tree stmt; stmt = begin_for_stmt (scope, init); /* The for-init-statement has already been parsed in cp_parser_for_init_statement, so no work is needed here. 
*/ finish_for_init_stmt (stmt); /* If there's a condition, process it. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)) condition = cp_parser_condition (parser); finish_for_cond (condition, stmt); /* Look for the `;'. */ cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON); /* If there's an expression, process it. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN)) expression = cp_parser_expression (parser, /*cast_p=*/false, NULL); finish_for_expr (expression, stmt); return stmt; } /* Tries to parse a range-based for-statement: range-based-for: decl-specifier-seq declarator : expression The decl-specifier-seq declarator and the `:' are already parsed by cp_parser_for_init_statement. If processing_template_decl it returns a newly created RANGE_FOR_STMT; if not, it is converted to a regular FOR_STMT. */ static tree cp_parser_range_for (cp_parser *parser, tree scope, tree init, tree range_decl) { tree stmt, range_expr; if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) { bool expr_non_constant_p; range_expr = cp_parser_braced_list (parser, &expr_non_constant_p); } else range_expr = cp_parser_expression (parser, /*cast_p=*/false, NULL); /* If in template, STMT is converted to a normal for-statement at instantiation. If not, it is done just ahead. */ if (processing_template_decl) { if (check_for_bare_parameter_packs (range_expr)) range_expr = error_mark_node; stmt = begin_range_for_stmt (scope, init); finish_range_for_decl (stmt, range_decl, range_expr); if (!type_dependent_expression_p (range_expr) /* do_auto_deduction doesn't mess with template init-lists. */ && !BRACE_ENCLOSED_INITIALIZER_P (range_expr)) do_range_for_auto_deduction (range_decl, range_expr); } else { stmt = begin_for_stmt (scope, init); stmt = cp_convert_range_for (stmt, range_decl, range_expr); } return stmt; } /* Subroutine of cp_convert_range_for: given the initializer expression, builds up the range temporary. 
*/

static tree
build_range_temp (tree range_expr)
{
  tree range_type, range_temp;

  /* Find out the type deduced by the declaration
     `auto &&__range = range_expr'.  */
  range_type = cp_build_reference_type (make_auto (), true);
  range_type = do_auto_deduction (range_type, range_expr,
				  type_uses_auto (range_type));

  /* Create the __range variable.  Marked artificial so it never shows
     up in user diagnostics as a real declaration.  */
  range_temp = build_decl (input_location, VAR_DECL,
			   get_identifier ("__for_range"), range_type);
  TREE_USED (range_temp) = 1;
  DECL_ARTIFICIAL (range_temp) = 1;

  return range_temp;
}

/* Used by cp_parser_range_for in template context: we aren't going to
   do a full conversion yet, but we still need to resolve auto in the
   type of the for-range-declaration if present.  This is basically a
   shortcut version of cp_convert_range_for.  */

static void
do_range_for_auto_deduction (tree decl, tree range_expr)
{
  tree auto_node = type_uses_auto (TREE_TYPE (decl));
  if (auto_node)
    {
      tree begin_dummy, end_dummy, range_temp, iter_type, iter_decl;
      /* Build a dummy *__begin expression just to drive the deduction;
	 the begin/end trees themselves are discarded.  */
      range_temp = convert_from_reference (build_range_temp (range_expr));
      iter_type = (cp_parser_perform_range_for_lookup
		   (range_temp, &begin_dummy, &end_dummy));
      iter_decl = build_decl (input_location, VAR_DECL, NULL_TREE,
			      iter_type);
      iter_decl = build_x_indirect_ref (iter_decl, RO_NULL,
					tf_warning_or_error);
      TREE_TYPE (decl) = do_auto_deduction (TREE_TYPE (decl),
					    iter_decl, auto_node);
    }
}

/* Converts a range-based for-statement into a normal
   for-statement, as per the definition.

      for (RANGE_DECL : RANGE_EXPR)
	BLOCK

   should be equivalent to:

      {
	auto &&__range = RANGE_EXPR;
	for (auto __begin = BEGIN_EXPR, __end = END_EXPR;
	      __begin != __end;
	      ++__begin)
	  {
	      RANGE_DECL = *__begin;
	      BLOCK
	  }
      }

   If RANGE_EXPR is an array:
	BEGIN_EXPR = __range
	END_EXPR = __range + ARRAY_SIZE(__range)
   Else if RANGE_EXPR has a member 'begin' or 'end':
	BEGIN_EXPR = __range.begin()
	END_EXPR = __range.end()
   Else:
	BEGIN_EXPR = begin(__range)
	END_EXPR = end(__range);

   If __range has a member 'begin' but not 'end', or vice versa, we must
   still use the second alternative (it will surely fail, however).

   When calling begin()/end() in the third alternative we must use
   argument dependent lookup, but always considering 'std' as an associated
   namespace.  */

tree
cp_convert_range_for (tree statement, tree range_decl, tree range_expr)
{
  tree begin, end;
  tree iter_type, begin_expr, end_expr;
  tree condition, expression;

  if (range_decl == error_mark_node || range_expr == error_mark_node)
    /* If an error happened previously do nothing or else a lot of
       unhelpful errors would be issued.  */
    begin_expr = end_expr = iter_type = error_mark_node;
  else
    {
      tree range_temp = build_range_temp (range_expr);
      pushdecl (range_temp);
      cp_finish_decl (range_temp, range_expr,
		      /*is_constant_init*/false, NULL_TREE,
		      LOOKUP_ONLYCONVERTING);

      range_temp = convert_from_reference (range_temp);
      iter_type = cp_parser_perform_range_for_lookup (range_temp,
						      &begin_expr,
						      &end_expr);
    }

  /* The new for initialization statement: declare and initialize the
     artificial __for_begin and __for_end variables.  */
  begin = build_decl (input_location, VAR_DECL,
		      get_identifier ("__for_begin"), iter_type);
  TREE_USED (begin) = 1;
  DECL_ARTIFICIAL (begin) = 1;
  pushdecl (begin);
  cp_finish_decl (begin, begin_expr,
		  /*is_constant_init*/false, NULL_TREE,
		  LOOKUP_ONLYCONVERTING);

  end = build_decl (input_location, VAR_DECL,
		    get_identifier ("__for_end"), iter_type);
  TREE_USED (end) = 1;
  DECL_ARTIFICIAL (end) = 1;
  pushdecl (end);
  cp_finish_decl (end, end_expr,
		  /*is_constant_init*/false, NULL_TREE,
		  LOOKUP_ONLYCONVERTING);

  finish_for_init_stmt (statement);

  /* The new for condition: __begin != __end.  */
  condition = build_x_binary_op (NE_EXPR,
				 begin, ERROR_MARK,
				 end, ERROR_MARK,
				 NULL, tf_warning_or_error);
  finish_for_cond (condition, statement);

  /* The new increment expression: ++__begin.  */
  expression = finish_unary_op_expr (PREINCREMENT_EXPR, begin);
  finish_for_expr (expression, statement);

  /* The declaration is initialized with *__begin inside the loop body.  */
  cp_finish_decl (range_decl,
		  build_x_indirect_ref (begin, RO_NULL,
					tf_warning_or_error),
		  /*is_constant_init*/false, NULL_TREE,
		  LOOKUP_ONLYCONVERTING);

  return statement;
}

/* Solves BEGIN_EXPR and END_EXPR as described in cp_convert_range_for.
   We need to solve both at the same time because the method used
   depends on the existence of members begin or end.
   Returns the type deduced for the iterator expression.  */

static tree
cp_parser_perform_range_for_lookup (tree range, tree *begin, tree *end)
{
  if (error_operand_p (range))
    {
      *begin = *end = error_mark_node;
      return error_mark_node;
    }

  if (!COMPLETE_TYPE_P (complete_type (TREE_TYPE (range))))
    {
      error ("range-based %<for%> expression of type %qT "
	     "has incomplete type", TREE_TYPE (range));
      *begin = *end = error_mark_node;
      return error_mark_node;
    }
  if (TREE_CODE (TREE_TYPE (range)) == ARRAY_TYPE)
    {
      /* If RANGE is an array, we will use pointer arithmetic:
	 __begin = __range, __end = __range + number-of-elements.  */
      *begin = range;
      *end = build_binary_op (input_location, PLUS_EXPR,
			      range,
			      array_type_nelts_top (TREE_TYPE (range)),
			      0);
      return build_pointer_type (TREE_TYPE (TREE_TYPE (range)));
    }
  else
    {
      /* If it is not an array, we must do a bit of magic.  */
      tree id_begin, id_end;
      tree member_begin, member_end;

      *begin = *end = error_mark_node;

      id_begin = get_identifier ("begin");
      id_end = get_identifier ("end");
      member_begin = lookup_member (TREE_TYPE (range), id_begin,
				    /*protect=*/2, /*want_type=*/false,
				    tf_warning_or_error);
      member_end = lookup_member (TREE_TYPE (range), id_end,
				  /*protect=*/2, /*want_type=*/false,
				  tf_warning_or_error);

      if (member_begin != NULL_TREE || member_end != NULL_TREE)
	{
	  /* Use the member functions.  Note: finding only one of the
	     two members still selects this path, with a diagnostic for
	     the missing one, as required by the specification.  */
	  if (member_begin != NULL_TREE)
	    *begin = cp_parser_range_for_member_function (range, id_begin);
	  else
	    error ("range-based %<for%> expression of type %qT has an "
		   "%<end%> member but not a %<begin%>", TREE_TYPE (range));

	  if (member_end != NULL_TREE)
	    *end = cp_parser_range_for_member_function (range, id_end);
	  else
	    error ("range-based %<for%> expression of type %qT has a "
		   "%<begin%> member but not an %<end%>",
		   TREE_TYPE (range));
	}
      else
	{
	  /* Use global functions with ADL; 'std' is always considered
	     an associated namespace (include_std).  */
	  VEC(tree,gc) *vec;
	  vec = make_tree_vector ();

	  VEC_safe_push (tree, gc, vec, range);

	  member_begin = perform_koenig_lookup (id_begin, vec,
						/*include_std=*/true,
						tf_warning_or_error);
	  *begin = finish_call_expr (member_begin, &vec, false, true,
				     tf_warning_or_error);
	  member_end = perform_koenig_lookup (id_end, vec,
					      /*include_std=*/true,
					      tf_warning_or_error);
	  *end = finish_call_expr (member_end, &vec, false, true,
				   tf_warning_or_error);

	  release_tree_vector (vec);
	}

      /* Last common checks.  */
      if (*begin == error_mark_node || *end == error_mark_node)
	{
	  /* If one of the expressions is an error do no more checks.  */
	  *begin = *end = error_mark_node;
	  return error_mark_node;
	}
      else
	{
	  tree iter_type = cv_unqualified (TREE_TYPE (*begin));
	  /* The unqualified type of the __begin and __end temporaries
	     should be the same, as required by the multiple auto
	     declaration.  */
	  if (!same_type_p (iter_type, cv_unqualified (TREE_TYPE (*end))))
	    error ("inconsistent begin/end types in range-based %<for%> "
		   "statement: %qT and %qT",
		   TREE_TYPE (*begin), TREE_TYPE (*end));
	  return iter_type;
	}
    }
}

/* Helper function for cp_parser_perform_range_for_lookup.
   Builds a tree for RANGE.IDENTIFIER().  */

static tree
cp_parser_range_for_member_function (tree range, tree identifier)
{
  tree member, res;
  VEC(tree,gc) *vec;

  member = finish_class_member_access_expr (range, identifier,
					    false, tf_warning_or_error);
  if (member == error_mark_node)
    return error_mark_node;

  /* Empty argument vector: the call takes no arguments.  */
  vec = make_tree_vector ();
  res = finish_call_expr (member, &vec,
			  /*disallow_virtual=*/false,
			  /*koenig_p=*/false,
			  tf_warning_or_error);
  release_tree_vector (vec);
  return res;
}

/* Parse an iteration-statement.

   iteration-statement:
     while ( condition ) statement
     do statement while ( expression ) ;
     for ( for-init-statement condition [opt] ; expression [opt] )
       statement

   Returns the new WHILE_STMT, DO_STMT, FOR_STMT or RANGE_FOR_STMT.  */

static tree
cp_parser_iteration_statement (cp_parser* parser)
{
  cp_token *token;
  enum rid keyword;
  tree statement;
  unsigned char in_statement;

  /* Peek at the next token.  */
  token = cp_parser_require (parser, CPP_KEYWORD, RT_INTERATION);
  if (!token)
    return error_mark_node;

  /* Remember whether or not we are already within an iteration
     statement, so it can be restored after the body is parsed.  */
  in_statement = parser->in_statement;

  /* See what kind of keyword it is.  */
  keyword = token->keyword;
  switch (keyword)
    {
    case RID_WHILE:
      {
	tree condition;

	/* Begin the while-statement.  */
	statement = begin_while_stmt ();
	/* Look for the `('.  */
	cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
	/* Parse the condition.
*/
	condition = cp_parser_condition (parser);
	finish_while_stmt_cond (condition, statement);
	/* Look for the `)'.  */
	cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	/* Parse the dependent statement.  The condition scope was
	   already opened by begin_while_stmt, hence "already scoped".  */
	parser->in_statement = IN_ITERATION_STMT;
	cp_parser_already_scoped_statement (parser);
	parser->in_statement = in_statement;
	/* We're done with the while-statement.  */
	finish_while_stmt (statement);
      }
      break;

    case RID_DO:
      {
	tree expression;

	/* Begin the do-statement.  */
	statement = begin_do_stmt ();
	/* Parse the body of the do-statement.  */
	parser->in_statement = IN_ITERATION_STMT;
	cp_parser_implicitly_scoped_statement (parser, NULL);
	parser->in_statement = in_statement;
	finish_do_body (statement);
	/* Look for the `while' keyword.  */
	cp_parser_require_keyword (parser, RID_WHILE, RT_WHILE);
	/* Look for the `('.  */
	cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
	/* Parse the expression.  */
	expression = cp_parser_expression (parser, /*cast_p=*/false, NULL);
	/* We're done with the do-statement.  */
	finish_do_stmt (expression, statement);
	/* Look for the `)'.  */
	cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	/* Look for the `;'.  */
	cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      }
      break;

    case RID_FOR:
      {
	/* Look for the `('.  */
	cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);

	/* cp_parser_for handles both classic and range-based for,
	   up to (but not including) the closing `)'.  */
	statement = cp_parser_for (parser);

	/* Look for the `)'.  */
	cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

	/* Parse the body of the for-statement.  */
	parser->in_statement = IN_ITERATION_STMT;
	cp_parser_already_scoped_statement (parser);
	parser->in_statement = in_statement;

	/* We're done with the for-statement.  */
	finish_for_stmt (statement);
      }
      break;

    default:
      cp_parser_error (parser, "expected iteration-statement");
      statement = error_mark_node;
      break;
    }

  return statement;
}

/* Parse a for-init-statement or the declarator of a range-based-for.
   Returns true if a range-based-for declaration is seen.

   for-init-statement:
     expression-statement
     simple-declaration  */

static bool
cp_parser_for_init_statement (cp_parser* parser, tree *decl)
{
  /* If the next token is a `;', then we have an empty
     expression-statement.  Grammatically, this is also a
     simple-declaration, but an invalid one, because it does not
     declare anything.  Therefore, if we did not handle this case
     specially, we would issue an error message about an invalid
     declaration.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    {
      bool is_range_for = false;
      bool saved_colon_corrects_to_scope_p
	= parser->colon_corrects_to_scope_p;

      /* Disable the "did you mean ::" fixit while looking for the
	 range-for `:'.  */
      parser->colon_corrects_to_scope_p = false;

      /* We're going to speculatively look for a declaration, falling back
	 to an expression, if necessary.  */
      cp_parser_parse_tentatively (parser);
      /* Parse the declaration.  */
      cp_parser_simple_declaration (parser,
				    /*function_definition_allowed_p=*/false,
				    decl);
      parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
      if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
	{
	  /* It is a range-for, consume the ':' */
	  cp_lexer_consume_token (parser->lexer);
	  is_range_for = true;
	  if (cxx_dialect < cxx0x)
	    {
	      error_at (cp_lexer_peek_token (parser->lexer)->location,
			"range-based %<for%> loops are not allowed "
			"in C++98 mode");
	      *decl = error_mark_node;
	    }
	}
      else
	/* The ';' is not consumed yet because we told
	   cp_parser_simple_declaration not to.  */
	cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

      if (cp_parser_parse_definitely (parser))
	return is_range_for;
      /* If the tentative parse failed, then we shall need to look for an
	 expression-statement.  */
    }
  /* If we are here, it is an expression-statement.  */
  cp_parser_expression_statement (parser, NULL_TREE);
  return false;
}

/* Parse a jump-statement.

   jump-statement:
     break ;
     continue ;
     return expression [opt] ;
     return braced-init-list ;
     goto identifier ;

   GNU extension:

   jump-statement:
     goto * expression ;

   Returns the new BREAK_STMT, CONTINUE_STMT, RETURN_EXPR, or
   GOTO_EXPR.  */

static tree
cp_parser_jump_statement (cp_parser* parser)
{
  tree statement = error_mark_node;
  cp_token *token;
  enum rid keyword;
  unsigned char in_statement;

  /* Peek at the next token.  */
  token = cp_parser_require (parser, CPP_KEYWORD, RT_JUMP);
  if (!token)
    return error_mark_node;

  /* See what kind of keyword it is.  */
  keyword = token->keyword;
  switch (keyword)
    {
    case RID_BREAK:
      /* `break' may leave an if-statement, but not an OpenMP region;
	 mask off IN_IF_STMT before classifying the context.  */
      in_statement = parser->in_statement & ~IN_IF_STMT;
      switch (in_statement)
	{
	case 0:
	  error_at (token->location,
		    "break statement not within loop or switch");
	  break;
	default:
	  gcc_assert ((in_statement & IN_SWITCH_STMT)
		      || in_statement == IN_ITERATION_STMT);
	  statement = finish_break_stmt ();
	  break;
	case IN_OMP_BLOCK:
	  error_at (token->location,
		    "invalid exit from OpenMP structured block");
	  break;
	case IN_OMP_FOR:
	  error_at (token->location,
		    "break statement used with OpenMP for loop");
	  break;
	}
      cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      break;

    case RID_CONTINUE:
      switch (parser->in_statement & ~(IN_SWITCH_STMT | IN_IF_STMT))
	{
	case 0:
	  error_at (token->location,
		    "continue statement not within a loop");
	  break;
	case IN_ITERATION_STMT:
	case IN_OMP_FOR:
	  statement = finish_continue_stmt ();
	  break;
	case IN_OMP_BLOCK:
	  error_at (token->location,
		    "invalid exit from OpenMP structured block");
	  break;
	default:
	  gcc_unreachable ();
	}
      cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      break;

    case RID_RETURN:
      {
	tree expr;
	bool expr_non_constant_p;

	if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	  {
	    /* C++11 `return { ... };'.  */
	    maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
	    expr = cp_parser_braced_list (parser, &expr_non_constant_p);
	  }
	else if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	  expr = cp_parser_expression (parser, /*cast_p=*/false, NULL);
	else
	  /* If the next token is a `;', then there is no
	     expression.  */
	  expr = NULL_TREE;
	/* Build the return-statement.  */
	statement = finish_return_stmt (expr);
	/* Look for the final `;'.  */
	cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      }
      break;

    case RID_GOTO:
      /* Create the goto-statement.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_MULT))
	{
	  /* Issue a warning about this use of a GNU extension
	     (computed goto).  */
	  pedwarn (token->location, OPT_pedantic,
		   "ISO C++ forbids computed gotos");
	  /* Consume the '*' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Parse the dependent expression.  */
	  finish_goto_stmt (cp_parser_expression (parser,
						  /*cast_p=*/false, NULL));
	}
      else
	finish_goto_stmt (cp_parser_identifier (parser));
      /* Look for the final `;'.  */
      cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      break;

    default:
      cp_parser_error (parser, "expected jump-statement");
      break;
    }

  return statement;
}

/* Parse a declaration-statement.

   declaration-statement:
     block-declaration  */

static void
cp_parser_declaration_statement (cp_parser* parser)
{
  void *p;

  /* Get the high-water mark for the DECLARATOR_OBSTACK.  */
  p = obstack_alloc (&declarator_obstack, 0);

  /* Parse the block-declaration.  */
  cp_parser_block_declaration (parser, /*statement_p=*/true);

  /* Free any declarators allocated.  */
  obstack_free (&declarator_obstack, p);

  /* Finish off the statement.  */
  finish_stmt ();
}

/* Some dependent statements (like `if (cond) statement'), are
   implicitly in their own scope.  In other words, if the statement is
   a single statement (as opposed to a compound-statement), it is
   none-the-less treated as if it were enclosed in braces.  Any
   declarations appearing in the dependent statement are out of scope
   after control passes that point.  This function parses a statement,
   but ensures that is in its own scope, even if it is not a
   compound-statement.

   If IF_P is not NULL, *IF_P is set to indicate whether the statement
   is a (possibly labeled) if statement which is not enclosed in
   braces and has an else clause.  This is used to implement
   -Wparentheses.

   Returns the new statement.
*/

static tree
cp_parser_implicitly_scoped_statement (cp_parser* parser, bool *if_p)
{
  tree statement;

  if (if_p != NULL)
    *if_p = false;

  /* Mark if () ; with a special NOP_EXPR.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
    {
      location_t loc = cp_lexer_peek_token (parser->lexer)->location;
      cp_lexer_consume_token (parser->lexer);
      statement = add_stmt (build_empty_stmt (loc));
    }
  /* if a compound is opened, we simply parse the statement directly.  */
  else if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    statement = cp_parser_compound_statement (parser, NULL, false, false);
  /* If the token is not a `{', then we must take special action.  */
  else
    {
      /* Create a compound-statement.  */
      statement = begin_compound_stmt (0);
      /* Parse the dependent-statement.  */
      cp_parser_statement (parser, NULL_TREE, false, if_p);
      /* Finish the dummy compound-statement.  */
      finish_compound_stmt (statement);
    }

  /* Return the statement.  */
  return statement;
}

/* For some dependent statements (like `while (cond) statement'), we
   have already created a scope.  Therefore, even if the dependent
   statement is a compound-statement, we do not want to create another
   scope.  */

static void
cp_parser_already_scoped_statement (cp_parser* parser)
{
  /* If the token is a `{', then we must take special action.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE))
    cp_parser_statement (parser, NULL_TREE, false, NULL);
  else
    {
      /* Avoid calling cp_parser_compound_statement, so that we
	 don't create a new scope.  Do everything else by hand.  */
      cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE);
      /* If the next keyword is `__label__' we have a label declaration.  */
      while (cp_lexer_next_token_is_keyword (parser->lexer, RID_LABEL))
	cp_parser_label_declaration (parser);
      /* Parse an (optional) statement-seq.  */
      cp_parser_statement_seq_opt (parser, NULL_TREE);
      cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
    }
}

/* Declarations [gram.dcl.dcl] */

/* Parse an optional declaration-sequence.

   declaration-seq:
     declaration
     declaration-seq declaration  */

static void
cp_parser_declaration_seq_opt (cp_parser* parser)
{
  while (true)
    {
      cp_token *token;

      token = cp_lexer_peek_token (parser->lexer);

      if (token->type == CPP_CLOSE_BRACE
	  || token->type == CPP_EOF
	  || token->type == CPP_PRAGMA_EOL)
	break;

      if (token->type == CPP_SEMICOLON)
	{
	  /* A declaration consisting of a single semicolon is
	     invalid.  Allow it unless we're being pedantic.  */
	  cp_lexer_consume_token (parser->lexer);
	  if (!in_system_header)
	    pedwarn (input_location, OPT_pedantic, "extra %<;%>");
	  continue;
	}

      /* If we're entering or exiting a region that's implicitly
	 extern "C", modify the lang context appropriately.  */
      if (!parser->implicit_extern_c && token->implicit_extern_c)
	{
	  push_lang_context (lang_name_c);
	  parser->implicit_extern_c = true;
	}
      else if (parser->implicit_extern_c && !token->implicit_extern_c)
	{
	  pop_lang_context ();
	  parser->implicit_extern_c = false;
	}

      if (token->type == CPP_PRAGMA)
	{
	  /* A top-level declaration can consist solely of a #pragma.
	     A nested declaration cannot, so this is done here and not
	     in cp_parser_declaration.  (A #pragma at block scope is
	     handled in cp_parser_statement.)  */
	  cp_parser_pragma (parser, pragma_external);
	  continue;
	}

      /* Parse the declaration itself.  */
      cp_parser_declaration (parser);
    }
}

/* Parse a declaration.

   declaration:
     block-declaration
     function-definition
     template-declaration
     explicit-instantiation
     explicit-specialization
     linkage-specification
     namespace-definition

   GNU extension:

   declaration:
      __extension__ declaration  */

static void
cp_parser_declaration (cp_parser* parser)
{
  cp_token token1;
  cp_token token2;
  int saved_pedantic;
  void *p;
  tree attributes = NULL_TREE;

  /* Check for the `__extension__' keyword.  */
  if (cp_parser_extension_opt (parser, &saved_pedantic))
    {
      /* Parse the qualified declaration.  */
      cp_parser_declaration (parser);
      /* Restore the PEDANTIC flag.  */
      pedantic = saved_pedantic;

      return;
    }

  /* Try to figure out what kind of declaration is present by peeking
     at the first two tokens.  */
  token1 = *cp_lexer_peek_token (parser->lexer);

  if (token1.type != CPP_EOF)
    token2 = *cp_lexer_peek_nth_token (parser->lexer, 2);
  else
    {
      token2.type = CPP_EOF;
      token2.keyword = RID_MAX;
    }

  /* Get the high-water mark for the DECLARATOR_OBSTACK.  */
  p = obstack_alloc (&declarator_obstack, 0);

  /* If the next token is `extern' and the following token is a string
     literal, then we have a linkage specification.  */
  if (token1.keyword == RID_EXTERN
      && cp_parser_is_pure_string_literal (&token2))
    cp_parser_linkage_specification (parser);
  /* If the next token is `template', then we have either a template
     declaration, an explicit instantiation, or an explicit
     specialization.  */
  else if (token1.keyword == RID_TEMPLATE)
    {
      /* `template <>' indicates a template specialization.  */
      if (token2.type == CPP_LESS
	  && cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_GREATER)
	cp_parser_explicit_specialization (parser);
      /* `template <' indicates a template declaration.  */
      else if (token2.type == CPP_LESS)
	cp_parser_template_declaration (parser, /*member_p=*/false);
      /* Anything else must be an explicit instantiation.  */
      else
	cp_parser_explicit_instantiation (parser);
    }
  /* If the next token is `export', then we have a template
     declaration.  */
  else if (token1.keyword == RID_EXPORT)
    cp_parser_template_declaration (parser, /*member_p=*/false);
  /* If the next token is `extern', 'static' or 'inline' and the one
     after that is `template', we have a GNU extended explicit
     instantiation directive.  */
  else if (cp_parser_allow_gnu_extensions_p (parser)
	   && (token1.keyword == RID_EXTERN
	       || token1.keyword == RID_STATIC
	       || token1.keyword == RID_INLINE)
	   && token2.keyword == RID_TEMPLATE)
    cp_parser_explicit_instantiation (parser);
  /* If the next token is `namespace', check for a named or unnamed
     namespace definition.  */
  else if (token1.keyword == RID_NAMESPACE
	   && (/* A named namespace definition.  */
	       (token2.type == CPP_NAME
		&& (cp_lexer_peek_nth_token (parser->lexer, 3)->type
		    != CPP_EQ))
	       /* An unnamed namespace definition.  */
	       || token2.type == CPP_OPEN_BRACE
	       || token2.keyword == RID_ATTRIBUTE))
    cp_parser_namespace_definition (parser);
  /* An inline (associated) namespace definition.  */
  else if (token1.keyword == RID_INLINE
	   && token2.keyword == RID_NAMESPACE)
    cp_parser_namespace_definition (parser);
  /* Objective-C++ declaration/definition.  */
  else if (c_dialect_objc () && OBJC_IS_AT_KEYWORD (token1.keyword))
    cp_parser_objc_declaration (parser, NULL_TREE);
  else if (c_dialect_objc ()
	   && token1.keyword == RID_ATTRIBUTE
	   && cp_parser_objc_valid_prefix_attributes (parser, &attributes))
    cp_parser_objc_declaration (parser, attributes);
  /* We must have either a block declaration or a function
     definition.  */
  else
    /* Try to parse a block-declaration, or a function-definition.  */
    cp_parser_block_declaration (parser, /*statement_p=*/false);

  /* Free any declarators allocated.  */
  obstack_free (&declarator_obstack, p);
}

/* Parse a block-declaration.

   block-declaration:
     simple-declaration
     asm-definition
     namespace-alias-definition
     using-declaration
     using-directive

   GNU Extension:

   block-declaration:
     __extension__ block-declaration

   C++0x Extension:

   block-declaration:
     static_assert-declaration

   If STATEMENT_P is TRUE, then this block-declaration is occurring as
   part of a declaration-statement.  */

static void
cp_parser_block_declaration (cp_parser *parser,
			     bool      statement_p)
{
  cp_token *token1;
  int saved_pedantic;

  /* Check for the `__extension__' keyword.  */
  if (cp_parser_extension_opt (parser, &saved_pedantic))
    {
      /* Parse the qualified declaration.  */
      cp_parser_block_declaration (parser, statement_p);
      /* Restore the PEDANTIC flag.  */
      pedantic = saved_pedantic;

      return;
    }

  /* Peek at the next token to figure out which kind of declaration is
     present.  */
  token1 = cp_lexer_peek_token (parser->lexer);

  /* If the next keyword is `asm', we have an asm-definition.  */
  if (token1->keyword == RID_ASM)
    {
      if (statement_p)
	cp_parser_commit_to_tentative_parse (parser);
      cp_parser_asm_definition (parser);
    }
  /* If the next keyword is `namespace', we have a
     namespace-alias-definition.  */
  else if (token1->keyword == RID_NAMESPACE)
    cp_parser_namespace_alias_definition (parser);
  /* If the next keyword is `using', we have a using-declaration, a
     using-directive, or an alias-declaration.  */
  else if (token1->keyword == RID_USING)
    {
      cp_token *token2;

      if (statement_p)
	cp_parser_commit_to_tentative_parse (parser);
      /* If the token after `using' is `namespace', then we have a
	 using-directive.  */
      token2 = cp_lexer_peek_nth_token (parser->lexer, 2);
      if (token2->keyword == RID_NAMESPACE)
	cp_parser_using_directive (parser);
      /* If the second token after 'using' is '=', then we have an
	 alias-declaration.  */
      else if (cxx_dialect >= cxx0x
	       && token2->type == CPP_NAME
	       && ((cp_lexer_peek_nth_token (parser->lexer, 3)->type
		    == CPP_EQ)
		   || (cp_lexer_peek_nth_token (parser->lexer, 3)->keyword
		       == RID_ATTRIBUTE)))
	cp_parser_alias_declaration (parser);
      /* Otherwise, it's a using-declaration.  */
      else
	cp_parser_using_declaration (parser,
				     /*access_declaration_p=*/false);
    }
  /* If the next keyword is `__label__' we have a misplaced label
     declaration.  */
  else if (token1->keyword == RID_LABEL)
    {
      cp_lexer_consume_token (parser->lexer);
      error_at (token1->location,
		"%<__label__%> not at the beginning of a block");
      cp_parser_skip_to_end_of_statement (parser);
      /* If the next token is now a `;', consume it.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	cp_lexer_consume_token (parser->lexer);
    }
  /* If the next token is `static_assert' we have a static assertion.  */
  else if (token1->keyword == RID_STATIC_ASSERT)
    cp_parser_static_assert (parser, /*member_p=*/false);
  /* Anything else must be a simple-declaration.  */
  else
    cp_parser_simple_declaration (parser, !statement_p,
				  /*maybe_range_for_decl*/NULL);
}

/* Parse a simple-declaration.

   simple-declaration:
     decl-specifier-seq [opt] init-declarator-list [opt] ;

   init-declarator-list:
     init-declarator
     init-declarator-list , init-declarator

   If FUNCTION_DEFINITION_ALLOWED_P is TRUE, then we also recognize a
   function-definition as a simple-declaration.

   If MAYBE_RANGE_FOR_DECL is not NULL, the pointed tree will be set to
   the parsed declaration if it is an uninitialized single declarator
   not followed by a `;', or to error_mark_node otherwise.  Either way,
   the trailing `;', if present, will not be consumed.  */

static void
cp_parser_simple_declaration (cp_parser* parser,
			      bool function_definition_allowed_p,
			      tree *maybe_range_for_decl)
{
  cp_decl_specifier_seq decl_specifiers;
  int declares_class_or_enum;
  bool saw_declarator;

  if (maybe_range_for_decl)
    *maybe_range_for_decl = NULL_TREE;

  /* Defer access checks until we know what is being declared; the
     checks for names appearing in the decl-specifier-seq should be
     done as if we were in the scope of the thing being declared.  */
  push_deferring_access_checks (dk_deferred);

  /* Parse the decl-specifier-seq.  We have to keep track of whether
     or not the decl-specifier-seq declares a named class or
     enumeration type, since that is the only case in which the
     init-declarator-list is allowed to be empty.

     [dcl.dcl]

     In a simple-declaration, the optional init-declarator-list can be
     omitted only when declaring a class or enumeration, that is when
     the decl-specifier-seq contains either a class-specifier, an
     elaborated-type-specifier, or an enum-specifier.
*/
  cp_parser_decl_specifier_seq (parser,
				CP_PARSER_FLAGS_OPTIONAL,
				&decl_specifiers,
				&declares_class_or_enum);
  /* We no longer need to defer access checks.  */
  stop_deferring_access_checks ();

  /* In a block scope, a valid declaration must always have a
     decl-specifier-seq.  By not trying to parse declarators, we can
     resolve the declaration/expression ambiguity more quickly.  */
  if (!function_definition_allowed_p
      && !decl_specifiers.any_specifiers_p)
    {
      cp_parser_error (parser, "expected declaration");
      goto done;
    }

  /* If the next two tokens are both identifiers, the code is
     erroneous.  The usual cause of this situation is code like:

       T t;

     where "T" should name a type -- but does not.  */
  if (!decl_specifiers.any_type_specifiers_p
      && cp_parser_parse_and_diagnose_invalid_type_name (parser))
    {
      /* If parsing tentatively, we should commit; we really are
	 looking at a declaration.  */
      cp_parser_commit_to_tentative_parse (parser);
      /* Give up.  */
      goto done;
    }

  /* If we have seen at least one decl-specifier, and the next token
     is not a parenthesis, then we must be looking at a declaration.
     (After "int (" we might be looking at a functional cast.)  */
  if (decl_specifiers.any_specifiers_p
      && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_PAREN)
      && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE)
      && !cp_parser_error_occurred (parser))
    cp_parser_commit_to_tentative_parse (parser);

  /* Keep going until we hit the `;' at the end of the simple
     declaration.  */
  saw_declarator = false;
  while (cp_lexer_next_token_is_not (parser->lexer,
				     CPP_SEMICOLON))
    {
      cp_token *token;
      bool function_definition_p;
      tree decl;

      if (saw_declarator)
	{
	  /* If we are processing next declarator, comma is expected */
	  token = cp_lexer_peek_token (parser->lexer);
	  gcc_assert (token->type == CPP_COMMA);
	  cp_lexer_consume_token (parser->lexer);
	  if (maybe_range_for_decl)
	    /* More than one declarator: cannot be a range-for.  */
	    *maybe_range_for_decl = error_mark_node;
	}
      else
	saw_declarator = true;

      /* Parse the init-declarator.  */
      decl = cp_parser_init_declarator (parser, &decl_specifiers,
					/*checks=*/NULL,
					function_definition_allowed_p,
					/*member_p=*/false,
					declares_class_or_enum,
					&function_definition_p,
					maybe_range_for_decl);
      /* If an error occurred while parsing tentatively, exit quickly.
	 (That usually happens when in the body of a function; each
	 statement is treated as a declaration-statement until proven
	 otherwise.)  */
      if (cp_parser_error_occurred (parser))
	goto done;
      /* Handle function definitions specially.  */
      if (function_definition_p)
	{
	  /* If the next token is a `,', then we are probably
	     processing something like:

	       void f() {}, *p;

	     which is erroneous.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	    {
	      cp_token *token = cp_lexer_peek_token (parser->lexer);
	      error_at (token->location,
			"mixing"
			" declarations and function-definitions is forbidden");
	    }
	  /* Otherwise, we're done with the list of declarators.  */
	  else
	    {
	      pop_deferring_access_checks ();
	      return;
	    }
	}
      if (maybe_range_for_decl && *maybe_range_for_decl == NULL_TREE)
	*maybe_range_for_decl = decl;
      /* The next token should be either a `,' or a `;'.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* If it's a `,', there are more declarators to come.  */
      if (token->type == CPP_COMMA)
	/* will be consumed next time around */;
      /* If it's a `;', we are done.  */
      else if (token->type == CPP_SEMICOLON || maybe_range_for_decl)
	break;
      /* Anything else is an error.  */
      else
	{
	  /* If we have already issued an error message we don't need
	     to issue another one.  */
	  if (decl != error_mark_node
	      || cp_parser_uncommitted_to_tentative_parse_p (parser))
	    cp_parser_error (parser, "expected %<,%> or %<;%>");
	  /* Skip tokens until we reach the end of the statement.  */
	  cp_parser_skip_to_end_of_statement (parser);
	  /* If the next token is now a `;', consume it.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	    cp_lexer_consume_token (parser->lexer);
	  goto done;
	}
      /* After the first time around, a function-definition is not
	 allowed -- even if it was OK at first.  For example:

	   int i, f() {}

	 is not valid.  */
      function_definition_allowed_p = false;
    }

  /* Issue an error message if no declarators are present, and the
     decl-specifier-seq does not itself declare a class or
     enumeration.  */
  if (!saw_declarator)
    {
      if (cp_parser_declares_only_class_p (parser))
	shadow_tag (&decl_specifiers);
      /* Perform any deferred access checks.  */
      perform_deferred_access_checks ();
    }

  /* Consume the `;'.  */
  if (!maybe_range_for_decl)
    cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

 done:
  pop_deferring_access_checks ();
}

/* Parse a decl-specifier-seq.

   decl-specifier-seq:
     decl-specifier-seq [opt] decl-specifier

   decl-specifier:
     storage-class-specifier
     type-specifier
     function-specifier
     friend
     typedef

   GNU Extension:

   decl-specifier:
     attributes

   Set *DECL_SPECS to a representation of the decl-specifier-seq.

   The parser flags FLAGS is used to control type-specifier parsing.

   *DECLARES_CLASS_OR_ENUM is set to the bitwise or of the following
   flags:

     1: one of the decl-specifiers is an elaborated-type-specifier
	(i.e., a type declaration)
     2: one of the decl-specifiers is an enum-specifier or a
	class-specifier (i.e., a type definition)  */

static void
cp_parser_decl_specifier_seq (cp_parser* parser,
			      cp_parser_flags flags,
			      cp_decl_specifier_seq *decl_specs,
			      int* declares_class_or_enum)
{
  bool constructor_possible_p = !parser->in_declarator_p;
  cp_token *start_token = NULL;

  /* Clear DECL_SPECS.  */
  clear_decl_specs (decl_specs);

  /* Assume no class or enumeration type is declared.  */
  *declares_class_or_enum = 0;

  /* Keep reading specifiers until there are no more to read.  */
  while (true)
    {
      bool constructor_p;
      bool found_decl_spec;
      cp_token *token;

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* Save the first token of the decl spec list for error
	 reporting.  */
      if (!start_token)
	start_token = token;
      /* Handle attributes.  */
      if (token->keyword == RID_ATTRIBUTE)
	{
	  /* Parse the attributes.  */
	  decl_specs->attributes
	    = chainon (decl_specs->attributes,
		       cp_parser_attributes_opt (parser));
	  continue;
	}
      /* Assume we will find a decl-specifier keyword.  */
      found_decl_spec = true;
      /* If the next token is an appropriate keyword, we can simply
	 add it to the list.  */
      switch (token->keyword)
	{
	  /* decl-specifier:
	       friend
	       constexpr */
	case RID_FRIEND:
	  if (!at_class_scope_p ())
	    {
	      error_at (token->location, "%<friend%> used outside of class");
	      cp_lexer_purge_token (parser->lexer);
	    }
	  else
	    {
	      ++decl_specs->specs[(int) ds_friend];
	      /* Consume the token.  */
	      cp_lexer_consume_token (parser->lexer);
	    }
	  break;

	case RID_CONSTEXPR:
	  ++decl_specs->specs[(int) ds_constexpr];
	  cp_lexer_consume_token (parser->lexer);
	  break;

	  /* function-specifier:
	       inline
	       virtual
	       explicit  */
	case RID_INLINE:
	case RID_VIRTUAL:
	case RID_EXPLICIT:
	  cp_parser_function_specifier_opt (parser, decl_specs);
	  break;

	  /* decl-specifier:
	       typedef  */
	case RID_TYPEDEF:
	  ++decl_specs->specs[(int) ds_typedef];
	  /* Consume the token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* A constructor declarator cannot appear in a typedef.  */
	  constructor_possible_p = false;
	  /* The "typedef" keyword can only occur in a declaration; we
	     may as well commit at this point.  */
	  cp_parser_commit_to_tentative_parse (parser);

	  if (decl_specs->storage_class != sc_none)
	    decl_specs->conflicting_specifiers_p = true;
	  break;

	  /* storage-class-specifier:
	       auto
	       register
	       static
	       extern
	       mutable

	     GNU Extension:
	       thread  */
	case RID_AUTO:
	  if (cxx_dialect == cxx98)
	    {
	      /* Consume the token.  */
	      cp_lexer_consume_token (parser->lexer);

	      /* Complain about `auto' as a storage specifier, if
		 we're complaining about C++0x compatibility.  */
	      warning_at (token->location, OPT_Wc__0x_compat, "%<auto%>"
			  " changes meaning in C++11; please remove it");

	      /* Set the storage class anyway.  */
	      cp_parser_set_storage_class (parser, decl_specs, RID_AUTO,
					   token->location);
	    }
	  else
	    /* C++0x auto type-specifier.  */
	    found_decl_spec = false;
	  break;

	case RID_REGISTER:
	case RID_STATIC:
	case RID_EXTERN:
	case RID_MUTABLE:
	  /* Consume the token.  */
	  cp_lexer_consume_token (parser->lexer);
	  cp_parser_set_storage_class (parser, decl_specs, token->keyword,
				       token->location);
	  break;
	case RID_THREAD:
	  /* Consume the token.  */
	  cp_lexer_consume_token (parser->lexer);
	  ++decl_specs->specs[(int) ds_thread];
	  break;

	default:
	  /* We did not yet find a decl-specifier yet.  */
	  found_decl_spec = false;
	  break;
	}

      if (found_decl_spec
	  && (flags & CP_PARSER_FLAGS_ONLY_TYPE_OR_CONSTEXPR)
	  && token->keyword != RID_CONSTEXPR)
	error ("decl-specifier invalid in condition");

      /* Constructors are a special case.  The `S' in `S()' is not a
	 decl-specifier; it is the beginning of the declarator.  */
      constructor_p
	= (!found_decl_spec
	   && constructor_possible_p
	   && (cp_parser_constructor_declarator_p
	       (parser, decl_specs->specs[(int) ds_friend] != 0)));

      /* If we don't have a DECL_SPEC yet, then we must be looking at
	 a type-specifier.  */
      if (!found_decl_spec && !constructor_p)
	{
	  int decl_spec_declares_class_or_enum;
	  bool is_cv_qualifier;
	  tree type_spec;

	  type_spec
	    = cp_parser_type_specifier (parser, flags,
					decl_specs,
					/*is_declaration=*/true,
					&decl_spec_declares_class_or_enum,
					&is_cv_qualifier);
	  *declares_class_or_enum |= decl_spec_declares_class_or_enum;

	  /* If this type-specifier referenced a user-defined type
	     (a typedef, class-name, etc.), then we can't allow any
	     more such type-specifiers henceforth.

	     [dcl.spec]

	     The longest sequence of decl-specifiers that could
	     possibly be a type name is taken as the
	     decl-specifier-seq of a declaration.  The sequence shall
	     be self-consistent as described below.
[dcl.type] As a general rule, at most one type-specifier is allowed in the complete decl-specifier-seq of a declaration. The only exceptions are the following: -- const or volatile can be combined with any other type-specifier. -- signed or unsigned can be combined with char, long, short, or int. -- .. Example: typedef char* Pc; void g (const int Pc); Here, Pc is *not* part of the decl-specifier seq; it's the declarator. Therefore, once we see a type-specifier (other than a cv-qualifier), we forbid any additional user-defined types. We *do* still allow things like `int int' to be considered a decl-specifier-seq, and issue the error message later. */ if (type_spec && !is_cv_qualifier) flags |= CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES; /* A constructor declarator cannot follow a type-specifier. */ if (type_spec) { constructor_possible_p = false; found_decl_spec = true; if (!is_cv_qualifier) decl_specs->any_type_specifiers_p = true; } } /* If we still do not have a DECL_SPEC, then there are no more decl-specifiers. */ if (!found_decl_spec) break; decl_specs->any_specifiers_p = true; /* After we see one decl-specifier, further decl-specifiers are always optional. */ flags |= CP_PARSER_FLAGS_OPTIONAL; } cp_parser_check_decl_spec (decl_specs, start_token->location); /* Don't allow a friend specifier with a class definition. */ if (decl_specs->specs[(int) ds_friend] != 0 && (*declares_class_or_enum & 2)) error_at (start_token->location, "class definition may not be declared a friend"); } /* Parse an (optional) storage-class-specifier. storage-class-specifier: auto register static extern mutable GNU Extension: storage-class-specifier: thread Returns an IDENTIFIER_NODE corresponding to the keyword used. */ static tree cp_parser_storage_class_specifier_opt (cp_parser* parser) { switch (cp_lexer_peek_token (parser->lexer)->keyword) { case RID_AUTO: if (cxx_dialect != cxx98) return NULL_TREE; /* Fall through for C++98. 
 */

    case RID_REGISTER:
    case RID_STATIC:
    case RID_EXTERN:
    case RID_MUTABLE:
    case RID_THREAD:
      /* Consume the token.  */
      return cp_lexer_consume_token (parser->lexer)->u.value;

    default:
      /* The next token is not a storage-class-specifier; consume
	 nothing and let the caller try something else.  */
      return NULL_TREE;
    }
}

/* Parse an (optional) function-specifier.

   function-specifier:
     inline
     virtual
     explicit

   Returns an IDENTIFIER_NODE corresponding to the keyword used.
   Updates DECL_SPECS, if it is non-NULL.  */

static tree
cp_parser_function_specifier_opt (cp_parser* parser,
				  cp_decl_specifier_seq *decl_specs)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  switch (token->keyword)
    {
    case RID_INLINE:
      if (decl_specs)
	++decl_specs->specs[(int) ds_inline];
      break;

    case RID_VIRTUAL:
      /* 14.5.2.3 [temp.mem]

	 A member function template shall not be virtual.  */
      if (PROCESSING_REAL_TEMPLATE_DECL_P ())
	error_at (token->location, "templates may not be %<virtual%>");
      else if (decl_specs)
	++decl_specs->specs[(int) ds_virtual];
      break;

    case RID_EXPLICIT:
      if (decl_specs)
	++decl_specs->specs[(int) ds_explicit];
      break;

    default:
      /* Not a function-specifier; the token is left unconsumed.  */
      return NULL_TREE;
    }

  /* Consume the token.  */
  return cp_lexer_consume_token (parser->lexer)->u.value;
}

/* Parse a linkage-specification.

   linkage-specification:
     extern string-literal { declaration-seq [opt] }
     extern string-literal declaration  */

static void
cp_parser_linkage_specification (cp_parser* parser)
{
  tree linkage;

  /* Look for the `extern' keyword.  */
  cp_parser_require_keyword (parser, RID_EXTERN, RT_EXTERN);

  /* Look for the string-literal.  */
  linkage = cp_parser_string_literal (parser, false, false);

  /* Transform the literal into an identifier.  If the literal is a
     wide-character string, or contains embedded NULs, then we can't
     handle it as the user wants.  */
  if (strlen (TREE_STRING_POINTER (linkage))
      != (size_t) (TREE_STRING_LENGTH (linkage) - 1))
    {
      cp_parser_error (parser, "invalid linkage-specification");
      /* Assume C++ linkage.  */
      linkage = lang_name_cplusplus;
    }
  else
    linkage = get_identifier (TREE_STRING_POINTER (linkage));

  /* We're now using the new linkage.
*/ push_lang_context (linkage); /* If the next token is a `{', then we're using the first production. */ if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) { /* Consume the `{' token. */ cp_lexer_consume_token (parser->lexer); /* Parse the declarations. */ cp_parser_declaration_seq_opt (parser); /* Look for the closing `}'. */ cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE); } /* Otherwise, there's just one declaration. */ else { bool saved_in_unbraced_linkage_specification_p; saved_in_unbraced_linkage_specification_p = parser->in_unbraced_linkage_specification_p; parser->in_unbraced_linkage_specification_p = true; cp_parser_declaration (parser); parser->in_unbraced_linkage_specification_p = saved_in_unbraced_linkage_specification_p; } /* We're done with the linkage-specification. */ pop_lang_context (); } /* Parse a static_assert-declaration. static_assert-declaration: static_assert ( constant-expression , string-literal ) ; If MEMBER_P, this static_assert is a class member. */ static void cp_parser_static_assert(cp_parser *parser, bool member_p) { tree condition; tree message; cp_token *token; location_t saved_loc; bool dummy; /* Peek at the `static_assert' token so we can keep track of exactly where the static assertion started. */ token = cp_lexer_peek_token (parser->lexer); saved_loc = token->location; /* Look for the `static_assert' keyword. */ if (!cp_parser_require_keyword (parser, RID_STATIC_ASSERT, RT_STATIC_ASSERT)) return; /* We know we are in a static assertion; commit to any tentative parse. */ if (cp_parser_parsing_tentatively (parser)) cp_parser_commit_to_tentative_parse (parser); /* Parse the `(' starting the static assertion condition. */ cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN); /* Parse the constant-expression. Allow a non-constant expression here in order to give better diagnostics in finish_static_assert. 
*/ condition = cp_parser_constant_expression (parser, /*allow_non_constant_p=*/true, /*non_constant_p=*/&dummy); /* Parse the separating `,'. */ cp_parser_require (parser, CPP_COMMA, RT_COMMA); /* Parse the string-literal message. */ message = cp_parser_string_literal (parser, /*translate=*/false, /*wide_ok=*/true); /* A `)' completes the static assertion. */ if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); /* A semicolon terminates the declaration. */ cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON); /* Complete the static assertion, which may mean either processing the static assert now or saving it for template instantiation. */ finish_static_assert (condition, message, saved_loc, member_p); } /* Parse a `decltype' type. Returns the type. simple-type-specifier: decltype ( expression ) */ static tree cp_parser_decltype (cp_parser *parser) { tree expr; bool id_expression_or_member_access_p = false; const char *saved_message; bool saved_integral_constant_expression_p; bool saved_non_integral_constant_expression_p; cp_token *id_expr_start_token; cp_token *start_token = cp_lexer_peek_token (parser->lexer); if (start_token->type == CPP_DECLTYPE) { /* Already parsed. */ cp_lexer_consume_token (parser->lexer); return start_token->u.value; } /* Look for the `decltype' token. */ if (!cp_parser_require_keyword (parser, RID_DECLTYPE, RT_DECLTYPE)) return error_mark_node; /* Types cannot be defined in a `decltype' expression. Save away the old message. */ saved_message = parser->type_definition_forbidden_message; /* And create the new one. */ parser->type_definition_forbidden_message = G_("types may not be defined in %<decltype%> expressions"); /* The restrictions on constant-expressions do not apply inside decltype expressions. 
*/ saved_integral_constant_expression_p = parser->integral_constant_expression_p; saved_non_integral_constant_expression_p = parser->non_integral_constant_expression_p; parser->integral_constant_expression_p = false; /* Do not actually evaluate the expression. */ ++cp_unevaluated_operand; /* Do not warn about problems with the expression. */ ++c_inhibit_evaluation_warnings; /* Parse the opening `('. */ if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return error_mark_node; /* First, try parsing an id-expression. */ id_expr_start_token = cp_lexer_peek_token (parser->lexer); cp_parser_parse_tentatively (parser); expr = cp_parser_id_expression (parser, /*template_keyword_p=*/false, /*check_dependency_p=*/true, /*template_p=*/NULL, /*declarator_p=*/false, /*optional_p=*/false); if (!cp_parser_error_occurred (parser) && expr != error_mark_node) { bool non_integral_constant_expression_p = false; tree id_expression = expr; cp_id_kind idk; const char *error_msg; if (TREE_CODE (expr) == IDENTIFIER_NODE) /* Lookup the name we got back from the id-expression. */ expr = cp_parser_lookup_name (parser, expr, none_type, /*is_template=*/false, /*is_namespace=*/false, /*check_dependency=*/true, /*ambiguous_decls=*/NULL, id_expr_start_token->location); if (expr && expr != error_mark_node && TREE_CODE (expr) != TEMPLATE_ID_EXPR && TREE_CODE (expr) != TYPE_DECL && (TREE_CODE (expr) != BIT_NOT_EXPR || !TYPE_P (TREE_OPERAND (expr, 0))) && cp_lexer_peek_token (parser->lexer)->type == CPP_CLOSE_PAREN) { /* Complete lookup of the id-expression. 
*/ expr = (finish_id_expression (id_expression, expr, parser->scope, &idk, /*integral_constant_expression_p=*/false, /*allow_non_integral_constant_expression_p=*/true, &non_integral_constant_expression_p, /*template_p=*/false, /*done=*/true, /*address_p=*/false, /*template_arg_p=*/false, &error_msg, id_expr_start_token->location)); if (expr == error_mark_node) /* We found an id-expression, but it was something that we should not have found. This is an error, not something we can recover from, so note that we found an id-expression and we'll recover as gracefully as possible. */ id_expression_or_member_access_p = true; } if (expr && expr != error_mark_node && cp_lexer_peek_token (parser->lexer)->type == CPP_CLOSE_PAREN) /* We have an id-expression. */ id_expression_or_member_access_p = true; } if (!id_expression_or_member_access_p) { /* Abort the id-expression parse. */ cp_parser_abort_tentative_parse (parser); /* Parsing tentatively, again. */ cp_parser_parse_tentatively (parser); /* Parse a class member access. */ expr = cp_parser_postfix_expression (parser, /*address_p=*/false, /*cast_p=*/false, /*member_access_only_p=*/true, NULL); if (expr && expr != error_mark_node && cp_lexer_peek_token (parser->lexer)->type == CPP_CLOSE_PAREN) /* We have an id-expression. */ id_expression_or_member_access_p = true; } if (id_expression_or_member_access_p) /* We have parsed the complete id-expression or member access. */ cp_parser_parse_definitely (parser); else { bool saved_greater_than_is_operator_p; /* Abort our attempt to parse an id-expression or member access expression. */ cp_parser_abort_tentative_parse (parser); /* Within a parenthesized expression, a `>' token is always the greater-than operator. */ saved_greater_than_is_operator_p = parser->greater_than_is_operator_p; parser->greater_than_is_operator_p = true; /* Parse a full expression. 
*/ expr = cp_parser_expression (parser, /*cast_p=*/false, NULL); /* The `>' token might be the end of a template-id or template-parameter-list now. */ parser->greater_than_is_operator_p = saved_greater_than_is_operator_p; } /* Go back to evaluating expressions. */ --cp_unevaluated_operand; --c_inhibit_evaluation_warnings; /* Restore the old message and the integral constant expression flags. */ parser->type_definition_forbidden_message = saved_message; parser->integral_constant_expression_p = saved_integral_constant_expression_p; parser->non_integral_constant_expression_p = saved_non_integral_constant_expression_p; /* Parse to the closing `)'. */ if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) { cp_parser_skip_to_closing_parenthesis (parser, true, false, /*consume_paren=*/true); return error_mark_node; } expr = finish_decltype_type (expr, id_expression_or_member_access_p, tf_warning_or_error); /* Replace the decltype with a CPP_DECLTYPE so we don't need to parse it again. */ start_token->type = CPP_DECLTYPE; start_token->u.value = expr; start_token->keyword = RID_MAX; cp_lexer_purge_tokens_after (parser->lexer, start_token); return expr; } /* Special member functions [gram.special] */ /* Parse a conversion-function-id. conversion-function-id: operator conversion-type-id Returns an IDENTIFIER_NODE representing the operator. */ static tree cp_parser_conversion_function_id (cp_parser* parser) { tree type; tree saved_scope; tree saved_qualifying_scope; tree saved_object_scope; tree pushed_scope = NULL_TREE; /* Look for the `operator' token. */ if (!cp_parser_require_keyword (parser, RID_OPERATOR, RT_OPERATOR)) return error_mark_node; /* When we parse the conversion-type-id, the current scope will be reset. However, we need that information in able to look up the conversion function later, so we save it here. 
*/ saved_scope = parser->scope; saved_qualifying_scope = parser->qualifying_scope; saved_object_scope = parser->object_scope; /* We must enter the scope of the class so that the names of entities declared within the class are available in the conversion-type-id. For example, consider: struct S { typedef int I; operator I(); }; S::operator I() { ... } In order to see that `I' is a type-name in the definition, we must be in the scope of `S'. */ if (saved_scope) pushed_scope = push_scope (saved_scope); /* Parse the conversion-type-id. */ type = cp_parser_conversion_type_id (parser); /* Leave the scope of the class, if any. */ if (pushed_scope) pop_scope (pushed_scope); /* Restore the saved scope. */ parser->scope = saved_scope; parser->qualifying_scope = saved_qualifying_scope; parser->object_scope = saved_object_scope; /* If the TYPE is invalid, indicate failure. */ if (type == error_mark_node) return error_mark_node; return mangle_conv_op_name_for_type (type); } /* Parse a conversion-type-id: conversion-type-id: type-specifier-seq conversion-declarator [opt] Returns the TYPE specified. */ static tree cp_parser_conversion_type_id (cp_parser* parser) { tree attributes; cp_decl_specifier_seq type_specifiers; cp_declarator *declarator; tree type_specified; /* Parse the attributes. */ attributes = cp_parser_attributes_opt (parser); /* Parse the type-specifiers. */ cp_parser_type_specifier_seq (parser, /*is_declaration=*/false, /*is_trailing_return=*/false, &type_specifiers); /* If that didn't work, stop. */ if (type_specifiers.type == error_mark_node) return error_mark_node; /* Parse the conversion-declarator. */ declarator = cp_parser_conversion_declarator_opt (parser); type_specified = grokdeclarator (declarator, &type_specifiers, TYPENAME, /*initialized=*/0, &attributes); if (attributes) cplus_decl_attributes (&type_specified, attributes, /*flags=*/0); /* Don't give this error when parsing tentatively. 
   This happens to work because we always parse this
   definitively once.  */
  if (! cp_parser_uncommitted_to_tentative_parse_p (parser)
      && type_uses_auto (type_specified))
    {
      error ("invalid use of %<auto%> in conversion operator");
      return error_mark_node;
    }

  return type_specified;
}

/* Parse an (optional) conversion-declarator.

   conversion-declarator:
     ptr-operator conversion-declarator [opt]

   */

static cp_declarator *
cp_parser_conversion_declarator_opt (cp_parser* parser)
{
  enum tree_code code;
  tree class_type;
  cp_cv_quals cv_quals;

  /* We don't know if there's a ptr-operator next, or not.  */
  cp_parser_parse_tentatively (parser);
  /* Try the ptr-operator.  */
  code = cp_parser_ptr_operator (parser, &class_type, &cv_quals);
  /* If it worked, look for more conversion-declarators.  */
  if (cp_parser_parse_definitely (parser))
    {
      cp_declarator *declarator;

      /* Parse another optional declarator.  The recursion terminates
	 when no further ptr-operator is found, so the innermost
	 declarator is constructed first.  */
      declarator = cp_parser_conversion_declarator_opt (parser);

      return cp_parser_make_indirect_declarator
	(code, class_type, cv_quals, declarator);
    }

  /* No ptr-operator was present; the conversion-declarator is
     absent, which is valid.  */
  return NULL;
}

/* Parse an (optional) ctor-initializer.

   ctor-initializer:
     : mem-initializer-list

   Returns TRUE iff the ctor-initializer was actually present.  */

static bool
cp_parser_ctor_initializer_opt (cp_parser* parser)
{
  /* If the next token is not a `:', then there is no
     ctor-initializer.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_COLON))
    {
      /* Do default initialization of any bases and members.  */
      if (DECL_CONSTRUCTOR_P (current_function_decl))
	finish_mem_initializers (NULL_TREE);

      return false;
    }

  /* Consume the `:' token.  */
  cp_lexer_consume_token (parser->lexer);
  /* And the mem-initializer-list.  */
  cp_parser_mem_initializer_list (parser);

  return true;
}

/* Parse a mem-initializer-list.

   mem-initializer-list:
     mem-initializer ... [opt]
     mem-initializer ...
[opt] , mem-initializer-list */ static void cp_parser_mem_initializer_list (cp_parser* parser) { tree mem_initializer_list = NULL_TREE; tree target_ctor = error_mark_node; cp_token *token = cp_lexer_peek_token (parser->lexer); /* Let the semantic analysis code know that we are starting the mem-initializer-list. */ if (!DECL_CONSTRUCTOR_P (current_function_decl)) error_at (token->location, "only constructors take member initializers"); /* Loop through the list. */ while (true) { tree mem_initializer; token = cp_lexer_peek_token (parser->lexer); /* Parse the mem-initializer. */ mem_initializer = cp_parser_mem_initializer (parser); /* If the next token is a `...', we're expanding member initializers. */ if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)) { /* Consume the `...'. */ cp_lexer_consume_token (parser->lexer); /* The TREE_PURPOSE must be a _TYPE, because base-specifiers can be expanded but members cannot. */ if (mem_initializer != error_mark_node && !TYPE_P (TREE_PURPOSE (mem_initializer))) { error_at (token->location, "cannot expand initializer for member %<%D%>", TREE_PURPOSE (mem_initializer)); mem_initializer = error_mark_node; } /* Construct the pack expansion type. */ if (mem_initializer != error_mark_node) mem_initializer = make_pack_expansion (mem_initializer); } if (target_ctor != error_mark_node && mem_initializer != error_mark_node) { error ("mem-initializer for %qD follows constructor delegation", TREE_PURPOSE (mem_initializer)); mem_initializer = error_mark_node; } /* Look for a target constructor. */ if (mem_initializer != error_mark_node && TYPE_P (TREE_PURPOSE (mem_initializer)) && same_type_p (TREE_PURPOSE (mem_initializer), current_class_type)) { maybe_warn_cpp0x (CPP0X_DELEGATING_CTORS); if (mem_initializer_list) { error ("constructor delegation follows mem-initializer for %qD", TREE_PURPOSE (mem_initializer_list)); mem_initializer = error_mark_node; } target_ctor = mem_initializer; } /* Add it to the list, unless it was erroneous. 
*/ if (mem_initializer != error_mark_node) { TREE_CHAIN (mem_initializer) = mem_initializer_list; mem_initializer_list = mem_initializer; } /* If the next token is not a `,', we're done. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA)) break; /* Consume the `,' token. */ cp_lexer_consume_token (parser->lexer); } /* Perform semantic analysis. */ if (DECL_CONSTRUCTOR_P (current_function_decl)) finish_mem_initializers (mem_initializer_list); } /* Parse a mem-initializer. mem-initializer: mem-initializer-id ( expression-list [opt] ) mem-initializer-id braced-init-list GNU extension: mem-initializer: ( expression-list [opt] ) Returns a TREE_LIST. The TREE_PURPOSE is the TYPE (for a base class) or FIELD_DECL (for a non-static data member) to initialize; the TREE_VALUE is the expression-list. An empty initialization list is represented by void_list_node. */ static tree cp_parser_mem_initializer (cp_parser* parser) { tree mem_initializer_id; tree expression_list; tree member; cp_token *token = cp_lexer_peek_token (parser->lexer); /* Find out what is being initialized. 
*/ if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)) { permerror (token->location, "anachronistic old-style base class initializer"); mem_initializer_id = NULL_TREE; } else { mem_initializer_id = cp_parser_mem_initializer_id (parser); if (mem_initializer_id == error_mark_node) return mem_initializer_id; } member = expand_member_init (mem_initializer_id); if (member && !DECL_P (member)) in_base_initializer = 1; if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) { bool expr_non_constant_p; maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS); expression_list = cp_parser_braced_list (parser, &expr_non_constant_p); CONSTRUCTOR_IS_DIRECT_INIT (expression_list) = 1; expression_list = build_tree_list (NULL_TREE, expression_list); } else { VEC(tree,gc)* vec; vec = cp_parser_parenthesized_expression_list (parser, non_attr, /*cast_p=*/false, /*allow_expansion_p=*/true, /*non_constant_p=*/NULL); if (vec == NULL) return error_mark_node; expression_list = build_tree_list_vec (vec); release_tree_vector (vec); } if (expression_list == error_mark_node) return error_mark_node; if (!expression_list) expression_list = void_type_node; in_base_initializer = 0; return member ? build_tree_list (member, expression_list) : error_mark_node; } /* Parse a mem-initializer-id. mem-initializer-id: :: [opt] nested-name-specifier [opt] class-name identifier Returns a TYPE indicating the class to be initializer for the first production. Returns an IDENTIFIER_NODE indicating the data member to be initialized for the second production. */ static tree cp_parser_mem_initializer_id (cp_parser* parser) { bool global_scope_p; bool nested_name_specifier_p; bool template_p = false; tree id; cp_token *token = cp_lexer_peek_token (parser->lexer); /* `typename' is not allowed in this context ([temp.res]). 
*/ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TYPENAME)) { error_at (token->location, "keyword %<typename%> not allowed in this context (a qualified " "member initializer is implicitly a type)"); cp_lexer_consume_token (parser->lexer); } /* Look for the optional `::' operator. */ global_scope_p = (cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false) != NULL_TREE); /* Look for the optional nested-name-specifier. The simplest way to implement: [temp.res] The keyword `typename' is not permitted in a base-specifier or mem-initializer; in these contexts a qualified name that depends on a template-parameter is implicitly assumed to be a type name. is to assume that we have seen the `typename' keyword at this point. */ nested_name_specifier_p = (cp_parser_nested_name_specifier_opt (parser, /*typename_keyword_p=*/true, /*check_dependency_p=*/true, /*type_p=*/true, /*is_declaration=*/true) != NULL_TREE); if (nested_name_specifier_p) template_p = cp_parser_optional_template_keyword (parser); /* If there is a `::' operator or a nested-name-specifier, then we are definitely looking for a class-name. */ if (global_scope_p || nested_name_specifier_p) return cp_parser_class_name (parser, /*typename_keyword_p=*/true, /*template_keyword_p=*/template_p, typename_type, /*check_dependency_p=*/true, /*class_head_p=*/false, /*is_declaration=*/true); /* Otherwise, we could also be looking for an ordinary identifier. */ cp_parser_parse_tentatively (parser); /* Try a class-name. */ id = cp_parser_class_name (parser, /*typename_keyword_p=*/true, /*template_keyword_p=*/false, none_type, /*check_dependency_p=*/true, /*class_head_p=*/false, /*is_declaration=*/true); /* If we found one, we're done. */ if (cp_parser_parse_definitely (parser)) return id; /* Otherwise, look for an ordinary identifier. */ return cp_parser_identifier (parser); } /* Overloading [gram.over] */ /* Parse an operator-function-id. 
operator-function-id: operator operator Returns an IDENTIFIER_NODE for the operator which is a human-readable spelling of the identifier, e.g., `operator +'. */ static tree cp_parser_operator_function_id (cp_parser* parser) { /* Look for the `operator' keyword. */ if (!cp_parser_require_keyword (parser, RID_OPERATOR, RT_OPERATOR)) return error_mark_node; /* And then the name of the operator itself. */ return cp_parser_operator (parser); } /* Return an identifier node for a user-defined literal operator. The suffix identifier is chained to the operator name identifier. */ static tree cp_literal_operator_id (const char* name) { tree identifier; char *buffer = XNEWVEC (char, strlen (UDLIT_OP_ANSI_PREFIX) + strlen (name) + 10); sprintf (buffer, UDLIT_OP_ANSI_FORMAT, name); identifier = get_identifier (buffer); /*IDENTIFIER_UDLIT_OPNAME_P (identifier) = 1; If we get a flag someday. */ return identifier; } /* Parse an operator. operator: new delete new[] delete[] + - * / % ^ & | ~ ! = < > += -= *= /= %= ^= &= |= << >> >>= <<= == != <= >= && || ++ -- , ->* -> () [] GNU Extensions: operator: <? >? <?= >?= Returns an IDENTIFIER_NODE for the operator which is a human-readable spelling of the identifier, e.g., `operator +'. */ static tree cp_parser_operator (cp_parser* parser) { tree id = NULL_TREE; cp_token *token; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* Figure out which operator we have. */ switch (token->type) { case CPP_KEYWORD: { enum tree_code op; /* The keyword should be either `new' or `delete'. */ if (token->keyword == RID_NEW) op = NEW_EXPR; else if (token->keyword == RID_DELETE) op = DELETE_EXPR; else break; /* Consume the `new' or `delete' token. */ cp_lexer_consume_token (parser->lexer); /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* If it's a `[' token then this is the array variant of the operator. */ if (token->type == CPP_OPEN_SQUARE) { /* Consume the `[' token. 
*/ cp_lexer_consume_token (parser->lexer); /* Look for the `]' token. */ cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE); id = ansi_opname (op == NEW_EXPR ? VEC_NEW_EXPR : VEC_DELETE_EXPR); } /* Otherwise, we have the non-array variant. */ else id = ansi_opname (op); return id; } case CPP_PLUS: id = ansi_opname (PLUS_EXPR); break; case CPP_MINUS: id = ansi_opname (MINUS_EXPR); break; case CPP_MULT: id = ansi_opname (MULT_EXPR); break; case CPP_DIV: id = ansi_opname (TRUNC_DIV_EXPR); break; case CPP_MOD: id = ansi_opname (TRUNC_MOD_EXPR); break; case CPP_XOR: id = ansi_opname (BIT_XOR_EXPR); break; case CPP_AND: id = ansi_opname (BIT_AND_EXPR); break; case CPP_OR: id = ansi_opname (BIT_IOR_EXPR); break; case CPP_COMPL: id = ansi_opname (BIT_NOT_EXPR); break; case CPP_NOT: id = ansi_opname (TRUTH_NOT_EXPR); break; case CPP_EQ: id = ansi_assopname (NOP_EXPR); break; case CPP_LESS: id = ansi_opname (LT_EXPR); break; case CPP_GREATER: id = ansi_opname (GT_EXPR); break; case CPP_PLUS_EQ: id = ansi_assopname (PLUS_EXPR); break; case CPP_MINUS_EQ: id = ansi_assopname (MINUS_EXPR); break; case CPP_MULT_EQ: id = ansi_assopname (MULT_EXPR); break; case CPP_DIV_EQ: id = ansi_assopname (TRUNC_DIV_EXPR); break; case CPP_MOD_EQ: id = ansi_assopname (TRUNC_MOD_EXPR); break; case CPP_XOR_EQ: id = ansi_assopname (BIT_XOR_EXPR); break; case CPP_AND_EQ: id = ansi_assopname (BIT_AND_EXPR); break; case CPP_OR_EQ: id = ansi_assopname (BIT_IOR_EXPR); break; case CPP_LSHIFT: id = ansi_opname (LSHIFT_EXPR); break; case CPP_RSHIFT: id = ansi_opname (RSHIFT_EXPR); break; case CPP_LSHIFT_EQ: id = ansi_assopname (LSHIFT_EXPR); break; case CPP_RSHIFT_EQ: id = ansi_assopname (RSHIFT_EXPR); break; case CPP_EQ_EQ: id = ansi_opname (EQ_EXPR); break; case CPP_NOT_EQ: id = ansi_opname (NE_EXPR); break; case CPP_LESS_EQ: id = ansi_opname (LE_EXPR); break; case CPP_GREATER_EQ: id = ansi_opname (GE_EXPR); break; case CPP_AND_AND: id = ansi_opname (TRUTH_ANDIF_EXPR); break; case CPP_OR_OR: 
id = ansi_opname (TRUTH_ORIF_EXPR); break; case CPP_PLUS_PLUS: id = ansi_opname (POSTINCREMENT_EXPR); break; case CPP_MINUS_MINUS: id = ansi_opname (PREDECREMENT_EXPR); break; case CPP_COMMA: id = ansi_opname (COMPOUND_EXPR); break; case CPP_DEREF_STAR: id = ansi_opname (MEMBER_REF); break; case CPP_DEREF: id = ansi_opname (COMPONENT_REF); break; case CPP_OPEN_PAREN: /* Consume the `('. */ cp_lexer_consume_token (parser->lexer); /* Look for the matching `)'. */ cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); return ansi_opname (CALL_EXPR); case CPP_OPEN_SQUARE: /* Consume the `['. */ cp_lexer_consume_token (parser->lexer); /* Look for the matching `]'. */ cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE); return ansi_opname (ARRAY_REF); case CPP_STRING: if (cxx_dialect == cxx98) maybe_warn_cpp0x (CPP0X_USER_DEFINED_LITERALS); if (TREE_STRING_LENGTH (token->u.value) > 2) { error ("expected empty string after %<operator%> keyword"); return error_mark_node; } /* Consume the string. */ cp_lexer_consume_token (parser->lexer); /* Look for the suffix identifier. */ token = cp_lexer_peek_token (parser->lexer); if (token->type == CPP_NAME) { id = cp_parser_identifier (parser); if (id != error_mark_node) { const char *name = IDENTIFIER_POINTER (id); return cp_literal_operator_id (name); } } else { error ("expected suffix identifier"); return error_mark_node; } case CPP_STRING_USERDEF: error ("missing space between %<\"\"%> and suffix identifier"); return error_mark_node; default: /* Anything else is an error. */ break; } /* If we have selected an identifier, we need to consume the operator token. */ if (id) cp_lexer_consume_token (parser->lexer); /* Otherwise, no valid operator name was present. */ else { cp_parser_error (parser, "expected operator"); id = error_mark_node; } return id; } /* Parse a template-declaration. 
template-declaration: export [opt] template < template-parameter-list > declaration If MEMBER_P is TRUE, this template-declaration occurs within a class-specifier. The grammar rule given by the standard isn't correct. What is really meant is: template-declaration: export [opt] template-parameter-list-seq decl-specifier-seq [opt] init-declarator [opt] ; export [opt] template-parameter-list-seq function-definition template-parameter-list-seq: template-parameter-list-seq [opt] template < template-parameter-list > */ static void cp_parser_template_declaration (cp_parser* parser, bool member_p) { /* Check for `export'. */ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_EXPORT)) { /* Consume the `export' token. */ cp_lexer_consume_token (parser->lexer); /* Warn that we do not support `export'. */ warning (0, "keyword %<export%> not implemented, and will be ignored"); } cp_parser_template_declaration_after_export (parser, member_p); } /* Parse a template-parameter-list. template-parameter-list: template-parameter template-parameter-list , template-parameter Returns a TREE_LIST. Each node represents a template parameter. The nodes are connected via their TREE_CHAINs. */ static tree cp_parser_template_parameter_list (cp_parser* parser) { tree parameter_list = NULL_TREE; begin_template_parm_list (); /* The loop below parses the template parms. We first need to know the total number of template parms to be able to compute proper canonical types of each dependent type. So after the loop, when we know the total number of template parms, end_template_parm_list computes the proper canonical types and fixes up the dependent types accordingly. */ while (true) { tree parameter; bool is_non_type; bool is_parameter_pack; location_t parm_loc; /* Parse the template-parameter. */ parm_loc = cp_lexer_peek_token (parser->lexer)->location; parameter = cp_parser_template_parameter (parser, &is_non_type, &is_parameter_pack); /* Add it to the list. 
*/ if (parameter != error_mark_node) parameter_list = process_template_parm (parameter_list, parm_loc, parameter, is_non_type, is_parameter_pack, 0); else { tree err_parm = build_tree_list (parameter, parameter); parameter_list = chainon (parameter_list, err_parm); } /* If the next token is not a `,', we're done. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA)) break; /* Otherwise, consume the `,' token. */ cp_lexer_consume_token (parser->lexer); } return end_template_parm_list (parameter_list); } /* Parse a template-parameter. template-parameter: type-parameter parameter-declaration If all goes well, returns a TREE_LIST. The TREE_VALUE represents the parameter. The TREE_PURPOSE is the default value, if any. Returns ERROR_MARK_NODE on failure. *IS_NON_TYPE is set to true iff this parameter is a non-type parameter. *IS_PARAMETER_PACK is set to true iff this parameter is a parameter pack. */ static tree cp_parser_template_parameter (cp_parser* parser, bool *is_non_type, bool *is_parameter_pack) { cp_token *token; cp_parameter_declarator *parameter_declarator; cp_declarator *id_declarator; tree parm; /* Assume it is a type parameter or a template parameter. */ *is_non_type = false; /* Assume it not a parameter pack. */ *is_parameter_pack = false; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* If it is `class' or `template', we have a type-parameter. */ if (token->keyword == RID_TEMPLATE) return cp_parser_type_parameter (parser, is_parameter_pack); /* If it is `class' or `typename' we do not know yet whether it is a type parameter or a non-type parameter. Consider: template <typename T, typename T::X X> ... or: template <class C, class D*> ... Here, the first parameter is a type parameter, and the second is a non-type parameter. We can tell by looking at the token after the identifier -- if it is a `,', `=', or `>' then we have a type parameter. 
*/ if (token->keyword == RID_TYPENAME || token->keyword == RID_CLASS) { /* Peek at the token after `class' or `typename'. */ token = cp_lexer_peek_nth_token (parser->lexer, 2); /* If it's an ellipsis, we have a template type parameter pack. */ if (token->type == CPP_ELLIPSIS) return cp_parser_type_parameter (parser, is_parameter_pack); /* If it's an identifier, skip it. */ if (token->type == CPP_NAME) token = cp_lexer_peek_nth_token (parser->lexer, 3); /* Now, see if the token looks like the end of a template parameter. */ if (token->type == CPP_COMMA || token->type == CPP_EQ || token->type == CPP_GREATER) return cp_parser_type_parameter (parser, is_parameter_pack); } /* Otherwise, it is a non-type parameter. [temp.param] When parsing a default template-argument for a non-type template-parameter, the first non-nested `>' is taken as the end of the template parameter-list rather than a greater-than operator. */ *is_non_type = true; parameter_declarator = cp_parser_parameter_declaration (parser, /*template_parm_p=*/true, /*parenthesized_p=*/NULL); /* If the parameter declaration is marked as a parameter pack, set *IS_PARAMETER_PACK to notify the caller. Also, unmark the declarator's PACK_EXPANSION_P, otherwise we'll get errors from grokdeclarator. */ if (parameter_declarator && parameter_declarator->declarator && parameter_declarator->declarator->parameter_pack_p) { *is_parameter_pack = true; parameter_declarator->declarator->parameter_pack_p = false; } /* If the next token is an ellipsis, and we don't already have it marked as a parameter pack, then we have a parameter pack (that has no declarator). */ if (!*is_parameter_pack && cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS) && declarator_can_be_parameter_pack (parameter_declarator->declarator)) { /* Consume the `...'. 
*/ cp_lexer_consume_token (parser->lexer); maybe_warn_variadic_templates (); *is_parameter_pack = true; } /* We might end up with a pack expansion as the type of the non-type template parameter, in which case this is a non-type template parameter pack. */ else if (parameter_declarator && parameter_declarator->decl_specifiers.type && PACK_EXPANSION_P (parameter_declarator->decl_specifiers.type)) { *is_parameter_pack = true; parameter_declarator->decl_specifiers.type = PACK_EXPANSION_PATTERN (parameter_declarator->decl_specifiers.type); } if (*is_parameter_pack && cp_lexer_next_token_is (parser->lexer, CPP_EQ)) { /* Parameter packs cannot have default arguments. However, a user may try to do so, so we'll parse them and give an appropriate diagnostic here. */ cp_token *start_token = cp_lexer_peek_token (parser->lexer); /* Find the name of the parameter pack. */ id_declarator = parameter_declarator->declarator; while (id_declarator && id_declarator->kind != cdk_id) id_declarator = id_declarator->declarator; if (id_declarator && id_declarator->kind == cdk_id) error_at (start_token->location, "template parameter pack %qD cannot have a default argument", id_declarator->u.id.unqualified_name); else error_at (start_token->location, "template parameter pack cannot have a default argument"); /* Parse the default argument, but throw away the result. */ cp_parser_default_argument (parser, /*template_parm_p=*/true); } parm = grokdeclarator (parameter_declarator->declarator, &parameter_declarator->decl_specifiers, TPARM, /*initialized=*/0, /*attrlist=*/NULL); if (parm == error_mark_node) return error_mark_node; return build_tree_list (parameter_declarator->default_argument, parm); } /* Parse a type-parameter. 
type-parameter: class identifier [opt] class identifier [opt] = type-id typename identifier [opt] typename identifier [opt] = type-id template < template-parameter-list > class identifier [opt] template < template-parameter-list > class identifier [opt] = id-expression GNU Extension (variadic templates): type-parameter: class ... identifier [opt] typename ... identifier [opt] Returns a TREE_LIST. The TREE_VALUE is itself a TREE_LIST. The TREE_PURPOSE is the default-argument, if any. The TREE_VALUE is the declaration of the parameter. Sets *IS_PARAMETER_PACK if this is a template parameter pack. */ static tree cp_parser_type_parameter (cp_parser* parser, bool *is_parameter_pack) { cp_token *token; tree parameter; /* Look for a keyword to tell us what kind of parameter this is. */ token = cp_parser_require (parser, CPP_KEYWORD, RT_CLASS_TYPENAME_TEMPLATE); if (!token) return error_mark_node; switch (token->keyword) { case RID_CLASS: case RID_TYPENAME: { tree identifier; tree default_argument; /* If the next token is an ellipsis, we have a template argument pack. */ if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)) { /* Consume the `...' token. */ cp_lexer_consume_token (parser->lexer); maybe_warn_variadic_templates (); *is_parameter_pack = true; } /* If the next token is an identifier, then it names the parameter. */ if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) identifier = cp_parser_identifier (parser); else identifier = NULL_TREE; /* Create the parameter. */ parameter = finish_template_type_parm (class_type_node, identifier); /* If the next token is an `=', we have a default argument. */ if (cp_lexer_next_token_is (parser->lexer, CPP_EQ)) { /* Consume the `=' token. */ cp_lexer_consume_token (parser->lexer); /* Parse the default-argument. */ push_deferring_access_checks (dk_no_deferred); default_argument = cp_parser_type_id (parser); /* Template parameter packs cannot have default arguments. 
*/ if (*is_parameter_pack) { if (identifier) error_at (token->location, "template parameter pack %qD cannot have a " "default argument", identifier); else error_at (token->location, "template parameter packs cannot have " "default arguments"); default_argument = NULL_TREE; } pop_deferring_access_checks (); } else default_argument = NULL_TREE; /* Create the combined representation of the parameter and the default argument. */ parameter = build_tree_list (default_argument, parameter); } break; case RID_TEMPLATE: { tree identifier; tree default_argument; /* Look for the `<'. */ cp_parser_require (parser, CPP_LESS, RT_LESS); /* Parse the template-parameter-list. */ cp_parser_template_parameter_list (parser); /* Look for the `>'. */ cp_parser_require (parser, CPP_GREATER, RT_GREATER); /* Look for the `class' keyword. */ cp_parser_require_keyword (parser, RID_CLASS, RT_CLASS); /* If the next token is an ellipsis, we have a template argument pack. */ if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)) { /* Consume the `...' token. */ cp_lexer_consume_token (parser->lexer); maybe_warn_variadic_templates (); *is_parameter_pack = true; } /* If the next token is an `=', then there is a default-argument. If the next token is a `>', we are at the end of the parameter-list. If the next token is a `,', then we are at the end of this parameter. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_EQ) && cp_lexer_next_token_is_not (parser->lexer, CPP_GREATER) && cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA)) { identifier = cp_parser_identifier (parser); /* Treat invalid names as if the parameter were nameless. */ if (identifier == error_mark_node) identifier = NULL_TREE; } else identifier = NULL_TREE; /* Create the template parameter. */ parameter = finish_template_template_parm (class_type_node, identifier); /* If the next token is an `=', then there is a default-argument. */ if (cp_lexer_next_token_is (parser->lexer, CPP_EQ)) { bool is_template; /* Consume the `='. 
*/ cp_lexer_consume_token (parser->lexer); /* Parse the id-expression. */ push_deferring_access_checks (dk_no_deferred); /* save token before parsing the id-expression, for error reporting */ token = cp_lexer_peek_token (parser->lexer); default_argument = cp_parser_id_expression (parser, /*template_keyword_p=*/false, /*check_dependency_p=*/true, /*template_p=*/&is_template, /*declarator_p=*/false, /*optional_p=*/false); if (TREE_CODE (default_argument) == TYPE_DECL) /* If the id-expression was a template-id that refers to a template-class, we already have the declaration here, so no further lookup is needed. */ ; else /* Look up the name. */ default_argument = cp_parser_lookup_name (parser, default_argument, none_type, /*is_template=*/is_template, /*is_namespace=*/false, /*check_dependency=*/true, /*ambiguous_decls=*/NULL, token->location); /* See if the default argument is valid. */ default_argument = check_template_template_default_arg (default_argument); /* Template parameter packs cannot have default arguments. */ if (*is_parameter_pack) { if (identifier) error_at (token->location, "template parameter pack %qD cannot " "have a default argument", identifier); else error_at (token->location, "template parameter packs cannot " "have default arguments"); default_argument = NULL_TREE; } pop_deferring_access_checks (); } else default_argument = NULL_TREE; /* Create the combined representation of the parameter and the default argument. */ parameter = build_tree_list (default_argument, parameter); } break; default: gcc_unreachable (); break; } return parameter; } /* Parse a template-id. template-id: template-name < template-argument-list [opt] > If TEMPLATE_KEYWORD_P is TRUE, then we have just seen the `template' keyword. In this case, a TEMPLATE_ID_EXPR will be returned. Otherwise, if the template-name names a function, or set of functions, returns a TEMPLATE_ID_EXPR. If the template-name names a class, returns a TYPE_DECL for the specialization. 
If CHECK_DEPENDENCY_P is FALSE, names are looked up in uninstantiated templates. */ static tree cp_parser_template_id (cp_parser *parser, bool template_keyword_p, bool check_dependency_p, bool is_declaration) { int i; tree templ; tree arguments; tree template_id; cp_token_position start_of_id = 0; deferred_access_check *chk; VEC (deferred_access_check,gc) *access_check; cp_token *next_token = NULL, *next_token_2 = NULL; bool is_identifier; /* If the next token corresponds to a template-id, there is no need to reparse it. */ next_token = cp_lexer_peek_token (parser->lexer); if (next_token->type == CPP_TEMPLATE_ID) { struct tree_check *check_value; /* Get the stored value. */ check_value = cp_lexer_consume_token (parser->lexer)->u.tree_check_value; /* Perform any access checks that were deferred. */ access_check = check_value->checks; if (access_check) { FOR_EACH_VEC_ELT (deferred_access_check, access_check, i, chk) perform_or_defer_access_check (chk->binfo, chk->decl, chk->diag_decl); } /* Return the stored value. */ return check_value->value; } /* Avoid performing name lookup if there is no possibility of finding a template-id. */ if ((next_token->type != CPP_NAME && next_token->keyword != RID_OPERATOR) || (next_token->type == CPP_NAME && !cp_parser_nth_token_starts_template_argument_list_p (parser, 2))) { cp_parser_error (parser, "expected template-id"); return error_mark_node; } /* Remember where the template-id starts. */ if (cp_parser_uncommitted_to_tentative_parse_p (parser)) start_of_id = cp_lexer_token_position (parser->lexer, false); push_deferring_access_checks (dk_deferred); /* Parse the template-name. */ is_identifier = false; templ = cp_parser_template_name (parser, template_keyword_p, check_dependency_p, is_declaration, &is_identifier); if (templ == error_mark_node || is_identifier) { pop_deferring_access_checks (); return templ; } /* If we find the sequence `[:' after a template-name, it's probably a digraph-typo for `< ::'. 
Substitute the tokens and check if we can parse correctly the argument list. */ next_token = cp_lexer_peek_token (parser->lexer); next_token_2 = cp_lexer_peek_nth_token (parser->lexer, 2); if (next_token->type == CPP_OPEN_SQUARE && next_token->flags & DIGRAPH && next_token_2->type == CPP_COLON && !(next_token_2->flags & PREV_WHITE)) { cp_parser_parse_tentatively (parser); /* Change `:' into `::'. */ next_token_2->type = CPP_SCOPE; /* Consume the first token (CPP_OPEN_SQUARE - which we pretend it is CPP_LESS. */ cp_lexer_consume_token (parser->lexer); /* Parse the arguments. */ arguments = cp_parser_enclosed_template_argument_list (parser); if (!cp_parser_parse_definitely (parser)) { /* If we couldn't parse an argument list, then we revert our changes and return simply an error. Maybe this is not a template-id after all. */ next_token_2->type = CPP_COLON; cp_parser_error (parser, "expected %<<%>"); pop_deferring_access_checks (); return error_mark_node; } /* Otherwise, emit an error about the invalid digraph, but continue parsing because we got our argument list. */ if (permerror (next_token->location, "%<<::%> cannot begin a template-argument list")) { static bool hint = false; inform (next_token->location, "%<<:%> is an alternate spelling for %<[%>." " Insert whitespace between %<<%> and %<::%>"); if (!hint && !flag_permissive) { inform (next_token->location, "(if you use %<-fpermissive%>" " G++ will accept your code)"); hint = true; } } } else { /* Look for the `<' that starts the template-argument-list. */ if (!cp_parser_require (parser, CPP_LESS, RT_LESS)) { pop_deferring_access_checks (); return error_mark_node; } /* Parse the arguments. */ arguments = cp_parser_enclosed_template_argument_list (parser); } /* Build a representation of the specialization. 
*/ if (TREE_CODE (templ) == IDENTIFIER_NODE) template_id = build_min_nt (TEMPLATE_ID_EXPR, templ, arguments); else if (DECL_TYPE_TEMPLATE_P (templ) || DECL_TEMPLATE_TEMPLATE_PARM_P (templ)) { bool entering_scope; /* In "template <typename T> ... A<T>::", A<T> is the abstract A template (rather than some instantiation thereof) only if is not nested within some other construct. For example, in "template <typename T> void f(T) { A<T>::", A<T> is just an instantiation of A. */ entering_scope = (template_parm_scope_p () && cp_lexer_next_token_is (parser->lexer, CPP_SCOPE)); template_id = finish_template_type (templ, arguments, entering_scope); } else { /* If it's not a class-template or a template-template, it should be a function-template. */ gcc_assert ((DECL_FUNCTION_TEMPLATE_P (templ) || TREE_CODE (templ) == OVERLOAD || BASELINK_P (templ))); template_id = lookup_template_function (templ, arguments); } /* If parsing tentatively, replace the sequence of tokens that makes up the template-id with a CPP_TEMPLATE_ID token. That way, should we re-parse the token stream, we will not have to repeat the effort required to do the parse, nor will we issue duplicate error messages about problems during instantiation of the template. */ if (start_of_id) { cp_token *token = cp_lexer_token_at (parser->lexer, start_of_id); /* Reset the contents of the START_OF_ID token. */ token->type = CPP_TEMPLATE_ID; /* Retrieve any deferred checks. Do not pop this access checks yet so the memory will not be reclaimed during token replacing below. */ token->u.tree_check_value = ggc_alloc_cleared_tree_check (); token->u.tree_check_value->value = template_id; token->u.tree_check_value->checks = get_deferred_access_checks (); token->keyword = RID_MAX; /* Purge all subsequent tokens. */ cp_lexer_purge_tokens_after (parser->lexer, start_of_id); /* ??? 
Can we actually assume that, if template_id == error_mark_node, we will have issued a diagnostic to the user, as opposed to simply marking the tentative parse as failed? */ if (cp_parser_error_occurred (parser) && template_id != error_mark_node) error_at (token->location, "parse error in template argument list"); } pop_deferring_access_checks (); return template_id; } /* Parse a template-name. template-name: identifier The standard should actually say: template-name: identifier operator-function-id A defect report has been filed about this issue. A conversion-function-id cannot be a template name because they cannot be part of a template-id. In fact, looking at this code: a.operator K<int>() the conversion-function-id is "operator K<int>", and K<int> is a type-id. It is impossible to call a templated conversion-function-id with an explicit argument list, since the only allowed template parameter is the type to which it is converting. If TEMPLATE_KEYWORD_P is true, then we have just seen the `template' keyword, in a construction like: T::template f<3>() In that case `f' is taken to be a template-name, even though there is no way of knowing for sure. Returns the TEMPLATE_DECL for the template, or an OVERLOAD if the name refers to a set of overloaded functions, at least one of which is a template, or an IDENTIFIER_NODE with the name of the template, if TEMPLATE_KEYWORD_P is true. If CHECK_DEPENDENCY_P is FALSE, names are looked up inside uninstantiated templates. */ static tree cp_parser_template_name (cp_parser* parser, bool template_keyword_p, bool check_dependency_p, bool is_declaration, bool *is_identifier) { tree identifier; tree decl; tree fns; cp_token *token = cp_lexer_peek_token (parser->lexer); /* If the next token is `operator', then we have either an operator-function-id or a conversion-function-id. 
*/ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_OPERATOR)) { /* We don't know whether we're looking at an operator-function-id or a conversion-function-id. */ cp_parser_parse_tentatively (parser); /* Try an operator-function-id. */ identifier = cp_parser_operator_function_id (parser); /* If that didn't work, try a conversion-function-id. */ if (!cp_parser_parse_definitely (parser)) { cp_parser_error (parser, "expected template-name"); return error_mark_node; } } /* Look for the identifier. */ else identifier = cp_parser_identifier (parser); /* If we didn't find an identifier, we don't have a template-id. */ if (identifier == error_mark_node) return error_mark_node; /* If the name immediately followed the `template' keyword, then it is a template-name. However, if the next token is not `<', then we do not treat it as a template-name, since it is not being used as part of a template-id. This enables us to handle constructs like: template <typename T> struct S { S(); }; template <typename T> S<T>::S(); correctly. We would treat `S' as a template -- if it were `S<T>' -- but we do not if there is no `<'. */ if (processing_template_decl && cp_parser_nth_token_starts_template_argument_list_p (parser, 1)) { /* In a declaration, in a dependent context, we pretend that the "template" keyword was present in order to improve error recovery. For example, given: template <typename T> void f(T::X<int>); we want to treat "X<int>" as a template-id. */ if (is_declaration && !template_keyword_p && parser->scope && TYPE_P (parser->scope) && check_dependency_p && dependent_scope_p (parser->scope) /* Do not do this for dtors (or ctors), since they never need the template keyword before their name. */ && !constructor_name_p (identifier, parser->scope)) { cp_token_position start = 0; /* Explain what went wrong. 
*/ error_at (token->location, "non-template %qD used as template", identifier); inform (token->location, "use %<%T::template %D%> to indicate that it is a template", parser->scope, identifier); /* If parsing tentatively, find the location of the "<" token. */ if (cp_parser_simulate_error (parser)) start = cp_lexer_token_position (parser->lexer, true); /* Parse the template arguments so that we can issue error messages about them. */ cp_lexer_consume_token (parser->lexer); cp_parser_enclosed_template_argument_list (parser); /* Skip tokens until we find a good place from which to continue parsing. */ cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/true, /*consume_paren=*/false); /* If parsing tentatively, permanently remove the template argument list. That will prevent duplicate error messages from being issued about the missing "template" keyword. */ if (start) cp_lexer_purge_tokens_after (parser->lexer, start); if (is_identifier) *is_identifier = true; return identifier; } /* If the "template" keyword is present, then there is generally no point in doing name-lookup, so we just return IDENTIFIER. But, if the qualifying scope is non-dependent then we can (and must) do name-lookup normally. */ if (template_keyword_p && (!parser->scope || (TYPE_P (parser->scope) && dependent_type_p (parser->scope)))) return identifier; } /* Look up the name. */ decl = cp_parser_lookup_name (parser, identifier, none_type, /*is_template=*/true, /*is_namespace=*/false, check_dependency_p, /*ambiguous_decls=*/NULL, token->location); /* If DECL is a template, then the name was a template-name. */ if (TREE_CODE (decl) == TEMPLATE_DECL) ; else { tree fn = NULL_TREE; /* The standard does not explicitly indicate whether a name that names a set of overloaded declarations, some of which are templates, is a template-name. However, such a name should be a template-name; otherwise, there is no way to form a template-id for the overloaded templates. 
*/ fns = BASELINK_P (decl) ? BASELINK_FUNCTIONS (decl) : decl; if (TREE_CODE (fns) == OVERLOAD) for (fn = fns; fn; fn = OVL_NEXT (fn)) if (TREE_CODE (OVL_CURRENT (fn)) == TEMPLATE_DECL) break; if (!fn) { /* The name does not name a template. */ cp_parser_error (parser, "expected template-name"); return error_mark_node; } } /* If DECL is dependent, and refers to a function, then just return its name; we will look it up again during template instantiation. */ if (DECL_FUNCTION_TEMPLATE_P (decl) || !DECL_P (decl)) { tree scope = ovl_scope (decl); if (TYPE_P (scope) && dependent_type_p (scope)) return identifier; } return decl; } /* Parse a template-argument-list. template-argument-list: template-argument ... [opt] template-argument-list , template-argument ... [opt] Returns a TREE_VEC containing the arguments. */ static tree cp_parser_template_argument_list (cp_parser* parser) { tree fixed_args[10]; unsigned n_args = 0; unsigned alloced = 10; tree *arg_ary = fixed_args; tree vec; bool saved_in_template_argument_list_p; bool saved_ice_p; bool saved_non_ice_p; saved_in_template_argument_list_p = parser->in_template_argument_list_p; parser->in_template_argument_list_p = true; /* Even if the template-id appears in an integral constant-expression, the contents of the argument list do not. */ saved_ice_p = parser->integral_constant_expression_p; parser->integral_constant_expression_p = false; saved_non_ice_p = parser->non_integral_constant_expression_p; parser->non_integral_constant_expression_p = false; /* Parse the arguments. */ do { tree argument; if (n_args) /* Consume the comma. */ cp_lexer_consume_token (parser->lexer); /* Parse the template-argument. */ argument = cp_parser_template_argument (parser); /* If the next token is an ellipsis, we're expanding a template argument pack. 
*/ if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)) { if (argument == error_mark_node) { cp_token *token = cp_lexer_peek_token (parser->lexer); error_at (token->location, "expected parameter pack before %<...%>"); } /* Consume the `...' token. */ cp_lexer_consume_token (parser->lexer); /* Make the argument into a TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION. */ argument = make_pack_expansion (argument); } if (n_args == alloced) { alloced *= 2; if (arg_ary == fixed_args) { arg_ary = XNEWVEC (tree, alloced); memcpy (arg_ary, fixed_args, sizeof (tree) * n_args); } else arg_ary = XRESIZEVEC (tree, arg_ary, alloced); } arg_ary[n_args++] = argument; } while (cp_lexer_next_token_is (parser->lexer, CPP_COMMA)); vec = make_tree_vec (n_args); while (n_args--) TREE_VEC_ELT (vec, n_args) = arg_ary[n_args]; if (arg_ary != fixed_args) free (arg_ary); parser->non_integral_constant_expression_p = saved_non_ice_p; parser->integral_constant_expression_p = saved_ice_p; parser->in_template_argument_list_p = saved_in_template_argument_list_p; #ifdef ENABLE_CHECKING SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (vec, TREE_VEC_LENGTH (vec)); #endif return vec; } /* Parse a template-argument. template-argument: assignment-expression type-id id-expression The representation is that of an assignment-expression, type-id, or id-expression -- except that the qualified id-expression is evaluated, so that the value returned is either a DECL or an OVERLOAD. Although the standard says "assignment-expression", it forbids throw-expressions or assignments in the template argument. Therefore, we use "conditional-expression" instead. */ static tree cp_parser_template_argument (cp_parser* parser) { tree argument; bool template_p; bool address_p; bool maybe_type_id = false; cp_token *token = NULL, *argument_start_token = NULL; cp_id_kind idk; /* There's really no way to know what we're looking at, so we just try each alternative in order. 
[temp.arg] In a template-argument, an ambiguity between a type-id and an expression is resolved to a type-id, regardless of the form of the corresponding template-parameter. Therefore, we try a type-id first. */ cp_parser_parse_tentatively (parser); argument = cp_parser_template_type_arg (parser); /* If there was no error parsing the type-id but the next token is a '>>', our behavior depends on which dialect of C++ we're parsing. In C++98, we probably found a typo for '> >'. But there are type-id which are also valid expressions. For instance: struct X { int operator >> (int); }; template <int V> struct Foo {}; Foo<X () >> 5> r; Here 'X()' is a valid type-id of a function type, but the user just wanted to write the expression "X() >> 5". Thus, we remember that we found a valid type-id, but we still try to parse the argument as an expression to see what happens. In C++0x, the '>>' will be considered two separate '>' tokens. */ if (!cp_parser_error_occurred (parser) && cxx_dialect == cxx98 && cp_lexer_next_token_is (parser->lexer, CPP_RSHIFT)) { maybe_type_id = true; cp_parser_abort_tentative_parse (parser); } else { /* If the next token isn't a `,' or a `>', then this argument wasn't really finished. This means that the argument is not a valid type-id. */ if (!cp_parser_next_token_ends_template_argument_p (parser)) cp_parser_error (parser, "expected template-argument"); /* If that worked, we're done. */ if (cp_parser_parse_definitely (parser)) return argument; } /* We're still not sure what the argument will be. */ cp_parser_parse_tentatively (parser); /* Try a template. */ argument_start_token = cp_lexer_peek_token (parser->lexer); argument = cp_parser_id_expression (parser, /*template_keyword_p=*/false, /*check_dependency_p=*/true, &template_p, /*declarator_p=*/false, /*optional_p=*/false); /* If the next token isn't a `,' or a `>', then this argument wasn't really finished. 
*/ if (!cp_parser_next_token_ends_template_argument_p (parser)) cp_parser_error (parser, "expected template-argument"); if (!cp_parser_error_occurred (parser)) { /* Figure out what is being referred to. If the id-expression was for a class template specialization, then we will have a TYPE_DECL at this point. There is no need to do name lookup at this point in that case. */ if (TREE_CODE (argument) != TYPE_DECL) argument = cp_parser_lookup_name (parser, argument, none_type, /*is_template=*/template_p, /*is_namespace=*/false, /*check_dependency=*/true, /*ambiguous_decls=*/NULL, argument_start_token->location); if (TREE_CODE (argument) != TEMPLATE_DECL && TREE_CODE (argument) != UNBOUND_CLASS_TEMPLATE) cp_parser_error (parser, "expected template-name"); } if (cp_parser_parse_definitely (parser)) return argument; /* It must be a non-type argument. There permitted cases are given in [temp.arg.nontype]: -- an integral constant-expression of integral or enumeration type; or -- the name of a non-type template-parameter; or -- the name of an object or function with external linkage... -- the address of an object or function with external linkage... -- a pointer to member... */ /* Look for a non-type template parameter. */ if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) { cp_parser_parse_tentatively (parser); argument = cp_parser_primary_expression (parser, /*address_p=*/false, /*cast_p=*/false, /*template_arg_p=*/true, &idk); if (TREE_CODE (argument) != TEMPLATE_PARM_INDEX || !cp_parser_next_token_ends_template_argument_p (parser)) cp_parser_simulate_error (parser); if (cp_parser_parse_definitely (parser)) return argument; } /* If the next token is "&", the argument must be the address of an object or function with external linkage. */ address_p = cp_lexer_next_token_is (parser->lexer, CPP_AND); if (address_p) cp_lexer_consume_token (parser->lexer); /* See if we might have an id-expression. 
*/ token = cp_lexer_peek_token (parser->lexer); if (token->type == CPP_NAME || token->keyword == RID_OPERATOR || token->type == CPP_SCOPE || token->type == CPP_TEMPLATE_ID || token->type == CPP_NESTED_NAME_SPECIFIER) { cp_parser_parse_tentatively (parser); argument = cp_parser_primary_expression (parser, address_p, /*cast_p=*/false, /*template_arg_p=*/true, &idk); if (cp_parser_error_occurred (parser) || !cp_parser_next_token_ends_template_argument_p (parser)) cp_parser_abort_tentative_parse (parser); else { tree probe; if (TREE_CODE (argument) == INDIRECT_REF) { gcc_assert (REFERENCE_REF_P (argument)); argument = TREE_OPERAND (argument, 0); } /* If we're in a template, we represent a qualified-id referring to a static data member as a SCOPE_REF even if the scope isn't dependent so that we can check access control later. */ probe = argument; if (TREE_CODE (probe) == SCOPE_REF) probe = TREE_OPERAND (probe, 1); if (TREE_CODE (probe) == VAR_DECL) { /* A variable without external linkage might still be a valid constant-expression, so no error is issued here if the external-linkage check fails. */ if (!address_p && !DECL_EXTERNAL_LINKAGE_P (probe)) cp_parser_simulate_error (parser); } else if (is_overloaded_fn (argument)) /* All overloaded functions are allowed; if the external linkage test does not pass, an error will be issued later. */ ; else if (address_p && (TREE_CODE (argument) == OFFSET_REF || TREE_CODE (argument) == SCOPE_REF)) /* A pointer-to-member. */ ; else if (TREE_CODE (argument) == TEMPLATE_PARM_INDEX) ; else cp_parser_simulate_error (parser); if (cp_parser_parse_definitely (parser)) { if (address_p) argument = build_x_unary_op (ADDR_EXPR, argument, tf_warning_or_error); return argument; } } } /* If the argument started with "&", there are no other valid alternatives at this point. 
   */
  if (address_p)
    {
      /* Past this point only a constant-expression could still match,
	 and "&expr" is never one, so fail immediately.  */
      cp_parser_error (parser, "invalid non-type template argument");
      return error_mark_node;
    }
  /* If the argument wasn't successfully parsed as a type-id followed
     by '>>', the argument can only be a constant expression now.
     Otherwise, we try parsing the constant-expression tentatively,
     because the argument could really be a type-id.  */
  if (maybe_type_id)
    cp_parser_parse_tentatively (parser);
  argument = cp_parser_constant_expression (parser,
					    /*allow_non_constant_p=*/false,
					    /*non_constant_p=*/NULL);
  argument = fold_non_dependent_expr (argument);
  if (!maybe_type_id)
    return argument;
  if (!cp_parser_next_token_ends_template_argument_p (parser))
    cp_parser_error (parser, "expected template-argument");
  if (cp_parser_parse_definitely (parser))
    return argument;
  /* We did our best to parse the argument as a non type-id, but that
     was the only alternative that matched (albeit with a '>' after
     it).  We can assume it's just a typo from the user, and a
     diagnostic will then be issued.  */
  return cp_parser_template_type_arg (parser);
}

/* Parse an explicit-instantiation.

   explicit-instantiation:
     template declaration

   Although the standard says `declaration', what it really means is:

   explicit-instantiation:
     template decl-specifier-seq [opt] declarator [opt] ;

   Things like `template int S<int>::i = 5, int S<double>::j;' are not
   supposed to be allowed.  A defect report has been filed about this
   issue.

   GNU Extension:

   explicit-instantiation:
     storage-class-specifier template
       decl-specifier-seq [opt] declarator [opt] ;
     function-specifier template
       decl-specifier-seq [opt] declarator [opt] ;  */

static void
cp_parser_explicit_instantiation (cp_parser* parser)
{
  int declares_class_or_enum;
  cp_decl_specifier_seq decl_specifiers;
  tree extension_specifier = NULL_TREE;

  /* Paired with the timevar_pop at the end of this function; every
     return path below reaches it.  */
  timevar_push (TV_TEMPLATE_INST);

  /* Look for an (optional) storage-class-specifier or
     function-specifier; these prefixes are a GNU extension.  */
  if (cp_parser_allow_gnu_extensions_p (parser))
    {
      extension_specifier
	= cp_parser_storage_class_specifier_opt (parser);
      if (!extension_specifier)
	extension_specifier
	  = cp_parser_function_specifier_opt (parser,
					      /*decl_specs=*/NULL);
    }

  /* Look for the `template' keyword.  */
  cp_parser_require_keyword (parser, RID_TEMPLATE, RT_TEMPLATE);
  /* Let the front end know that we are processing an explicit
     instantiation.  */
  begin_explicit_instantiation ();
  /* [temp.explicit] says that we are supposed to ignore access
     control while processing explicit instantiation directives.
     Note: each branch below must balance this push with a
     pop_deferring_access_checks call.  */
  push_deferring_access_checks (dk_no_check);
  /* Parse a decl-specifier-seq.  */
  cp_parser_decl_specifier_seq (parser,
				CP_PARSER_FLAGS_OPTIONAL,
				&decl_specifiers,
				&declares_class_or_enum);
  /* If there was exactly one decl-specifier, and it declared a class,
     and there's no declarator, then we have an explicit type
     instantiation.  */
  if (declares_class_or_enum && cp_parser_declares_only_class_p (parser))
    {
      tree type;

      type = check_tag_decl (&decl_specifiers);
      /* Turn access control back on for names used during
	 template instantiation.  */
      pop_deferring_access_checks ();
      if (type)
	do_type_instantiation (type, extension_specifier,
			       /*complain=*/tf_error);
    }
  else
    {
      cp_declarator *declarator;
      tree decl;

      /* Parse the declarator.  */
      declarator
	= cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
				/*ctor_dtor_or_conv_p=*/NULL,
				/*parenthesized_p=*/NULL,
				/*member_p=*/false);
      if (declares_class_or_enum & 2)
	cp_parser_check_for_definition_in_return_type
	  (declarator, decl_specifiers.type,
	   decl_specifiers.type_location);
      if (declarator != cp_error_declarator)
	{
	  /* [temp.explicit] forbids `inline' and `constexpr' here;
	     issue permerrors but keep going.  */
	  if (decl_specifiers.specs[(int)ds_inline])
	    permerror (input_location, "explicit instantiation shall not use"
		       " %<inline%> specifier");
	  if (decl_specifiers.specs[(int)ds_constexpr])
	    permerror (input_location, "explicit instantiation shall not use"
		       " %<constexpr%> specifier");
	  decl = grokdeclarator (declarator, &decl_specifiers,
				 NORMAL, 0, &decl_specifiers.attributes);
	  /* Turn access control back on for names used during
	     template instantiation.  */
	  pop_deferring_access_checks ();
	  /* Do the explicit instantiation.  */
	  do_decl_instantiation (decl, extension_specifier);
	}
      else
	{
	  pop_deferring_access_checks ();
	  /* Skip the body of the explicit instantiation.  */
	  cp_parser_skip_to_end_of_statement (parser);
	}
    }
  /* We're done with the instantiation.  */
  end_explicit_instantiation ();

  cp_parser_consume_semicolon_at_end_of_statement (parser);

  timevar_pop (TV_TEMPLATE_INST);
}

/* Parse an explicit-specialization.

   explicit-specialization:
     template < > declaration

   Although the standard says `declaration', what it really means is:

   explicit-specialization:
     template <> decl-specifier [opt] init-declarator [opt] ;
     template <> function-definition
     template <> explicit-specialization
     template <> template-declaration  */

static void
cp_parser_explicit_specialization (cp_parser* parser)
{
  bool need_lang_pop;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* Look for the `template' keyword.  */
  cp_parser_require_keyword (parser, RID_TEMPLATE, RT_TEMPLATE);
  /* Look for the `<'.  */
  cp_parser_require (parser, CPP_LESS, RT_LESS);
  /* Look for the `>'.  */
  cp_parser_require (parser, CPP_GREATER, RT_GREATER);
  /* We have processed another parameter list.
   */
  ++parser->num_template_parameter_lists;
  /* [temp]

     A template ... explicit specialization ... shall not have C
     linkage.  */
  if (current_lang_name == lang_name_c)
    {
      error_at (token->location, "template specialization with C linkage");
      /* Give it C++ linkage to avoid confusing other parts of the
	 front end.  NEED_LANG_POP records that we must undo this
	 error-recovery push before returning.  */
      push_lang_context (lang_name_cplusplus);
      need_lang_pop = true;
    }
  else
    need_lang_pop = false;
  /* Let the front end know that we are beginning a specialization.  */
  if (!begin_specialization ())
    {
      end_specialization ();
      return;
    }

  /* If the next keyword is `template', we need to figure out whether
     or not we're looking at a template-declaration.  A chain such as
     `template <> template <> ...' is handled by recursing into this
     function.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE))
    {
      if (cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_LESS
	  && cp_lexer_peek_nth_token (parser->lexer, 3)->type != CPP_GREATER)
	cp_parser_template_declaration_after_export (parser,
						     /*member_p=*/false);
      else
	cp_parser_explicit_specialization (parser);
    }
  else
    /* Parse the dependent declaration.  */
    cp_parser_single_declaration (parser,
				  /*checks=*/NULL,
				  /*member_p=*/false,
				  /*explicit_specialization_p=*/true,
				  /*friend_p=*/NULL);
  /* We're done with the specialization.  */
  end_specialization ();
  /* For the erroneous case of a template with C linkage, we pushed an
     implicit C++ linkage scope; exit that scope now.  */
  if (need_lang_pop)
    pop_lang_context ();
  /* We're done with this parameter list.  */
  --parser->num_template_parameter_lists;
}

/* Parse a type-specifier.

   type-specifier:
     simple-type-specifier
     class-specifier
     enum-specifier
     elaborated-type-specifier
     cv-qualifier

   GNU Extension:

   type-specifier:
     __complex__

   Returns a representation of the type-specifier.  For a
   class-specifier, enum-specifier, or elaborated-type-specifier, a
   TREE_TYPE is returned; otherwise, a TYPE_DECL is returned.

   The parser flags FLAGS is used to control type-specifier parsing.

   If IS_DECLARATION is TRUE, then this type-specifier is appearing
   in a decl-specifier-seq.

   If DECLARES_CLASS_OR_ENUM is non-NULL, and the type-specifier is a
   class-specifier, enum-specifier, or elaborated-type-specifier, then
   *DECLARES_CLASS_OR_ENUM is set to a nonzero value.  The value is 1
   if a type is declared; 2 if it is defined.  Otherwise, it is set to
   zero.

   If IS_CV_QUALIFIER is non-NULL, and the type-specifier is a
   cv-qualifier, then IS_CV_QUALIFIER is set to TRUE.  Otherwise, it is
   set to FALSE.  */

static tree
cp_parser_type_specifier (cp_parser* parser,
			  cp_parser_flags flags,
			  cp_decl_specifier_seq *decl_specs,
			  bool is_declaration,
			  int* declares_class_or_enum,
			  bool* is_cv_qualifier)
{
  tree type_spec = NULL_TREE;
  cp_token *token;
  enum rid keyword;
  /* DS stays ds_last unless the token is one of the "simple" keyword
     specifiers (cv-qualifiers or __complex__) handled after the
     switch.  */
  cp_decl_spec ds = ds_last;

  /* Assume this type-specifier does not declare a new type.  */
  if (declares_class_or_enum)
    *declares_class_or_enum = 0;
  /* And that it does not specify a cv-qualifier.  */
  if (is_cv_qualifier)
    *is_cv_qualifier = false;
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  /* If we're looking at a keyword, we can use that to guide the
     production we choose.  */
  keyword = token->keyword;
  switch (keyword)
    {
    case RID_ENUM:
      if ((flags & CP_PARSER_FLAGS_NO_TYPE_DEFINITIONS))
	goto elaborated_type_specifier;

      /* Look for the enum-specifier.  */
      type_spec = cp_parser_enum_specifier (parser);
      /* If that worked, we're done.  */
      if (type_spec)
	{
	  if (declares_class_or_enum)
	    *declares_class_or_enum = 2;
	  if (decl_specs)
	    cp_parser_set_decl_spec_type (decl_specs,
					  type_spec,
					  token->location,
					  /*type_definition_p=*/true);
	  return type_spec;
	}
      else
	goto elaborated_type_specifier;

      /* Any of these indicate either a class-specifier, or an
	 elaborated-type-specifier.  */
    case RID_CLASS:
    case RID_STRUCT:
    case RID_UNION:
      if ((flags & CP_PARSER_FLAGS_NO_TYPE_DEFINITIONS))
	goto elaborated_type_specifier;

      /* Parse tentatively so that we can back up if we don't find a
	 class-specifier.  */
      cp_parser_parse_tentatively (parser);
      /* Look for the class-specifier.  */
      type_spec = cp_parser_class_specifier (parser);
      invoke_plugin_callbacks (PLUGIN_FINISH_TYPE, type_spec);
      /* If that worked, we're done.  */
      if (cp_parser_parse_definitely (parser))
	{
	  if (declares_class_or_enum)
	    *declares_class_or_enum = 2;
	  if (decl_specs)
	    cp_parser_set_decl_spec_type (decl_specs,
					  type_spec,
					  token->location,
					  /*type_definition_p=*/true);
	  return type_spec;
	}

      /* Fall through.  */
    elaborated_type_specifier:
      /* We're declaring (not defining) a class or enum.  */
      if (declares_class_or_enum)
	*declares_class_or_enum = 1;

      /* Fall through.  */
    case RID_TYPENAME:
      /* Look for an elaborated-type-specifier.  */
      type_spec
	= (cp_parser_elaborated_type_specifier
	   (parser,
	    decl_specs && decl_specs->specs[(int) ds_friend],
	    is_declaration));
      if (decl_specs)
	cp_parser_set_decl_spec_type (decl_specs,
				      type_spec,
				      token->location,
				      /*type_definition_p=*/false);
      return type_spec;

    case RID_CONST:
      ds = ds_const;
      if (is_cv_qualifier)
	*is_cv_qualifier = true;
      break;

    case RID_VOLATILE:
      ds = ds_volatile;
      if (is_cv_qualifier)
	*is_cv_qualifier = true;
      break;

    case RID_RESTRICT:
      ds = ds_restrict;
      if (is_cv_qualifier)
	*is_cv_qualifier = true;
      break;

    case RID_COMPLEX:
      /* The `__complex__' keyword is a GNU extension.  */
      ds = ds_complex;
      break;

    default:
      break;
    }

  /* Handle simple keywords.  */
  if (ds != ds_last)
    {
      if (decl_specs)
	{
	  ++decl_specs->specs[(int)ds];
	  decl_specs->any_specifiers_p = true;
	}
      return cp_lexer_consume_token (parser->lexer)->u.value;
    }

  /* If we do not already have a type-specifier, assume we are looking
     at a simple-type-specifier.  */
  type_spec = cp_parser_simple_type_specifier (parser, decl_specs, flags);

  /* If we didn't find a type-specifier, and a type-specifier was not
     optional in this context, issue an error message.  */
  if (!type_spec && !(flags & CP_PARSER_FLAGS_OPTIONAL))
    {
      cp_parser_error (parser, "expected type specifier");
      return error_mark_node;
    }

  return type_spec;
}

/* Parse a simple-type-specifier.
   simple-type-specifier:
     :: [opt] nested-name-specifier [opt] type-name
     :: [opt] nested-name-specifier template template-id
     char
     wchar_t
     bool
     short
     int
     long
     signed
     unsigned
     float
     double
     void

   C++0x Extension:

   simple-type-specifier:
     auto
     decltype ( expression )
     char16_t
     char32_t
     __underlying_type ( type-id )

   GNU Extension:

   simple-type-specifier:
     __int128
     __typeof__ unary-expression
     __typeof__ ( type-id )

   Returns the indicated TYPE_DECL.  If DECL_SPECS is not NULL, it is
   appropriately updated.  */

static tree
cp_parser_simple_type_specifier (cp_parser* parser,
				 cp_decl_specifier_seq *decl_specs,
				 cp_parser_flags flags)
{
  tree type = NULL_TREE;
  cp_token *token;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  /* If we're looking at a keyword, things are easy.  */
  switch (token->keyword)
    {
    case RID_CHAR:
      if (decl_specs)
	decl_specs->explicit_char_p = true;
      type = char_type_node;
      break;
    case RID_CHAR16:
      type = char16_type_node;
      break;
    case RID_CHAR32:
      type = char32_type_node;
      break;
    case RID_WCHAR:
      type = wchar_type_node;
      break;
    case RID_BOOL:
      type = boolean_type_node;
      break;
    case RID_SHORT:
      if (decl_specs)
	++decl_specs->specs[(int) ds_short];
      type = short_integer_type_node;
      break;
    case RID_INT:
      if (decl_specs)
	decl_specs->explicit_int_p = true;
      type = integer_type_node;
      break;
    case RID_INT128:
      /* If the __int128 type node was never set up — presumably the
	 type is unsupported in this configuration — fall through to
	 the "not a built-in type" handling below.  */
      if (!int128_integer_type_node)
	break;
      if (decl_specs)
	decl_specs->explicit_int128_p = true;
      type = int128_integer_type_node;
      break;
    case RID_LONG:
      if (decl_specs)
	++decl_specs->specs[(int) ds_long];
      type = long_integer_type_node;
      break;
    case RID_SIGNED:
      if (decl_specs)
	++decl_specs->specs[(int) ds_signed];
      type = integer_type_node;
      break;
    case RID_UNSIGNED:
      if (decl_specs)
	++decl_specs->specs[(int) ds_unsigned];
      type = unsigned_type_node;
      break;
    case RID_FLOAT:
      type = float_type_node;
      break;
    case RID_DOUBLE:
      type = double_type_node;
      break;
    case RID_VOID:
      type = void_type_node;
      break;

    case RID_AUTO:
      maybe_warn_cpp0x (CPP0X_AUTO);
      type = make_auto ();
      break;

    case RID_DECLTYPE:
      /* Since DR 743, decltype can either be a simple-type-specifier by
	 itself or begin a nested-name-specifier.  Parsing it will replace
	 it with a CPP_DECLTYPE, so just rewind and let the CPP_DECLTYPE
	 handling below decide what to do.  */
      cp_parser_decltype (parser);
      cp_lexer_set_token_position (parser->lexer, token);
      break;

    case RID_TYPEOF:
      /* Consume the `typeof' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the operand to `typeof'.  */
      type = cp_parser_sizeof_operand (parser, RID_TYPEOF);
      /* If it is not already a TYPE, take its type.  */
      if (!TYPE_P (type))
	type = finish_typeof (type);

      if (decl_specs)
	cp_parser_set_decl_spec_type (decl_specs, type,
				      token->location,
				      /*type_definition_p=*/false);

      return type;

    case RID_UNDERLYING_TYPE:
      type = cp_parser_trait_expr (parser, RID_UNDERLYING_TYPE);
      if (decl_specs)
	cp_parser_set_decl_spec_type (decl_specs, type,
				      token->location,
				      /*type_definition_p=*/false);

      return type;

    case RID_BASES:
    case RID_DIRECT_BASES:
      type = cp_parser_trait_expr (parser, token->keyword);
      if (decl_specs)
	cp_parser_set_decl_spec_type (decl_specs, type,
				      token->location,
				      /*type_definition_p=*/false);
      return type;

    default:
      break;
    }

  /* If token is an already-parsed decltype not followed by ::, it's a
     simple-type-specifier.  */
  if (token->type == CPP_DECLTYPE
      && cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_SCOPE)
    {
      type = token->u.value;
      if (decl_specs)
	cp_parser_set_decl_spec_type (decl_specs, type,
				      token->location,
				      /*type_definition_p=*/false);
      cp_lexer_consume_token (parser->lexer);
      return type;
    }

  /* If the type-specifier was for a built-in type, we're done.  */
  if (type)
    {
      /* Record the type.  Modifier keywords (signed/unsigned/short/long)
	 are recorded via the specs[] counters above instead, since they
	 combine with other specifiers.  */
      if (decl_specs
	  && (token->keyword != RID_SIGNED
	      && token->keyword != RID_UNSIGNED
	      && token->keyword != RID_SHORT
	      && token->keyword != RID_LONG))
	cp_parser_set_decl_spec_type (decl_specs,
				      type,
				      token->location,
				      /*type_definition_p=*/false);
      if (decl_specs)
	decl_specs->any_specifiers_p = true;

      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);

      /* There is no valid C++ program where a non-template type is
	 followed by a "<".  That usually indicates that the user thought
	 that the type was a template.  */
      cp_parser_check_for_invalid_template_id (parser, type,
					       token->location);

      return TYPE_NAME (type);
    }

  /* The type-specifier must be a user-defined type.  */
  if (!(flags & CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES))
    {
      bool qualified_p;
      bool global_p;

      /* Don't gobble tokens or issue error messages if this is an
	 optional type-specifier.  */
      if (flags & CP_PARSER_FLAGS_OPTIONAL)
	cp_parser_parse_tentatively (parser);

      /* Look for the optional `::' operator.  */
      global_p
	= (cp_parser_global_scope_opt (parser,
				       /*current_scope_valid_p=*/false)
	   != NULL_TREE);
      /* Look for the nested-name specifier.  */
      qualified_p
	= (cp_parser_nested_name_specifier_opt (parser,
						/*typename_keyword_p=*/false,
						/*check_dependency_p=*/true,
						/*type_p=*/false,
						/*is_declaration=*/false)
	   != NULL_TREE);
      token = cp_lexer_peek_token (parser->lexer);
      /* If we have seen a nested-name-specifier, and the next token
	 is `template', then we are using the template-id production.  */
      if (parser->scope
	  && cp_parser_optional_template_keyword (parser))
	{
	  /* Look for the template-id.  */
	  type = cp_parser_template_id (parser,
					/*template_keyword_p=*/true,
					/*check_dependency_p=*/true,
					/*is_declaration=*/false);
	  /* If the template-id did not name a type, we are out of
	     luck.  */
	  if (TREE_CODE (type) != TYPE_DECL)
	    {
	      cp_parser_error (parser, "expected template-id for type");
	      type = NULL_TREE;
	    }
	}
      /* Otherwise, look for a type-name.  */
      else
	type = cp_parser_type_name (parser);
      /* Keep track of all name-lookups performed in class scopes.  */
      if (type
	  && !global_p
	  && !qualified_p
	  && TREE_CODE (type) == TYPE_DECL
	  && TREE_CODE (DECL_NAME (type)) == IDENTIFIER_NODE)
	maybe_note_name_used_in_class (DECL_NAME (type), type);
      /* If it didn't work out, we don't have a TYPE.  */
      if ((flags & CP_PARSER_FLAGS_OPTIONAL)
	  && !cp_parser_parse_definitely (parser))
	type = NULL_TREE;
      if (type && decl_specs)
	cp_parser_set_decl_spec_type (decl_specs, type,
				      token->location,
				      /*type_definition_p=*/false);
    }

  /* If we didn't get a type-name, issue an error message.  */
  if (!type && !(flags & CP_PARSER_FLAGS_OPTIONAL))
    {
      cp_parser_error (parser, "expected type-name");
      return error_mark_node;
    }

  if (type && type != error_mark_node)
    {
      /* See if TYPE is an Objective-C type, and if so, parse and
	 accept any protocol references following it.  Do this before
	 the cp_parser_check_for_invalid_template_id() call, because
	 Objective-C types can be followed by '<...>' which would
	 enclose protocol names rather than template arguments, and so
	 everything is fine.  */
      if (c_dialect_objc () && !parser->scope
	  && (objc_is_id (type) || objc_is_class_name (type)))
	{
	  tree protos = cp_parser_objc_protocol_refs_opt (parser);
	  tree qual_type = objc_get_protocol_qualified_type (type, protos);

	  /* Clobber the "unqualified" type previously entered into
	     DECL_SPECS with the new, improved protocol-qualified version.  */
	  if (decl_specs)
	    decl_specs->type = qual_type;

	  return qual_type;
	}

      /* There is no valid C++ program where a non-template type is
	 followed by a "<".  That usually indicates that the user
	 thought that the type was a template.  */
      cp_parser_check_for_invalid_template_id (parser, TREE_TYPE (type),
					       token->location);
    }

  return type;
}

/* Parse a type-name.

   type-name:
     class-name
     enum-name
     typedef-name
     simple-template-id [in c++0x]

   enum-name:
     identifier

   typedef-name:
     identifier

   Returns a TYPE_DECL for the type.  */

static tree
cp_parser_type_name (cp_parser* parser)
{
  tree type_decl;

  /* We can't know yet whether it is a class-name or not.  */
  cp_parser_parse_tentatively (parser);
  /* Try a class-name.
   */
  type_decl = cp_parser_class_name (parser,
				    /*typename_keyword_p=*/false,
				    /*template_keyword_p=*/false,
				    none_type,
				    /*check_dependency_p=*/true,
				    /*class_head_p=*/false,
				    /*is_declaration=*/false);
  /* If it's not a class-name, keep looking.  */
  if (!cp_parser_parse_definitely (parser))
    {
      if (cxx_dialect < cxx0x)
	/* It must be a typedef-name or an enum-name.  */
	return cp_parser_nonclass_name (parser);

      cp_parser_parse_tentatively (parser);
      /* It is either a simple-template-id representing an
	 instantiation of an alias template...  */
      type_decl = cp_parser_template_id (parser,
					 /*template_keyword_p=*/false,
					 /*check_dependency_p=*/false,
					 /*is_declaration=*/false);
      /* Note that this must be an instantiation of an alias template
	 because [temp.names]/6 says:

	     A template-id that names an alias template specialization
	     is a type-name.

	 Whereas [temp.names]/7 says:

	     A simple-template-id that names a class template
	     specialization is a class-name.  */
      if (type_decl != NULL_TREE
	  && TREE_CODE (type_decl) == TYPE_DECL
	  && TYPE_DECL_ALIAS_P (type_decl))
	gcc_assert (DECL_TEMPLATE_INSTANTIATION (type_decl));
      else
	cp_parser_simulate_error (parser);

      if (!cp_parser_parse_definitely (parser))
	/* ... Or a typedef-name or an enum-name.  */
	return cp_parser_nonclass_name (parser);
    }

  return type_decl;
}

/* Parse a non-class type-name, that is, either an enum-name or a
   typedef-name.

   enum-name:
     identifier

   typedef-name:
     identifier

   Returns a TYPE_DECL for the type.  */

static tree
cp_parser_nonclass_name (cp_parser* parser)
{
  tree type_decl;
  tree identifier;

  cp_token *token = cp_lexer_peek_token (parser->lexer);
  identifier = cp_parser_identifier (parser);
  if (identifier == error_mark_node)
    return error_mark_node;

  /* Look up the type-name.  */
  type_decl = cp_parser_lookup_name_simple (parser, identifier,
					    token->location);

  if (TREE_CODE (type_decl) == USING_DECL)
    {
      if (!DECL_DEPENDENT_P (type_decl))
	type_decl = strip_using_decl (type_decl);
      else if (USING_DECL_TYPENAME_P (type_decl))
	{
	  /* We have found a type introduced by a using
	     declaration at class scope that refers to a dependent
	     type.

	     using typename :: [opt] nested-name-specifier unqualified-id ;
	  */
	  type_decl = make_typename_type (TREE_TYPE (type_decl),
					  DECL_NAME (type_decl),
					  typename_type, tf_error);
	  if (type_decl != error_mark_node)
	    type_decl = TYPE_NAME (type_decl);
	}
    }

  if (TREE_CODE (type_decl) != TYPE_DECL
      && (objc_is_id (identifier) || objc_is_class_name (identifier)))
    {
      /* See if this is an Objective-C type.  */
      tree protos = cp_parser_objc_protocol_refs_opt (parser);
      tree type = objc_get_protocol_qualified_type (identifier, protos);
      if (type)
	type_decl = TYPE_NAME (type);
    }

  /* Issue an error if we did not find a type-name.  */
  if (TREE_CODE (type_decl) != TYPE_DECL
      /* In Objective-C, we have the complication that class names are
	 normally type names and start declarations (eg, the
	 "NSObject" in "NSObject *object;"), but can be used in an
	 Objective-C 2.0 dot-syntax (as in "NSObject.version") which
	 is an expression.  So, a classname followed by a dot is not a
	 valid type-name.  */
      || (objc_is_class_name (TREE_TYPE (type_decl))
	  && cp_lexer_peek_token (parser->lexer)->type == CPP_DOT))
    {
      if (!cp_parser_simulate_error (parser))
	cp_parser_name_lookup_error (parser, identifier, type_decl,
				     NLE_TYPE, token->location);
      return error_mark_node;
    }
  /* Remember that the name was used in the definition of the
     current class so that we can check later to see if the
     meaning would have been different after the class was
     entirely defined.  */
  else if (type_decl != error_mark_node
	   && !parser->scope)
    maybe_note_name_used_in_class (identifier, type_decl);

  return type_decl;
}

/* Parse an elaborated-type-specifier.
   Note that the grammar given here incorporates the resolution to
   DR68.

   elaborated-type-specifier:
     class-key :: [opt] nested-name-specifier [opt] identifier
     class-key :: [opt] nested-name-specifier [opt] template [opt]
       template-id
     enum-key :: [opt] nested-name-specifier [opt] identifier
     typename :: [opt] nested-name-specifier identifier
     typename :: [opt] nested-name-specifier template [opt]
       template-id

   GNU extension:

   elaborated-type-specifier:
     class-key attributes :: [opt] nested-name-specifier [opt] identifier
     class-key attributes :: [opt] nested-name-specifier [opt]
       template [opt] template-id
     enum attributes :: [opt] nested-name-specifier [opt] identifier

   If IS_FRIEND is TRUE, then this elaborated-type-specifier is being
   declared `friend'.  If IS_DECLARATION is TRUE, then this
   elaborated-type-specifier appears in a decl-specifiers-seq, i.e.,
   something is being declared.

   Returns the TYPE specified.  */

static tree
cp_parser_elaborated_type_specifier (cp_parser* parser,
				     bool is_friend,
				     bool is_declaration)
{
  enum tag_types tag_type;
  tree identifier;
  tree type = NULL_TREE;
  tree attributes = NULL_TREE;
  tree globalscope;
  cp_token *token = NULL;

  /* See if we're looking at the `enum' keyword.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ENUM))
    {
      /* Consume the `enum' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Remember that it's an enumeration type.  */
      tag_type = enum_type;
      /* Issue a warning if the `struct' or `class' key (for C++0x
	 scoped enums) is used here.  */
      if (cp_lexer_next_token_is_keyword (parser->lexer, RID_CLASS)
	  || cp_lexer_next_token_is_keyword (parser->lexer, RID_STRUCT))
	{
	  pedwarn (input_location, 0, "elaborated-type-specifier "
		   "for a scoped enum must not use the %<%D%> keyword",
		   cp_lexer_peek_token (parser->lexer)->u.value);
	  /* Consume the `struct' or `class' and parse it anyway.  */
	  cp_lexer_consume_token (parser->lexer);
	}
      /* Parse the attributes.  */
      attributes = cp_parser_attributes_opt (parser);
    }
  /* Or, it might be `typename'.  */
  else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TYPENAME))
    {
      /* Consume the `typename' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Remember that it's a `typename' type.  */
      tag_type = typename_type;
    }
  /* Otherwise it must be a class-key.  */
  else
    {
      tag_type = cp_parser_class_key (parser);
      if (tag_type == none_type)
	return error_mark_node;
      /* Parse the attributes.  */
      attributes = cp_parser_attributes_opt (parser);
    }

  /* Look for the `::' operator.  */
  globalscope = cp_parser_global_scope_opt (parser,
					    /*current_scope_valid_p=*/false);
  /* Look for the nested-name-specifier.  */
  if (tag_type == typename_type && !globalscope)
    {
      if (!cp_parser_nested_name_specifier (parser,
					    /*typename_keyword_p=*/true,
					    /*check_dependency_p=*/true,
					    /*type_p=*/true,
					    is_declaration))
	return error_mark_node;
    }
  else
    /* Even though `typename' is not present, the proposed resolution
       to Core Issue 180 says that in `class A<T>::B', `B' should be
       considered a type-name, even if `A<T>' is dependent.  */
    cp_parser_nested_name_specifier_opt (parser,
					 /*typename_keyword_p=*/true,
					 /*check_dependency_p=*/true,
					 /*type_p=*/true,
					 is_declaration);
  /* For everything but enumeration types, consider a template-id.
     For an enumeration type, consider only a plain identifier.  */
  if (tag_type != enum_type)
    {
      bool template_p = false;
      tree decl;

      /* Allow the `template' keyword.  */
      template_p = cp_parser_optional_template_keyword (parser);
      /* If we didn't see `template', we don't know if there's a
	 template-id or not.  */
      if (!template_p)
	cp_parser_parse_tentatively (parser);
      /* Parse the template-id.  */
      token = cp_lexer_peek_token (parser->lexer);
      decl = cp_parser_template_id (parser, template_p,
				    /*check_dependency_p=*/true,
				    is_declaration);
      /* If we didn't find a template-id, look for an ordinary
	 identifier.  */
      if (!template_p && !cp_parser_parse_definitely (parser))
	;
      /* If DECL is a TEMPLATE_ID_EXPR, and the `typename' keyword is
	 in effect, then we must assume that, upon instantiation, the
	 template will correspond to a class.  */
      else if (TREE_CODE (decl) == TEMPLATE_ID_EXPR
	       && tag_type == typename_type)
	type = make_typename_type (parser->scope, decl,
				   typename_type,
				   /*complain=*/tf_error);
      /* If the `typename' keyword is in effect and DECL is not a type
	 decl, then type is nonexistent.  TYPE stays NULL_TREE so that
	 the plain-identifier handling below runs.  */
      else if (tag_type == typename_type && TREE_CODE (decl) != TYPE_DECL)
	type = NULL_TREE;
      else
	type = check_elaborated_type_specifier (tag_type, decl,
						/*allow_template_p=*/true);
    }

  if (!type)
    {
      token = cp_lexer_peek_token (parser->lexer);
      identifier = cp_parser_identifier (parser);

      if (identifier == error_mark_node)
	{
	  parser->scope = NULL_TREE;
	  return error_mark_node;
	}

      /* For a `typename', we needn't call xref_tag.  */
      if (tag_type == typename_type
	  && TREE_CODE (parser->scope) != NAMESPACE_DECL)
	return cp_parser_make_typename_type (parser, parser->scope,
					     identifier,
					     token->location);
      /* Look up a qualified name in the usual way.  */
      if (parser->scope)
	{
	  tree decl;
	  tree ambiguous_decls;

	  decl = cp_parser_lookup_name (parser, identifier,
					tag_type,
					/*is_template=*/false,
					/*is_namespace=*/false,
					/*check_dependency=*/true,
					&ambiguous_decls,
					token->location);

	  /* If the lookup was ambiguous, an error will already have
	     been issued.  */
	  if (ambiguous_decls)
	    return error_mark_node;

	  /* If we are parsing friend declaration, DECL may be a
	     TEMPLATE_DECL tree node here.  However, we need to check
	     whether this TEMPLATE_DECL results in valid code.  Consider
	     the following example:

	       namespace N {
		 template <class T> class C {};
	       }
	       class X {
		 template <class T> friend class N::C; // #1, valid code
	       };
	       template <class T> class Y {
		 friend class N::C;		       // #2, invalid code
	       };

	     For both case #1 and #2, we arrive at a TEMPLATE_DECL after
	     name lookup of `N::C'.  We see that friend declaration must
	     be template for the code to be valid.  Note that
	     processing_template_decl does not work here since it is
	     always 1 for the above two cases.  */
	  decl = (cp_parser_maybe_treat_template_as_class
		  (decl, /*tag_name_p=*/is_friend
			 && parser->num_template_parameter_lists));

	  if (TREE_CODE (decl) != TYPE_DECL)
	    {
	      cp_parser_diagnose_invalid_type_name (parser,
						    parser->scope,
						    identifier,
						    token->location);
	      return error_mark_node;
	    }

	  if (TREE_CODE (TREE_TYPE (decl)) != TYPENAME_TYPE)
	    {
	      bool allow_template = (parser->num_template_parameter_lists
				     || DECL_SELF_REFERENCE_P (decl));
	      type = check_elaborated_type_specifier (tag_type, decl,
						      allow_template);

	      if (type == error_mark_node)
		return error_mark_node;
	    }

	  /* Forward declarations of nested types, such as

	       class C1::C2;
	       class C1::C2::C3;

	     are invalid unless all components preceding the final '::'
	     are complete.  If all enclosing types are complete, these
	     declarations become merely pointless.

	     Invalid forward declarations of nested types are errors
	     caught elsewhere in parsing.  Those that are pointless arrive
	     here.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)
	      && !is_friend && !processing_explicit_instantiation)
	    warning (0, "declaration %qD does not declare anything", decl);

	  type = TREE_TYPE (decl);
	}
      else
	{
	  /* An elaborated-type-specifier sometimes introduces a new
	     type and sometimes names an existing type.  Normally, the
	     rule is that it introduces a new type only if there is not
	     an existing type of the same name already in scope.  For
	     example, given:

	       struct S {};
	       void f() { struct S s; }

	     the `struct S' in the body of `f' is the same `struct S'
	     as in the global scope; the existing definition is used.
	     However, if there were no global declaration, this would
	     introduce a new local class named `S'.

	     An exception to this rule applies to the following code:

	       namespace N { struct S; }

	     Here, the elaborated-type-specifier names a new type
	     unconditionally; even if there is already an `S' in the
	     containing scope this declaration names a new type.

	     This exception only applies if the elaborated-type-specifier
	     forms the complete declaration:

	       [class.name]

	       A declaration consisting solely of `class-key identifier ;'
	       is either a redeclaration of the name in the current scope
	       or a forward declaration of the identifier as a class name.
	       It introduces the name into the current scope.

	     We are in this situation precisely when the next token is a
	     `;'.

	     An exception to the exception is that a `friend' declaration
	     does *not* name a new type; i.e., given:

	       struct S { friend struct T; };

	     `T' is not a new type in the scope of `S'.

	     Also, `new struct S' or `sizeof (struct S)' never results in
	     the definition of a new type; a new type can only be declared
	     in a declaration context.  */

	  tag_scope ts;
	  bool template_p;

	  if (is_friend)
	    /* Friends have special name lookup rules.  */
	    ts = ts_within_enclosing_non_class;
	  else if (is_declaration
		   && cp_lexer_next_token_is (parser->lexer,
					      CPP_SEMICOLON))
	    /* This is a `class-key identifier ;' */
	    ts = ts_current;
	  else
	    ts = ts_global;

	  template_p =
	    (parser->num_template_parameter_lists
	     && (cp_parser_next_token_starts_class_definition_p (parser)
		 || cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)));
	  /* An unqualified name was used to reference this type, so
	     there were no qualifying templates.  */
	  if (!cp_parser_check_template_parameters (parser,
						    /*num_templates=*/0,
						    token->location,
						    /*declarator=*/NULL))
	    return error_mark_node;
	  type = xref_tag (tag_type, identifier, ts, template_p);
	}
    }

  if (type == error_mark_node)
    return error_mark_node;

  /* Allow attributes on forward declarations of classes.  */
  if (attributes)
    {
      if (TREE_CODE (type) == TYPENAME_TYPE)
	warning (OPT_Wattributes,
		 "attributes ignored on uninstantiated type");
      else if (tag_type != enum_type && CLASSTYPE_TEMPLATE_INSTANTIATION (type)
	       && ! processing_explicit_instantiation)
	warning (OPT_Wattributes,
		 "attributes ignored on template instantiation");
      else if (is_declaration && cp_parser_declares_only_class_p (parser))
	cplus_decl_attributes (&type, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE);
      else
	warning (OPT_Wattributes,
		 "attributes ignored on elaborated-type-specifier that is not a forward declaration");
    }

  if (tag_type != enum_type)
    {
      /* Indicate whether this class was declared as a `class' or as a
	 `struct'.  */
      if (TREE_CODE (type) == RECORD_TYPE)
	CLASSTYPE_DECLARED_CLASS (type) = (tag_type == class_type);
      cp_parser_check_class_key (tag_type, type);
    }

  /* A "<" cannot follow an elaborated type specifier.  If that
     happens, the user was probably trying to form a template-id.  */
  cp_parser_check_for_invalid_template_id (parser, type, token->location);

  return type;
}

/* Parse an enum-specifier.

   enum-specifier:
     enum-head { enumerator-list [opt] }
     enum-head { enumerator-list , } [C++0x]

   enum-head:
     enum-key identifier [opt] enum-base [opt]
     enum-key nested-name-specifier identifier enum-base [opt]

   enum-key:
     enum
     enum class [C++0x]
     enum struct [C++0x]

   enum-base: [C++0x]
     : type-specifier-seq

   opaque-enum-specifier:
     enum-key identifier enum-base [opt] ;

   GNU Extensions:
     enum-key attributes[opt] identifier [opt] enum-base [opt]
       { enumerator-list [opt] }attributes[opt]
     enum-key attributes[opt] identifier [opt] enum-base [opt]
       { enumerator-list, }attributes[opt] [C++0x]

   Returns an ENUM_TYPE representing the enumeration, or NULL_TREE
   if the token stream isn't an enum-specifier after all.
*/

static tree
cp_parser_enum_specifier (cp_parser* parser)
{
  tree identifier;
  tree type = NULL_TREE;
  tree prev_scope;
  tree nested_name_specifier = NULL_TREE;
  tree attributes;
  bool scoped_enum_p = false;
  bool has_underlying_type = false;
  bool nested_being_defined = false;
  bool new_value_list = false;
  bool is_new_type = false;
  bool is_anonymous = false;
  tree underlying_type = NULL_TREE;
  cp_token *type_start_token = NULL;
  bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;

  /* A ':' after the enum name introduces an enum-base here, not a
     scope, so disable the colon-to-scope correction for the duration
     of this function; it is restored at "out" below.  */
  parser->colon_corrects_to_scope_p = false;

  /* Parse tentatively so that we can back up if we don't find a
     enum-specifier.  The tentative parse is committed either when an
     enum-base ':' is seen (below) or just before the definition is
     processed.  */
  cp_parser_parse_tentatively (parser);

  /* Caller guarantees that the current token is 'enum', an identifier
     possibly follows, and the token after that is an opening brace.
     If we don't have an identifier, fabricate an anonymous name for
     the enumeration being defined.  */
  cp_lexer_consume_token (parser->lexer);

  /* Parse the "class" or "struct", which indicates a scoped
     enumeration type in C++0x.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_CLASS)
      || cp_lexer_next_token_is_keyword (parser->lexer, RID_STRUCT))
    {
      if (cxx_dialect < cxx0x)
        maybe_warn_cpp0x (CPP0X_SCOPED_ENUMS);

      /* Consume the `struct' or `class' token.  */
      cp_lexer_consume_token (parser->lexer);

      scoped_enum_p = true;
    }

  attributes = cp_parser_attributes_opt (parser);

  /* Clear the qualification.  */
  parser->scope = NULL_TREE;
  parser->qualifying_scope = NULL_TREE;
  parser->object_scope = NULL_TREE;

  /* Figure out in what scope the declaration is being placed.  */
  prev_scope = current_scope ();

  type_start_token = cp_lexer_peek_token (parser->lexer);

  push_deferring_access_checks (dk_no_check);
  nested_name_specifier
    = cp_parser_nested_name_specifier_opt (parser,
					   /*typename_keyword_p=*/true,
					   /*check_dependency_p=*/false,
					   /*type_p=*/false,
					   /*is_declaration=*/false);

  if (nested_name_specifier)
    {
      tree name;

      identifier = cp_parser_identifier (parser);
      name = cp_parser_lookup_name (parser, identifier,
				    enum_type,
				    /*is_template=*/false,
				    /*is_namespace=*/false,
				    /*check_dependency=*/true,
				    /*ambiguous_decls=*/NULL,
				    input_location);
      if (name)
	{
	  type = TREE_TYPE (name);
	  if (TREE_CODE (type) == TYPENAME_TYPE)
	    {
	      /* Are template enums allowed in ISO? */
	      if (template_parm_scope_p ())
		pedwarn (type_start_token->location, OPT_pedantic,
			 "%qD is an enumeration template", name);
	      /* ignore a typename reference, for it will be solved by name
	         in start_enum.  */
	      type = NULL_TREE;
	    }
	}
      else
	error_at (type_start_token->location,
		  "%qD is not an enumerator-name", identifier);
    }
  else
    {
      if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
	identifier = cp_parser_identifier (parser);
      else
	{
	  identifier = make_anon_name ();
	  is_anonymous = true;
	}
    }
  pop_deferring_access_checks ();

  /* Check for the `:' that denotes a specified underlying
     type in C++0x.  Note that a ':' could also indicate a
     bitfield width, however.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
    {
      cp_decl_specifier_seq type_specifiers;

      /* Consume the `:'.  */
      cp_lexer_consume_token (parser->lexer);

      /* Parse the type-specifier-seq.  */
      cp_parser_type_specifier_seq (parser, /*is_declaration=*/false,
				    /*is_trailing_return=*/false,
				    &type_specifiers);

      /* At this point this is surely not elaborated type specifier.  */
      if (!cp_parser_parse_definitely (parser))
	return NULL_TREE;

      if (cxx_dialect < cxx0x)
	maybe_warn_cpp0x (CPP0X_SCOPED_ENUMS);

      has_underlying_type = true;

      /* If that didn't work, stop.  */
      if (type_specifiers.type != error_mark_node)
	{
	  underlying_type = grokdeclarator (NULL, &type_specifiers, TYPENAME,
					    /*initialized=*/0, NULL);
	  if (underlying_type == error_mark_node)
	    underlying_type = NULL_TREE;
	}
    }

  /* Look for the `{' but don't consume it yet.  */
  if (!cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      if (cxx_dialect < cxx0x || (!scoped_enum_p && !underlying_type))
	{
	  cp_parser_error (parser, "expected %<{%>");
	  if (has_underlying_type)
	    {
	      type = NULL_TREE;
	      goto out;
	    }
	}
      /* An opaque-enum-specifier must have a ';' here.  */
      if ((scoped_enum_p || underlying_type)
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	{
	  cp_parser_error (parser, "expected %<;%> or %<{%>");
	  if (has_underlying_type)
	    {
	      type = NULL_TREE;
	      goto out;
	    }
	}
    }

  /* If no enum-base committed the tentative parse above, commit (or
     back out) now, before any irreversible semantic actions below.  */
  if (!has_underlying_type && !cp_parser_parse_definitely (parser))
    return NULL_TREE;

  if (nested_name_specifier)
    {
      if (CLASS_TYPE_P (nested_name_specifier))
	{
	  nested_being_defined = TYPE_BEING_DEFINED (nested_name_specifier);
	  TYPE_BEING_DEFINED (nested_name_specifier) = 1;
	  push_scope (nested_name_specifier);
	}
      else if (TREE_CODE (nested_name_specifier) == NAMESPACE_DECL)
	{
	  push_nested_namespace (nested_name_specifier);
	}
    }

  /* Issue an error message if type-definitions are forbidden here.  */
  if (!cp_parser_check_type_definition (parser))
    type = error_mark_node;
  else
    /* Create the new type.  We do this before consuming the
       opening brace so the enum will be recorded as being on the line
       of its tag (or the 'enum' keyword, if there is no tag).  */
    type = start_enum (identifier, type, underlying_type,
		       scoped_enum_p, &is_new_type);

  /* If the next token is not '{' it is an opaque-enum-specifier or an
     elaborated-type-specifier.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      timevar_push (TV_PARSE_ENUM);
      if (nested_name_specifier)
	{
	  /* The following catches invalid code such as:
	     enum class S<int>::E { A, B, C }; */
	  if (!processing_specialization
	      && CLASS_TYPE_P (nested_name_specifier)
	      && CLASSTYPE_USE_TEMPLATE (nested_name_specifier))
	    error_at (type_start_token->location, "cannot add an enumerator "
		      "list to a template instantiation");

	  /* If that scope does not contain the scope in which the
	     class was originally declared, the program is invalid.  */
	  if (prev_scope && !is_ancestor (prev_scope, nested_name_specifier))
	    {
	      if (at_namespace_scope_p ())
		error_at (type_start_token->location,
			  "declaration of %qD in namespace %qD which does not "
			  "enclose %qD",
			  type, prev_scope, nested_name_specifier);
	      else
		error_at (type_start_token->location,
			  "declaration of %qD in %qD which does not enclose %qD",
			  type, prev_scope, nested_name_specifier);
	      type = error_mark_node;
	    }
	}

      if (scoped_enum_p)
	begin_scope (sk_scoped_enum, type);

      /* Consume the opening brace.  */
      cp_lexer_consume_token (parser->lexer);

      if (type == error_mark_node)
	; /* Nothing to add */
      else if (OPAQUE_ENUM_P (type)
	       || (cxx_dialect > cxx98 && processing_specialization))
	{
	  new_value_list = true;
	  SET_OPAQUE_ENUM_P (type, false);
	  DECL_SOURCE_LOCATION (TYPE_NAME (type)) = type_start_token->location;
	}
      else
	{
	  error_at (type_start_token->location, "multiple definition of %q#T", type);
	  error_at (DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (type)),
		    "previous definition here");
	  type = error_mark_node;
	}

      if (type == error_mark_node)
	cp_parser_skip_to_end_of_block_or_statement (parser);
      /* If the next token is not '}', then there are some enumerators.  */
      else if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_BRACE))
	cp_parser_enumerator_list (parser, type);

      /* Consume the final '}'.  */
      cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);

      if (scoped_enum_p)
	finish_scope ();
      timevar_pop (TV_PARSE_ENUM);
    }
  else
    {
      /* If a ';' follows, then it is an opaque-enum-specifier
	 and additional restrictions apply.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	{
	  if (is_anonymous)
	    error_at (type_start_token->location,
		      "opaque-enum-specifier without name");
	  else if (nested_name_specifier)
	    error_at (type_start_token->location,
		      "opaque-enum-specifier must use a simple identifier");
	}
    }

  /* Look for trailing attributes to apply to this enumeration, and
     apply them if appropriate.  */
  if (cp_parser_allow_gnu_extensions_p (parser))
    {
      tree trailing_attr = cp_parser_attributes_opt (parser);
      trailing_attr = chainon (trailing_attr, attributes);
      cplus_decl_attributes (&type,
			     trailing_attr,
			     (int) ATTR_FLAG_TYPE_IN_PLACE);
    }

  /* Finish up the enumeration.  */
  if (type != error_mark_node)
    {
      if (new_value_list)
	finish_enum_value_list (type);
      if (is_new_type)
	finish_enum (type);
    }

  if (nested_name_specifier)
    {
      if (CLASS_TYPE_P (nested_name_specifier))
	{
	  TYPE_BEING_DEFINED (nested_name_specifier) = nested_being_defined;
	  pop_scope (nested_name_specifier);
	}
      else if (TREE_CODE (nested_name_specifier) == NAMESPACE_DECL)
	{
	  pop_nested_namespace (nested_name_specifier);
	}
    }
 out:
  /* Restore the colon-correction state saved on entry.  */
  parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
  return type;
}

/* Parse an enumerator-list.  The enumerators all have the indicated
   TYPE.

   enumerator-list:
     enumerator-definition
     enumerator-list , enumerator-definition  */

static void
cp_parser_enumerator_list (cp_parser* parser, tree type)
{
  while (true)
    {
      /* Parse an enumerator-definition.  */
      cp_parser_enumerator_definition (parser, type);

      /* If the next token is not a ',', we've reached the end of
	 the list.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      /* Otherwise, consume the `,' and keep going.  */
      cp_lexer_consume_token (parser->lexer);
      /* If the next token is a `}', there is a trailing comma.
*/
      if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
	{
	  /* Trailing commas are a C++0x feature; pedwarn in older
	     dialects unless the code comes from a system header.  */
	  if (cxx_dialect < cxx0x && !in_system_header)
	    pedwarn (input_location, OPT_pedantic,
		     "comma at end of enumerator list");
	  break;
	}
    }
}

/* Parse an enumerator-definition.  The enumerator has the indicated
   TYPE.

   enumerator-definition:
     enumerator
     enumerator = constant-expression

   enumerator:
     identifier  */

static void
cp_parser_enumerator_definition (cp_parser* parser, tree type)
{
  tree identifier;
  tree value;
  location_t loc;

  /* Save the input location because we are interested in the location
     of the identifier and not the location of the explicit value.  */
  loc = cp_lexer_peek_token (parser->lexer)->location;

  /* Look for the identifier.  */
  identifier = cp_parser_identifier (parser);
  if (identifier == error_mark_node)
    return;

  /* If the next token is an '=', then there is an explicit value.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
    {
      /* Consume the `=' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the value.  */
      value = cp_parser_constant_expression (parser,
					     /*allow_non_constant_p=*/false,
					     NULL);
    }
  else
    value = NULL_TREE;

  /* If we are processing a template, make sure the initializer of the
     enumerator doesn't contain any bare template parameter pack.  */
  if (check_for_bare_parameter_packs (value))
    value = error_mark_node;

  /* integral_constant_value will pull out this expression, so make sure
     it's folded as appropriate.  */
  value = fold_non_dependent_expr (value);

  /* Create the enumerator.  */
  build_enumerator (identifier, value, type, loc);
}

/* Parse a namespace-name.

   namespace-name:
     original-namespace-name
     namespace-alias

   Returns the NAMESPACE_DECL for the namespace.  */

static tree
cp_parser_namespace_name (cp_parser* parser)
{
  tree identifier;
  tree namespace_decl;

  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* Get the name of the namespace.  */
  identifier = cp_parser_identifier (parser);
  if (identifier == error_mark_node)
    return error_mark_node;

  /* Look up the identifier in the currently active scope.  Look only
     for namespaces, due to:

       [basic.lookup.udir]

       When looking up a namespace-name in a using-directive or alias
       definition, only namespace names are considered.

     And:

       [basic.lookup.qual]

       During the lookup of a name preceding the :: scope resolution
       operator, object, function, and enumerator names are ignored.

     (Note that cp_parser_qualifying_entity only calls this
     function if the token after the name is the scope resolution
     operator.)  */
  namespace_decl = cp_parser_lookup_name (parser, identifier,
					  none_type,
					  /*is_template=*/false,
					  /*is_namespace=*/true,
					  /*check_dependency=*/true,
					  /*ambiguous_decls=*/NULL,
					  token->location);
  /* If it's not a namespace, issue an error.  The hard error is
     suppressed while still parsing tentatively; only the simulated
     parse error is recorded then.  */
  if (namespace_decl == error_mark_node
      || TREE_CODE (namespace_decl) != NAMESPACE_DECL)
    {
      if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
	error_at (token->location, "%qD is not a namespace-name", identifier);
      cp_parser_error (parser, "expected namespace-name");
      namespace_decl = error_mark_node;
    }

  return namespace_decl;
}

/* Parse a namespace-definition.

   namespace-definition:
     named-namespace-definition
     unnamed-namespace-definition

   named-namespace-definition:
     original-namespace-definition
     extension-namespace-definition

   original-namespace-definition:
     namespace identifier { namespace-body }

   extension-namespace-definition:
     namespace original-namespace-name { namespace-body }

   unnamed-namespace-definition:
     namespace { namespace-body } */

static void
cp_parser_namespace_definition (cp_parser* parser)
{
  tree identifier, attribs;
  bool has_visibility;
  bool is_inline;

  /* An optional leading `inline' (C++0x inline namespaces) must be
     noted before the `namespace' keyword itself.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_INLINE))
    {
      maybe_warn_cpp0x (CPP0X_INLINE_NAMESPACES);
      is_inline = true;
      cp_lexer_consume_token (parser->lexer);
    }
  else
    is_inline = false;

  /* Look for the `namespace' keyword.
*/
  cp_parser_require_keyword (parser, RID_NAMESPACE, RT_NAMESPACE);

  /* Get the name of the namespace.  We do not attempt to distinguish
     between an original-namespace-definition and an
     extension-namespace-definition at this point.  The semantic
     analysis routines are responsible for that.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    identifier = cp_parser_identifier (parser);
  else
    identifier = NULL_TREE;

  /* Parse any specified attributes.  */
  attribs = cp_parser_attributes_opt (parser);

  /* Look for the `{' to start the namespace.  */
  cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE);
  /* Start the namespace.  */
  push_namespace (identifier);

  /* "inline namespace" is equivalent to a stub namespace definition
     followed by a strong using directive.  */
  if (is_inline)
    {
      tree name_space = current_namespace;
      /* Set up namespace association.  */
      DECL_NAMESPACE_ASSOCIATIONS (name_space)
	= tree_cons (CP_DECL_CONTEXT (name_space), NULL_TREE,
		     DECL_NAMESPACE_ASSOCIATIONS (name_space));
      /* Import the contents of the inline namespace.  Note the
	 pop/push pair: the using-directive must be installed in the
	 enclosing namespace, then we re-enter the inline one.  */
      pop_namespace ();
      do_using_directive (name_space);
      push_namespace (identifier);
    }

  has_visibility = handle_namespace_attrs (current_namespace, attribs);

  /* Parse the body of the namespace.  */
  cp_parser_namespace_body (parser);

  if (has_visibility)
    pop_visibility (1);

  /* Finish the namespace.  */
  pop_namespace ();
  /* Look for the final `}'.  */
  cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
}

/* Parse a namespace-body.

   namespace-body:
     declaration-seq [opt]  */

static void
cp_parser_namespace_body (cp_parser* parser)
{
  cp_parser_declaration_seq_opt (parser);
}

/* Parse a namespace-alias-definition.

   namespace-alias-definition:
     namespace identifier = qualified-namespace-specifier ;  */

static void
cp_parser_namespace_alias_definition (cp_parser* parser)
{
  tree identifier;
  tree namespace_specifier;

  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* Look for the `namespace' keyword.  */
  cp_parser_require_keyword (parser, RID_NAMESPACE, RT_NAMESPACE);
  /* Look for the identifier.  */
  identifier = cp_parser_identifier (parser);
  if (identifier == error_mark_node)
    return;
  /* Look for the `=' token.  */
  if (!cp_parser_uncommitted_to_tentative_parse_p (parser)
      && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      /* A `{' here means the user wrote a namespace-definition in a
	 context where only an alias-definition can appear; diagnose
	 and skip the whole braced body to recover.  */
      error_at (token->location, "%<namespace%> definition is not allowed here");
      /* Skip the definition.  */
      cp_lexer_consume_token (parser->lexer);
      if (cp_parser_skip_to_closing_brace (parser))
	cp_lexer_consume_token (parser->lexer);
      return;
    }
  cp_parser_require (parser, CPP_EQ, RT_EQ);
  /* Look for the qualified-namespace-specifier.  */
  namespace_specifier
    = cp_parser_qualified_namespace_specifier (parser);
  /* Look for the `;' token.  */
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

  /* Register the alias in the symbol table.  */
  do_namespace_alias (identifier, namespace_specifier);
}

/* Parse a qualified-namespace-specifier.

   qualified-namespace-specifier:
     :: [opt] nested-name-specifier [opt] namespace-name

   Returns a NAMESPACE_DECL corresponding to the specified
   namespace.  */

static tree
cp_parser_qualified_namespace_specifier (cp_parser* parser)
{
  /* Look for the optional `::'.  */
  cp_parser_global_scope_opt (parser,
			      /*current_scope_valid_p=*/false);

  /* Look for the optional nested-name-specifier.  */
  cp_parser_nested_name_specifier_opt (parser,
				       /*typename_keyword_p=*/false,
				       /*check_dependency_p=*/true,
				       /*type_p=*/false,
				       /*is_declaration=*/true);

  return cp_parser_namespace_name (parser);
}

/* Parse a using-declaration, or, if ACCESS_DECLARATION_P is true, an
   access declaration.
   using-declaration:
     using typename [opt] :: [opt] nested-name-specifier unqualified-id ;
     using :: unqualified-id ;

   access-declaration:
     qualified-id ;  */

static bool
cp_parser_using_declaration (cp_parser* parser,
			     bool access_declaration_p)
{
  cp_token *token;
  bool typename_p = false;
  bool global_scope_p;
  tree decl;
  tree identifier;
  tree qscope;
  int oldcount = errorcount;
  cp_token *diag_token = NULL;

  if (access_declaration_p)
    {
      /* An access-declaration has no `using' keyword; parse the whole
	 qualified-id tentatively and commit only if a `;' follows.  */
      diag_token = cp_lexer_peek_token (parser->lexer);
      cp_parser_parse_tentatively (parser);
    }
  else
    {
      /* Look for the `using' keyword.  */
      cp_parser_require_keyword (parser, RID_USING, RT_USING);
      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* See if it's `typename'.  */
      if (token->keyword == RID_TYPENAME)
	{
	  /* Remember that we've seen it.  */
	  typename_p = true;
	  /* Consume the `typename' token.  */
	  cp_lexer_consume_token (parser->lexer);
	}
    }

  /* Look for the optional global scope qualification.  */
  global_scope_p
    = (cp_parser_global_scope_opt (parser,
				   /*current_scope_valid_p=*/false)
       != NULL_TREE);

  /* If we saw `typename', or didn't see `::', then there must be a
     nested-name-specifier present.  */
  if (typename_p || !global_scope_p)
    qscope = cp_parser_nested_name_specifier (parser, typename_p,
					      /*check_dependency_p=*/true,
					      /*type_p=*/false,
					      /*is_declaration=*/true);
  /* Otherwise, we could be in either of the two productions.  In that
     case, treat the nested-name-specifier as optional.  */
  else
    qscope = cp_parser_nested_name_specifier_opt (parser,
						  /*typename_keyword_p=*/false,
						  /*check_dependency_p=*/true,
						  /*type_p=*/false,
						  /*is_declaration=*/true);
  if (!qscope)
    qscope = global_namespace;

  if (access_declaration_p && cp_parser_error_occurred (parser))
    /* Something has already gone wrong; there's no need to parse
       further.  Since an error has occurred, the return value of
       cp_parser_parse_definitely will be false, as required.  */
    return cp_parser_parse_definitely (parser);

  token = cp_lexer_peek_token (parser->lexer);
  /* Parse the unqualified-id.  */
  identifier = cp_parser_unqualified_id (parser,
					 /*template_keyword_p=*/false,
					 /*check_dependency_p=*/true,
					 /*declarator_p=*/true,
					 /*optional_p=*/false);

  if (access_declaration_p)
    {
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	cp_parser_simulate_error (parser);
      if (!cp_parser_parse_definitely (parser))
	return false;
    }

  /* The function we call to handle a using-declaration is different
     depending on what scope we are in.  */
  if (qscope == error_mark_node || identifier == error_mark_node)
    ;
  else if (TREE_CODE (identifier) != IDENTIFIER_NODE
	   && TREE_CODE (identifier) != BIT_NOT_EXPR)
    /* [namespace.udecl]

       A using declaration shall not name a template-id.  */
    error_at (token->location,
	      "a template-id may not appear in a using-declaration");
  else
    {
      if (at_class_scope_p ())
	{
	  /* Create the USING_DECL.  */
	  decl = do_class_using_decl (parser->scope, identifier);

	  if (decl && typename_p)
	    USING_DECL_TYPENAME_P (decl) = 1;

	  if (check_for_bare_parameter_packs (decl))
	    return false;
	  else
	    /* Add it to the list of members in this class.  */
	    finish_member_declaration (decl);
	}
      else
	{
	  decl = cp_parser_lookup_name_simple (parser,
					       identifier,
					       token->location);
	  if (decl == error_mark_node)
	    cp_parser_name_lookup_error (parser, identifier,
					 decl, NLE_NULL,
					 token->location);
	  else if (check_for_bare_parameter_packs (decl))
	    return false;
	  else if (!at_namespace_scope_p ())
	    do_local_using_decl (decl, qscope, identifier);
	  else
	    do_toplevel_using_decl (decl, qscope, identifier);
	}
    }

  /* Look for the final `;'.  */
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

  /* Warn about the deprecated form only if the declaration parsed
     cleanly (errorcount unchanged since entry).  */
  if (access_declaration_p && errorcount == oldcount)
    warning_at (diag_token->location, OPT_Wdeprecated,
		"access declarations are deprecated "
		"in favour of using-declarations; "
		"suggestion: add the %<using%> keyword");

  return true;
}

/* Parse an alias-declaration.
   alias-declaration:
     using identifier attribute-specifier-seq [opt] = type-id  */

static tree
cp_parser_alias_declaration (cp_parser* parser)
{
  tree id, type, decl, pushed_scope = NULL_TREE, attributes;
  location_t id_location;
  cp_declarator *declarator;
  cp_decl_specifier_seq decl_specs;
  bool member_p;
  const char *saved_message = NULL;

  /* Look for the `using' keyword.  */
  cp_parser_require_keyword (parser, RID_USING, RT_USING);
  id_location = cp_lexer_peek_token (parser->lexer)->location;
  id = cp_parser_identifier (parser);
  if (id == error_mark_node)
    return error_mark_node;

  attributes = cp_parser_attributes_opt (parser);
  if (attributes == error_mark_node)
    return error_mark_node;

  cp_parser_require (parser, CPP_EQ, RT_EQ);

  if (cp_parser_error_occurred (parser))
    return error_mark_node;

  /* Now we are going to parse the type-id of the declaration.  */

  /*
    [dcl.type]/3 says:

	"A type-specifier-seq shall not define a class or enumeration
	 unless it appears in the type-id of an alias-declaration (7.1.3) that
	 is not the declaration of a template-declaration."

    In other words, if we currently are in an alias template, the
    type-id should not define a type.

    So let's set parser->type_definition_forbidden_message in that
    case; cp_parser_check_type_definition (called by
    cp_parser_class_specifier) will then emit an error if a type is
    defined in the type-id.  */
  if (parser->num_template_parameter_lists)
    {
      /* Save the previous message so nested contexts are restored
	 correctly after parsing the type-id.  */
      saved_message = parser->type_definition_forbidden_message;
      parser->type_definition_forbidden_message =
	G_("types may not be defined in alias template declarations");
    }

  type = cp_parser_type_id (parser);

  /* Restore the error message if need be.  */
  if (parser->num_template_parameter_lists)
    parser->type_definition_forbidden_message = saved_message;

  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

  if (cp_parser_error_occurred (parser))
    return error_mark_node;

  /* A typedef-name can also be introduced by an alias-declaration.  The
     identifier following the using keyword becomes a typedef-name.  It has
     the same semantics as if it were introduced by the typedef
     specifier.  In particular, it does not define a new type and it shall
     not appear in the type-id.  */

  clear_decl_specs (&decl_specs);
  decl_specs.type = type;
  decl_specs.attributes = attributes;
  ++decl_specs.specs[(int) ds_typedef];
  ++decl_specs.specs[(int) ds_alias];

  declarator = make_id_declarator (NULL_TREE, id, sfk_none);
  declarator->id_loc = id_location;

  member_p = at_class_scope_p ();
  if (member_p)
    decl = grokfield (declarator, &decl_specs, NULL_TREE, false,
		      NULL_TREE, attributes);
  else
    decl = start_decl (declarator, &decl_specs, 0,
		       attributes, NULL_TREE, &pushed_scope);
  if (decl == error_mark_node)
    return decl;

  cp_finish_decl (decl, NULL_TREE, 0, NULL_TREE, 0);

  if (pushed_scope)
    pop_scope (pushed_scope);

  /* If decl is a template, return its TEMPLATE_DECL so that it gets
     added into the symbol table; otherwise, return the TYPE_DECL.  */
  if (DECL_LANG_SPECIFIC (decl)
      && DECL_TEMPLATE_INFO (decl)
      && PRIMARY_TEMPLATE_P (DECL_TI_TEMPLATE (decl)))
    {
      decl = DECL_TI_TEMPLATE (decl);
      if (member_p)
	check_member_template (decl);
    }

  return decl;
}

/* Parse a using-directive.

   using-directive:
     using namespace :: [opt] nested-name-specifier [opt]
       namespace-name ;  */

static void
cp_parser_using_directive (cp_parser* parser)
{
  tree namespace_decl;
  tree attribs;

  /* Look for the `using' keyword.  */
  cp_parser_require_keyword (parser, RID_USING, RT_USING);
  /* And the `namespace' keyword.  */
  cp_parser_require_keyword (parser, RID_NAMESPACE, RT_NAMESPACE);
  /* Look for the optional `::' operator.  */
  cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false);
  /* And the optional nested-name-specifier.  */
  cp_parser_nested_name_specifier_opt (parser,
				       /*typename_keyword_p=*/false,
				       /*check_dependency_p=*/true,
				       /*type_p=*/false,
				       /*is_declaration=*/true);
  /* Get the namespace being used.
*/
  namespace_decl = cp_parser_namespace_name (parser);
  /* And any specified attributes.  */
  attribs = cp_parser_attributes_opt (parser);
  /* Update the symbol table.  */
  parse_using_directive (namespace_decl, attribs);
  /* Look for the final `;'.  */
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
}

/* Parse an asm-definition.

   asm-definition:
     asm ( string-literal ) ;

   GNU Extension:

   asm-definition:
     asm volatile [opt] ( string-literal ) ;
     asm volatile [opt] ( string-literal : asm-operand-list [opt] ) ;
     asm volatile [opt] ( string-literal : asm-operand-list [opt]
			  : asm-operand-list [opt] ) ;
     asm volatile [opt] ( string-literal : asm-operand-list [opt]
			  : asm-operand-list [opt]
			  : asm-clobber-list [opt] ) ;
     asm volatile [opt] goto ( string-literal : : asm-operand-list [opt]
			       : asm-clobber-list [opt]
			       : asm-goto-list ) ;  */

static void
cp_parser_asm_definition (cp_parser* parser)
{
  tree string;
  tree outputs = NULL_TREE;
  tree inputs = NULL_TREE;
  tree clobbers = NULL_TREE;
  tree labels = NULL_TREE;
  tree asm_stmt;
  bool volatile_p = false;
  bool extended_p = false;
  bool invalid_inputs_p = false;
  bool invalid_outputs_p = false;
  bool goto_p = false;
  required_token missing = RT_NONE;

  /* Look for the `asm' keyword.  */
  cp_parser_require_keyword (parser, RID_ASM, RT_ASM);
  /* See if the next token is `volatile'.  */
  if (cp_parser_allow_gnu_extensions_p (parser)
      && cp_lexer_next_token_is_keyword (parser->lexer, RID_VOLATILE))
    {
      /* Remember that we saw the `volatile' keyword.  */
      volatile_p = true;
      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
  if (cp_parser_allow_gnu_extensions_p (parser)
      && parser->in_function_body
      && cp_lexer_next_token_is_keyword (parser->lexer, RID_GOTO))
    {
      /* Remember that we saw the `goto' keyword.  */
      goto_p = true;
      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
  /* Look for the opening `('.  */
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return;
  /* Look for the string.  */
  string = cp_parser_string_literal (parser, false, false);
  if (string == error_mark_node)
    {
      cp_parser_skip_to_closing_parenthesis (parser, true, false,
					     /*consume_paren=*/true);
      return;
    }

  /* If we're allowing GNU extensions, check for the extended assembly
     syntax.  Unfortunately, the `:' tokens need not be separated by
     a space in C, and so, for compatibility, we tolerate that here
     too.  Doing that means that we have to treat the `::' operator as
     two `:' tokens.  */
  if (cp_parser_allow_gnu_extensions_p (parser)
      && parser->in_function_body
      && (cp_lexer_next_token_is (parser->lexer, CPP_COLON)
	  || cp_lexer_next_token_is (parser->lexer, CPP_SCOPE)))
    {
      bool inputs_p = false;
      bool clobbers_p = false;
      bool labels_p = false;

      /* The extended syntax was used.  */
      extended_p = true;

      /* Look for outputs.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
	{
	  /* Consume the `:'.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Parse the output-operands.  */
	  if (cp_lexer_next_token_is_not (parser->lexer,
					  CPP_COLON)
	      && cp_lexer_next_token_is_not (parser->lexer,
					     CPP_SCOPE)
	      && cp_lexer_next_token_is_not (parser->lexer,
					     CPP_CLOSE_PAREN)
	      && !goto_p)
	    outputs = cp_parser_asm_operand_list (parser);

	  if (outputs == error_mark_node)
	    invalid_outputs_p = true;
	}
      /* If the next token is `::', there are no outputs, and the
	 next token is the beginning of the inputs.  */
      else if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
	/* The inputs are coming next.  */
	inputs_p = true;

      /* Look for inputs.  */
      if (inputs_p
	  || cp_lexer_next_token_is (parser->lexer, CPP_COLON))
	{
	  /* Consume the `:' or `::'.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Parse the output-operands.  */
	  if (cp_lexer_next_token_is_not (parser->lexer,
					  CPP_COLON)
	      && cp_lexer_next_token_is_not (parser->lexer,
					     CPP_SCOPE)
	      && cp_lexer_next_token_is_not (parser->lexer,
					     CPP_CLOSE_PAREN))
	    inputs = cp_parser_asm_operand_list (parser);

	  if (inputs == error_mark_node)
	    invalid_inputs_p = true;
	}
      else if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
	/* The clobbers are coming next.  */
	clobbers_p = true;

      /* Look for clobbers.  */
      if (clobbers_p
	  || cp_lexer_next_token_is (parser->lexer, CPP_COLON))
	{
	  clobbers_p = true;
	  /* Consume the `:' or `::'.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Parse the clobbers.  */
	  if (cp_lexer_next_token_is_not (parser->lexer,
					  CPP_COLON)
	      && cp_lexer_next_token_is_not (parser->lexer,
					     CPP_CLOSE_PAREN))
	    clobbers = cp_parser_asm_clobber_list (parser);
	}
      else if (goto_p
	       && cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
	/* The labels are coming next.  */
	labels_p = true;

      /* Look for labels.  */
      if (labels_p
	  || (goto_p && cp_lexer_next_token_is (parser->lexer, CPP_COLON)))
	{
	  labels_p = true;
	  /* Consume the `:' or `::'.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Parse the labels.  */
	  labels = cp_parser_asm_label_list (parser);
	}

      /* An `asm goto' must name its labels; if they were missing,
	 arrange (via MISSING) for the require below to diagnose the
	 absent `:' or `::' instead of the `)'.  */
      if (goto_p && !labels_p)
	missing = clobbers_p ? RT_COLON : RT_COLON_SCOPE;
    }
  else if (goto_p)
    missing = RT_COLON_SCOPE;

  /* Look for the closing `)'.  */
  if (!cp_parser_require (parser, missing ? CPP_COLON : CPP_CLOSE_PAREN,
			  missing ? missing : RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser, true, false,
					   /*consume_paren=*/true);
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

  if (!invalid_inputs_p && !invalid_outputs_p)
    {
      /* Create the ASM_EXPR.  */
      if (parser->in_function_body)
	{
	  asm_stmt = finish_asm_stmt (volatile_p, string, outputs,
				      inputs, clobbers, labels);
	  /* If the extended syntax was not used, mark the ASM_EXPR.  */
	  if (!extended_p)
	    {
	      tree temp = asm_stmt;
	      if (TREE_CODE (temp) == CLEANUP_POINT_EXPR)
		temp = TREE_OPERAND (temp, 0);

	      ASM_INPUT_P (temp) = 1;
	    }
	}
      else
	cgraph_add_asm_node (string);
    }
}

/* Declarators [gram.dcl.decl] */

/* Parse an init-declarator.

   init-declarator:
     declarator initializer [opt]

   GNU Extension:

   init-declarator:
     declarator asm-specification [opt] attributes [opt] initializer [opt]

   function-definition:
     decl-specifier-seq [opt] declarator ctor-initializer [opt]
       function-body
     decl-specifier-seq [opt] declarator function-try-block

   GNU Extension:

   function-definition:
     __extension__ function-definition

   TM Extension:

   function-definition:
     decl-specifier-seq [opt] declarator function-transaction-block

   The DECL_SPECIFIERS apply to this declarator.  Returns a
   representation of the entity declared.  If MEMBER_P is TRUE, then
   this declarator appears in a class scope.  The new DECL created by
   this declarator is returned.

   The CHECKS are access checks that should be performed once we know
   what entity is being declared (and, therefore, what classes have
   befriended it).

   If FUNCTION_DEFINITION_ALLOWED_P then we handle the declarator and
   for a function-definition here as well.  If the declarator is a
   declarator for a function-definition, *FUNCTION_DEFINITION_P will
   be TRUE upon return.  By that point, the function-definition will
   have been completely parsed.

   FUNCTION_DEFINITION_P may be NULL if FUNCTION_DEFINITION_ALLOWED_P
   is FALSE.

   If MAYBE_RANGE_FOR_DECL is not NULL, the pointed tree will be set to the
   parsed declaration if it is an uninitialized single declarator not followed
   by a `;', or to error_mark_node otherwise. Either way, the trailing `;',
   if present, will not be consumed.  If returned, this declarator will be
   created with SD_INITIALIZED but will not call cp_finish_decl.
*/

static tree
cp_parser_init_declarator (cp_parser* parser,
			   cp_decl_specifier_seq *decl_specifiers,
			   VEC (deferred_access_check,gc)* checks,
			   bool function_definition_allowed_p,
			   bool member_p,
			   int declares_class_or_enum,
			   bool* function_definition_p,
			   tree* maybe_range_for_decl)
{
  cp_token *token = NULL, *asm_spec_start_token = NULL,
           *attributes_start_token = NULL;
  cp_declarator *declarator;
  tree prefix_attributes;
  tree attributes;
  tree asm_specification;
  tree initializer;
  tree decl = NULL_TREE;
  tree scope;
  int is_initialized;
  /* Only valid if IS_INITIALIZED is true.  In that case, CPP_EQ if
     initialized with "= ..", CPP_OPEN_PAREN if initialized with
     "(...)".  */
  enum cpp_ttype initialization_kind;
  bool is_direct_init = false;
  bool is_non_constant_init;
  int ctor_dtor_or_conv_p;
  bool friend_p;
  tree pushed_scope = NULL_TREE;
  /* True iff this turned out to be the uninitialized declarator of a
     range-based `for'; in that case *MAYBE_RANGE_FOR_DECL receives the
     decl and cp_finish_decl is NOT called here.  */
  bool range_for_decl_p = false;

  /* Gather the attributes that were provided with the
     decl-specifiers.  */
  prefix_attributes = decl_specifiers->attributes;

  /* Assume that this is not the declarator for a function
     definition.  */
  if (function_definition_p)
    *function_definition_p = false;

  /* Defer access checks while parsing the declarator; we cannot know
     what names are accessible until we know what is being
     declared.  */
  resume_deferring_access_checks ();

  /* Parse the declarator.  */
  token = cp_lexer_peek_token (parser->lexer);
  declarator
    = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
			    &ctor_dtor_or_conv_p,
			    /*parenthesized_p=*/NULL,
			    member_p);
  /* Gather up the deferred checks.  */
  stop_deferring_access_checks ();

  /* If the DECLARATOR was erroneous, there's no need to go
     further.  */
  if (declarator == cp_error_declarator)
    return error_mark_node;

  /* Check that the number of template-parameter-lists is OK.  */
  if (!cp_parser_check_declarator_template_parameters (parser, declarator,
						       token->location))
    return error_mark_node;

  if (declares_class_or_enum & 2)
    cp_parser_check_for_definition_in_return_type (declarator,
						   decl_specifiers->type,
						   decl_specifiers->type_location);

  /* Figure out what scope the entity declared by the DECLARATOR is
     located in.  `grokdeclarator' sometimes changes the scope, so we
     compute it now.  */
  scope = get_scope_of_declarator (declarator);

  /* Perform any lookups in the declared type which were thought to be
     dependent, but are not in the scope of the declarator.  */
  decl_specifiers->type
    = maybe_update_decl_type (decl_specifiers->type, scope);

  /* If we're allowing GNU extensions, look for an asm-specification
     and attributes.  */
  if (cp_parser_allow_gnu_extensions_p (parser))
    {
      /* Look for an asm-specification.  */
      asm_spec_start_token = cp_lexer_peek_token (parser->lexer);
      asm_specification = cp_parser_asm_specification_opt (parser);
      /* And attributes.  */
      attributes_start_token = cp_lexer_peek_token (parser->lexer);
      attributes = cp_parser_attributes_opt (parser);
    }
  else
    {
      asm_specification = NULL_TREE;
      attributes = NULL_TREE;
    }

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Check to see if the token indicates the start of a
     function-definition.  */
  if (function_declarator_p (declarator)
      && cp_parser_token_starts_function_definition_p (token))
    {
      if (!function_definition_allowed_p)
	{
	  /* If a function-definition should not appear here, issue an
	     error message.  */
	  cp_parser_error (parser,
			   "a function-definition is not allowed here");
	  return error_mark_node;
	}
      else
	{
	  location_t func_brace_location
	    = cp_lexer_peek_token (parser->lexer)->location;

	  /* Neither attributes nor an asm-specification are allowed
	     on a function-definition.  */
	  if (asm_specification)
	    error_at (asm_spec_start_token->location,
		      "an asm-specification is not allowed "
		      "on a function-definition");
	  if (attributes)
	    error_at (attributes_start_token->location,
		      "attributes are not allowed on a function-definition");
	  /* This is a function-definition.  */
	  *function_definition_p = true;

	  /* Parse the function definition.  */
	  if (member_p)
	    decl = cp_parser_save_member_function_body (parser,
							decl_specifiers,
							declarator,
							prefix_attributes);
	  else
	    decl
	      = (cp_parser_function_definition_from_specifiers_and_declarator
		 (parser, decl_specifiers, prefix_attributes, declarator));

	  if (decl != error_mark_node && DECL_STRUCT_FUNCTION (decl))
	    {
	      /* This is where the prologue starts...  */
	      DECL_STRUCT_FUNCTION (decl)->function_start_locus
		= func_brace_location;
	    }

	  return decl;
	}
    }

  /* [dcl.dcl]

     Only in function declarations for constructors, destructors, and
     type conversions can the decl-specifier-seq be omitted.

     We explicitly postpone this check past the point where we handle
     function-definitions because we tolerate function-definitions
     that are missing their return types in some modes.  */
  if (!decl_specifiers->any_specifiers_p && ctor_dtor_or_conv_p <= 0)
    {
      cp_parser_error (parser,
		       "expected constructor, destructor, or type conversion");
      return error_mark_node;
    }

  /* An `=' or an `(', or an '{' in C++0x, indicates an
     initializer.  */
  if (token->type == CPP_EQ
      || token->type == CPP_OPEN_PAREN
      || token->type == CPP_OPEN_BRACE)
    {
      is_initialized = SD_INITIALIZED;
      initialization_kind = token->type;
      if (maybe_range_for_decl)
	*maybe_range_for_decl = error_mark_node;

      /* `= default' / `= delete' defaulted and deleted functions.  */
      if (token->type == CPP_EQ
	  && function_declarator_p (declarator))
	{
	  cp_token *t2 = cp_lexer_peek_nth_token (parser->lexer, 2);
	  if (t2->keyword == RID_DEFAULT)
	    is_initialized = SD_DEFAULTED;
	  else if (t2->keyword == RID_DELETE)
	    is_initialized = SD_DELETED;
	}
    }
  else
    {
      /* If the init-declarator isn't initialized and isn't followed by a
	 `,' or `;', it's not a valid init-declarator.  */
      if (token->type != CPP_COMMA
	  && token->type != CPP_SEMICOLON)
	{
	  if (maybe_range_for_decl && *maybe_range_for_decl != error_mark_node)
	    range_for_decl_p = true;
	  else
	    {
	      cp_parser_error (parser, "expected initializer");
	      return error_mark_node;
	    }
	}
      is_initialized = SD_UNINITIALIZED;
      initialization_kind = CPP_EOF;
    }

  /* Because start_decl has side-effects, we should only call it if we
     know we're going ahead.  By this point, we know that we cannot
     possibly be looking at any other construct.  */
  cp_parser_commit_to_tentative_parse (parser);

  /* If the decl specifiers were bad, issue an error now that we're
     sure this was intended to be a declarator.  Then continue
     declaring the variable(s), as int, to try to cut down on further
     errors.  */
  if (decl_specifiers->any_specifiers_p
      && decl_specifiers->type == error_mark_node)
    {
      cp_parser_error (parser, "invalid type in declaration");
      decl_specifiers->type = integer_type_node;
    }

  /* Check to see whether or not this declaration is a friend.  */
  friend_p = cp_parser_friend_p (decl_specifiers);

  /* Enter the newly declared entry in the symbol table.  If we're
     processing a declaration in a class-specifier, we wait until
     after processing the initializer.  */
  if (!member_p)
    {
      if (parser->in_unbraced_linkage_specification_p)
	decl_specifiers->storage_class = sc_extern;
      decl = start_decl (declarator, decl_specifiers,
			 range_for_decl_p? SD_INITIALIZED : is_initialized,
			 attributes, prefix_attributes,
			 &pushed_scope);
      /* Adjust location of decl if declarator->id_loc is more appropriate:
	 set, and decl wasn't merged with another decl, in which case its
	 location would be different from input_location, and more
	 accurate.  */
      if (DECL_P (decl)
	  && declarator->id_loc != UNKNOWN_LOCATION
	  && DECL_SOURCE_LOCATION (decl) == input_location)
	DECL_SOURCE_LOCATION (decl) = declarator->id_loc;
    }
  else if (scope)
    /* Enter the SCOPE.  That way unqualified names appearing in the
       initializer will be looked up in SCOPE.  */
    pushed_scope = push_scope (scope);

  /* Perform deferred access control checks, now that we know in which
     SCOPE the declared entity resides.  */
  if (!member_p && decl)
    {
      tree saved_current_function_decl = NULL_TREE;

      /* If the entity being declared is a function, pretend that we
	 are in its scope.  If it is a `friend', it may have access to
	 things that would not otherwise be accessible.  */
      if (TREE_CODE (decl) == FUNCTION_DECL)
	{
	  saved_current_function_decl = current_function_decl;
	  current_function_decl = decl;
	}

      /* Perform access checks for template parameters.  */
      cp_parser_perform_template_parameter_access_checks (checks);

      /* Perform the access control checks for the declarator and the
	 decl-specifiers.  */
      perform_deferred_access_checks ();

      /* Restore the saved value.  */
      if (TREE_CODE (decl) == FUNCTION_DECL)
	current_function_decl = saved_current_function_decl;
    }

  /* Parse the initializer.  */
  initializer = NULL_TREE;
  is_direct_init = false;
  is_non_constant_init = true;
  if (is_initialized)
    {
      if (function_declarator_p (declarator))
	{
	  cp_token *initializer_start_token = cp_lexer_peek_token (parser->lexer);
	  if (initialization_kind == CPP_EQ)
	    initializer = cp_parser_pure_specifier (parser);
	  else
	    {
	      /* If the declaration was erroneous, we don't really
		 know what the user intended, so just silently
		 consume the initializer.  */
	      if (decl != error_mark_node)
		error_at (initializer_start_token->location,
			  "initializer provided for function");
	      cp_parser_skip_to_closing_parenthesis (parser,
						     /*recovering=*/true,
						     /*or_comma=*/false,
						     /*consume_paren=*/true);
	    }
	}
      else
	{
	  /* We want to record the extra mangling scope for in-class
	     initializers of class members and initializers of static
	     data member templates.  The former involves deferring
	     parsing of the initializer until end of class as with
	     default arguments.  So right here we only handle the
	     latter.  */
	  if (!member_p && processing_template_decl)
	    start_lambda_scope (decl);
	  initializer = cp_parser_initializer (parser,
					       &is_direct_init,
					       &is_non_constant_init);
	  if (!member_p && processing_template_decl)
	    finish_lambda_scope ();
	}
    }

  /* The old parser allows attributes to appear after a parenthesized
     initializer.  Mark Mitchell proposed removing this functionality
     on the GCC mailing lists on 2002-08-13.  This parser accepts the
     attributes -- but ignores them.  */
  if (cp_parser_allow_gnu_extensions_p (parser)
      && initialization_kind == CPP_OPEN_PAREN)
    if (cp_parser_attributes_opt (parser))
      warning (OPT_Wattributes,
	       "attributes after parenthesized initializer ignored");

  /* For an in-class declaration, use `grokfield' to create the
     declaration.  */
  if (member_p)
    {
      if (pushed_scope)
	{
	  pop_scope (pushed_scope);
	  pushed_scope = NULL_TREE;
	}
      decl = grokfield (declarator, decl_specifiers,
			initializer, !is_non_constant_init,
			/*asmspec=*/NULL_TREE,
			prefix_attributes);
      if (decl && TREE_CODE (decl) == FUNCTION_DECL)
	cp_parser_save_default_args (parser, decl);
    }

  /* Finish processing the declaration.  But, skip member
     declarations.  */
  if (!member_p && decl && decl != error_mark_node && !range_for_decl_p)
    {
      cp_finish_decl (decl,
		      initializer, !is_non_constant_init,
		      asm_specification,
		      /* If the initializer is in parentheses, then this is
			 a direct-initialization, which means that an
			 `explicit' constructor is OK.  Otherwise, an
			 `explicit' constructor cannot be used.  */
		      ((is_direct_init || !is_initialized)
		       ? LOOKUP_NORMAL : LOOKUP_IMPLICIT));
    }
  else if ((cxx_dialect != cxx98)
	   && friend_p && decl && TREE_CODE (decl) == FUNCTION_DECL)
    /* Core issue #226 (C++0x only): A default template-argument
       shall not be specified in a friend class template
       declaration.  */
    check_default_tmpl_args (decl, current_template_parms, /*is_primary=*/1,
			     /*is_partial=*/0, /*is_friend_decl=*/1);

  /* For a friend, the scope was pushed for lookup only; the friend's
     scope is left for the caller (grokfield handles members above).  */
  if (!friend_p && pushed_scope)
    pop_scope (pushed_scope);

  return decl;
}

/* Parse a declarator.
   declarator:
     direct-declarator
     ptr-operator declarator

   abstract-declarator:
     ptr-operator abstract-declarator [opt]
     direct-abstract-declarator

   GNU Extensions:

   declarator:
     attributes [opt] direct-declarator
     attributes [opt] ptr-operator declarator

   abstract-declarator:
     attributes [opt] ptr-operator abstract-declarator [opt]
     attributes [opt] direct-abstract-declarator

   If CTOR_DTOR_OR_CONV_P is not NULL, *CTOR_DTOR_OR_CONV_P is used to
   detect constructor, destructor or conversion operators.  It is set
   to -1 if the declarator is a name, and +1 if it is a function.
   Otherwise it is set to zero.  Usually you just want to test for >0,
   but internally the negative value is used.

   (The reason for CTOR_DTOR_OR_CONV_P is that a declaration must have
   a decl-specifier-seq unless it declares a constructor, destructor,
   or conversion.  It might seem that we could check this condition in
   semantic analysis, rather than parsing, but that makes it difficult
   to handle something like `f()'.  We want to notice that there are
   no decl-specifiers, and therefore realize that this is an
   expression, not a declaration.)

   If PARENTHESIZED_P is non-NULL, *PARENTHESIZED_P is set to true iff
   the declarator is a direct-declarator of the form "(...)".

   MEMBER_P is true iff this declarator is a member-declarator.  */

static cp_declarator *
cp_parser_declarator (cp_parser* parser,
		      cp_parser_declarator_kind dcl_kind,
		      int* ctor_dtor_or_conv_p,
		      bool* parenthesized_p,
		      bool member_p)
{
  cp_declarator *declarator;
  enum tree_code code;
  cp_cv_quals cv_quals;
  tree class_type;
  tree attributes = NULL_TREE;

  /* Assume this is not a constructor, destructor, or type-conversion
     operator.  */
  if (ctor_dtor_or_conv_p)
    *ctor_dtor_or_conv_p = 0;

  if (cp_parser_allow_gnu_extensions_p (parser))
    attributes = cp_parser_attributes_opt (parser);

  /* Check for the ptr-operator production.  */
  cp_parser_parse_tentatively (parser);
  /* Parse the ptr-operator.  */
  code = cp_parser_ptr_operator (parser,
				 &class_type,
				 &cv_quals);
  /* If that worked, then we have a ptr-operator.  */
  if (cp_parser_parse_definitely (parser))
    {
      /* If a ptr-operator was found, then this declarator was not
	 parenthesized.  */
      if (parenthesized_p)
	*parenthesized_p = true;
      /* The dependent declarator is optional if we are parsing an
	 abstract-declarator.  */
      if (dcl_kind != CP_PARSER_DECLARATOR_NAMED)
	cp_parser_parse_tentatively (parser);

      /* Parse the dependent declarator, recursively.  */
      declarator = cp_parser_declarator (parser, dcl_kind,
					 /*ctor_dtor_or_conv_p=*/NULL,
					 /*parenthesized_p=*/NULL,
					 /*member_p=*/false);

      /* If we are parsing an abstract-declarator, we must handle the
	 case where the dependent declarator is absent.  */
      if (dcl_kind != CP_PARSER_DECLARATOR_NAMED
	  && !cp_parser_parse_definitely (parser))
	declarator = NULL;

      /* Wrap the dependent declarator in a pointer/reference/
	 pointer-to-member declarator node.  */
      declarator = cp_parser_make_indirect_declarator
	(code, class_type, cv_quals, declarator);
    }
  /* Everything else is a direct-declarator.  */
  else
    {
      if (parenthesized_p)
	*parenthesized_p = cp_lexer_next_token_is (parser->lexer,
						   CPP_OPEN_PAREN);
      declarator = cp_parser_direct_declarator (parser, dcl_kind,
						ctor_dtor_or_conv_p,
						member_p);
    }

  if (attributes && declarator && declarator != cp_error_declarator)
    declarator->attributes = attributes;

  return declarator;
}

/* Parse a direct-declarator or direct-abstract-declarator.

   direct-declarator:
     declarator-id
     direct-declarator ( parameter-declaration-clause )
       cv-qualifier-seq [opt]
       exception-specification [opt]
     direct-declarator [ constant-expression [opt] ]
     ( declarator )

   direct-abstract-declarator:
     direct-abstract-declarator [opt]
       ( parameter-declaration-clause )
       cv-qualifier-seq [opt]
       exception-specification [opt]
     direct-abstract-declarator [opt] [ constant-expression [opt] ]
     ( abstract-declarator )

   Returns a representation of the declarator.  DCL_KIND is
   CP_PARSER_DECLARATOR_ABSTRACT, if we are parsing a
   direct-abstract-declarator.  It is CP_PARSER_DECLARATOR_NAMED, if
   we are parsing a direct-declarator.
   It is CP_PARSER_DECLARATOR_EITHER, if we can accept either - in the
   case of ambiguity we prefer an abstract declarator, as per
   [dcl.ambig.res].  CTOR_DTOR_OR_CONV_P and MEMBER_P are as for
   cp_parser_declarator.  */

static cp_declarator *
cp_parser_direct_declarator (cp_parser* parser,
			     cp_parser_declarator_kind dcl_kind,
			     int* ctor_dtor_or_conv_p,
			     bool member_p)
{
  cp_token *token;
  cp_declarator *declarator = NULL;
  tree scope = NULL_TREE;
  bool saved_default_arg_ok_p = parser->default_arg_ok_p;
  bool saved_in_declarator_p = parser->in_declarator_p;
  /* FIRST is true until we have parsed the innermost piece of the
     declarator (the declarator-id or a parenthesized declarator).  */
  bool first = true;
  tree pushed_scope = NULL_TREE;

  while (true)
    {
      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      if (token->type == CPP_OPEN_PAREN)
	{
	  /* This is either a parameter-declaration-clause, or a
	     parenthesized declarator.  When we know we are parsing a
	     named declarator, it must be a parenthesized declarator
	     if FIRST is true.  For instance, `(int)' is a
	     parameter-declaration-clause, with an omitted
	     direct-abstract-declarator.  But `((*))', is a
	     parenthesized abstract declarator.  Finally, when T is a
	     template parameter `(T)' is a
	     parameter-declaration-clause, and not a parenthesized
	     named declarator.

	     We first try and parse a parameter-declaration-clause,
	     and then try a nested declarator (if FIRST is true).

	     It is not an error for it not to be a
	     parameter-declaration-clause, even when FIRST is
	     false.  Consider,

	       int i (int);
	       int i (3);

	     The first is the declaration of a function while the
	     second is the definition of a variable, including its
	     initializer.

	     Having seen only the parenthesis, we cannot know which of
	     these two alternatives should be selected.  Even more
	     complex are examples like:

	       int i (int (a));
	       int i (int (3));

	     The former is a function-declaration; the latter is a
	     variable initialization.

	     Thus again, we try a parameter-declaration-clause, and if
	     that fails, we back out and return.  */

	  if (!first || dcl_kind != CP_PARSER_DECLARATOR_NAMED)
	    {
	      tree params;
	      unsigned saved_num_template_parameter_lists;
	      bool is_declarator = false;
	      tree t;

	      /* In a member-declarator, the only valid interpretation
		 of a parenthesis is the start of a
		 parameter-declaration-clause.  (It is invalid to
		 initialize a static data member with a parenthesized
		 initializer; only the "=" form of initialization is
		 permitted.)  */
	      if (!member_p)
		cp_parser_parse_tentatively (parser);

	      /* Consume the `('.  */
	      cp_lexer_consume_token (parser->lexer);
	      if (first)
		{
		  /* If this is going to be an abstract declarator, we're
		     in a declarator and we can't have default args.  */
		  parser->default_arg_ok_p = false;
		  parser->in_declarator_p = true;
		}

	      /* Inside the function parameter list, surrounding
		 template-parameter-lists do not apply.  */
	      saved_num_template_parameter_lists
		= parser->num_template_parameter_lists;
	      parser->num_template_parameter_lists = 0;

	      begin_scope (sk_function_parms, NULL_TREE);

	      /* Parse the parameter-declaration-clause.  */
	      params = cp_parser_parameter_declaration_clause (parser);

	      parser->num_template_parameter_lists
		= saved_num_template_parameter_lists;

	      /* Consume the `)'.  */
	      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

	      /* If all went well, parse the cv-qualifier-seq and the
		 exception-specification.  */
	      if (member_p || cp_parser_parse_definitely (parser))
		{
		  cp_cv_quals cv_quals;
		  cp_virt_specifiers virt_specifiers;
		  tree exception_specification;
		  tree late_return;

		  is_declarator = true;

		  /* A parameter list makes a name (-1) into a function
		     (+1); any other value becomes 0 -- see the comment
		     on CTOR_DTOR_OR_CONV_P above cp_parser_declarator.  */
		  if (ctor_dtor_or_conv_p)
		    *ctor_dtor_or_conv_p = *ctor_dtor_or_conv_p < 0;
		  first = false;

		  /* Parse the cv-qualifier-seq.  */
		  cv_quals = cp_parser_cv_qualifier_seq_opt (parser);
		  /* And the exception-specification.  */
		  exception_specification
		    = cp_parser_exception_specification_opt (parser);
		  /* Parse the virt-specifier-seq.  */
		  virt_specifiers = cp_parser_virt_specifier_seq_opt (parser);

		  late_return = (cp_parser_late_return_type_opt
				 (parser, member_p ? cv_quals : -1));

		  /* Create the function-declarator.  */
		  declarator = make_call_declarator (declarator,
						     params,
						     cv_quals,
						     virt_specifiers,
						     exception_specification,
						     late_return);
		  /* Any subsequent parameter lists are to do with
		     return type, so are not those of the declared
		     function.  */
		  parser->default_arg_ok_p = false;
		}

	      /* Remove the function parms from scope.  */
	      for (t = current_binding_level->names;
		   t;
		   t = DECL_CHAIN (t))
		pop_binding (DECL_NAME (t), t);
	      leave_scope();

	      if (is_declarator)
		/* Repeat the main loop.  */
		continue;
	    }

	  /* If this is the first, we can try a parenthesized
	     declarator.  */
	  if (first)
	    {
	      bool saved_in_type_id_in_expr_p;

	      parser->default_arg_ok_p = saved_default_arg_ok_p;
	      parser->in_declarator_p = saved_in_declarator_p;

	      /* Consume the `('.  */
	      cp_lexer_consume_token (parser->lexer);
	      /* Parse the nested declarator.  */
	      saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
	      parser->in_type_id_in_expr_p = true;
	      declarator
		= cp_parser_declarator (parser, dcl_kind, ctor_dtor_or_conv_p,
					/*parenthesized_p=*/NULL,
					member_p);
	      parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
	      first = false;
	      /* Expect a `)'.  */
	      if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
		declarator = cp_error_declarator;
	      if (declarator == cp_error_declarator)
		break;

	      goto handle_declarator;
	    }
	  /* Otherwise, we must be done.  */
	  else
	    break;
	}
      else if ((!first || dcl_kind != CP_PARSER_DECLARATOR_NAMED)
	       && token->type == CPP_OPEN_SQUARE)
	{
	  /* Parse an array-declarator.  */
	  tree bounds;

	  if (ctor_dtor_or_conv_p)
	    *ctor_dtor_or_conv_p = 0;

	  first = false;
	  parser->default_arg_ok_p = false;
	  parser->in_declarator_p = true;
	  /* Consume the `['.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Peek at the next token.  */
	  token = cp_lexer_peek_token (parser->lexer);
	  /* If the next token is `]', then there is no
	     constant-expression.  */
	  if (token->type != CPP_CLOSE_SQUARE)
	    {
	      bool non_constant_p;

	      bounds
		= cp_parser_constant_expression (parser,
						 /*allow_non_constant=*/true,
						 &non_constant_p);
	      if (!non_constant_p)
		/* OK */;
	      else if (error_operand_p (bounds))
		/* Already gave an error.  */;
	      else if (!parser->in_function_body
		       || current_binding_level->kind == sk_function_parms)
		{
		  /* Normally, the array bound must be an integral constant
		     expression.  However, as an extension, we allow VLAs
		     in function scopes as long as they aren't part of a
		     parameter declaration.  */
		  cp_parser_error (parser,
				   "array bound is not an integer constant");
		  bounds = error_mark_node;
		}
	      else if (processing_template_decl)
		{
		  /* Remember this wasn't a constant-expression.  */
		  bounds = build_nop (TREE_TYPE (bounds), bounds);
		  TREE_SIDE_EFFECTS (bounds) = 1;
		}
	    }
	  else
	    bounds = NULL_TREE;
	  /* Look for the closing `]'.  */
	  if (!cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE))
	    {
	      declarator = cp_error_declarator;
	      break;
	    }

	  declarator = make_array_declarator (declarator, bounds);
	}
      else if (first && dcl_kind != CP_PARSER_DECLARATOR_ABSTRACT)
	{
	  {
	    tree qualifying_scope;
	    tree unqualified_name;
	    special_function_kind sfk;
	    bool abstract_ok;
	    bool pack_expansion_p = false;
	    cp_token *declarator_id_start_token;

	    /* Parse a declarator-id */
	    abstract_ok = (dcl_kind == CP_PARSER_DECLARATOR_EITHER);
	    if (abstract_ok)
	      {
		cp_parser_parse_tentatively (parser);

		/* If we see an ellipsis, we should be looking at a
		   parameter pack.  */
		if (token->type == CPP_ELLIPSIS)
		  {
		    /* Consume the `...' */
		    cp_lexer_consume_token (parser->lexer);

		    pack_expansion_p = true;
		  }
	      }

	    declarator_id_start_token = cp_lexer_peek_token (parser->lexer);
	    unqualified_name
	      = cp_parser_declarator_id (parser, /*optional_p=*/abstract_ok);
	    qualifying_scope = parser->scope;
	    if (abstract_ok)
	      {
		bool okay = false;

		if (!unqualified_name && pack_expansion_p)
		  {
		    /* Check whether an error occurred.  */
		    okay = !cp_parser_error_occurred (parser);

		    /* We already consumed the ellipsis to mark a
		       parameter pack, but we have no way to report it,
		       so abort the tentative parse.  We will be exiting
		       immediately anyway.  */
		    cp_parser_abort_tentative_parse (parser);
		  }
		else
		  okay = cp_parser_parse_definitely (parser);

		if (!okay)
		  unqualified_name = error_mark_node;
		else if (unqualified_name
			 && (qualifying_scope
			     || (TREE_CODE (unqualified_name)
				 != IDENTIFIER_NODE)))
		  {
		    cp_parser_error (parser, "expected unqualified-id");
		    unqualified_name = error_mark_node;
		  }
	      }

	    if (!unqualified_name)
	      return NULL;
	    if (unqualified_name == error_mark_node)
	      {
		declarator = cp_error_declarator;
		pack_expansion_p = false;
		declarator->parameter_pack_p = false;
		break;
	      }

	    if (qualifying_scope && at_namespace_scope_p ()
		&& TREE_CODE (qualifying_scope) == TYPENAME_TYPE)
	      {
		/* In the declaration of a member of a template class
		   outside of the class itself, the SCOPE will sometimes
		   be a TYPENAME_TYPE.  For example, given:

		     template <typename T>
		     int S<T>::R::i = 3;

		   the SCOPE will be a TYPENAME_TYPE for `S<T>::R'.  In
		   this context, we must resolve S<T>::R to an ordinary
		   type, rather than a typename type.

		   The reason we normally avoid resolving TYPENAME_TYPEs
		   is that a specialization of `S' might render
		   `S<T>::R' not a type.  However, if `S' is
		   specialized, then this `i' will not be used, so there
		   is no harm in resolving the types here.  */
		tree type;

		/* Resolve the TYPENAME_TYPE.  */
		type = resolve_typename_type (qualifying_scope,
					      /*only_current_p=*/false);
		/* If that failed, the declarator is invalid.  */
		if (TREE_CODE (type) == TYPENAME_TYPE)
		  {
		    if (typedef_variant_p (type))
		      error_at (declarator_id_start_token->location,
				"cannot define member of dependent typedef "
				"%qT", type);
		    else
		      error_at (declarator_id_start_token->location,
				"%<%T::%E%> is not a type",
				TYPE_CONTEXT (qualifying_scope),
				TYPE_IDENTIFIER (qualifying_scope));
		  }
		qualifying_scope = type;
	      }

	    sfk = sfk_none;

	    if (unqualified_name)
	      {
		tree class_type;

		if (qualifying_scope
		    && CLASS_TYPE_P (qualifying_scope))
		  class_type = qualifying_scope;
		else
		  class_type = current_class_type;

		if (TREE_CODE (unqualified_name) == TYPE_DECL)
		  {
		    tree name_type = TREE_TYPE (unqualified_name);
		    if (class_type && same_type_p (name_type, class_type))
		      {
			if (qualifying_scope
			    && CLASSTYPE_USE_TEMPLATE (name_type))
			  {
			    error_at (declarator_id_start_token->location,
				      "invalid use of constructor as a template");
			    inform (declarator_id_start_token->location,
				    "use %<%T::%D%> instead of %<%T::%D%> to "
				    "name the constructor in a qualified name",
				    class_type,
				    DECL_NAME (TYPE_TI_TEMPLATE (class_type)),
				    class_type, name_type);
			    declarator = cp_error_declarator;
			    break;
			  }
			else
			  unqualified_name = constructor_name (class_type);
		      }
		    else
		      {
			/* We do not attempt to print the declarator
			   here because we do not have enough
			   information about its original syntactic
			   form.  */
			cp_parser_error (parser, "invalid declarator");
			declarator = cp_error_declarator;
			break;
		      }
		  }

		/* Classify the declarator-id as an ordinary name or a
		   special member function (ctor/dtor/conversion).  */
		if (class_type)
		  {
		    if (TREE_CODE (unqualified_name) == BIT_NOT_EXPR)
		      sfk = sfk_destructor;
		    else if (IDENTIFIER_TYPENAME_P (unqualified_name))
		      sfk = sfk_conversion;
		    else if (/* There's no way to declare a constructor
				for an anonymous type, even if the type
				got a name for linkage purposes.  */
			     !TYPE_WAS_ANONYMOUS (class_type)
			     && constructor_name_p (unqualified_name,
						    class_type))
		      {
			unqualified_name = constructor_name (class_type);
			sfk = sfk_constructor;
		      }
		    else if (is_overloaded_fn (unqualified_name)
			     && DECL_CONSTRUCTOR_P (get_first_fn
						    (unqualified_name)))
		      sfk = sfk_constructor;

		    if (ctor_dtor_or_conv_p && sfk != sfk_none)
		      *ctor_dtor_or_conv_p = -1;
		  }
	      }
	    declarator = make_id_declarator (qualifying_scope,
					     unqualified_name,
					     sfk);
	    declarator->id_loc = token->location;
	    declarator->parameter_pack_p = pack_expansion_p;

	    if (pack_expansion_p)
	      maybe_warn_variadic_templates ();
	  }

	handle_declarator:;
	  scope = get_scope_of_declarator (declarator);
	  if (scope)
	    /* Any names that appear after the declarator-id for a
	       member are looked up in the containing scope.  */
	    pushed_scope = push_scope (scope);
	  parser->in_declarator_p = true;
	  if ((ctor_dtor_or_conv_p && *ctor_dtor_or_conv_p)
	      || (declarator && declarator->kind == cdk_id))
	    /* Default args are only allowed on function
	       declarations.  */
	    parser->default_arg_ok_p = saved_default_arg_ok_p;
	  else
	    parser->default_arg_ok_p = false;

	  first = false;
	}
      /* We're done.  */
      else
	break;
    }

  /* For an abstract declarator, we might wind up with nothing at this
     point.  That's an error; the declarator is not optional.  */
  if (!declarator)
    cp_parser_error (parser, "expected declarator");

  /* If we entered a scope, we must exit it now.  */
  if (pushed_scope)
    pop_scope (pushed_scope);

  parser->default_arg_ok_p = saved_default_arg_ok_p;
  parser->in_declarator_p = saved_in_declarator_p;

  return declarator;
}

/* Parse a ptr-operator.

   ptr-operator:
     * cv-qualifier-seq [opt]
     &
     :: [opt] nested-name-specifier * cv-qualifier-seq [opt]

   GNU Extension:

   ptr-operator:
     & cv-qualifier-seq [opt]

   Returns INDIRECT_REF if a pointer, or pointer-to-member, was used.
   Returns ADDR_EXPR if a reference was used, or NON_LVALUE_EXPR for
   an rvalue reference.  In the case of a pointer-to-member, *TYPE is
   filled in with the TYPE containing the member.
   *CV_QUALS is filled in with the cv-qualifier-seq, or
   TYPE_UNQUALIFIED, if there are no cv-qualifiers.  Returns
   ERROR_MARK if an error occurred.  Note that the tree codes returned
   by this function have nothing to do with the types of trees that
   will be eventually be created to represent the pointer or reference
   type being parsed.  They are just constants with suggestive
   names.  */
static enum tree_code
cp_parser_ptr_operator (cp_parser* parser,
			tree* type,
			cp_cv_quals *cv_quals)
{
  enum tree_code code = ERROR_MARK;
  cp_token *token;

  /* Assume that it's not a pointer-to-member.  */
  *type = NULL_TREE;
  /* And that there are no cv-qualifiers.  */
  *cv_quals = TYPE_UNQUALIFIED;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  /* If it's a `*', `&' or `&&' we have a pointer or reference.  */
  if (token->type == CPP_MULT)
    code = INDIRECT_REF;
  else if (token->type == CPP_AND)
    code = ADDR_EXPR;
  else if ((cxx_dialect != cxx98) &&
	   token->type == CPP_AND_AND) /* C++0x only */
    code = NON_LVALUE_EXPR;

  if (code != ERROR_MARK)
    {
      /* Consume the `*', `&' or `&&'.  */
      cp_lexer_consume_token (parser->lexer);

      /* A `*' can be followed by a cv-qualifier-seq, and so can a
	 `&', if we are allowing GNU extensions.  (The only qualifier
	 that can legally appear after `&' is `restrict', but that is
	 enforced during semantic analysis.)  */
      if (code == INDIRECT_REF
	  || cp_parser_allow_gnu_extensions_p (parser))
	*cv_quals = cp_parser_cv_qualifier_seq_opt (parser);
    }
  else
    {
      /* Try the pointer-to-member case.  */
      cp_parser_parse_tentatively (parser);
      /* Look for the optional `::' operator.  */
      cp_parser_global_scope_opt (parser,
				  /*current_scope_valid_p=*/false);
      /* Look for the nested-name specifier.  */
      token = cp_lexer_peek_token (parser->lexer);
      cp_parser_nested_name_specifier (parser,
				       /*typename_keyword_p=*/false,
				       /*check_dependency_p=*/true,
				       /*type_p=*/false,
				       /*is_declaration=*/false);
      /* If we found it, and the next token is a `*', then we are
	 indeed looking at a pointer-to-member operator.  */
      if (!cp_parser_error_occurred (parser)
	  && cp_parser_require (parser, CPP_MULT, RT_MULT))
	{
	  /* Indicate that the `*' operator was used.  */
	  code = INDIRECT_REF;

	  if (TREE_CODE (parser->scope) == NAMESPACE_DECL)
	    error_at (token->location, "%qD is a namespace", parser->scope);
	  else if (TREE_CODE (parser->scope) == ENUMERAL_TYPE)
	    error_at (token->location, "cannot form pointer to member of "
		      "non-class %q#T", parser->scope);
	  else
	    {
	      /* The type of which the member is a member is given by the
		 current SCOPE.  */
	      *type = parser->scope;
	      /* The next name will not be qualified.  */
	      parser->scope = NULL_TREE;
	      parser->qualifying_scope = NULL_TREE;
	      parser->object_scope = NULL_TREE;
	      /* Look for the optional cv-qualifier-seq.  */
	      *cv_quals = cp_parser_cv_qualifier_seq_opt (parser);
	    }
	}
      /* If that didn't work we don't have a ptr-operator.  */
      if (!cp_parser_parse_definitely (parser))
	cp_parser_error (parser, "expected ptr-operator");
    }

  return code;
}

/* Parse an (optional) cv-qualifier-seq.

   cv-qualifier-seq:
     cv-qualifier cv-qualifier-seq [opt]

   cv-qualifier:
     const
     volatile

   GNU Extension:

   cv-qualifier:
     __restrict__

   Returns a bitmask representing the cv-qualifiers.  */

static cp_cv_quals
cp_parser_cv_qualifier_seq_opt (cp_parser* parser)
{
  cp_cv_quals cv_quals = TYPE_UNQUALIFIED;

  while (true)
    {
      cp_token *token;
      cp_cv_quals cv_qualifier;

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* See if it's a cv-qualifier.  */
      switch (token->keyword)
	{
	case RID_CONST:
	  cv_qualifier = TYPE_QUAL_CONST;
	  break;

	case RID_VOLATILE:
	  cv_qualifier = TYPE_QUAL_VOLATILE;
	  break;

	case RID_RESTRICT:
	  cv_qualifier = TYPE_QUAL_RESTRICT;
	  break;

	default:
	  cv_qualifier = TYPE_UNQUALIFIED;
	  break;
	}

      if (!cv_qualifier)
	break;

      if (cv_quals & cv_qualifier)
	{
	  /* Error recovery: complain about the duplicate, then remove
	     the token so that re-parsing does not trip over it again.  */
	  error_at (token->location, "duplicate cv-qualifier");
	  cp_lexer_purge_token (parser->lexer);
	}
      else
	{
	  cp_lexer_consume_token (parser->lexer);
	  cv_quals |= cv_qualifier;
	}
    }

  return cv_quals;
}

/* Parse an (optional) virt-specifier-seq.

   virt-specifier-seq:
     virt-specifier virt-specifier-seq [opt]

   virt-specifier:
     override
     final

   Returns a bitmask representing the virt-specifiers.  */

static cp_virt_specifiers
cp_parser_virt_specifier_seq_opt (cp_parser* parser)
{
  cp_virt_specifiers virt_specifiers = VIRT_SPEC_UNSPECIFIED;

  while (true)
    {
      cp_token *token;
      cp_virt_specifiers virt_specifier;

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* See if it's a virt-specifier-qualifier.  `override' and `final'
	 are not keywords; they are recognized as plain identifiers by
	 spelling.  */
      if (token->type != CPP_NAME)
	break;
      if (!strcmp (IDENTIFIER_POINTER(token->u.value), "override"))
	{
	  maybe_warn_cpp0x (CPP0X_OVERRIDE_CONTROLS);
	  virt_specifier = VIRT_SPEC_OVERRIDE;
	}
      else if (!strcmp (IDENTIFIER_POINTER(token->u.value), "final"))
	{
	  maybe_warn_cpp0x (CPP0X_OVERRIDE_CONTROLS);
	  virt_specifier = VIRT_SPEC_FINAL;
	}
      else if (!strcmp (IDENTIFIER_POINTER(token->u.value), "__final"))
	{
	  /* GNU spelling; no C++0x diagnostic.  */
	  virt_specifier = VIRT_SPEC_FINAL;
	}
      else
	break;

      if (virt_specifiers & virt_specifier)
	{
	  /* Error recovery as for duplicate cv-qualifiers.  */
	  error_at (token->location, "duplicate virt-specifier");
	  cp_lexer_purge_token (parser->lexer);
	}
      else
	{
	  cp_lexer_consume_token (parser->lexer);
	  virt_specifiers |= virt_specifier;
	}
    }
  return virt_specifiers;
}

/* Used by handling of trailing-return-types and NSDMI, in which 'this'
   is in scope even though it isn't real.  */

static void
inject_this_parameter (tree ctype, cp_cv_quals quals)
{
  tree this_parm;

  if (current_class_ptr)
    {
      /* We don't clear this between NSDMIs.  Is it already what we
	 want?  */
      tree type = TREE_TYPE (TREE_TYPE (current_class_ptr));
      if (same_type_ignoring_top_level_qualifiers_p (ctype, type)
	  && cp_type_quals (type) == quals)
	return;
    }

  this_parm = build_this_parm (ctype, quals);
  /* Clear this first to avoid shortcut in cp_build_indirect_ref.  */
  current_class_ptr = NULL_TREE;
  current_class_ref
    = cp_build_indirect_ref (this_parm, RO_NULL, tf_warning_or_error);
  current_class_ptr = this_parm;
}

/* Parse a late-specified return type, if any.  This is not a separate
   non-terminal, but part of a function declarator, which looks like

     -> trailing-type-specifier-seq abstract-declarator(opt)

   Returns the type indicated by the type-id.

   QUALS is either a bitmask of cv_qualifiers or -1 for a non-member
   function.  */

static tree
cp_parser_late_return_type_opt (cp_parser* parser, cp_cv_quals quals)
{
  cp_token *token;
  tree type;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* A late-specified return type is indicated by an initial '->'.  */
  if (token->type != CPP_DEREF)
    return NULL_TREE;

  /* Consume the ->.  */
  cp_lexer_consume_token (parser->lexer);

  if (quals >= 0)
    {
      /* DR 1207: 'this' is in scope in the trailing return type.  */
      gcc_assert (current_class_ptr == NULL_TREE);
      inject_this_parameter (current_class_type, quals);
    }

  type = cp_parser_trailing_type_id (parser);

  /* Undo the injection above for member functions.  */
  if (quals >= 0)
    current_class_ptr = current_class_ref = NULL_TREE;

  return type;
}

/* Parse a declarator-id.

   declarator-id:
     id-expression
     :: [opt] nested-name-specifier [opt] type-name

   In the `id-expression' case, the value returned is as for
   cp_parser_id_expression if the id-expression was an unqualified-id.
   If the id-expression was a qualified-id, then a SCOPE_REF is
   returned.  The first operand is the scope (either a NAMESPACE_DECL
   or TREE_TYPE), but the second is still just a representation of an
   unqualified-id.
*/

static tree
cp_parser_declarator_id (cp_parser* parser, bool optional_p)
{
  tree id;

  /* The expression must be an id-expression.  Assume that qualified
     names are the names of types so that:

       template <class T>
       int S<T>::R::i = 3;

     will work; we must treat `S<T>::R' as the name of a type.
     Similarly, assume that qualified names are templates, where
     required, so that:

       template <class T>
       int S<T>::R<T>::i = 3;

     will work, too.  */
  id = cp_parser_id_expression (parser,
				/*template_keyword_p=*/false,
				/*check_dependency_p=*/false,
				/*template_p=*/NULL,
				/*declarator_p=*/true,
				optional_p);
  /* A BASELINK wraps a set of member functions; the declarator-id is
     the underlying function set itself.  */
  if (id && BASELINK_P (id))
    id = BASELINK_FUNCTIONS (id);
  return id;
}

/* Parse a type-id.

   type-id:
     type-specifier-seq abstract-declarator [opt]

   Returns the TYPE specified.  */

static tree
cp_parser_type_id_1 (cp_parser* parser, bool is_template_arg,
		     bool is_trailing_return)
{
  cp_decl_specifier_seq type_specifier_seq;
  cp_declarator *abstract_declarator;

  /* Parse the type-specifier-seq.  */
  cp_parser_type_specifier_seq (parser, /*is_declaration=*/false,
				is_trailing_return,
				&type_specifier_seq);
  if (type_specifier_seq.type == error_mark_node)
    return error_mark_node;

  /* There might or might not be an abstract declarator.  */
  cp_parser_parse_tentatively (parser);
  /* Look for the declarator.  */
  abstract_declarator
    = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_ABSTRACT, NULL,
			    /*parenthesized_p=*/NULL,
			    /*member_p=*/false);
  /* Check to see if there really was a declarator.  */
  if (!cp_parser_parse_definitely (parser))
    abstract_declarator = NULL;

  if (type_specifier_seq.type
      && type_uses_auto (type_specifier_seq.type))
    {
      /* A type-id with type 'auto' is only ok if the abstract declarator
	 is a function declarator with a late-specified return type.
*/
      if (abstract_declarator
	  && abstract_declarator->kind == cdk_function
	  && abstract_declarator->u.function.late_return_type)
	/* OK */;
      else
	{
	  error ("invalid use of %<auto%>");
	  return error_mark_node;
	}
    }

  return groktypename (&type_specifier_seq, abstract_declarator,
		       is_template_arg);
}

/* Parse a type-id in an ordinary context: not a template argument and
   not a trailing-return-type.  */

static tree cp_parser_type_id (cp_parser *parser)
{
  return cp_parser_type_id_1 (parser, false, false);
}

/* Parse a type-id used as a template type argument.  Type definitions
   are forbidden inside template arguments, so the forbidden-message is
   set for the duration of the parse.  */

static tree cp_parser_template_type_arg (cp_parser *parser)
{
  tree r;
  const char *saved_message = parser->type_definition_forbidden_message;
  parser->type_definition_forbidden_message
    = G_("types may not be defined in template arguments");
  r = cp_parser_type_id_1 (parser, true, false);
  parser->type_definition_forbidden_message = saved_message;
  return r;
}

/* Parse a type-id appearing in a trailing-return-type (after "->").  */

static tree cp_parser_trailing_type_id (cp_parser *parser)
{
  return cp_parser_type_id_1 (parser, false, true);
}

/* Parse a type-specifier-seq.

   type-specifier-seq:
     type-specifier type-specifier-seq [opt]

   GNU extension:

   type-specifier-seq:
     attributes type-specifier-seq [opt]

   If IS_DECLARATION is true, we are at the start of a "condition" or
   exception-declaration, so we might be followed by a declarator-id.

   If IS_TRAILING_RETURN is true, we are in a trailing-return-type,
   i.e. we've just seen "->".

   Sets *TYPE_SPECIFIER_SEQ to represent the sequence.  */

static void
cp_parser_type_specifier_seq (cp_parser* parser,
			      bool is_declaration,
			      bool is_trailing_return,
			      cp_decl_specifier_seq *type_specifier_seq)
{
  bool seen_type_specifier = false;
  cp_parser_flags flags = CP_PARSER_FLAGS_OPTIONAL;
  cp_token *start_token = NULL;

  /* Clear the TYPE_SPECIFIER_SEQ.  */
  clear_decl_specs (type_specifier_seq);

  /* In the context of a trailing return type, enum E { } is an
     elaborated-type-specifier followed by a function-body, not an
     enum-specifier.  */
  if (is_trailing_return)
    flags |= CP_PARSER_FLAGS_NO_TYPE_DEFINITIONS;

  /* Parse the type-specifiers and attributes.  */
  while (true)
    {
      tree type_specifier;
      bool is_cv_qualifier;

      /* Check for attributes first.
*/
      if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ATTRIBUTE))
	{
	  type_specifier_seq->attributes =
	    chainon (type_specifier_seq->attributes,
		     cp_parser_attributes_opt (parser));
	  continue;
	}

      /* record the token of the beginning of the type specifier seq,
         for error reporting purposes*/
     if (!start_token)
       start_token = cp_lexer_peek_token (parser->lexer);

      /* Look for the type-specifier.  */
      type_specifier = cp_parser_type_specifier (parser,
						 flags,
						 type_specifier_seq,
						 /*is_declaration=*/false,
						 NULL,
						 &is_cv_qualifier);
      if (!type_specifier)
	{
	  /* If the first type-specifier could not be found, this is not a
	     type-specifier-seq at all.  */
	  if (!seen_type_specifier)
	    {
	      cp_parser_error (parser, "expected type-specifier");
	      type_specifier_seq->type = error_mark_node;
	      return;
	    }
	  /* If subsequent type-specifiers could not be found, the
	     type-specifier-seq is complete.  */
	  break;
	}

      seen_type_specifier = true;
      /* The standard says that a condition can be:

	    type-specifier-seq declarator = assignment-expression

	 However, given:

	   struct S {};
	   if (int S = ...)

	 we should treat the "S" as a declarator, not as a
	 type-specifier.  The standard doesn't say that explicitly for
	 type-specifier-seq, but it does say that for
	 decl-specifier-seq in an ordinary declaration.  Perhaps it
	 would be clearer just to allow a decl-specifier-seq here, and
	 then add a semantic restriction that if any decl-specifiers
	 that are not type-specifiers appear, the program is invalid.  */
      if (is_declaration && !is_cv_qualifier)
	flags |= CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES;
    }

  cp_parser_check_decl_spec (type_specifier_seq, start_token->location);
}

/* Parse a parameter-declaration-clause.

   parameter-declaration-clause:
     parameter-declaration-list [opt] ... [opt]
     parameter-declaration-list , ...

   Returns a representation for the parameter declarations.  A return
   value of NULL indicates a parameter-declaration-clause consisting
   only of an ellipsis.
*/ static tree cp_parser_parameter_declaration_clause (cp_parser* parser) { tree parameters; cp_token *token; bool ellipsis_p; bool is_error; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* Check for trivial parameter-declaration-clauses. */ if (token->type == CPP_ELLIPSIS) { /* Consume the `...' token. */ cp_lexer_consume_token (parser->lexer); return NULL_TREE; } else if (token->type == CPP_CLOSE_PAREN) /* There are no parameters. */ { #ifndef NO_IMPLICIT_EXTERN_C if (in_system_header && current_class_type == NULL && current_lang_name == lang_name_c) return NULL_TREE; else #endif return void_list_node; } /* Check for `(void)', too, which is a special case. */ else if (token->keyword == RID_VOID && (cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_CLOSE_PAREN)) { /* Consume the `void' token. */ cp_lexer_consume_token (parser->lexer); /* There are no parameters. */ return void_list_node; } /* Parse the parameter-declaration-list. */ parameters = cp_parser_parameter_declaration_list (parser, &is_error); /* If a parse error occurred while parsing the parameter-declaration-list, then the entire parameter-declaration-clause is erroneous. */ if (is_error) return NULL; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* If it's a `,', the clause should terminate with an ellipsis. */ if (token->type == CPP_COMMA) { /* Consume the `,'. */ cp_lexer_consume_token (parser->lexer); /* Expect an ellipsis. */ ellipsis_p = (cp_parser_require (parser, CPP_ELLIPSIS, RT_ELLIPSIS) != NULL); } /* It might also be `...' if the optional trailing `,' was omitted. */ else if (token->type == CPP_ELLIPSIS) { /* Consume the `...' token. */ cp_lexer_consume_token (parser->lexer); /* And remember that we saw it. */ ellipsis_p = true; } else ellipsis_p = false; /* Finish the parameter list. */ if (!ellipsis_p) parameters = chainon (parameters, void_list_node); return parameters; } /* Parse a parameter-declaration-list. 
parameter-declaration-list:
     parameter-declaration
     parameter-declaration-list , parameter-declaration

   Returns a representation of the parameter-declaration-list, as for
   cp_parser_parameter_declaration_clause.  However, the
   `void_list_node' is never appended to the list.  Upon return,
   *IS_ERROR will be true iff an error occurred.  */

static tree
cp_parser_parameter_declaration_list (cp_parser* parser, bool *is_error)
{
  tree parameters = NULL_TREE;
  tree *tail = &parameters;
  bool saved_in_unbraced_linkage_specification_p;
  int index = 0;

  /* Assume all will go well.  */
  *is_error = false;

  /* The special considerations that apply to a function within an
     unbraced linkage specifications do not apply to the parameters
     to the function.  */
  saved_in_unbraced_linkage_specification_p
    = parser->in_unbraced_linkage_specification_p;
  parser->in_unbraced_linkage_specification_p = false;

  /* Look for more parameters.  */
  while (true)
    {
      cp_parameter_declarator *parameter;
      tree decl = error_mark_node;
      bool parenthesized_p = false;
      /* Parse the parameter.  */
      parameter
	= cp_parser_parameter_declaration (parser,
					   /*template_parm_p=*/false,
					   &parenthesized_p);

      /* We don't know yet if the enclosing context is deprecated, so wait
	 and warn in grokparms if appropriate.  */
      deprecated_state = DEPRECATED_SUPPRESS;

      if (parameter)
	decl = grokdeclarator (parameter->declarator,
			       &parameter->decl_specifiers,
			       PARM,
			       parameter->default_argument != NULL_TREE,
			       &parameter->decl_specifiers.attributes);

      deprecated_state = DEPRECATED_NORMAL;

      /* If a parse error occurred parsing the parameter declaration,
	 then the entire parameter-declaration-list is erroneous.
*/
      if (decl == error_mark_node)
	{
	  *is_error = true;
	  parameters = error_mark_node;
	  break;
	}

      if (parameter->decl_specifiers.attributes)
	cplus_decl_attributes (&decl,
			       parameter->decl_specifiers.attributes,
			       0);
      if (DECL_NAME (decl))
	decl = pushdecl (decl);

      if (decl != error_mark_node)
	{
	  retrofit_lang_decl (decl);
	  /* Parameters are numbered starting at 1.  */
	  DECL_PARM_INDEX (decl) = ++index;
	  DECL_PARM_LEVEL (decl) = function_parm_depth ();
	}

      /* Add the new parameter to the list.  */
      *tail = build_tree_list (parameter->default_argument, decl);
      tail = &TREE_CHAIN (*tail);

      /* Peek at the next token.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN)
	  || cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)
	  /* These are for Objective-C++ */
	  || cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)
	  || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	/* The parameter-declaration-list is complete.  */
	break;
      else if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	{
	  cp_token *token;

	  /* Peek at the next token.  */
	  token = cp_lexer_peek_nth_token (parser->lexer, 2);
	  /* If it's an ellipsis, then the list is complete.  */
	  if (token->type == CPP_ELLIPSIS)
	    break;
	  /* Otherwise, there must be more parameters.  Consume the
	     `,'.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* When parsing something like:

		int i(float f, double d)

	     we can tell after seeing the declaration for "f" that we
	     are not looking at an initialization of a variable "i",
	     but rather at the declaration of a function "i".

	     Due to the fact that the parsing of template arguments
	     (as specified to a template-id) requires backtracking we
	     cannot use this technique when inside a template argument
	     list.  */
	  if (!parser->in_template_argument_list_p
	      && !parser->in_type_id_in_expr_p
	      && cp_parser_uncommitted_to_tentative_parse_p (parser)
	      /* However, a parameter-declaration of the form
		 "float(f)" (which is a valid declaration of a
		 parameter "f") can also be interpreted as an
		 expression (the conversion of "f" to "float").
*/
	      && !parenthesized_p)
	    cp_parser_commit_to_tentative_parse (parser);
	}
      else
	{
	  cp_parser_error (parser, "expected %<,%> or %<...%>");
	  if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
	    cp_parser_skip_to_closing_parenthesis (parser,
						   /*recovering=*/true,
						   /*or_comma=*/false,
						   /*consume_paren=*/false);
	  break;
	}
    }

  parser->in_unbraced_linkage_specification_p
    = saved_in_unbraced_linkage_specification_p;

  return parameters;
}

/* Parse a parameter declaration.

   parameter-declaration:
     decl-specifier-seq ... [opt] declarator
     decl-specifier-seq declarator = assignment-expression
     decl-specifier-seq ... [opt] abstract-declarator [opt]
     decl-specifier-seq abstract-declarator [opt] = assignment-expression

   If TEMPLATE_PARM_P is TRUE, then this parameter-declaration
   declares a template parameter.  (In that case, a non-nested `>'
   token encountered during the parsing of the assignment-expression
   is not interpreted as a greater-than operator.)

   Returns a representation of the parameter, or NULL if an error
   occurs.  If PARENTHESIZED_P is non-NULL, *PARENTHESIZED_P is set to
   true iff the declarator is of the form "(p)".  */

static cp_parameter_declarator *
cp_parser_parameter_declaration (cp_parser *parser,
				 bool template_parm_p,
				 bool *parenthesized_p)
{
  int declares_class_or_enum;
  cp_decl_specifier_seq decl_specifiers;
  cp_declarator *declarator;
  tree default_argument;
  cp_token *token = NULL, *declarator_token_start = NULL;
  const char *saved_message;

  /* In a template parameter, `>' is not an operator.

     [temp.param]

     When parsing a default template-argument for a non-type
     template-parameter, the first non-nested `>' is taken as the end
     of the template parameter-list rather than a greater-than
     operator.  */

  /* Type definitions may not appear in parameter types.  */
  saved_message = parser->type_definition_forbidden_message;
  parser->type_definition_forbidden_message
    = G_("types may not be defined in parameter types");

  /* Parse the declaration-specifiers.
*/
  cp_parser_decl_specifier_seq (parser,
				CP_PARSER_FLAGS_NONE,
				&decl_specifiers,
				&declares_class_or_enum);

  /* Complain about missing 'typename' or other invalid type names.  */
  if (!decl_specifiers.any_type_specifiers_p)
    cp_parser_parse_and_diagnose_invalid_type_name (parser);

  /* If an error occurred, there's no reason to attempt to parse the
     rest of the declaration.  */
  if (cp_parser_error_occurred (parser))
    {
      parser->type_definition_forbidden_message = saved_message;
      return NULL;
    }

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  /* If the next token is a `)', `,', `=', `>', or `...', then there
     is no declarator. However, when variadic templates are enabled,
     there may be a declarator following `...'.  */
  if (token->type == CPP_CLOSE_PAREN
      || token->type == CPP_COMMA
      || token->type == CPP_EQ
      || token->type == CPP_GREATER)
    {
      declarator = NULL;
      if (parenthesized_p)
	*parenthesized_p = false;
    }
  /* Otherwise, there should be a declarator.  */
  else
    {
      bool saved_default_arg_ok_p = parser->default_arg_ok_p;
      parser->default_arg_ok_p = false;

      /* After seeing a decl-specifier-seq, if the next token is not a
	 "(", there is no possibility that the code is a valid
	 expression.  Therefore, if parsing tentatively, we commit at
	 this point.  */
      if (!parser->in_template_argument_list_p
	  /* In an expression context, having seen:

	       (int((char ...

	     we cannot be sure whether we are looking at a
	     function-type (taking a "char" as a parameter) or a cast
	     of some object of type "char" to "int".  */
	  && !parser->in_type_id_in_expr_p
	  && cp_parser_uncommitted_to_tentative_parse_p (parser)
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE)
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_PAREN))
	cp_parser_commit_to_tentative_parse (parser);
      /* Parse the declarator.
*/
      declarator_token_start = token;
      declarator = cp_parser_declarator (parser,
					 CP_PARSER_DECLARATOR_EITHER,
					 /*ctor_dtor_or_conv_p=*/NULL,
					 parenthesized_p,
					 /*member_p=*/false);
      parser->default_arg_ok_p = saved_default_arg_ok_p;
      /* After the declarator, allow more attributes.  */
      decl_specifiers.attributes
	= chainon (decl_specifiers.attributes,
		   cp_parser_attributes_opt (parser));
    }

  /* If the next token is an ellipsis, and we have not seen a
     declarator name, and the type of the declarator contains parameter
     packs but it is not a TYPE_PACK_EXPANSION, then we actually have
     a parameter pack expansion expression. Otherwise, leave the
     ellipsis for a C-style variadic function. */
  token = cp_lexer_peek_token (parser->lexer);
  if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
    {
      tree type = decl_specifiers.type;

      if (type && DECL_P (type))
        type = TREE_TYPE (type);

      if (type
	  && TREE_CODE (type) != TYPE_PACK_EXPANSION
	  && declarator_can_be_parameter_pack (declarator)
          && (!declarator || !declarator->parameter_pack_p)
          && uses_parameter_packs (type))
        {
	  /* Consume the `...'. */
	  cp_lexer_consume_token (parser->lexer);
	  maybe_warn_variadic_templates ();

	  /* Build a pack expansion type */
	  if (declarator)
	    declarator->parameter_pack_p = true;
	  else
	    decl_specifiers.type = make_pack_expansion (type);
	}
    }

  /* The restriction on defining new types applies only to the type
     of the parameter, not to the default argument.  */
  parser->type_definition_forbidden_message = saved_message;

  /* If the next token is `=', then process a default argument.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
    {
      token = cp_lexer_peek_token (parser->lexer);
      /* If we are defining a class, then the tokens that make up the
	 default argument must be saved and processed later.
*/
      if (!template_parm_p && at_class_scope_p ()
	  && TYPE_BEING_DEFINED (current_class_type)
	  && !LAMBDA_TYPE_P (current_class_type))
	default_argument = cp_parser_cache_defarg (parser, /*nsdmi=*/false);
      /* Outside of a class definition, we can just parse the
	 assignment-expression.  */
      else
	default_argument
	  = cp_parser_default_argument (parser, template_parm_p);

      if (!parser->default_arg_ok_p)
	{
	  if (flag_permissive)
	    warning (0, "deprecated use of default argument for parameter of non-function");
	  else
	    {
	      error_at (token->location,
			"default arguments are only "
			"permitted for function parameters");
	      default_argument = NULL_TREE;
	    }
	}
      else if ((declarator && declarator->parameter_pack_p)
	       || (decl_specifiers.type
		   && PACK_EXPANSION_P (decl_specifiers.type)))
	{
	  /* Find the name of the parameter pack.  */
	  cp_declarator *id_declarator = declarator;
	  while (id_declarator && id_declarator->kind != cdk_id)
	    id_declarator = id_declarator->declarator;

	  if (id_declarator && id_declarator->kind == cdk_id)
	    error_at (declarator_token_start->location,
		      template_parm_p
		      ? G_("template parameter pack %qD "
			   "cannot have a default argument")
		      : G_("parameter pack %qD cannot have "
			   "a default argument"),
		      id_declarator->u.id.unqualified_name);
	  else
	    error_at (declarator_token_start->location,
		      template_parm_p
		      ? G_("template parameter pack cannot have "
			   "a default argument")
		      : G_("parameter pack cannot have a "
			   "default argument"));

	  default_argument = NULL_TREE;
	}
    }
  else
    default_argument = NULL_TREE;

  return make_parameter_declarator (&decl_specifiers,
				    declarator,
				    default_argument);
}

/* Parse a default argument and return it.

   TEMPLATE_PARM_P is true if this is a default argument for a
   non-type template parameter.
*/
static tree
cp_parser_default_argument (cp_parser *parser, bool template_parm_p)
{
  tree default_argument = NULL_TREE;
  bool saved_greater_than_is_operator_p;
  bool saved_local_variables_forbidden_p;
  bool non_constant_p, is_direct_init;

  /* Make sure that PARSER->GREATER_THAN_IS_OPERATOR_P is
     set correctly.  */
  saved_greater_than_is_operator_p = parser->greater_than_is_operator_p;
  parser->greater_than_is_operator_p = !template_parm_p;
  /* Local variable names (and the `this' keyword) may not
     appear in a default argument.  */
  saved_local_variables_forbidden_p = parser->local_variables_forbidden_p;
  parser->local_variables_forbidden_p = true;
  /* Parse the assignment-expression.  */
  if (template_parm_p)
    push_deferring_access_checks (dk_no_deferred);
  default_argument
    = cp_parser_initializer (parser, &is_direct_init, &non_constant_p);
  if (BRACE_ENCLOSED_INITIALIZER_P (default_argument))
    maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
  if (template_parm_p)
    pop_deferring_access_checks ();
  /* Restore the saved parser state before returning.  */
  parser->greater_than_is_operator_p = saved_greater_than_is_operator_p;
  parser->local_variables_forbidden_p = saved_local_variables_forbidden_p;

  return default_argument;
}

/* Parse a function-body.

   function-body:
     compound_statement  */

static void
cp_parser_function_body (cp_parser *parser)
{
  cp_parser_compound_statement (parser, NULL, false, true);
}

/* Parse a ctor-initializer-opt followed by a function-body.  Return
   true if a ctor-initializer was present.  */

static bool
cp_parser_ctor_initializer_opt_and_function_body (cp_parser *parser)
{
  tree body, list;
  bool ctor_initializer_p;
  /* Constexpr constructors must have (essentially) empty bodies, so
     remember whether that needs to be verified below.  */
  const bool check_body_p =
     DECL_CONSTRUCTOR_P (current_function_decl)
     && DECL_DECLARED_CONSTEXPR_P (current_function_decl);
  tree last = NULL;

  /* Begin the function body.  */
  body = begin_function_body ();
  /* Parse the optional ctor-initializer.  */
  ctor_initializer_p = cp_parser_ctor_initializer_opt (parser);

  /* If we're parsing a constexpr constructor definition, we need
     to check that the constructor body is indeed empty.
However, before
     we get to cp_parser_function_body lot of junk has been generated,
     so we can't just check that we have an empty block.  Rather we
     take a snapshot of the outermost block, and check whether
     cp_parser_function_body changed its state.  */
  if (check_body_p)
    {
      list = cur_stmt_list;
      if (STATEMENT_LIST_TAIL (list))
	last = STATEMENT_LIST_TAIL (list)->stmt;
    }
  /* Parse the function-body.  */
  cp_parser_function_body (parser);
  if (check_body_p)
    check_constexpr_ctor_body (last, list);
  /* Finish the function body.  */
  finish_function_body (body);

  return ctor_initializer_p;
}

/* Parse an initializer.

   initializer:
     = initializer-clause
     ( expression-list )

   Returns an expression representing the initializer.  If no
   initializer is present, NULL_TREE is returned.

   *IS_DIRECT_INIT is set to FALSE if the `= initializer-clause'
   production is used, and TRUE otherwise.  *IS_DIRECT_INIT is
   set to TRUE if there is no initializer present.  If there is an
   initializer, and it is not a constant-expression, *NON_CONSTANT_P
   is set to true; otherwise it is set to false.  */

static tree
cp_parser_initializer (cp_parser* parser, bool* is_direct_init,
		       bool* non_constant_p)
{
  cp_token *token;
  tree init;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  /* Let our caller know whether or not this initializer was
     parenthesized.  */
  *is_direct_init = (token->type != CPP_EQ);
  /* Assume that the initializer is constant.  */
  *non_constant_p = false;

  if (token->type == CPP_EQ)
    {
      /* Consume the `='.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the initializer-clause.
*/
      init = cp_parser_initializer_clause (parser, non_constant_p);
    }
  else if (token->type == CPP_OPEN_PAREN)
    {
      VEC(tree,gc) *vec;
      vec = cp_parser_parenthesized_expression_list (parser, non_attr,
						     /*cast_p=*/false,
						     /*allow_expansion_p=*/true,
						     non_constant_p);
      /* A NULL vector indicates a parse error; propagate it.  */
      if (vec == NULL)
	return error_mark_node;
      init = build_tree_list_vec (vec);
      release_tree_vector (vec);
    }
  else if (token->type == CPP_OPEN_BRACE)
    {
      maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
      init = cp_parser_braced_list (parser, non_constant_p);
      CONSTRUCTOR_IS_DIRECT_INIT (init) = 1;
    }
  else
    {
      /* Anything else is an error.  */
      cp_parser_error (parser, "expected initializer");
      init = error_mark_node;
    }

  return init;
}

/* Parse an initializer-clause.

   initializer-clause:
     assignment-expression
     braced-init-list

   Returns an expression representing the initializer.

   If the `assignment-expression' production is used the value
   returned is simply a representation for the expression.

   Otherwise, calls cp_parser_braced_list.  */

static tree
cp_parser_initializer_clause (cp_parser* parser, bool* non_constant_p)
{
  tree initializer;

  /* Assume the expression is constant.  */
  *non_constant_p = false;

  /* If it is not a `{', then we are looking at an
     assignment-expression.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE))
    {
      initializer
	= cp_parser_constant_expression (parser,
					/*allow_non_constant_p=*/true,
					non_constant_p);
    }
  else
    initializer = cp_parser_braced_list (parser, non_constant_p);

  return initializer;
}

/* Parse a brace-enclosed initializer list.

   braced-init-list:
     { initializer-list , [opt] }
     { }

   Returns a CONSTRUCTOR.  The CONSTRUCTOR_ELTS will be
   the elements of the initializer-list (or NULL, if the last
   production is used).  The TYPE for the CONSTRUCTOR will be
   NULL_TREE.  There is no way to detect whether or not the optional
   trailing `,' was provided.  NON_CONSTANT_P is as for
   cp_parser_initializer.
*/

static tree
cp_parser_braced_list (cp_parser* parser, bool* non_constant_p)
{
  tree initializer;

  /* Consume the `{' token.  */
  cp_lexer_consume_token (parser->lexer);
  /* Create a CONSTRUCTOR to represent the braced-initializer.  */
  initializer = make_node (CONSTRUCTOR);
  /* If it's not a `}', then there is a non-trivial initializer.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_BRACE))
    {
      /* Parse the initializer list.  */
      CONSTRUCTOR_ELTS (initializer)
	= cp_parser_initializer_list (parser, non_constant_p);
      /* A trailing `,' token is allowed.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	cp_lexer_consume_token (parser->lexer);
    }
  /* Now, there should be a trailing `}'.  */
  cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
  /* Mark this CONSTRUCTOR as coming from a braced-init-list.  */
  TREE_TYPE (initializer) = init_list_type_node;
  return initializer;
}

/* Parse an initializer-list.

   initializer-list:
     initializer-clause ... [opt]
     initializer-list , initializer-clause ... [opt]

   GNU Extension:

   initializer-list:
     designation initializer-clause ...[opt]
     initializer-list , designation initializer-clause ...[opt]

   designation:
     . identifier =
     identifier :
     [ constant-expression ] =

   Returns a VEC of constructor_elt.  The VALUE of each elt is an expression
   for the initializer.  If the INDEX of the elt is non-NULL, it is the
   IDENTIFIER_NODE naming the field to initialize.  NON_CONSTANT_P is
   as for cp_parser_initializer.  */

static VEC(constructor_elt,gc) *
cp_parser_initializer_list (cp_parser* parser, bool* non_constant_p)
{
  VEC(constructor_elt,gc) *v = NULL;

  /* Assume all of the expressions are constant.  */
  *non_constant_p = false;

  /* Parse the rest of the list.  */
  while (true)
    {
      cp_token *token;
      tree designator;
      tree initializer;
      bool clause_non_constant_p;

      /* If the next token is an identifier and the following one is a
	 colon, we are looking at the GNU designated-initializer
	 syntax.
*/
      if (cp_parser_allow_gnu_extensions_p (parser)
	  && cp_lexer_next_token_is (parser->lexer, CPP_NAME)
	  && cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_COLON)
	{
	  /* Warn the user that they are using an extension.  */
	  pedwarn (input_location, OPT_pedantic,
		   "ISO C++ does not allow designated initializers");
	  /* Consume the identifier.  */
	  designator = cp_lexer_consume_token (parser->lexer)->u.value;
	  /* Consume the `:'.  */
	  cp_lexer_consume_token (parser->lexer);
	}
      /* Also handle the C99 syntax, '. id ='.  */
      else if (cp_parser_allow_gnu_extensions_p (parser)
	       && cp_lexer_next_token_is (parser->lexer, CPP_DOT)
	       && cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_NAME
	       && cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_EQ)
	{
	  /* Warn the user that they are using an extension.  */
	  pedwarn (input_location, OPT_pedantic,
		   "ISO C++ does not allow C99 designated initializers");
	  /* Consume the `.'.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Consume the identifier.  */
	  designator = cp_lexer_consume_token (parser->lexer)->u.value;
	  /* Consume the `='.  */
	  cp_lexer_consume_token (parser->lexer);
	}
      /* Also handle C99 array designators, '[ const ] ='.  */
      else if (cp_parser_allow_gnu_extensions_p (parser)
	       && !c_dialect_objc ()
	       && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
	{
	  /* In C++11, [ could start a lambda-introducer.  Hence the
	     tentative parse: if the `[ const ] =' shape does not pan
	     out, back off and treat it as a plain initializer.  */
	  cp_parser_parse_tentatively (parser);
	  cp_lexer_consume_token (parser->lexer);
	  designator = cp_parser_constant_expression (parser, false, NULL);
	  cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
	  cp_parser_require (parser, CPP_EQ, RT_EQ);
	  if (!cp_parser_parse_definitely (parser))
	    designator = NULL_TREE;
	}
      else
	designator = NULL_TREE;

      /* Parse the initializer.  */
      initializer = cp_parser_initializer_clause (parser,
						  &clause_non_constant_p);
      /* If any clause is non-constant, so is the entire initializer.  */
      if (clause_non_constant_p)
	*non_constant_p = true;

      /* If we have an ellipsis, this is an initializer pack
	 expansion.
*/
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
        {
          /* Consume the `...'.  */
          cp_lexer_consume_token (parser->lexer);

          /* Turn the initializer into an initializer expansion.  */
          initializer = make_pack_expansion (initializer);
        }

      /* Add it to the vector.  */
      CONSTRUCTOR_APPEND_ELT (v, designator, initializer);

      /* If the next token is not a comma, we have reached the end of
	 the list.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;

      /* Peek at the next token.  */
      token = cp_lexer_peek_nth_token (parser->lexer, 2);
      /* If the next token is a `}', then we're still done.  An
	 initializer-clause can have a trailing `,' after the
	 initializer-list and before the closing `}'.  */
      if (token->type == CPP_CLOSE_BRACE)
	break;

      /* Consume the `,' token.  */
      cp_lexer_consume_token (parser->lexer);
    }

  return v;
}

/* Classes [gram.class] */

/* Parse a class-name.

   class-name:
     identifier
     template-id

   TYPENAME_KEYWORD_P is true iff the `typename' keyword has been used
   to indicate that names looked up in dependent types should be
   assumed to be types.  TEMPLATE_KEYWORD_P is true iff the `template'
   keyword has been used to indicate that the name that appears next
   is a template.  TAG_TYPE indicates the explicit tag given before
   the type name, if any.  If CHECK_DEPENDENCY_P is FALSE, names are
   looked up in dependent scopes.  If CLASS_HEAD_P is TRUE, this class
   is the class being defined in a class-head.

   Returns the TYPE_DECL representing the class.  */

static tree
cp_parser_class_name (cp_parser *parser,
		      bool typename_keyword_p,
		      bool template_keyword_p,
		      enum tag_types tag_type,
		      bool check_dependency_p,
		      bool class_head_p,
		      bool is_declaration)
{
  tree decl;
  tree scope;
  bool typename_p;
  cp_token *token;
  tree identifier = NULL_TREE;

  /* All class-names start with an identifier.
*/
  token = cp_lexer_peek_token (parser->lexer);
  if (token->type != CPP_NAME
      && token->type != CPP_TEMPLATE_ID)
    {
      cp_parser_error (parser, "expected class-name");
      return error_mark_node;
    }

  /* PARSER->SCOPE can be cleared when parsing the
     template-arguments to a template-id, so we save it here.  */
  scope = parser->scope;
  if (scope == error_mark_node)
    return error_mark_node;

  /* Any name names a type if we're following the `typename' keyword
     in a qualified name where the enclosing scope is type-dependent.  */
  typename_p = (typename_keyword_p && scope && TYPE_P (scope)
		&& dependent_type_p (scope));
  /* Handle the common case (an identifier, but not a template-id)
     efficiently.  */
  if (token->type == CPP_NAME
      && !cp_parser_nth_token_starts_template_argument_list_p (parser, 2))
    {
      cp_token *identifier_token;
      bool ambiguous_p;

      /* Look for the identifier.  */
      identifier_token = cp_lexer_peek_token (parser->lexer);
      ambiguous_p = identifier_token->ambiguous_p;
      identifier = cp_parser_identifier (parser);
      /* If the next token isn't an identifier, we are certainly not
	 looking at a class-name.  */
      if (identifier == error_mark_node)
	decl = error_mark_node;
      /* If we know this is a type-name, there's no need to look it
	 up.  */
      else if (typename_p)
	decl = identifier;
      else
	{
	  tree ambiguous_decls;
	  /* If we already know that this lookup is ambiguous, then
	     we've already issued an error message; there's no reason
	     to check again.  */
	  if (ambiguous_p)
	    {
	      cp_parser_simulate_error (parser);
	      return error_mark_node;
	    }
	  /* If the next token is a `::', then the name must be a type
	     name.

	     [basic.lookup.qual]

	     During the lookup for a name preceding the :: scope
	     resolution operator, object, function, and enumerator
	     names are ignored.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
	    tag_type = typename_type;
	  /* Look up the name.
*/
	  decl = cp_parser_lookup_name (parser, identifier,
					tag_type,
					/*is_template=*/false,
					/*is_namespace=*/false,
					check_dependency_p,
					&ambiguous_decls,
					identifier_token->location);
	  if (ambiguous_decls)
	    {
	      /* Only report the ambiguity via simulated error when
		 parsing tentatively; a hard error was produced by the
		 lookup otherwise.  */
	      if (cp_parser_parsing_tentatively (parser))
		cp_parser_simulate_error (parser);
	      return error_mark_node;
	    }
	}
    }
  else
    {
      /* Try a template-id.  */
      decl = cp_parser_template_id (parser, template_keyword_p,
				    check_dependency_p,
				    is_declaration);
      if (decl == error_mark_node)
	return error_mark_node;
    }

  decl = cp_parser_maybe_treat_template_as_class (decl, class_head_p);

  /* If this is a typename, create a TYPENAME_TYPE.  */
  if (typename_p && decl != error_mark_node)
    {
      decl = make_typename_type (scope, decl, typename_type,
				 /*complain=*/tf_error);
      if (decl != error_mark_node)
	decl = TYPE_NAME (decl);
    }

  decl = strip_using_decl (decl);

  /* Check to see that it is really the name of a class.  */
  if (TREE_CODE (decl) == TEMPLATE_ID_EXPR
      && TREE_CODE (TREE_OPERAND (decl, 0)) == IDENTIFIER_NODE
      && cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
    /* Situations like this:

	 template <typename T> struct A {
	   typename T::template X<int>::I i;
	 };

       are problematic.  Is `T::template X<int>' a class-name?  The
       standard does not seem to be definitive, but there is no other
       valid interpretation of the following `::'.  Therefore, those
       names are considered class-names.  */
    {
      decl = make_typename_type (scope, decl, tag_type, tf_error);
      if (decl != error_mark_node)
	decl = TYPE_NAME (decl);
    }
  else if (TREE_CODE (decl) != TYPE_DECL
	   || TREE_TYPE (decl) == error_mark_node
	   || !MAYBE_CLASS_TYPE_P (TREE_TYPE (decl))
	   /* In Objective-C 2.0, a classname followed by '.' starts a
	      dot-syntax expression, and it's not a type-name.
*/
	   || (c_dialect_objc ()
	       && cp_lexer_peek_token (parser->lexer)->type == CPP_DOT
	       && objc_is_class_name (decl)))
    decl = error_mark_node;

  if (decl == error_mark_node)
    cp_parser_error (parser, "expected class-name");
  else if (identifier && !parser->scope)
    maybe_note_name_used_in_class (identifier, decl);

  return decl;
}

/* Parse a class-specifier.

   class-specifier:
     class-head { member-specification [opt] }

   Returns the TREE_TYPE representing the class.  */

static tree
cp_parser_class_specifier_1 (cp_parser* parser)
{
  tree type;
  tree attributes = NULL_TREE;
  bool nested_name_specifier_p;
  unsigned saved_num_template_parameter_lists;
  bool saved_in_function_body;
  unsigned char in_statement;
  bool in_switch_statement_p;
  bool saved_in_unbraced_linkage_specification_p;
  tree old_scope = NULL_TREE;
  tree scope = NULL_TREE;
  cp_token *closing_brace;

  push_deferring_access_checks (dk_no_deferred);

  /* Parse the class-head.  */
  type = cp_parser_class_head (parser,
			       &nested_name_specifier_p);
  /* If the class-head was a semantic disaster, skip the entire body
     of the class.  */
  if (!type)
    {
      cp_parser_skip_to_end_of_block_or_statement (parser);
      pop_deferring_access_checks ();
      return error_mark_node;
    }

  /* Look for the `{'.  */
  if (!cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE))
    {
      pop_deferring_access_checks ();
      return error_mark_node;
    }

  /* Issue an error message if type-definitions are forbidden here.  */
  cp_parser_check_type_definition (parser);
  /* Remember that we are defining one more class.  */
  ++parser->num_classes_being_defined;
  /* Inside the class, surrounding template-parameter-lists do not
     apply.  */
  saved_num_template_parameter_lists
    = parser->num_template_parameter_lists;
  parser->num_template_parameter_lists = 0;
  /* We are not in a function body.  */
  saved_in_function_body = parser->in_function_body;
  parser->in_function_body = false;
  /* Or in a loop.  */
  in_statement = parser->in_statement;
  parser->in_statement = 0;
  /* Or in a switch.
*/ in_switch_statement_p = parser->in_switch_statement_p; parser->in_switch_statement_p = false; /* We are not immediately inside an extern "lang" block. */ saved_in_unbraced_linkage_specification_p = parser->in_unbraced_linkage_specification_p; parser->in_unbraced_linkage_specification_p = false; /* Start the class. */ if (nested_name_specifier_p) { scope = CP_DECL_CONTEXT (TYPE_MAIN_DECL (type)); old_scope = push_inner_scope (scope); } type = begin_class_definition (type); if (type == error_mark_node) /* If the type is erroneous, skip the entire body of the class. */ cp_parser_skip_to_closing_brace (parser); else /* Parse the member-specification. */ cp_parser_member_specification_opt (parser); /* Look for the trailing `}'. */ closing_brace = cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE); /* Look for trailing attributes to apply to this class. */ if (cp_parser_allow_gnu_extensions_p (parser)) attributes = cp_parser_attributes_opt (parser); if (type != error_mark_node) type = finish_struct (type, attributes); if (nested_name_specifier_p) pop_inner_scope (old_scope, scope); /* We've finished a type definition. Check for the common syntax error of forgetting a semicolon after the definition. We need to be careful, as we can't just check for not-a-semicolon and be done with it; the user might have typed: class X { } c = ...; class X { } *p = ...; and so forth. Instead, enumerate all the possible tokens that might follow this production; if we don't see one of them, then complain and silently insert the semicolon. */ { cp_token *token = cp_lexer_peek_token (parser->lexer); bool want_semicolon = true; switch (token->type) { case CPP_NAME: case CPP_SEMICOLON: case CPP_MULT: case CPP_AND: case CPP_OPEN_PAREN: case CPP_CLOSE_PAREN: case CPP_COMMA: want_semicolon = false; break; /* While it's legal for type qualifiers and storage class specifiers to follow type definitions in the grammar, only compiler testsuites contain code like that. 
Assume that if we see such code, then what we're really seeing is a case like: class X { } const <type> var = ...; or class Y { } static <type> func (...) ... i.e. the qualifier or specifier applies to the next declaration. To do so, however, we need to look ahead one more token to see if *that* token is a type specifier. This code could be improved to handle: class Z { } static const <type> var = ...; */ case CPP_KEYWORD: if (keyword_is_decl_specifier (token->keyword)) { cp_token *lookahead = cp_lexer_peek_nth_token (parser->lexer, 2); /* Handling user-defined types here would be nice, but very tricky. */ want_semicolon = (lookahead->type == CPP_KEYWORD && keyword_begins_type_specifier (lookahead->keyword)); } break; default: break; } /* If we don't have a type, then something is very wrong and we shouldn't try to do anything clever. Likewise for not seeing the closing brace. */ if (closing_brace && TYPE_P (type) && want_semicolon) { cp_token_position prev = cp_lexer_previous_token_position (parser->lexer); cp_token *prev_token = cp_lexer_token_at (parser->lexer, prev); location_t loc = prev_token->location; if (CLASSTYPE_DECLARED_CLASS (type)) error_at (loc, "expected %<;%> after class definition"); else if (TREE_CODE (type) == RECORD_TYPE) error_at (loc, "expected %<;%> after struct definition"); else if (TREE_CODE (type) == UNION_TYPE) error_at (loc, "expected %<;%> after union definition"); else gcc_unreachable (); /* Unget one token and smash it to look as though we encountered a semicolon in the input stream. */ cp_lexer_set_token_position (parser->lexer, prev); token = cp_lexer_peek_token (parser->lexer); token->type = CPP_SEMICOLON; token->keyword = RID_MAX; } } /* If this class is not itself within the scope of another class, then we need to parse the bodies of all of the queued function definitions. Note that the queued functions defined in a class are not always processed immediately following the class-specifier for that class. 
Consider: struct A { struct B { void f() { sizeof (A); } }; }; If `f' were processed before the processing of `A' were completed, there would be no way to compute the size of `A'. Note that the nesting we are interested in here is lexical -- not the semantic nesting given by TYPE_CONTEXT. In particular, for: struct A { struct B; }; struct A::B { void f() { } }; there is no need to delay the parsing of `A::B::f'. */ if (--parser->num_classes_being_defined == 0) { tree decl; tree class_type = NULL_TREE; tree pushed_scope = NULL_TREE; unsigned ix; cp_default_arg_entry *e; tree save_ccp, save_ccr; /* In a first pass, parse default arguments to the functions. Then, in a second pass, parse the bodies of the functions. This two-phased approach handles cases like: struct S { void f() { g(); } void g(int i = 3); }; */ FOR_EACH_VEC_ELT (cp_default_arg_entry, unparsed_funs_with_default_args, ix, e) { decl = e->decl; /* If there are default arguments that have not yet been processed, take care of them now. */ if (class_type != e->class_type) { if (pushed_scope) pop_scope (pushed_scope); class_type = e->class_type; pushed_scope = push_scope (class_type); } /* Make sure that any template parameters are in scope. */ maybe_begin_member_template_processing (decl); /* Parse the default argument expressions. */ cp_parser_late_parsing_default_args (parser, decl); /* Remove any template parameters from the symbol table. */ maybe_end_member_template_processing (); } VEC_truncate (cp_default_arg_entry, unparsed_funs_with_default_args, 0); /* Now parse any NSDMIs. 
*/ save_ccp = current_class_ptr; save_ccr = current_class_ref; FOR_EACH_VEC_ELT (tree, unparsed_nsdmis, ix, decl) { if (class_type != DECL_CONTEXT (decl)) { if (pushed_scope) pop_scope (pushed_scope); class_type = DECL_CONTEXT (decl); pushed_scope = push_scope (class_type); } inject_this_parameter (class_type, TYPE_UNQUALIFIED); cp_parser_late_parsing_nsdmi (parser, decl); } VEC_truncate (tree, unparsed_nsdmis, 0); current_class_ptr = save_ccp; current_class_ref = save_ccr; if (pushed_scope) pop_scope (pushed_scope); /* Now parse the body of the functions. */ FOR_EACH_VEC_ELT (tree, unparsed_funs_with_definitions, ix, decl) cp_parser_late_parsing_for_member (parser, decl); VEC_truncate (tree, unparsed_funs_with_definitions, 0); } /* Put back any saved access checks. */ pop_deferring_access_checks (); /* Restore saved state. */ parser->in_switch_statement_p = in_switch_statement_p; parser->in_statement = in_statement; parser->in_function_body = saved_in_function_body; parser->num_template_parameter_lists = saved_num_template_parameter_lists; parser->in_unbraced_linkage_specification_p = saved_in_unbraced_linkage_specification_p; return type; } static tree cp_parser_class_specifier (cp_parser* parser) { tree ret; timevar_push (TV_PARSE_STRUCT); ret = cp_parser_class_specifier_1 (parser); timevar_pop (TV_PARSE_STRUCT); return ret; } /* Parse a class-head. class-head: class-key identifier [opt] base-clause [opt] class-key nested-name-specifier identifier class-virt-specifier [opt] base-clause [opt] class-key nested-name-specifier [opt] template-id base-clause [opt] class-virt-specifier: final GNU Extensions: class-key attributes identifier [opt] base-clause [opt] class-key attributes nested-name-specifier identifier base-clause [opt] class-key attributes nested-name-specifier [opt] template-id base-clause [opt] Upon return BASES is initialized to the list of base classes (or NULL, if there are none) in the same form returned by cp_parser_base_clause. 
   Returns the TYPE of the indicated class.  Sets
   *NESTED_NAME_SPECIFIER_P to TRUE iff one of the productions
   involving a nested-name-specifier was used, and FALSE otherwise.

   Returns error_mark_node if this is not a class-head.

   Returns NULL_TREE if the class-head is syntactically valid, but
   semantically invalid in a way that means we should skip the entire
   body of the class.  */

static tree
cp_parser_class_head (cp_parser* parser,
		      bool* nested_name_specifier_p)
{
  tree nested_name_specifier;
  enum tag_types class_key;
  tree id = NULL_TREE;
  tree type = NULL_TREE;
  tree attributes;
  tree bases;
  cp_virt_specifiers virt_specifiers = VIRT_SPEC_UNSPECIFIED;
  bool template_id_p = false;
  bool qualified_p = false;
  bool invalid_nested_name_p = false;
  bool invalid_explicit_specialization_p = false;
  bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;
  tree pushed_scope = NULL_TREE;
  unsigned num_templates;
  cp_token *type_start_token = NULL, *nested_name_specifier_token_start = NULL;
  /* Assume no nested-name-specifier will be present.  */
  *nested_name_specifier_p = false;
  /* Assume no template parameter lists will be used in defining the
     type.  */
  num_templates = 0;
  parser->colon_corrects_to_scope_p = false;

  /* Look for the class-key.  */
  class_key = cp_parser_class_key (parser);
  if (class_key == none_type)
    return error_mark_node;

  /* Parse the attributes.  */
  attributes = cp_parser_attributes_opt (parser);

  /* If the next token is `::', that is invalid -- but sometimes
     people do try to write:

       struct ::S {};

     Handle this gracefully by accepting the extra qualifier, and then
     issuing an error about it later if this really is a class-head.
     If it turns out just to be an elaborated type specifier, remain
     silent.  */
  if (cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false))
    qualified_p = true;

  push_deferring_access_checks (dk_no_check);

  /* Determine the name of the class.  Begin by looking for an
     optional nested-name-specifier.  */
  nested_name_specifier_token_start = cp_lexer_peek_token (parser->lexer);
  nested_name_specifier
    = cp_parser_nested_name_specifier_opt (parser,
					   /*typename_keyword_p=*/false,
					   /*check_dependency_p=*/false,
					   /*type_p=*/false,
					   /*is_declaration=*/false);
  /* If there was a nested-name-specifier, then there *must* be an
     identifier.  */
  if (nested_name_specifier)
    {
      type_start_token = cp_lexer_peek_token (parser->lexer);
      /* Although the grammar says `identifier', it really means
	 `class-name' or `template-name'.  You are only allowed to
	 define a class that has already been declared with this
	 syntax.

	 The proposed resolution for Core Issue 180 says that wherever
	 you see `class T::X' you should treat `X' as a type-name.

	 It is OK to define an inaccessible class; for example:

	   class A { class B; };
	   class A::B {};

	 We do not know if we will see a class-name, or a
	 template-name.  We look for a class-name first, in case the
	 class-name is a template-id; if we looked for the
	 template-name first we would stop after the template-name.  */
      cp_parser_parse_tentatively (parser);
      type = cp_parser_class_name (parser,
				   /*typename_keyword_p=*/false,
				   /*template_keyword_p=*/false,
				   class_type,
				   /*check_dependency_p=*/false,
				   /*class_head_p=*/true,
				   /*is_declaration=*/false);
      /* If that didn't work, ignore the nested-name-specifier.  */
      if (!cp_parser_parse_definitely (parser))
	{
	  invalid_nested_name_p = true;
	  type_start_token = cp_lexer_peek_token (parser->lexer);
	  id = cp_parser_identifier (parser);
	  if (id == error_mark_node)
	    id = NULL_TREE;
	}
      /* If we could not find a corresponding TYPE, treat this
	 declaration like an unqualified declaration.  */
      if (type == error_mark_node)
	nested_name_specifier = NULL_TREE;
      /* Otherwise, count the number of templates used in TYPE and its
	 containing scopes.  */
      else
	{
	  tree scope;

	  for (scope = TREE_TYPE (type);
	       scope && TREE_CODE (scope) != NAMESPACE_DECL;
	       scope = (TYPE_P (scope)
			? TYPE_CONTEXT (scope)
			: DECL_CONTEXT (scope)))
	    if (TYPE_P (scope)
		&& CLASS_TYPE_P (scope)
		&& CLASSTYPE_TEMPLATE_INFO (scope)
		&& PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (scope))
		&& !CLASSTYPE_TEMPLATE_SPECIALIZATION (scope))
	      ++num_templates;
	}
    }
  /* Otherwise, the identifier is optional.  */
  else
    {
      /* We don't know whether what comes next is a template-id,
	 an identifier, or nothing at all.  */
      cp_parser_parse_tentatively (parser);
      /* Check for a template-id.  */
      type_start_token = cp_lexer_peek_token (parser->lexer);
      id = cp_parser_template_id (parser,
				  /*template_keyword_p=*/false,
				  /*check_dependency_p=*/true,
				  /*is_declaration=*/true);
      /* If that didn't work, it could still be an identifier.  */
      if (!cp_parser_parse_definitely (parser))
	{
	  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
	    {
	      type_start_token = cp_lexer_peek_token (parser->lexer);
	      id = cp_parser_identifier (parser);
	    }
	  else
	    id = NULL_TREE;
	}
      else
	{
	  template_id_p = true;
	  ++num_templates;
	}
    }

  pop_deferring_access_checks ();

  if (id)
    {
      cp_parser_check_for_invalid_template_id (parser, id,
					       type_start_token->location);
    }
  virt_specifiers = cp_parser_virt_specifier_seq_opt (parser);

  /* If it's not a `:' or a `{' then we can't really be looking at a
     class-head, since a class-head only appears as part of a
     class-specifier.  We have to detect this situation before calling
     xref_tag, since that has irreversible side-effects.  */
  if (!cp_parser_next_token_starts_class_definition_p (parser))
    {
      cp_parser_error (parser, "expected %<{%> or %<:%>");
      type = error_mark_node;
      goto out;
    }

  /* At this point, we're going ahead with the class-specifier, even
     if some other problem occurs.  */
  cp_parser_commit_to_tentative_parse (parser);
  if (virt_specifiers & VIRT_SPEC_OVERRIDE)
    {
      cp_parser_error (parser,
		       "cannot specify %<override%> for a class");
      type = error_mark_node;
      goto out;
    }
  /* Issue the error about the overly-qualified name now.  */
  if (qualified_p)
    {
      cp_parser_error (parser,
		       "global qualification of class name is invalid");
      type = error_mark_node;
      goto out;
    }
  else if (invalid_nested_name_p)
    {
      cp_parser_error (parser,
		       "qualified name does not name a class");
      type = error_mark_node;
      goto out;
    }
  else if (nested_name_specifier)
    {
      tree scope;

      /* Reject typedef-names in class heads.  */
      if (!DECL_IMPLICIT_TYPEDEF_P (type))
	{
	  error_at (type_start_token->location,
		    "invalid class name in declaration of %qD",
		    type);
	  type = NULL_TREE;
	  goto done;
	}

      /* Figure out in what scope the declaration is being placed.  */
      scope = current_scope ();
      /* If that scope does not contain the scope in which the
	 class was originally declared, the program is invalid.  */
      if (scope && !is_ancestor (scope, nested_name_specifier))
	{
	  if (at_namespace_scope_p ())
	    error_at (type_start_token->location,
		      "declaration of %qD in namespace %qD which does not "
		      "enclose %qD",
		      type, scope, nested_name_specifier);
	  else
	    error_at (type_start_token->location,
		      "declaration of %qD in %qD which does not enclose %qD",
		      type, scope, nested_name_specifier);
	  type = NULL_TREE;
	  goto done;
	}
      /* [dcl.meaning]

	 A declarator-id shall not be qualified except for the
	 definition of a ... nested class outside of its class
	 ... [or] the definition or explicit instantiation of a
	 class member of a namespace outside of its namespace.  */
      if (scope == nested_name_specifier)
	{
	  permerror (nested_name_specifier_token_start->location,
		     "extra qualification not allowed");
	  nested_name_specifier = NULL_TREE;
	  num_templates = 0;
	}
    }
  /* An explicit-specialization must be preceded by "template <>".  If
     it is not, try to recover gracefully.  */
  if (at_namespace_scope_p ()
      && parser->num_template_parameter_lists == 0
      && template_id_p)
    {
      error_at (type_start_token->location,
		"an explicit specialization must be preceded by "
		"%<template <>%>");
      invalid_explicit_specialization_p = true;
      /* Take the same action that would have been taken by
	 cp_parser_explicit_specialization.  */
      ++parser->num_template_parameter_lists;
      begin_specialization ();
    }
  /* There must be no "return" statements between this point and the
     end of this function; set "type "to the correct return value and
     use "goto done;" to return.  The DONE label performs the cleanup
     (popping scopes, ending any specialization started above) and
     falls through into OUT, which only restores
     colon_corrects_to_scope_p.  */
  /* Make sure that the right number of template parameters were
     present.  */
  if (!cp_parser_check_template_parameters (parser, num_templates,
					    type_start_token->location,
					    /*declarator=*/NULL))
    {
      /* If something went wrong, there is no point in even trying to
	 process the class-definition.  */
      type = NULL_TREE;
      goto done;
    }

  /* Look up the type.  */
  if (template_id_p)
    {
      if (TREE_CODE (id) == TEMPLATE_ID_EXPR
	  && (DECL_FUNCTION_TEMPLATE_P (TREE_OPERAND (id, 0))
	      || TREE_CODE (TREE_OPERAND (id, 0)) == OVERLOAD))
	{
	  error_at (type_start_token->location,
		    "function template %qD redeclared as a class template",
		    id);
	  type = error_mark_node;
	}
      else
	{
	  type = TREE_TYPE (id);
	  type = maybe_process_partial_specialization (type);
	}
      if (nested_name_specifier)
	pushed_scope = push_scope (nested_name_specifier);
    }
  else if (nested_name_specifier)
    {
      tree class_type;

      /* Given:

	    template <typename T> struct S { struct T };
	    template <typename T> struct S<T>::T { };

	 we will get a TYPENAME_TYPE when processing the definition of
	 `S::T'.  We need to resolve it to the actual type before we
	 try to define it.  */
      if (TREE_CODE (TREE_TYPE (type)) == TYPENAME_TYPE)
	{
	  class_type = resolve_typename_type (TREE_TYPE (type),
					      /*only_current_p=*/false);
	  if (TREE_CODE (class_type) != TYPENAME_TYPE)
	    type = TYPE_NAME (class_type);
	  else
	    {
	      cp_parser_error (parser, "could not resolve typename type");
	      type = error_mark_node;
	    }
	}

      if (maybe_process_partial_specialization (TREE_TYPE (type))
	  == error_mark_node)
	{
	  type = NULL_TREE;
	  goto done;
	}

      class_type = current_class_type;
      /* Enter the scope indicated by the nested-name-specifier.  */
      pushed_scope = push_scope (nested_name_specifier);
      /* Get the canonical version of this type.  */
      type = TYPE_MAIN_DECL (TREE_TYPE (type));
      if (PROCESSING_REAL_TEMPLATE_DECL_P ()
	  && !CLASSTYPE_TEMPLATE_SPECIALIZATION (TREE_TYPE (type)))
	{
	  type = push_template_decl (type);
	  if (type == error_mark_node)
	    {
	      type = NULL_TREE;
	      goto done;
	    }
	}

      type = TREE_TYPE (type);
      *nested_name_specifier_p = true;
    }
  else      /* The name is not a nested name.  */
    {
      /* If the class was unnamed, create a dummy name.  */
      if (!id)
	id = make_anon_name ();
      type = xref_tag (class_key, id, /*tag_scope=*/ts_current,
		       parser->num_template_parameter_lists);
    }

  /* Indicate whether this class was declared as a `class' or as a
     `struct'.  */
  if (TREE_CODE (type) == RECORD_TYPE)
    CLASSTYPE_DECLARED_CLASS (type) = (class_key == class_type);
  cp_parser_check_class_key (class_key, type);

  /* If this type was already complete, and we see another definition,
     that's an error.  */
  if (type != error_mark_node && COMPLETE_TYPE_P (type))
    {
      error_at (type_start_token->location, "redefinition of %q#T",
		type);
      error_at (type_start_token->location, "previous definition of %q+#T",
		type);
      type = NULL_TREE;
      goto done;
    }
  else if (type == error_mark_node)
    type = NULL_TREE;

  if (type)
    {
      /* Apply attributes now, before any use of the class as a template
	 argument in its base list.  */
      cplus_decl_attributes (&type, attributes, (int)ATTR_FLAG_TYPE_IN_PLACE);
      fixup_attribute_variants (type);
    }

  /* We will have entered the scope containing the class; the names of
     base classes should be looked up in that context.  For example:

       struct A { struct B {}; struct C; };
       struct A::C : B {};

     is valid.  */

  /* Get the list of base-classes, if there is one.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
    bases = cp_parser_base_clause (parser);
  else
    bases = NULL_TREE;

  /* If we're really defining a class, process the base classes.
     If they're invalid, fail.  */
  if (type && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)
      && !xref_basetypes (type, bases))
    type = NULL_TREE;

 done:
  /* Leave the scope given by the nested-name-specifier.  We will
     enter the class scope itself while processing the members.  */
  if (pushed_scope)
    pop_scope (pushed_scope);

  if (invalid_explicit_specialization_p)
    {
      end_specialization ();
      --parser->num_template_parameter_lists;
    }

  if (type)
    DECL_SOURCE_LOCATION (TYPE_NAME (type)) = type_start_token->location;
  if (type && (virt_specifiers & VIRT_SPEC_FINAL))
    CLASSTYPE_FINAL (type) = 1;
 out:
  parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
  return type;
}

/* Parse a class-key.

   class-key:
     class
     struct
     union

   Returns the kind of class-key specified, or none_type to indicate
   error.  */

static enum tag_types
cp_parser_class_key (cp_parser* parser)
{
  cp_token *token;
  enum tag_types tag_type;

  /* Look for the class-key.  */
  token = cp_parser_require (parser, CPP_KEYWORD, RT_CLASS_KEY);
  if (!token)
    return none_type;

  /* Check to see if the TOKEN is a class-key.  */
  tag_type = cp_parser_token_is_class_key (token);
  if (!tag_type)
    cp_parser_error (parser, "expected class-key");
  return tag_type;
}

/* Parse an (optional) member-specification.

   member-specification:
     member-declaration member-specification [opt]
     access-specifier : member-specification [opt]  */

static void
cp_parser_member_specification_opt (cp_parser* parser)
{
  while (true)
    {
      cp_token *token;
      enum rid keyword;

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* If it's a `}', or EOF then we've seen all the members.  */
      if (token->type == CPP_CLOSE_BRACE
	  || token->type == CPP_EOF
	  || token->type == CPP_PRAGMA_EOL)
	break;

      /* See if this token is a keyword.  */
      keyword = token->keyword;
      switch (keyword)
	{
	case RID_PUBLIC:
	case RID_PROTECTED:
	case RID_PRIVATE:
	  /* Consume the access-specifier.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Remember which access-specifier is active.  */
	  current_access_specifier = token->u.value;
	  /* Look for the `:'.  */
	  cp_parser_require (parser, CPP_COLON, RT_COLON);
	  break;
	default:
	  /* Accept #pragmas at class scope.
	     */
	  if (token->type == CPP_PRAGMA)
	    {
	      cp_parser_pragma (parser, pragma_external);
	      break;
	    }

	  /* Otherwise, the next construction must be a
	     member-declaration.  */
	  cp_parser_member_declaration (parser);
	}
    }
}

/* Parse a member-declaration.

   member-declaration:
     decl-specifier-seq [opt] member-declarator-list [opt] ;
     function-definition ; [opt]
     :: [opt] nested-name-specifier template [opt] unqualified-id ;
     using-declaration
     template-declaration
     alias-declaration

   member-declarator-list:
     member-declarator
     member-declarator-list , member-declarator

   member-declarator:
     declarator pure-specifier [opt]
     declarator constant-initializer [opt]
     identifier [opt] : constant-expression

   GNU Extensions:

   member-declaration:
     __extension__ member-declaration

   member-declarator:
     declarator attributes [opt] pure-specifier [opt]
     declarator attributes [opt] constant-initializer [opt]
     identifier [opt] attributes [opt] : constant-expression

   C++0x Extensions:

   member-declaration:
     static_assert-declaration  */

static void
cp_parser_member_declaration (cp_parser* parser)
{
  cp_decl_specifier_seq decl_specifiers;
  tree prefix_attributes;
  tree decl;
  int declares_class_or_enum;
  bool friend_p;
  cp_token *token = NULL;
  cp_token *decl_spec_token_start = NULL;
  cp_token *initializer_token_start = NULL;
  int saved_pedantic;
  bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;

  /* Check for the `__extension__' keyword.  */
  if (cp_parser_extension_opt (parser, &saved_pedantic))
    {
      /* Recurse.  */
      cp_parser_member_declaration (parser);
      /* Restore the old value of the PEDANTIC flag.  */
      pedantic = saved_pedantic;
      return;
    }

  /* Check for a template-declaration.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE))
    {
      /* An explicit specialization here is an error condition, and we
	 expect the specialization handler to detect and report this.  */
      if (cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_LESS
	  && cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_GREATER)
	cp_parser_explicit_specialization (parser);
      else
	cp_parser_template_declaration (parser, /*member_p=*/true);

      return;
    }

  /* Check for a using-declaration.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_USING))
    {
      if (cxx_dialect < cxx0x)
	{
	  /* Parse the using-declaration.  */
	  cp_parser_using_declaration (parser,
				       /*access_declaration_p=*/false);
	  return;
	}
      else
	{
	  tree decl;
	  /* In C++11 `using' may also begin an alias-declaration;
	     try that first, tentatively.  */
	  cp_parser_parse_tentatively (parser);
	  decl = cp_parser_alias_declaration (parser);
	  if (cp_parser_parse_definitely (parser))
	    finish_member_declaration (decl);
	  else
	    cp_parser_using_declaration (parser,
					 /*access_declaration_p=*/false);
	  return;
	}
    }

  /* Check for @defs.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_AT_DEFS))
    {
      tree ivar, member;
      tree ivar_chains = cp_parser_objc_defs_expression (parser);
      ivar = ivar_chains;
      while (ivar)
	{
	  member = ivar;
	  ivar = TREE_CHAIN (member);
	  TREE_CHAIN (member) = NULL_TREE;
	  finish_member_declaration (member);
	}
      return;
    }

  /* If the next token is `static_assert' we have a static assertion.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_STATIC_ASSERT))
    {
      cp_parser_static_assert (parser, /*member_p=*/true);
      return;
    }

  parser->colon_corrects_to_scope_p = false;

  if (cp_parser_using_declaration (parser, /*access_declaration=*/true))
    goto out;

  /* Parse the decl-specifier-seq.  */
  decl_spec_token_start = cp_lexer_peek_token (parser->lexer);
  cp_parser_decl_specifier_seq (parser,
				CP_PARSER_FLAGS_OPTIONAL,
				&decl_specifiers,
				&declares_class_or_enum);
  prefix_attributes = decl_specifiers.attributes;
  decl_specifiers.attributes = NULL_TREE;
  /* Check for an invalid type-name.  */
  if (!decl_specifiers.any_type_specifiers_p
      && cp_parser_parse_and_diagnose_invalid_type_name (parser))
    goto out;
  /* If there is no declarator, then the decl-specifier-seq should
     specify a type.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
    {
      /* If there was no decl-specifier-seq, and the next token is a
	 `;', then we have something like:

	   struct S { ; };

	 [class.mem]

	 Each member-declaration shall declare at least one member
	 name of the class.  */
      if (!decl_specifiers.any_specifiers_p)
	{
	  cp_token *token = cp_lexer_peek_token (parser->lexer);
	  if (!in_system_header_at (token->location))
	    pedwarn (token->location, OPT_pedantic, "extra %<;%>");
	}
      else
	{
	  tree type;

	  /* See if this declaration is a friend.  */
	  friend_p = cp_parser_friend_p (&decl_specifiers);
	  /* If there were decl-specifiers, check to see if there was
	     a class-declaration.  */
	  type = check_tag_decl (&decl_specifiers);
	  /* Nested classes have already been added to the class, but
	     a `friend' needs to be explicitly registered.  */
	  if (friend_p)
	    {
	      /* If the `friend' keyword was present, the friend must
		 be introduced with a class-key.  */
	       if (!declares_class_or_enum && cxx_dialect < cxx0x)
		 pedwarn (decl_spec_token_start->location, OPT_pedantic,
			  "in C++03 a class-key must be used "
			  "when declaring a friend");
	       /* In this case:

		    template <typename T> struct A {
		      friend struct A<T>::B;
		    };

		  A<T>::B will be represented by a TYPENAME_TYPE, and
		  therefore not recognized by check_tag_decl.  */
	       if (!type)
		 {
		   type = decl_specifiers.type;
		   if (type && TREE_CODE (type) == TYPE_DECL)
		     type = TREE_TYPE (type);
		 }
	       if (!type || !TYPE_P (type))
		 error_at (decl_spec_token_start->location,
			   "friend declaration does not name a class or "
			   "function");
	       else
		 make_friend_class (current_class_type, type,
				    /*complain=*/true);
	    }
	  /* If there is no TYPE, an error message will already have
	     been issued.  */
	  else if (!type || type == error_mark_node)
	    ;
	  /* An anonymous aggregate has to be handled specially; such
	     a declaration really declares a data member (with a
	     particular type), as opposed to a nested class.  */
	  else if (ANON_AGGR_TYPE_P (type))
	    {
	      /* Remove constructors and such from TYPE, now that we
		 know it is an anonymous aggregate.  */
	      fixup_anonymous_aggr (type);
	      /* And make the corresponding data member.  */
	      decl = build_decl (decl_spec_token_start->location,
				 FIELD_DECL, NULL_TREE, type);
	      /* Add it to the class.  */
	      finish_member_declaration (decl);
	    }
	  else
	    cp_parser_check_access_in_redeclaration
					      (TYPE_NAME (type),
					       decl_spec_token_start->location);
	}
    }
  else
    {
      /* NOTE(review): set when we decide to behave as though a `;'
	 terminated the declaration; consumed after this excerpt --
	 confirm against the remainder of the function.  */
      bool assume_semicolon = false;

      /* See if these declarations will be friends.  */
      friend_p = cp_parser_friend_p (&decl_specifiers);

      /* Keep going until we hit the `;' at the end of the
	 declaration.  */
      while (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	{
	  tree attributes = NULL_TREE;
	  tree first_attribute;

	  /* Peek at the next token.  */
	  token = cp_lexer_peek_token (parser->lexer);

	  /* Check for a bitfield declaration.  */
	  if (token->type == CPP_COLON
	      || (token->type == CPP_NAME
		  && cp_lexer_peek_nth_token (parser->lexer, 2)->type
		  == CPP_COLON))
	    {
	      tree identifier;
	      tree width;

	      /* Get the name of the bitfield.  Note that we cannot just
		 check TOKEN here because it may have been invalidated by
		 the call to cp_lexer_peek_nth_token above.  */
	      if (cp_lexer_peek_token (parser->lexer)->type != CPP_COLON)
		identifier = cp_parser_identifier (parser);
	      else
		identifier = NULL_TREE;

	      /* Consume the `:' token.  */
	      cp_lexer_consume_token (parser->lexer);
	      /* Get the width of the bitfield.  */
	      width
		= cp_parser_constant_expression (parser,
						 /*allow_non_constant=*/false,
						 NULL);

	      /* Look for attributes that apply to the bitfield.  */
	      attributes = cp_parser_attributes_opt (parser);
	      /* Remember which attributes are prefix attributes and
		 which are not.  */
	      first_attribute = attributes;
	      /* Combine the attributes.  */
	      attributes = chainon (prefix_attributes, attributes);

	      /* Create the bitfield declaration.  */
	      decl = grokbitfield (identifier
				   ? make_id_declarator (NULL_TREE,
							 identifier,
							 sfk_none)
				   : NULL,
				   &decl_specifiers,
				   width,
				   attributes);
	    }
	  else
	    {
	      cp_declarator *declarator;
	      tree initializer;
	      tree asm_specification;
	      int ctor_dtor_or_conv_p;

	      /* Parse the declarator.  */
	      declarator
		= cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
					&ctor_dtor_or_conv_p,
					/*parenthesized_p=*/NULL,
					/*member_p=*/true);

	      /* If something went wrong parsing the declarator, make sure
		 that we at least consume some tokens.  */
	      if (declarator == cp_error_declarator)
		{
		  /* Skip to the end of the statement.  */
		  cp_parser_skip_to_end_of_statement (parser);
		  /* If the next token is not a semicolon, that is
		     probably because we just skipped over the body of
		     a function.  So, we consume a semicolon if
		     present, but do not issue an error message if it
		     is not present.  */
		  if (cp_lexer_next_token_is (parser->lexer,
					      CPP_SEMICOLON))
		    cp_lexer_consume_token (parser->lexer);
		  goto out;
		}

	      if (declares_class_or_enum & 2)
		cp_parser_check_for_definition_in_return_type
					    (declarator, decl_specifiers.type,
					     decl_specifiers.type_location);

	      /* Look for an asm-specification.  */
	      asm_specification = cp_parser_asm_specification_opt (parser);
	      /* Look for attributes that apply to the declaration.  */
	      attributes = cp_parser_attributes_opt (parser);
	      /* Remember which attributes are prefix attributes and
		 which are not.  */
	      first_attribute = attributes;
	      /* Combine the attributes.  */
	      attributes = chainon (prefix_attributes, attributes);

	      /* If it's an `=', then we have a constant-initializer or a
		 pure-specifier.  It is not correct to parse the
		 initializer before registering the member declaration
		 since the member declaration should be in scope while
		 its initializer is processed.  However, the rest of the
		 front end does not yet provide an interface that allows
		 us to handle this correctly.  */
	      if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
		{
		  /* In [class.mem]:

		     A pure-specifier shall be used only in the declaration of
		     a virtual function.

		     A member-declarator can contain a constant-initializer
		     only if it declares a static member of integral or
		     enumeration type.

		     Therefore, if the DECLARATOR is for a function, we look
		     for a pure-specifier; otherwise, we look for a
		     constant-initializer.  When we call `grokfield', it will
		     perform more stringent semantics checks.  */
		  initializer_token_start = cp_lexer_peek_token (parser->lexer);
		  if (function_declarator_p (declarator)
		      || (decl_specifiers.type
			  && TREE_CODE (decl_specifiers.type) == TYPE_DECL
			  && (TREE_CODE (TREE_TYPE (decl_specifiers.type))
			      == FUNCTION_TYPE)))
		    initializer = cp_parser_pure_specifier (parser);
		  else if (decl_specifiers.storage_class != sc_static)
		    /* Non-static data member: save the tokens of the
		       NSDMI for late parsing.  */
		    initializer = cp_parser_save_nsdmi (parser);
		  else if (cxx_dialect >= cxx0x)
		    {
		      bool nonconst;
		      /* Don't require a constant rvalue in C++11, since we
			 might want a reference constant.  We'll enforce
			 constancy later.  */
		      cp_lexer_consume_token (parser->lexer);
		      /* Parse the initializer.  */
		      initializer = cp_parser_initializer_clause (parser,
								  &nonconst);
		    }
		  else
		    /* Parse the initializer.  */
		    initializer = cp_parser_constant_initializer (parser);
		}
	      else if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)
		       && !function_declarator_p (declarator))
		{
		  bool x;
		  if (decl_specifiers.storage_class != sc_static)
		    initializer = cp_parser_save_nsdmi (parser);
		  else
		    initializer = cp_parser_initializer (parser, &x, &x);
		}
	      /* Otherwise, there is no initializer.  */
	      else
		initializer = NULL_TREE;

	      /* See if we are probably looking at a function
		 definition.  We are certainly not looking at a
		 member-declarator.  Calling `grokfield' has
		 side-effects, so we must not do it unless we are sure
		 that we are looking at a member-declarator.  */
	      if (cp_parser_token_starts_function_definition_p
		  (cp_lexer_peek_token (parser->lexer)))
		{
		  /* The grammar does not allow a pure-specifier to be
		     used when a member function is defined.  (It is
		     possible that this fact is an oversight in the
		     standard, since a pure function may be defined
		     outside of the class-specifier.
*/ if (initializer && initializer_token_start) error_at (initializer_token_start->location, "pure-specifier on function-definition"); decl = cp_parser_save_member_function_body (parser, &decl_specifiers, declarator, attributes); /* If the member was not a friend, declare it here. */ if (!friend_p) finish_member_declaration (decl); /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* If the next token is a semicolon, consume it. */ if (token->type == CPP_SEMICOLON) cp_lexer_consume_token (parser->lexer); goto out; } else if (declarator->kind == cdk_function) declarator->id_loc = token->location; /* Create the declaration. */ decl = grokfield (declarator, &decl_specifiers, initializer, /*init_const_expr_p=*/true, asm_specification, attributes); } /* Reset PREFIX_ATTRIBUTES. */ while (attributes && TREE_CHAIN (attributes) != first_attribute) attributes = TREE_CHAIN (attributes); if (attributes) TREE_CHAIN (attributes) = NULL_TREE; /* If there is any qualification still in effect, clear it now; we will be starting fresh with the next declarator. */ parser->scope = NULL_TREE; parser->qualifying_scope = NULL_TREE; parser->object_scope = NULL_TREE; /* If it's a `,', then there are more declarators. */ if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) cp_lexer_consume_token (parser->lexer); /* If the next token isn't a `;', then we have a parse error. */ else if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)) { /* The next token might be a ways away from where the actual semicolon is missing. Find the previous token and use that for our error position. */ cp_token *token = cp_lexer_previous_token (parser->lexer); error_at (token->location, "expected %<;%> at end of member declaration"); /* Assume that the user meant to provide a semicolon. If we were to cp_parser_skip_to_end_of_statement, we might skip to a semicolon inside a member function definition and issue nonsensical error messages. 
*/
	      assume_semicolon = true;
	    }

	  if (decl)
	    {
	      /* Add DECL to the list of members.  */
	      if (!friend_p)
		finish_member_declaration (decl);

	      if (TREE_CODE (decl) == FUNCTION_DECL)
		cp_parser_save_default_args (parser, decl);
	      else if (TREE_CODE (decl) == FIELD_DECL
		       && !DECL_C_BIT_FIELD (decl)
		       && DECL_INITIAL (decl))
		/* Add DECL to the queue of NSDMI to be parsed later.  */
		VEC_safe_push (tree, gc, unparsed_nsdmis, decl);
	    }

	  if (assume_semicolon)
	    goto out;
	}
    }

  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
 out:
  parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
}

/* Parse a pure-specifier.

   pure-specifier:
     = 0

   Returns INTEGER_ZERO_NODE if a pure specifier is found.
   Otherwise, ERROR_MARK_NODE is returned.

   Also accepts `= default' and `= delete' here (C++0x defaulted and
   deleted functions), returning the `default'/`delete' token's value;
   the caller sorts out which it got.  */

static tree
cp_parser_pure_specifier (cp_parser* parser)
{
  cp_token *token;

  /* Look for the `=' token.  */
  if (!cp_parser_require (parser, CPP_EQ, RT_EQ))
    return error_mark_node;
  /* Look for the `0' token.  */
  token = cp_lexer_peek_token (parser->lexer);

  if (token->type == CPP_EOF
      || token->type == CPP_PRAGMA_EOL)
    return error_mark_node;

  cp_lexer_consume_token (parser->lexer);

  /* Accept = default or = delete in c++0x mode.  */
  if (token->keyword == RID_DEFAULT
      || token->keyword == RID_DELETE)
    {
      maybe_warn_cpp0x (CPP0X_DEFAULTED_DELETED);
      return token->u.value;
    }

  /* c_lex_with_flags marks a single digit '0' with PURE_ZERO.  */
  if (token->type != CPP_NUMBER || !(token->flags & PURE_ZERO))
    {
      cp_parser_error (parser,
		       "invalid pure specifier (only %<= 0%> is allowed)");
      cp_parser_skip_to_end_of_statement (parser);
      return error_mark_node;
    }
  if (PROCESSING_REAL_TEMPLATE_DECL_P ())
    {
      error_at (token->location, "templates may not be %<virtual%>");
      return error_mark_node;
    }

  return integer_zero_node;
}

/* Parse a constant-initializer.

   constant-initializer:
     = constant-expression

   Returns a representation of the constant-expression.  */

static tree
cp_parser_constant_initializer (cp_parser* parser)
{
  /* Look for the `=' token.  */
  if (!cp_parser_require (parser, CPP_EQ, RT_EQ))
    return error_mark_node;

  /* It is invalid to write:

       struct S { static const int i = { 7 }; };

     */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      cp_parser_error (parser,
		       "a brace-enclosed initializer is not allowed here");
      /* Consume the opening brace.  */
      cp_lexer_consume_token (parser->lexer);
      /* Skip the initializer.  */
      cp_parser_skip_to_closing_brace (parser);
      /* Look for the trailing `}'.  */
      cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);

      return error_mark_node;
    }

  return cp_parser_constant_expression (parser,
					/*allow_non_constant=*/false,
					NULL);
}

/* Derived classes [gram.class.derived] */

/* Parse a base-clause.

   base-clause:
     : base-specifier-list

   base-specifier-list:
     base-specifier ... [opt]
     base-specifier-list , base-specifier ... [opt]

   Returns a TREE_LIST representing the base-classes, in the order in
   which they were declared.  The representation of each node is as
   described by cp_parser_base_specifier.

   In the case that no bases are specified, this function will return
   NULL_TREE, not ERROR_MARK_NODE.  */

static tree
cp_parser_base_clause (cp_parser* parser)
{
  tree bases = NULL_TREE;

  /* Look for the `:' that begins the list.  */
  cp_parser_require (parser, CPP_COLON, RT_COLON);

  /* Scan the base-specifier-list.  */
  while (true)
    {
      cp_token *token;
      tree base;
      bool pack_expansion_p = false;

      /* Look for the base-specifier.  */
      base = cp_parser_base_specifier (parser);
      /* Look for the (optional) ellipsis.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	{
	  /* Consume the `...'.  */
	  cp_lexer_consume_token (parser->lexer);

	  pack_expansion_p = true;
	}

      /* Add BASE to the front of the list.  */
      if (base && base != error_mark_node)
	{
	  if (pack_expansion_p)
	    /* Make this a pack expansion type.  */
	    TREE_VALUE (base) = make_pack_expansion (TREE_VALUE (base));

	  if (!check_for_bare_parameter_packs (TREE_VALUE (base)))
	    {
	      TREE_CHAIN (base) = bases;
	      bases = base;
	    }
	}
      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* If it's not a comma, then the list is complete.  */
      if (token->type != CPP_COMMA)
	break;
      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);
    }

  /* PARSER->SCOPE may still be non-NULL at this point, if the last
     base class had a qualified name.  However, the next name that
     appears is certainly not qualified.  */
  parser->scope = NULL_TREE;
  parser->qualifying_scope = NULL_TREE;
  parser->object_scope = NULL_TREE;

  /* The list was built by consing onto the front; reverse to declared
     order before returning.  */
  return nreverse (bases);
}

/* Parse a base-specifier.

   base-specifier:
     :: [opt] nested-name-specifier [opt] class-name
     virtual access-specifier [opt] :: [opt] nested-name-specifier
       [opt] class-name
     access-specifier virtual [opt] :: [opt] nested-name-specifier
       [opt] class-name

   Returns a TREE_LIST.  The TREE_PURPOSE will be one of
   ACCESS_{DEFAULT,PUBLIC,PROTECTED,PRIVATE}_[VIRTUAL]_NODE to
   indicate the specifiers provided.  The TREE_VALUE will be a TYPE
   (or the ERROR_MARK_NODE) indicating the type that was
   specified.  */

static tree
cp_parser_base_specifier (cp_parser* parser)
{
  cp_token *token;
  bool done = false;
  bool virtual_p = false;
  bool duplicate_virtual_error_issued_p = false;
  bool duplicate_access_error_issued_p = false;
  bool class_scope_p, template_p;
  tree access = access_default_node;
  tree type;

  /* Process the optional `virtual' and `access-specifier'.  */
  while (!done)
    {
      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* Process `virtual'.  */
      switch (token->keyword)
	{
	case RID_VIRTUAL:
	  /* If `virtual' appears more than once, issue an error.  */
	  if (virtual_p && !duplicate_virtual_error_issued_p)
	    {
	      /* NOTE(review): "base-specified" in this diagnostic looks
		 like a typo for "base-specifier", but the string is
		 user-visible output; any fix should go through the
		 diagnostic/testsuite machinery, not this comment pass.  */
	      cp_parser_error (parser,
			       "%<virtual%> specified more than once in base-specified");
	      duplicate_virtual_error_issued_p = true;
	    }

	  virtual_p = true;

	  /* Consume the `virtual' token.  */
	  cp_lexer_consume_token (parser->lexer);

	  break;

	case RID_PUBLIC:
	case RID_PROTECTED:
	case RID_PRIVATE:
	  /* If more than one access specifier appears, issue an
	     error.
*/
	  if (access != access_default_node
	      && !duplicate_access_error_issued_p)
	    {
	      /* NOTE(review): "base-specified" here looks like a typo for
		 "base-specifier"; it is a user-visible diagnostic string,
		 so it is deliberately left untouched in this pass.  */
	      cp_parser_error (parser,
			       "more than one access specifier in base-specified");
	      duplicate_access_error_issued_p = true;
	    }

	  access = ridpointers[(int) token->keyword];

	  /* Consume the access-specifier.  */
	  cp_lexer_consume_token (parser->lexer);

	  break;

	default:
	  done = true;
	  break;
	}
    }
  /* It is not uncommon to see programs mechanically, erroneously, use
     the 'typename' keyword to denote (dependent) qualified types as
     base classes.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TYPENAME))
    {
      token = cp_lexer_peek_token (parser->lexer);
      if (!processing_template_decl)
	error_at (token->location,
		  "keyword %<typename%> not allowed outside of templates");
      else
	error_at (token->location,
		  "keyword %<typename%> not allowed in this context "
		  "(the base class is implicitly a type)");
      /* Consume the stray `typename' and carry on parsing the base.  */
      cp_lexer_consume_token (parser->lexer);
    }

  /* Look for the optional `::' operator.  */
  cp_parser_global_scope_opt (parser,
			      /*current_scope_valid_p=*/false);
  /* Look for the nested-name-specifier.  The simplest way to
     implement:

       [temp.res]

       The keyword `typename' is not permitted in a base-specifier or
       mem-initializer; in these contexts a qualified name that
       depends on a template-parameter is implicitly assumed to be a
       type name.

     is to pretend that we have seen the `typename' keyword at this
     point.  */
  cp_parser_nested_name_specifier_opt (parser,
				       /*typename_keyword_p=*/true,
				       /*check_dependency_p=*/true,
				       typename_type,
				       /*is_declaration=*/true);
  /* If the base class is given by a qualified name, assume that names
     we see are type names or templates, as appropriate.  */
  class_scope_p = (parser->scope && TYPE_P (parser->scope));
  template_p = class_scope_p && cp_parser_optional_template_keyword (parser);

  if (!parser->scope
      && cp_lexer_next_token_is_decltype (parser->lexer))
    /* DR 950 allows decltype as a base-specifier.  */
    type = cp_parser_decltype (parser);
  else
    {
      /* Otherwise, look for the class-name.  */
      type = cp_parser_class_name (parser,
				   class_scope_p,
				   template_p,
				   typename_type,
				   /*check_dependency_p=*/true,
				   /*class_head_p=*/false,
				   /*is_declaration=*/true);
      type = TREE_TYPE (type);
    }

  if (type == error_mark_node)
    return error_mark_node;

  return finish_base_specifier (type, access, virtual_p);
}

/* Exception handling [gram.exception] */

/* Parse an (optional) noexcept-specification.

   noexcept-specification:
     noexcept ( constant-expression ) [opt]

   If no noexcept-specification is present, returns NULL_TREE.
   Otherwise, if REQUIRE_CONSTEXPR is false, then either parse and
   return any expression if parentheses follow noexcept, or return
   BOOLEAN_TRUE_NODE if there are no parentheses.  CONSUMED_EXPR will
   be set accordingly.  Otherwise, returns a noexcept specification
   unless RETURN_COND is true, in which case a boolean condition is
   returned instead.  */

static tree
cp_parser_noexcept_specification_opt (cp_parser* parser,
				      bool require_constexpr,
				      bool* consumed_expr,
				      bool return_cond)
{
  cp_token *token;
  const char *saved_message;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  /* Is it a noexcept-specification?  */
  if (cp_parser_is_keyword (token, RID_NOEXCEPT))
    {
      tree expr;
      cp_lexer_consume_token (parser->lexer);

      if (cp_lexer_peek_token (parser->lexer)->type == CPP_OPEN_PAREN)
	{
	  cp_lexer_consume_token (parser->lexer);

	  if (require_constexpr)
	    {
	      /* Types may not be defined in an exception-specification.  */
	      saved_message = parser->type_definition_forbidden_message;
	      parser->type_definition_forbidden_message
		= G_("types may not be defined in an exception-specification");

	      expr = cp_parser_constant_expression (parser, false, NULL);

	      /* Restore the saved message.  */
	      parser->type_definition_forbidden_message = saved_message;
	    }
	  else
	    {
	      expr = cp_parser_expression (parser, false, NULL);
	      *consumed_expr = true;
	    }

	  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	}
      else
	{
	  /* Bare `noexcept' is equivalent to `noexcept(true)'.  */
	  expr = boolean_true_node;
	  if (!require_constexpr)
	    *consumed_expr = false;
	}

      /* We cannot build a noexcept-spec right away because this will
	 check that expr is a constexpr.  */
      if (!return_cond)
	return build_noexcept_spec (expr, tf_warning_or_error);
      else
	return expr;
    }
  else
    return NULL_TREE;
}

/* Parse an (optional) exception-specification.

   exception-specification:
     throw ( type-id-list [opt] )

   Returns a TREE_LIST representing the exception-specification.  The
   TREE_VALUE of each node is a type.  */

static tree
cp_parser_exception_specification_opt (cp_parser* parser)
{
  cp_token *token;
  tree type_id_list;
  const char *saved_message;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  /* Is it a noexcept-specification?  */
  type_id_list = cp_parser_noexcept_specification_opt(parser, true, NULL,
						      false);
  if (type_id_list != NULL_TREE)
    return type_id_list;

  /* If it's not `throw', then there's no exception-specification.  */
  if (!cp_parser_is_keyword (token, RID_THROW))
    return NULL_TREE;

#if 0
  /* Enable this once a lot of code has transitioned to noexcept?  */
  if (cxx_dialect == cxx0x && !in_system_header)
    warning (OPT_Wdeprecated, "dynamic exception specifications are "
	     "deprecated in C++0x; use %<noexcept%> instead");
#endif

  /* Consume the `throw'.  */
  cp_lexer_consume_token (parser->lexer);

  /* Look for the `('.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If it's not a `)', then there is a type-id-list.  */
  if (token->type != CPP_CLOSE_PAREN)
    {
      /* Types may not be defined in an exception-specification.
*/
      saved_message = parser->type_definition_forbidden_message;
      parser->type_definition_forbidden_message
	= G_("types may not be defined in an exception-specification");
      /* Parse the type-id-list.  */
      type_id_list = cp_parser_type_id_list (parser);
      /* Restore the saved message.  */
      parser->type_definition_forbidden_message = saved_message;
    }
  else
    /* `throw ()' — the empty dynamic exception specification.  */
    type_id_list = empty_except_spec;

  /* Look for the `)'.  */
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  return type_id_list;
}

/* Parse an (optional) type-id-list.

   type-id-list:
     type-id ... [opt]
     type-id-list , type-id ... [opt]

   Returns a TREE_LIST.  The TREE_VALUE of each node is a TYPE,
   in the order that the types were presented.  */

static tree
cp_parser_type_id_list (cp_parser* parser)
{
  tree types = NULL_TREE;

  while (true)
    {
      cp_token *token;
      tree type;

      /* Get the next type-id.  */
      type = cp_parser_type_id (parser);
      /* Parse the optional ellipsis.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	{
	  /* Consume the `...'.  */
	  cp_lexer_consume_token (parser->lexer);

	  /* Turn the type into a pack expansion expression.  */
	  type = make_pack_expansion (type);
	}
      /* Add it to the list.  */
      types = add_exception_specifier (types, type, /*complain=*/1);
      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* If it is not a `,', we are done.  */
      if (token->type != CPP_COMMA)
	break;
      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);
    }

  return nreverse (types);
}

/* Parse a try-block.

   try-block:
     try compound-statement handler-seq  */

static tree
cp_parser_try_block (cp_parser* parser)
{
  tree try_block;

  cp_parser_require_keyword (parser, RID_TRY, RT_TRY);
  try_block = begin_try_block ();
  cp_parser_compound_statement (parser, NULL, true, false);
  finish_try_block (try_block);
  cp_parser_handler_seq (parser);
  finish_handler_sequence (try_block);

  return try_block;
}

/* Parse a function-try-block.

   function-try-block:
     try ctor-initializer [opt] function-body handler-seq

   Returns true if a ctor-initializer was present in the
   function-body, false otherwise (including when no `try' keyword was
   found at all).  */

static bool
cp_parser_function_try_block (cp_parser* parser)
{
  tree compound_stmt;
  tree try_block;
  bool ctor_initializer_p;

  /* Look for the `try' keyword.  */
  if (!cp_parser_require_keyword (parser, RID_TRY, RT_TRY))
    return false;
  /* Let the rest of the front end know where we are.  */
  try_block = begin_function_try_block (&compound_stmt);
  /* Parse the function-body.  */
  ctor_initializer_p
    = cp_parser_ctor_initializer_opt_and_function_body (parser);
  /* We're done with the `try' part.  */
  finish_function_try_block (try_block);
  /* Parse the handlers.  */
  cp_parser_handler_seq (parser);
  /* We're done with the handlers.  */
  finish_function_handler_sequence (try_block, compound_stmt);

  return ctor_initializer_p;
}

/* Parse a handler-seq.

   handler-seq:
     handler handler-seq [opt]  */

static void
cp_parser_handler_seq (cp_parser* parser)
{
  while (true)
    {
      cp_token *token;

      /* Parse the handler.  */
      cp_parser_handler (parser);
      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* If it's not `catch' then there are no more handlers.  */
      if (!cp_parser_is_keyword (token, RID_CATCH))
	break;
    }
}

/* Parse a handler.

   handler:
     catch ( exception-declaration ) compound-statement  */

static void
cp_parser_handler (cp_parser* parser)
{
  tree handler;
  tree declaration;

  cp_parser_require_keyword (parser, RID_CATCH, RT_CATCH);
  handler = begin_handler ();
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  declaration = cp_parser_exception_declaration (parser);
  finish_handler_parms (declaration, handler);
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
  cp_parser_compound_statement (parser, NULL, false, false);
  finish_handler (handler);
}

/* Parse an exception-declaration.

   exception-declaration:
     type-specifier-seq declarator
     type-specifier-seq abstract-declarator
     type-specifier-seq
     ...

   Returns a VAR_DECL for the declaration, or NULL_TREE if the
   ellipsis variant is used.  */

static tree
cp_parser_exception_declaration (cp_parser* parser)
{
  cp_decl_specifier_seq type_specifiers;
  cp_declarator *declarator;
  const char *saved_message;

  /* If it's an ellipsis, it's easy to handle.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
    {
      /* Consume the `...' token.  */
      cp_lexer_consume_token (parser->lexer);
      return NULL_TREE;
    }

  /* Types may not be defined in exception-declarations.  */
  saved_message = parser->type_definition_forbidden_message;
  parser->type_definition_forbidden_message
    = G_("types may not be defined in exception-declarations");

  /* Parse the type-specifier-seq.  */
  cp_parser_type_specifier_seq (parser, /*is_declaration=*/true,
				/*is_trailing_return=*/false,
				&type_specifiers);
  /* If it's a `)', then there is no declarator.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN))
    declarator = NULL;
  else
    declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_EITHER,
				       /*ctor_dtor_or_conv_p=*/NULL,
				       /*parenthesized_p=*/NULL,
				       /*member_p=*/false);

  /* Restore the saved message.  */
  parser->type_definition_forbidden_message = saved_message;

  if (!type_specifiers.any_specifiers_p)
    return error_mark_node;

  return grokdeclarator (declarator, &type_specifiers, CATCHPARM, 1, NULL);
}

/* Parse a throw-expression.

   throw-expression:
     throw assignment-expression [opt]

   Returns a THROW_EXPR representing the throw-expression.  */

static tree
cp_parser_throw_expression (cp_parser* parser)
{
  tree expression;
  cp_token* token;

  cp_parser_require_keyword (parser, RID_THROW, RT_THROW);
  token = cp_lexer_peek_token (parser->lexer);
  /* Figure out whether or not there is an assignment-expression
     following the "throw" keyword.
*/
  /* A token that can legally follow a bare `throw' (the rethrow form)
     means there is no operand expression.  */
  if (token->type == CPP_COMMA
      || token->type == CPP_SEMICOLON
      || token->type == CPP_CLOSE_PAREN
      || token->type == CPP_CLOSE_SQUARE
      || token->type == CPP_CLOSE_BRACE
      || token->type == CPP_COLON)
    expression = NULL_TREE;
  else
    expression = cp_parser_assignment_expression (parser,
						  /*cast_p=*/false, NULL);

  return build_throw (expression);
}

/* GNU Extensions */

/* Parse an (optional) asm-specification.

   asm-specification:
     asm ( string-literal )

   If the asm-specification is present, returns a STRING_CST
   corresponding to the string-literal.  Otherwise, returns
   NULL_TREE.  */

static tree
cp_parser_asm_specification_opt (cp_parser* parser)
{
  cp_token *token;
  tree asm_specification;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If the next token isn't the `asm' keyword, then there's no
     asm-specification.  */
  if (!cp_parser_is_keyword (token, RID_ASM))
    return NULL_TREE;

  /* Consume the `asm' token.  */
  cp_lexer_consume_token (parser->lexer);
  /* Look for the `('.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);

  /* Look for the string-literal.  */
  asm_specification = cp_parser_string_literal (parser, false, false);

  /* Look for the `)'.  */
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  return asm_specification;
}

/* Parse an asm-operand-list.

   asm-operand-list:
     asm-operand
     asm-operand-list , asm-operand

   asm-operand:
     string-literal ( expression )
     [ string-literal ] string-literal ( expression )

   Returns a TREE_LIST representing the operands.  The TREE_VALUE of
   each node is the expression.  The TREE_PURPOSE is itself a
   TREE_LIST whose TREE_PURPOSE is a STRING_CST for the bracketed
   string-literal (or NULL_TREE if not present) and whose TREE_VALUE
   is a STRING_CST for the string literal before the parenthesis.
   Returns ERROR_MARK_NODE if any of the operands are invalid.  */

static tree
cp_parser_asm_operand_list (cp_parser* parser)
{
  tree asm_operands = NULL_TREE;
  bool invalid_operands = false;

  while (true)
    {
      tree string_literal;
      tree expression;
      tree name;

      if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
	{
	  /* Consume the `[' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Read the operand name.  */
	  name = cp_parser_identifier (parser);
	  if (name != error_mark_node)
	    name = build_string (IDENTIFIER_LENGTH (name),
				 IDENTIFIER_POINTER (name));
	  /* Look for the closing `]'.  */
	  cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
	}
      else
	name = NULL_TREE;
      /* Look for the string-literal.  */
      string_literal = cp_parser_string_literal (parser, false, false);

      /* Look for the `('.  */
      cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
      /* Parse the expression.  */
      expression = cp_parser_expression (parser, /*cast_p=*/false, NULL);
      /* Look for the `)'.  */
      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

      /* Note the error but keep parsing, so later operands still get
	 diagnosed; the whole list is rejected at the end.  */
      if (name == error_mark_node
	  || string_literal == error_mark_node
	  || expression == error_mark_node)
	invalid_operands = true;

      /* Add this operand to the list.  */
      asm_operands = tree_cons (build_tree_list (name, string_literal),
				expression,
				asm_operands);
      /* If the next token is not a `,', there are no more
	 operands.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);
    }

  return invalid_operands ? error_mark_node : nreverse (asm_operands);
}

/* Parse an asm-clobber-list.

   asm-clobber-list:
     string-literal
     asm-clobber-list , string-literal

   Returns a TREE_LIST, indicating the clobbers in the order that they
   appeared.  The TREE_VALUE of each node is a STRING_CST.  */

static tree
cp_parser_asm_clobber_list (cp_parser* parser)
{
  tree clobbers = NULL_TREE;

  while (true)
    {
      tree string_literal;

      /* Look for the string literal.  */
      string_literal = cp_parser_string_literal (parser, false, false);
      /* Add it to the list.  */
      clobbers = tree_cons (NULL_TREE, string_literal, clobbers);
      /* If the next token is not a `,', then the list is
	 complete.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      /* Consume the `,' token.  */
      cp_lexer_consume_token (parser->lexer);
    }

  /* NOTE(review): unlike the other list parsers here, this one returns
     the list without nreverse, i.e. in reverse source order — presumably
     clobber order does not matter to the consumer; confirm before
     "fixing".  */
  return clobbers;
}

/* Parse an asm-label-list.

   asm-label-list:
     identifier
     asm-label-list , identifier

   Returns a TREE_LIST, indicating the labels in the order that they
   appeared.  The TREE_VALUE of each node is a label.  */

static tree
cp_parser_asm_label_list (cp_parser* parser)
{
  tree labels = NULL_TREE;

  while (true)
    {
      tree identifier, label, name;

      /* Look for the identifier.  */
      identifier = cp_parser_identifier (parser);
      if (!error_operand_p (identifier))
	{
	  label = lookup_label (identifier);
	  if (TREE_CODE (label) == LABEL_DECL)
	    {
	      TREE_USED (label) = 1;
	      check_goto (label);
	      name = build_string (IDENTIFIER_LENGTH (identifier),
				   IDENTIFIER_POINTER (identifier));
	      labels = tree_cons (name, label, labels);
	    }
	}
      /* If the next token is not a `,', then the list is
	 complete.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      /* Consume the `,' token.  */
      cp_lexer_consume_token (parser->lexer);
    }

  return nreverse (labels);
}

/* Parse an (optional) series of attributes.

   attributes:
     attributes attribute

   attribute:
     __attribute__ (( attribute-list [opt] ))

   The return value is as for cp_parser_attribute_list.  */

static tree
cp_parser_attributes_opt (cp_parser* parser)
{
  tree attributes = NULL_TREE;

  while (true)
    {
      cp_token *token;
      tree attribute_list;

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* If it's not `__attribute__', then we're done.  */
      if (token->keyword != RID_ATTRIBUTE)
	break;

      /* Consume the `__attribute__' keyword.  */
      cp_lexer_consume_token (parser->lexer);
      /* Look for the two `(' tokens.  */
      cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
      cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);

      /* Peek at the next token.
*/
      token = cp_lexer_peek_token (parser->lexer);
      if (token->type != CPP_CLOSE_PAREN)
	/* Parse the attribute-list.  */
	attribute_list = cp_parser_attribute_list (parser);
      else
	/* If the next token is a `)', then there is no attribute
	   list.  */
	attribute_list = NULL;

      /* Look for the two `)' tokens.  */
      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

      /* Add these new attributes to the list.  */
      attributes = chainon (attributes, attribute_list);
    }

  return attributes;
}

/* Parse an attribute-list.

   attribute-list:
     attribute
     attribute-list , attribute

   attribute:
     identifier
     identifier ( identifier )
     identifier ( identifier , expression-list )
     identifier ( expression-list )

   Returns a TREE_LIST, or NULL_TREE on error.  Each node corresponds
   to an attribute.  The TREE_PURPOSE of each node is the identifier
   indicating which attribute is in use.  The TREE_VALUE represents
   the arguments, if any.  */

static tree
cp_parser_attribute_list (cp_parser* parser)
{
  tree attribute_list = NULL_TREE;
  bool save_translate_strings_p = parser->translate_strings_p;

  /* Attribute argument strings must not be translated to the
     execution character set; restore the flag on exit.  */
  parser->translate_strings_p = false;
  while (true)
    {
      cp_token *token;
      tree identifier;
      tree attribute;

      /* Look for the identifier.  We also allow keywords here; for
	 example `__attribute__ ((const))' is legal.  */
      token = cp_lexer_peek_token (parser->lexer);
      if (token->type == CPP_NAME
	  || token->type == CPP_KEYWORD)
	{
	  tree arguments = NULL_TREE;

	  /* Consume the token.  */
	  token = cp_lexer_consume_token (parser->lexer);

	  /* Save away the identifier that indicates which attribute
	     this is.  */
	  identifier = (token->type == CPP_KEYWORD)
	    /* For keywords, use the canonical spelling, not the
	       parsed identifier.  */
	    ? ridpointers[(int) token->keyword]
	    : token->u.value;

	  attribute = build_tree_list (identifier, NULL_TREE);

	  /* Peek at the next token.  */
	  token = cp_lexer_peek_token (parser->lexer);
	  /* If it's an `(', then parse the attribute arguments.  */
	  if (token->type == CPP_OPEN_PAREN)
	    {
	      VEC(tree,gc) *vec;
	      int attr_flag = (attribute_takes_identifier_p (identifier)
			       ? id_attr : normal_attr);
	      vec = cp_parser_parenthesized_expression_list
		    (parser, attr_flag, /*cast_p=*/false,
		     /*allow_expansion_p=*/false,
		     /*non_constant_p=*/NULL);
	      if (vec == NULL)
		arguments = error_mark_node;
	      else
		{
		  arguments = build_tree_list_vec (vec);
		  release_tree_vector (vec);
		}
	      /* Save the arguments away.  */
	      TREE_VALUE (attribute) = arguments;
	    }

	  if (arguments != error_mark_node)
	    {
	      /* Add this attribute to the list.  */
	      TREE_CHAIN (attribute) = attribute_list;
	      attribute_list = attribute;
	    }

	  token = cp_lexer_peek_token (parser->lexer);
	}
      /* Now, look for more attributes.  If the next token isn't a
	 `,', we're done.  */
      if (token->type != CPP_COMMA)
	break;

      /* Consume the comma and keep going.  */
      cp_lexer_consume_token (parser->lexer);
    }
  parser->translate_strings_p = save_translate_strings_p;

  /* We built up the list in reverse order.  */
  return nreverse (attribute_list);
}

/* Parse an optional `__extension__' keyword.  Returns TRUE if it is
   present, and FALSE otherwise.  *SAVED_PEDANTIC is set to the
   current value of the PEDANTIC flag, regardless of whether or not
   the `__extension__' keyword is present.  The caller is responsible
   for restoring the value of the PEDANTIC flag.  */

static bool
cp_parser_extension_opt (cp_parser* parser, int* saved_pedantic)
{
  /* Save the old value of the PEDANTIC flag.  */
  *saved_pedantic = pedantic;

  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_EXTENSION))
    {
      /* Consume the `__extension__' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* We're not being pedantic while the `__extension__' keyword is
	 in effect.  */
      pedantic = 0;

      return true;
    }

  return false;
}

/* Parse a label declaration.

   label-declaration:
     __label__ label-declarator-seq ;

   label-declarator-seq:
     identifier , label-declarator-seq
     identifier  */

static void
cp_parser_label_declaration (cp_parser* parser)
{
  /* Look for the `__label__' keyword.  */
  cp_parser_require_keyword (parser, RID_LABEL, RT_LABEL);

  while (true)
    {
      tree identifier;

      /* Look for an identifier.  */
      identifier = cp_parser_identifier (parser);
      /* If we failed, stop.  */
      if (identifier == error_mark_node)
	break;
      /* Declare it as a label.  */
      finish_label_decl (identifier);
      /* If the next token is a `;', stop.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	break;
      /* Look for the `,' separating the label declarations.  */
      cp_parser_require (parser, CPP_COMMA, RT_COMMA);
    }

  /* Look for the final `;'.  */
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
}

/* Support Functions */

/* Looks up NAME in the current scope, as given by PARSER->SCOPE.
   NAME should have one of the representations used for an
   id-expression.  If NAME is the ERROR_MARK_NODE, the
   ERROR_MARK_NODE is returned.  If PARSER->SCOPE is a dependent
   type, then a SCOPE_REF is returned.

   If NAME is a TEMPLATE_ID_EXPR, then it will be immediately
   returned; the name was already resolved when the TEMPLATE_ID_EXPR
   was formed.  Abstractly, such entities should not be passed to
   this function, because they do not need to be looked up, but it is
   simpler to check for this special case here, rather than at the
   call-sites.

   In cases not explicitly covered above, this function returns a
   DECL, OVERLOAD, or baselink representing the result of the lookup.
   If there was no entity with the indicated NAME, the
   ERROR_MARK_NODE is returned.

   If TAG_TYPE is not NONE_TYPE, it indicates an explicit type
   keyword (e.g., "struct") that was used.  In that case bindings
   that do not refer to types are ignored.

   If IS_TEMPLATE is TRUE, bindings that do not refer to templates
   are ignored.

   If IS_NAMESPACE is TRUE, bindings that do not refer to namespaces
   are ignored.

   If CHECK_DEPENDENCY is TRUE, names are not looked up in dependent
   types.

   If AMBIGUOUS_DECLS is non-NULL, *AMBIGUOUS_DECLS is set to a
   TREE_LIST of candidates if name-lookup results in an ambiguity,
   and NULL_TREE otherwise.
*/
static tree
cp_parser_lookup_name (cp_parser *parser, tree name,
		       enum tag_types tag_type,
		       bool is_template,
		       bool is_namespace,
		       bool check_dependency,
		       tree *ambiguous_decls,
		       location_t name_location)
{
  int flags = 0;
  tree decl;
  tree object_type = parser->context->object_type;

  /* Only complain from lookup when we are committed to this parse.  */
  if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
    flags |= LOOKUP_COMPLAIN;

  /* Assume that the lookup will be unambiguous.  */
  if (ambiguous_decls)
    *ambiguous_decls = NULL_TREE;

  /* Now that we have looked up the name, the OBJECT_TYPE (if any) is
     no longer valid.  Note that if we are parsing tentatively, and
     the parse fails, OBJECT_TYPE will be automatically restored.  */
  parser->context->object_type = NULL_TREE;

  if (name == error_mark_node)
    return error_mark_node;

  /* A template-id has already been resolved; there is no lookup to
     do.  */
  if (TREE_CODE (name) == TEMPLATE_ID_EXPR)
    return name;
  if (BASELINK_P (name))
    {
      gcc_assert (TREE_CODE (BASELINK_FUNCTIONS (name))
		  == TEMPLATE_ID_EXPR);
      return name;
    }

  /* A BIT_NOT_EXPR is used to represent a destructor.  By this point,
     it should already have been checked to make sure that the name
     used matches the type being destroyed.  */
  if (TREE_CODE (name) == BIT_NOT_EXPR)
    {
      tree type;

      /* Figure out to which type this destructor applies.  */
      if (parser->scope)
	type = parser->scope;
      else if (object_type)
	type = object_type;
      else
	type = current_class_type;
      /* If that's not a class type, there is no destructor.  */
      if (!type || !CLASS_TYPE_P (type))
	return error_mark_node;
      /* Implicit destructors may not have been declared yet; do that
	 lazily before looking at CLASSTYPE_DESTRUCTORS.  */
      if (CLASSTYPE_LAZY_DESTRUCTOR (type))
	lazily_declare_fn (sfk_destructor, type);
      if (!CLASSTYPE_DESTRUCTORS (type))
	return error_mark_node;
      /* If it was a class type, return the destructor.  */
      return CLASSTYPE_DESTRUCTORS (type);
    }

  /* By this point, the NAME should be an ordinary identifier.  If
     the id-expression was a qualified name, the qualifying scope is
     stored in PARSER->SCOPE at this point.  */
  gcc_assert (TREE_CODE (name) == IDENTIFIER_NODE);

  /* Perform the lookup.  */
  if (parser->scope)
    {
      bool dependent_p;

      if (parser->scope == error_mark_node)
	return error_mark_node;

      /* If the SCOPE is dependent, the lookup must be deferred until
	 the template is instantiated -- unless we are explicitly
	 looking up names in uninstantiated templates.  Even then, we
	 cannot look up the name if the scope is not a class type; it
	 might, for example, be a template type parameter.  */
      dependent_p = (TYPE_P (parser->scope)
		     && dependent_scope_p (parser->scope));
      if ((check_dependency || !CLASS_TYPE_P (parser->scope))
	  && dependent_p)
	/* Defer lookup.  */
	decl = error_mark_node;
      else
	{
	  tree pushed_scope = NULL_TREE;

	  /* If PARSER->SCOPE is a dependent type, then it must be a
	     class type, and we must not be checking dependencies;
	     otherwise, we would have processed this lookup above.  So
	     that PARSER->SCOPE is not considered a dependent base by
	     lookup_member, we must enter the scope here.  */
	  if (dependent_p)
	    pushed_scope = push_scope (parser->scope);

	  /* If the PARSER->SCOPE is a template specialization, it
	     may be instantiated during name lookup.  In that case,
	     errors may be issued.  Even if we rollback the current
	     tentative parse, those errors are valid.  */
	  decl = lookup_qualified_name (parser->scope, name,
					tag_type != none_type,
					/*complain=*/true);

	  /* 3.4.3.1: In a lookup in which the constructor is an
	     acceptable lookup result and the nested-name-specifier
	     nominates a class C:

	     * if the name specified after the
	     nested-name-specifier, when looked up in C, is the
	     injected-class-name of C (Clause 9), or

	     * if the name specified after the nested-name-specifier
	     is the same as the identifier or the
	     simple-template-id's template-name in the last
	     component of the nested-name-specifier,

	     the name is instead considered to name the constructor
	     of class C.

	     [ Note: for example, the constructor is not an
	     acceptable lookup result in an elaborated-type-specifier
	     so the constructor would not be used in place of the
	     injected-class-name.
	     --end note ]

	     Such a constructor name shall be used only in the
	     declarator-id of a declaration that names a constructor
	     or in a using-declaration.  */
	  if (tag_type == none_type
	      && DECL_SELF_REFERENCE_P (decl)
	      && same_type_p (DECL_CONTEXT (decl), parser->scope))
	    decl = lookup_qualified_name (parser->scope, ctor_identifier,
					  tag_type != none_type,
					  /*complain=*/true);

	  /* If we have a single function from a using decl, pull it out.  */
	  if (TREE_CODE (decl) == OVERLOAD
	      && !really_overloaded_fn (decl))
	    decl = OVL_FUNCTION (decl);

	  if (pushed_scope)
	    pop_scope (pushed_scope);
	}

      /* If the scope is a dependent type and either we deferred lookup or
	 we did lookup but didn't find the name, remember the name.  */
      if (decl == error_mark_node && TYPE_P (parser->scope)
	  && dependent_type_p (parser->scope))
	{
	  if (tag_type)
	    {
	      tree type;

	      /* The resolution to Core Issue 180 says that `struct
		 A::B' should be considered a type-name, even if `A'
		 is dependent.  */
	      type = make_typename_type (parser->scope, name, tag_type,
					 /*complain=*/tf_error);
	      decl = TYPE_NAME (type);
	    }
	  else if (is_template
		   && (cp_parser_next_token_ends_template_argument_p (parser)
		       || cp_lexer_next_token_is (parser->lexer,
						  CPP_CLOSE_PAREN)))
	    decl = make_unbound_class_template (parser->scope,
						name, NULL_TREE,
						/*complain=*/tf_error);
	  else
	    decl = build_qualified_name (/*type=*/NULL_TREE,
					 parser->scope, name,
					 is_template);
	}
      parser->qualifying_scope = parser->scope;
      parser->object_scope = NULL_TREE;
    }
  else if (object_type)
    {
      tree object_decl = NULL_TREE;
      /* Look up the name in the scope of the OBJECT_TYPE, unless the
	 OBJECT_TYPE is not a class.  */
      if (CLASS_TYPE_P (object_type))
	/* If the OBJECT_TYPE is a template specialization, it may
	   be instantiated during name lookup.  In that case, errors
	   may be issued.  Even if we rollback the current tentative
	   parse, those errors are valid.  */
	object_decl = lookup_member (object_type,
				     name,
				     /*protect=*/0,
				     tag_type != none_type,
				     tf_warning_or_error);
      /* Look it up in the enclosing context, too.  */
      decl = lookup_name_real (name, tag_type != none_type,
			       /*nonclass=*/0,
			       /*block_p=*/true, is_namespace, flags);
      parser->object_scope = object_type;
      parser->qualifying_scope = NULL_TREE;
      /* A member found in the object's class takes precedence over
	 anything found in the enclosing context.  */
      if (object_decl)
	decl = object_decl;
    }
  else
    {
      decl = lookup_name_real (name, tag_type != none_type,
			       /*nonclass=*/0,
			       /*block_p=*/true, is_namespace, flags);
      parser->qualifying_scope = NULL_TREE;
      parser->object_scope = NULL_TREE;
    }

  /* If the lookup failed, let our caller know.  */
  if (!decl || decl == error_mark_node)
    return error_mark_node;

  /* Pull out the template from an injected-class-name (or multiple).  */
  if (is_template)
    decl = maybe_get_template_decl_from_type_decl (decl);

  /* If it's a TREE_LIST, the result of the lookup was ambiguous.  */
  if (TREE_CODE (decl) == TREE_LIST)
    {
      if (ambiguous_decls)
	*ambiguous_decls = decl;
      /* The error message we have to print is too complicated for
	 cp_parser_error, so we incorporate its actions directly.  */
      if (!cp_parser_simulate_error (parser))
	{
	  error_at (name_location, "reference to %qD is ambiguous",
		    name);
	  print_candidates (decl);
	}
      return error_mark_node;
    }

  gcc_assert (DECL_P (decl)
	      || TREE_CODE (decl) == OVERLOAD
	      || TREE_CODE (decl) == SCOPE_REF
	      || TREE_CODE (decl) == UNBOUND_CLASS_TEMPLATE
	      || BASELINK_P (decl));

  /* If we have resolved the name of a member declaration, check to
     see if the declaration is accessible.  When the name resolves to
     set of overloaded functions, accessibility is checked when
     overload resolution is done.

     During an explicit instantiation, access is not checked at all,
     as per [temp.explicit].  */
  if (DECL_P (decl))
    check_accessibility_of_qualified_id (decl, object_type, parser->scope);

  maybe_record_typedef_use (decl);

  return decl;
}

/* Like cp_parser_lookup_name, but for use in the typical case where
   CHECK_ACCESS is TRUE, IS_TYPE is FALSE, IS_TEMPLATE is FALSE,
   IS_NAMESPACE is FALSE, and CHECK_DEPENDENCY is TRUE.
*/
static tree
cp_parser_lookup_name_simple (cp_parser* parser, tree name, location_t location)
{
  return cp_parser_lookup_name (parser, name,
				none_type,
				/*is_template=*/false,
				/*is_namespace=*/false,
				/*check_dependency=*/true,
				/*ambiguous_decls=*/NULL,
				location);
}

/* If DECL is a TEMPLATE_DECL that can be treated like a TYPE_DECL in
   the current context, return the TYPE_DECL.  If TAG_NAME_P is
   true, the DECL indicates the class being defined in a class-head,
   or declared in an elaborated-type-specifier.

   Otherwise, return DECL.  */

static tree
cp_parser_maybe_treat_template_as_class (tree decl, bool tag_name_p)
{
  /* If the TEMPLATE_DECL is being declared as part of a class-head,
     the translation from TEMPLATE_DECL to TYPE_DECL occurs:

       struct A {
	 template <typename T> struct B;
       };

       template <typename T> struct A::B {};

     Similarly, in an elaborated-type-specifier:

       namespace N { struct X{}; }

       struct A {
	 template <typename T> friend struct N::X;
       };

     However, if the DECL refers to a class type, and we are in
     the scope of the class, then the name lookup automatically
     finds the TYPE_DECL created by build_self_reference rather
     than a TEMPLATE_DECL.  For example, in:

       template <class T> struct S {
	 S s;
       };

     there is no need to handle such case.  */

  if (DECL_CLASS_TEMPLATE_P (decl) && tag_name_p)
    return DECL_TEMPLATE_RESULT (decl);

  return decl;
}

/* If too many, or too few, template-parameter lists apply to the
   declarator, issue an error message.  Returns TRUE if all went well,
   and FALSE otherwise.  */

static bool
cp_parser_check_declarator_template_parameters (cp_parser* parser,
						cp_declarator *declarator,
						location_t declarator_location)
{
  unsigned num_templates;

  /* We haven't seen any classes that involve template parameters yet.  */
  num_templates = 0;

  switch (declarator->kind)
    {
    case cdk_id:
      if (declarator->u.id.qualifying_scope)
	{
	  tree scope;

	  scope = declarator->u.id.qualifying_scope;

	  /* Walk outward through the enclosing class scopes, counting
	     how many of them are primary templates.  */
	  while (scope && CLASS_TYPE_P (scope))
	    {
	      /* You're supposed to have one `template <...>' for every
		 template class, but you don't need one for a full
		 specialization.  For example:

		 template <class T> struct S{};
		 template <> struct S<int> { void f(); };
		 void S<int>::f () {}

		 is correct; there shouldn't be a `template <>' for
		 the definition of `S<int>::f'.  */
	      if (!CLASSTYPE_TEMPLATE_INFO (scope))
		/* If SCOPE does not have template information of any
		   kind, then it is not a template, nor is it nested
		   within a template.  */
		break;
	      if (explicit_class_specialization_p (scope))
		break;
	      if (PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (scope)))
		++num_templates;

	      scope = TYPE_CONTEXT (scope);
	    }
	}
      else if (TREE_CODE (declarator->u.id.unqualified_name)
	       == TEMPLATE_ID_EXPR)
	/* If the DECLARATOR has the form `X<y>' then it uses one
	   additional level of template parameters.  */
	++num_templates;

      return cp_parser_check_template_parameters
	(parser, num_templates, declarator_location, declarator);

    case cdk_function:
    case cdk_array:
    case cdk_pointer:
    case cdk_reference:
    case cdk_ptrmem:
      /* Recurse on the declarator these forms wrap.  */
      return (cp_parser_check_declarator_template_parameters
	      (parser, declarator->declarator, declarator_location));

    case cdk_error:
      return true;

    default:
      gcc_unreachable ();
    }
  return false;
}

/* NUM_TEMPLATES were used in the current declaration.  If that is
   invalid, return FALSE and issue an error message.  Otherwise,
   return TRUE.  If DECLARATOR is non-NULL, then we are checking a
   declarator and we can print more accurate diagnostics.  */

static bool
cp_parser_check_template_parameters (cp_parser* parser,
				     unsigned num_templates,
				     location_t location,
				     cp_declarator *declarator)
{
  /* If there are the same number of template classes and parameter
     lists, that's OK.  */
  if (parser->num_template_parameter_lists == num_templates)
    return true;
  /* If there are more, but only one more, then we are referring to a
     member template.  That's OK too.  */
  if (parser->num_template_parameter_lists == num_templates + 1)
    return true;
  /* If there are more template classes than parameter lists, we have
     something like:

       template <class T> void S<T>::R<T>::f ();  */
  if (parser->num_template_parameter_lists < num_templates)
    {
      if (declarator && !current_function_decl)
	error_at (location, "specializing member %<%T::%E%> "
		  "requires %<template<>%> syntax",
		  declarator->u.id.qualifying_scope,
		  declarator->u.id.unqualified_name);
      else if (declarator)
	error_at (location, "invalid declaration of %<%T::%E%>",
		  declarator->u.id.qualifying_scope,
		  declarator->u.id.unqualified_name);
      else
	error_at (location, "too few template-parameter-lists");
      return false;
    }
  /* Otherwise, there are too many template parameter lists.  We have
     something like:

     template <class T> template <class U> void S::f();  */
  error_at (location, "too many template-parameter-lists");
  return false;
}

/* Parse an optional `::' token indicating that the following name is
   from the global namespace.  If so, PARSER->SCOPE is set to the
   GLOBAL_NAMESPACE. Otherwise, PARSER->SCOPE is set to NULL_TREE,
   unless CURRENT_SCOPE_VALID_P is TRUE, in which case it is left alone.
   Returns the new value of PARSER->SCOPE, if the `::' token is
   present, and NULL_TREE otherwise.  */

static tree
cp_parser_global_scope_opt (cp_parser* parser, bool current_scope_valid_p)
{
  cp_token *token;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If we're looking at a `::' token then we're starting from the
     global namespace, not our current location.  */
  if (token->type == CPP_SCOPE)
    {
      /* Consume the `::' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Set the SCOPE so that we know where to start the lookup.
*/
      parser->scope = global_namespace;
      parser->qualifying_scope = global_namespace;
      parser->object_scope = NULL_TREE;

      return parser->scope;
    }
  else if (!current_scope_valid_p)
    {
      parser->scope = NULL_TREE;
      parser->qualifying_scope = NULL_TREE;
      parser->object_scope = NULL_TREE;
    }

  return NULL_TREE;
}

/* Returns TRUE if the upcoming token sequence is the start of a
   constructor declarator.  If FRIEND_P is true, the declarator is
   preceded by the `friend' specifier.  The parse is entirely
   tentative: all tokens consumed here are rolled back before
   returning.  */

static bool
cp_parser_constructor_declarator_p (cp_parser *parser, bool friend_p)
{
  bool constructor_p;
  tree nested_name_specifier;
  cp_token *next_token;

  /* The common case is that this is not a constructor declarator, so
     try to avoid doing lots of work if at all possible.  It's not
     valid to declare a constructor at function scope.  */
  if (parser->in_function_body)
    return false;
  /* And only certain tokens can begin a constructor declarator.  */
  next_token = cp_lexer_peek_token (parser->lexer);
  if (next_token->type != CPP_NAME
      && next_token->type != CPP_SCOPE
      && next_token->type != CPP_NESTED_NAME_SPECIFIER
      && next_token->type != CPP_TEMPLATE_ID)
    return false;
  /* Parse tentatively; we are going to roll back all of the tokens
     consumed here.  */
  cp_parser_parse_tentatively (parser);
  /* Assume that we are looking at a constructor declarator.  */
  constructor_p = true;

  /* Look for the optional `::' operator.  */
  cp_parser_global_scope_opt (parser,
			      /*current_scope_valid_p=*/false);
  /* Look for the nested-name-specifier.  */
  nested_name_specifier
    = (cp_parser_nested_name_specifier_opt (parser,
					    /*typename_keyword_p=*/false,
					    /*check_dependency_p=*/false,
					    /*type_p=*/false,
					    /*is_declaration=*/false));
  /* Outside of a class-specifier, there must be a
     nested-name-specifier.  */
  if (!nested_name_specifier
      && (!at_class_scope_p ()
	  || !TYPE_BEING_DEFINED (current_class_type)
	  || friend_p))
    constructor_p = false;
  else if (nested_name_specifier == error_mark_node)
    constructor_p = false;

  /* If we have a class scope, this is easy; DR 147 says that S::S always
     names the constructor, and no other qualified name could.  */
  if (constructor_p && nested_name_specifier
      && CLASS_TYPE_P (nested_name_specifier))
    {
      tree id = cp_parser_unqualified_id (parser,
					  /*template_keyword_p=*/false,
					  /*check_dependency_p=*/false,
					  /*declarator_p=*/true,
					  /*optional_p=*/false);
      if (is_overloaded_fn (id))
	id = DECL_NAME (get_first_fn (id));
      if (!constructor_name_p (id, nested_name_specifier))
	constructor_p = false;
    }
  /* If we still think that this might be a constructor-declarator,
     look for a class-name.  */
  else if (constructor_p)
    {
      /* If we have:

	   template <typename T> struct S {
	     S();
	   };

	 we must recognize that the nested `S' names a class.  */
      tree type_decl;
      type_decl = cp_parser_class_name (parser,
					/*typename_keyword_p=*/false,
					/*template_keyword_p=*/false,
					none_type,
					/*check_dependency_p=*/false,
					/*class_head_p=*/false,
					/*is_declaration=*/false);
      /* If there was no class-name, then this is not a constructor.  */
      constructor_p = !cp_parser_error_occurred (parser);

      /* If we're still considering a constructor, we have to see a `(',
	 to begin the parameter-declaration-clause, followed by either a
	 `)', an `...', or a decl-specifier.  We need to check for a
	 type-specifier to avoid being fooled into thinking that:

	   S (f) (int);

	 is a constructor.  (It is actually a function named `f' that
	 takes one parameter (of type `int') and returns a value of type
	 `S'.)  */
      if (constructor_p
	  && !cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
	constructor_p = false;

      if (constructor_p
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN)
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_ELLIPSIS)
	  /* A parameter declaration begins with a decl-specifier,
	     which is either the "attribute" keyword, a storage class
	     specifier, or (usually) a type-specifier.  */
	  && !cp_lexer_next_token_is_decl_specifier_keyword (parser->lexer))
	{
	  tree type;
	  tree pushed_scope = NULL_TREE;
	  unsigned saved_num_template_parameter_lists;

	  /* Names appearing in the type-specifier should be looked up
	     in the scope of the class.  */
	  if (current_class_type)
	    type = NULL_TREE;
	  else
	    {
	      type = TREE_TYPE (type_decl);
	      if (TREE_CODE (type) == TYPENAME_TYPE)
		{
		  type = resolve_typename_type (type,
						/*only_current_p=*/false);
		  if (TREE_CODE (type) == TYPENAME_TYPE)
		    {
		      /* The typename could not be resolved; give up
			 and roll back the tentative parse.  */
		      cp_parser_abort_tentative_parse (parser);
		      return false;
		    }
		}
	      pushed_scope = push_scope (type);
	    }

	  /* Inside the constructor parameter list, surrounding
	     template-parameter-lists do not apply.  */
	  saved_num_template_parameter_lists
	    = parser->num_template_parameter_lists;
	  parser->num_template_parameter_lists = 0;

	  /* Look for the type-specifier.  */
	  cp_parser_type_specifier (parser,
				    CP_PARSER_FLAGS_NONE,
				    /*decl_specs=*/NULL,
				    /*is_declarator=*/true,
				    /*declares_class_or_enum=*/NULL,
				    /*is_cv_qualifier=*/NULL);

	  parser->num_template_parameter_lists
	    = saved_num_template_parameter_lists;

	  /* Leave the scope of the class.  */
	  if (pushed_scope)
	    pop_scope (pushed_scope);

	  constructor_p = !cp_parser_error_occurred (parser);
	}
    }

  /* We did not really want to consume any tokens.  */
  cp_parser_abort_tentative_parse (parser);

  return constructor_p;
}

/* Parse the definition of the function given by the DECL_SPECIFIERS,
   ATTRIBUTES, and DECLARATOR.  The access checks have been deferred;
   they must be performed once we are in the scope of the function.

   Returns the function defined.
*/
static tree
cp_parser_function_definition_from_specifiers_and_declarator
  (cp_parser* parser,
   cp_decl_specifier_seq *decl_specifiers,
   tree attributes,
   const cp_declarator *declarator)
{
  tree fn;
  bool success_p;

  /* Begin the function-definition.  */
  success_p = start_function (decl_specifiers, declarator, attributes);

  /* The things we're about to see are not directly qualified by any
     template headers we've seen thus far.  */
  reset_specialization ();

  /* If there were names looked up in the decl-specifier-seq that we
     did not check, check them now.  We must wait until we are in the
     scope of the function to perform the checks, since the function
     might be a friend.  */
  perform_deferred_access_checks ();

  if (!success_p)
    {
      /* Skip the entire function.  */
      cp_parser_skip_to_end_of_block_or_statement (parser);
      fn = error_mark_node;
    }
  else if (DECL_INITIAL (current_function_decl) != error_mark_node)
    {
      /* Seen already, skip it.  An error message has already been output.  */
      cp_parser_skip_to_end_of_block_or_statement (parser);
      fn = current_function_decl;
      current_function_decl = NULL_TREE;
      /* If this is a function from a class, pop the nested class.  */
      if (current_class_name)
	pop_nested_class ();
    }
  else
    {
      timevar_id_t tv;
      /* Account parse time to the appropriate timevar bucket.  */
      if (DECL_DECLARED_INLINE_P (current_function_decl))
	tv = TV_PARSE_INLINE;
      else
	tv = TV_PARSE_FUNC;
      timevar_push (tv);
      fn = cp_parser_function_definition_after_declarator (parser,
							   /*inline_p=*/false);
      timevar_pop (tv);
    }

  return fn;
}

/* Parse the part of a function-definition that follows the
   declarator.  INLINE_P is TRUE iff this function is an inline
   function defined within a class-specifier.

   Returns the function defined.  */

static tree
cp_parser_function_definition_after_declarator (cp_parser* parser,
						bool inline_p)
{
  tree fn;
  bool ctor_initializer_p = false;
  bool saved_in_unbraced_linkage_specification_p;
  bool saved_in_function_body;
  unsigned saved_num_template_parameter_lists;
  cp_token *token;

  saved_in_function_body = parser->in_function_body;
  parser->in_function_body = true;
  /* If the next token is `return', then the code may be trying to
     make use of the "named return value" extension that G++ used to
     support.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_RETURN))
    {
      /* Consume the `return' keyword.  */
      cp_lexer_consume_token (parser->lexer);
      /* Look for the identifier that indicates what value is to be
	 returned.  */
      cp_parser_identifier (parser);
      /* Issue an error message.  */
      error_at (token->location,
		"named return values are no longer supported");
      /* Skip tokens until we reach the start of the function body.  */
      while (true)
	{
	  cp_token *token = cp_lexer_peek_token (parser->lexer);
	  if (token->type == CPP_OPEN_BRACE
	      || token->type == CPP_EOF
	      || token->type == CPP_PRAGMA_EOL)
	    break;
	  cp_lexer_consume_token (parser->lexer);
	}
    }
  /* The `extern' in `extern "C" void f () { ... }' does not apply to
     anything declared inside `f'.  */
  saved_in_unbraced_linkage_specification_p
    = parser->in_unbraced_linkage_specification_p;
  parser->in_unbraced_linkage_specification_p = false;
  /* Inside the function, surrounding template-parameter-lists do not
     apply.  */
  saved_num_template_parameter_lists
    = parser->num_template_parameter_lists;
  parser->num_template_parameter_lists = 0;

  start_lambda_scope (current_function_decl);

  /* If the next token is `try', `__transaction_atomic', or
     `__transaction_relaxed`, then we are looking at either function-try-block
     or function-transaction-block.  Note that all of these include the
     function-body.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TRANSACTION_ATOMIC))
    ctor_initializer_p = cp_parser_function_transaction (parser,
	RID_TRANSACTION_ATOMIC);
  else if (cp_lexer_next_token_is_keyword (parser->lexer,
      RID_TRANSACTION_RELAXED))
    ctor_initializer_p = cp_parser_function_transaction (parser,
	RID_TRANSACTION_RELAXED);
  else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TRY))
    ctor_initializer_p = cp_parser_function_try_block (parser);
  else
    ctor_initializer_p
      = cp_parser_ctor_initializer_opt_and_function_body (parser);

  finish_lambda_scope ();

  /* Finish the function.  The flag word encodes whether a
     ctor-initializer was seen (bit 0) and whether this was an
     in-class inline definition (bit 1).  */
  fn = finish_function ((ctor_initializer_p ? 1 : 0)
			| (inline_p ? 2 : 0));
  /* Generate code for it, if necessary.  */
  expand_or_defer_fn (fn);
  /* Restore the saved values.  */
  parser->in_unbraced_linkage_specification_p
    = saved_in_unbraced_linkage_specification_p;
  parser->num_template_parameter_lists
    = saved_num_template_parameter_lists;
  parser->in_function_body = saved_in_function_body;

  return fn;
}

/* Parse a template-declaration, assuming that the `export' (and
   `extern') keywords, if present, have already been scanned.  MEMBER_P
   is as for cp_parser_template_declaration.  */

static void
cp_parser_template_declaration_after_export (cp_parser* parser, bool member_p)
{
  tree decl = NULL_TREE;
  VEC (deferred_access_check,gc) *checks;
  tree parameter_list;
  bool friend_p = false;
  bool need_lang_pop;
  cp_token *token;

  /* Look for the `template' keyword.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (!cp_parser_require_keyword (parser, RID_TEMPLATE, RT_TEMPLATE))
    return;

  /* And the `<'.  */
  if (!cp_parser_require (parser, CPP_LESS, RT_LESS))
    return;
  if (at_class_scope_p () && current_function_decl)
    {
      /* 14.5.2.2 [temp.mem]

	 A local class shall not have member templates.  */
      error_at (token->location,
		"invalid declaration of member template in local class");
      cp_parser_skip_to_end_of_block_or_statement (parser);
      return;
    }
  /* [temp]

     A template ... shall not have C linkage.
*/
  if (current_lang_name == lang_name_c)
    {
      error_at (token->location, "template with C linkage");
      /* Give it C++ linkage to avoid confusing other parts of the
	 front end.  */
      push_lang_context (lang_name_cplusplus);
      need_lang_pop = true;
    }
  else
    need_lang_pop = false;

  /* We cannot perform access checks on the template parameter
     declarations until we know what is being declared, just as we
     cannot check the decl-specifier list.  */
  push_deferring_access_checks (dk_deferred);

  /* If the next token is `>', then we have an invalid
     specialization.  Rather than complain about an invalid template
     parameter, issue an error message here.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_GREATER))
    {
      cp_parser_error (parser, "invalid explicit specialization");
      begin_specialization ();
      parameter_list = NULL_TREE;
    }
  else
    {
      /* Parse the template parameters.  */
      parameter_list = cp_parser_template_parameter_list (parser);
      fixup_template_parms ();
    }

  /* Get the deferred access checks from the parameter list.  These
     will be checked once we know what is being declared, as for a
     member template the checks must be performed in the scope of the
     class containing the member.  */
  checks = get_deferred_access_checks ();

  /* Look for the `>'.  */
  cp_parser_skip_to_end_of_template_parameter_list (parser);
  /* We just processed one more parameter list.  */
  ++parser->num_template_parameter_lists;
  /* If the next token is `template', there are more template
     parameters; handle them recursively.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer,
				      RID_TEMPLATE))
    cp_parser_template_declaration_after_export (parser, member_p);
  else if (cxx_dialect >= cxx0x
	   && cp_lexer_next_token_is_keyword (parser->lexer, RID_USING))
    decl = cp_parser_alias_declaration (parser);
  else
    {
      /* There are no access checks when parsing a template, as we do not
	 know if a specialization will be a friend.  */
      push_deferring_access_checks (dk_no_check);
      token = cp_lexer_peek_token (parser->lexer);
      decl = cp_parser_single_declaration (parser,
					   checks,
					   member_p,
					   /*explicit_specialization_p=*/false,
					   &friend_p);
      pop_deferring_access_checks ();

      /* If this is a member template declaration, let the front
	 end know.  */
      if (member_p && !friend_p && decl)
	{
	  if (TREE_CODE (decl) == TYPE_DECL)
	    cp_parser_check_access_in_redeclaration (decl, token->location);

	  decl = finish_member_template_decl (decl);
	}
      else if (friend_p && decl
	       && TREE_CODE (decl) == TYPE_DECL)
	make_friend_class (current_class_type, TREE_TYPE (decl),
			   /*complain=*/true);
    }
  /* We are done with the current parameter list.  */
  --parser->num_template_parameter_lists;

  pop_deferring_access_checks ();

  /* Finish up.  */
  finish_template_decl (parameter_list);

  /* Check the template arguments for a literal operator template: it
     must have exactly one template parameter, a non-type parameter
     pack of type char.  */
  if (decl
      && (TREE_CODE (decl) == FUNCTION_DECL || DECL_FUNCTION_TEMPLATE_P (decl))
      && UDLIT_OPER_P (DECL_NAME (decl)))
    {
      bool ok = true;
      if (parameter_list == NULL_TREE)
	ok = false;
      else
	{
	  int num_parms = TREE_VEC_LENGTH (parameter_list);
	  if (num_parms != 1)
	    ok = false;
	  else
	    {
	      tree parm_list = TREE_VEC_ELT (parameter_list, 0);
	      tree parm = INNERMOST_TEMPLATE_PARMS (parm_list);
	      if (TREE_TYPE (parm) != char_type_node
		  || !TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm)))
		ok = false;
	    }
	}
      if (!ok)
	error ("literal operator template %qD has invalid parameter list."
	       "  Expected non-type template argument pack <char...>",
	       decl);
    }
  /* Register member declarations.  */
  if (member_p && !friend_p && decl && !DECL_CLASS_TEMPLATE_P (decl))
    finish_member_declaration (decl);
  /* For the erroneous case of a template with C linkage, we pushed an
     implicit C++ linkage scope; exit that scope now.  */
  if (need_lang_pop)
    pop_lang_context ();
  /* If DECL is a function template, we must return to parse it later.
     (Even though there is no definition, there might be default
     arguments that need handling.)  */
  if (member_p && decl
      && (TREE_CODE (decl) == FUNCTION_DECL
	  || DECL_FUNCTION_TEMPLATE_P (decl)))
    VEC_safe_push (tree, gc, unparsed_funs_with_definitions, decl);
}

/* Perform the deferred access checks from a template-parameter-list.
   CHECKS is a TREE_LIST of access checks, as returned by
   get_deferred_access_checks.  */

static void
cp_parser_perform_template_parameter_access_checks (VEC (deferred_access_check,gc)* checks)
{
  ++processing_template_parmlist;
  perform_access_checks (checks);
  --processing_template_parmlist;
}

/* Parse a `decl-specifier-seq [opt] init-declarator [opt] ;' or
   `function-definition' sequence.  If MEMBER_P is true, this
   declaration appears in a class scope.

   Returns the DECL for the declared entity.  If FRIEND_P is non-NULL,
   *FRIEND_P is set to TRUE iff the declaration is a friend.  */

static tree
cp_parser_single_declaration (cp_parser* parser,
			      VEC (deferred_access_check,gc)* checks,
			      bool member_p,
			      bool explicit_specialization_p,
			      bool* friend_p)
{
  int declares_class_or_enum;
  tree decl = NULL_TREE;
  cp_decl_specifier_seq decl_specifiers;
  bool function_definition_p = false;
  cp_token *decl_spec_token_start;

  /* This function is only used when processing a template
     declaration.  */
  gcc_assert (innermost_scope_kind () == sk_template_parms
	      || innermost_scope_kind () == sk_template_spec);

  /* Defer access checks until we know what is being declared.  */
  push_deferring_access_checks (dk_deferred);

  /* Try the `decl-specifier-seq [opt] init-declarator [opt]'
     alternative.  */
  decl_spec_token_start = cp_lexer_peek_token (parser->lexer);
  cp_parser_decl_specifier_seq (parser,
				CP_PARSER_FLAGS_OPTIONAL,
				&decl_specifiers,
				&declares_class_or_enum);
  if (friend_p)
    *friend_p = cp_parser_friend_p (&decl_specifiers);

  /* There are no template typedefs.  */
  if (decl_specifiers.specs[(int) ds_typedef])
    {
      error_at (decl_spec_token_start->location,
		"template declaration of %<typedef%>");
      decl = error_mark_node;
    }

  /* Gather up the access checks that occurred during the
     decl-specifier-seq.
*/
  stop_deferring_access_checks ();

  /* Check for the declaration of a template class.  */
  if (declares_class_or_enum)
    {
      if (cp_parser_declares_only_class_p (parser))
	{
	  decl = shadow_tag (&decl_specifiers);

	  /* In this case:

	       struct C {
		 friend template <typename T> struct A<T>::B;
	       };

	     A<T>::B will be represented by a TYPENAME_TYPE, and
	     therefore not recognized by shadow_tag.  */
	  if (friend_p && *friend_p
	      && !decl
	      && decl_specifiers.type
	      && TYPE_P (decl_specifiers.type))
	    decl = decl_specifiers.type;

	  if (decl && decl != error_mark_node)
	    decl = TYPE_NAME (decl);
	  else
	    decl = error_mark_node;

	  /* Perform access checks for template parameters.  */
	  cp_parser_perform_template_parameter_access_checks (checks);
	}
    }

  /* Complain about missing 'typename' or other invalid type names.  */
  if (!decl_specifiers.any_type_specifiers_p
      && cp_parser_parse_and_diagnose_invalid_type_name (parser))
    {
      /* cp_parser_parse_and_diagnose_invalid_type_name calls
	 cp_parser_skip_to_end_of_block_or_statement, so don't try to parse
	 the rest of this declaration.  */
      decl = error_mark_node;
      goto out;
    }

  /* If it's not a template class, try for a template function.  If
     the next token is a `;', then this declaration does not declare
     anything.  But, if there were errors in the decl-specifiers, then
     the error might well have come from an attempted class-specifier.
     In that case, there's no need to warn about a missing declarator.  */
  if (!decl
      && (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)
	  || decl_specifiers.type != error_mark_node))
    {
      decl = cp_parser_init_declarator (parser,
					&decl_specifiers,
					checks,
					/*function_definition_allowed_p=*/true,
					member_p,
					declares_class_or_enum,
					&function_definition_p,
					NULL);

      /* 7.1.1-1 [dcl.stc]

	 A storage-class-specifier shall not be specified in an explicit
	 specialization...  */
      if (decl
	  && explicit_specialization_p
	  && decl_specifiers.storage_class != sc_none)
	{
	  error_at (decl_spec_token_start->location,
		    "explicit template specialization cannot have a storage class");
	  decl = error_mark_node;
	}
    }

  /* Look for a trailing `;' after the declaration.  */
  if (!function_definition_p
      && (decl == error_mark_node
	  || !cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON)))
    cp_parser_skip_to_end_of_block_or_statement (parser);

 out:
  pop_deferring_access_checks ();

  /* Clear any current qualification; whatever comes next is the start
     of something new.  */
  parser->scope = NULL_TREE;
  parser->qualifying_scope = NULL_TREE;
  parser->object_scope = NULL_TREE;

  return decl;
}

/* Parse a cast-expression that is not the operand of a unary "&".  */

static tree
cp_parser_simple_cast_expression (cp_parser *parser)
{
  return cp_parser_cast_expression (parser,
				    /*address_p=*/false, /*cast_p=*/false,
				    NULL);
}

/* Parse a functional cast to TYPE.  Returns an expression
   representing the cast.  */

static tree
cp_parser_functional_cast (cp_parser* parser, tree type)
{
  VEC(tree,gc) *vec;
  tree expression_list;
  tree cast;
  bool nonconst_p;

  /* A braced-list is a C++0x list-initialization, e.g. T{1, 2}.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
      expression_list = cp_parser_braced_list (parser, &nonconst_p);
      CONSTRUCTOR_IS_DIRECT_INIT (expression_list) = 1;
      if (TREE_CODE (type) == TYPE_DECL)
	type = TREE_TYPE (type);
      return finish_compound_literal (type, expression_list,
				      tf_warning_or_error);
    }

  vec = cp_parser_parenthesized_expression_list (parser, non_attr,
						 /*cast_p=*/true,
						 /*allow_expansion_p=*/true,
						 /*non_constant_p=*/NULL);
  if (vec == NULL)
    expression_list = error_mark_node;
  else
    {
      expression_list = build_tree_list_vec (vec);
      release_tree_vector (vec);
    }

  cast = build_functional_cast (type, expression_list,
				tf_warning_or_error);
  /* [expr.const]/1: In an integral constant expression "only type
     conversions to integral or enumeration type can be used".  */
  if (TREE_CODE (type) == TYPE_DECL)
    type = TREE_TYPE (type);
  if (cast != error_mark_node
      && !cast_valid_in_integral_constant_expression_p (type)
      && cp_parser_non_integral_constant_expression (parser,
						     NIC_CONSTRUCTOR))
    return error_mark_node;
  return cast;
}

/* Save the tokens that make up the body of a member function defined
   in a class-specifier.  The DECL_SPECIFIERS and DECLARATOR have
   already been parsed.  The ATTRIBUTES are any GNU "__attribute__"
   specifiers applied to the declaration.  Returns the FUNCTION_DECL
   for the member function.  */

static tree
cp_parser_save_member_function_body (cp_parser* parser,
				     cp_decl_specifier_seq *decl_specifiers,
				     cp_declarator *declarator,
				     tree attributes)
{
  cp_token *first;
  cp_token *last;
  tree fn;

  /* Create the FUNCTION_DECL.  */
  fn = grokmethod (decl_specifiers, declarator, attributes);
  /* If something went badly wrong, bail out now.  */
  if (fn == error_mark_node)
    {
      /* If there's a function-body, skip it.  */
      if (cp_parser_token_starts_function_definition_p
	  (cp_lexer_peek_token (parser->lexer)))
	cp_parser_skip_to_end_of_block_or_statement (parser);
      return error_mark_node;
    }

  /* Remember it, if there are default args to post process.  */
  cp_parser_save_default_args (parser, fn);

  /* Save away the tokens that make up the body of the
     function.  */
  first = parser->lexer->next_token;
  /* We can have braced-init-list mem-initializers before the fn body.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
    {
      cp_lexer_consume_token (parser->lexer);
      while (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE)
	     && cp_lexer_next_token_is_not_keyword (parser->lexer, RID_TRY))
	{
	  /* cache_group will stop after an un-nested { } pair, too.  */
	  if (cp_parser_cache_group (parser, CPP_CLOSE_PAREN, /*depth=*/0))
	    break;

	  /* variadic mem-inits have ... after the ')'.
*/ if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)) cp_lexer_consume_token (parser->lexer); } } cp_parser_cache_group (parser, CPP_CLOSE_BRACE, /*depth=*/0); /* Handle function try blocks. */ while (cp_lexer_next_token_is_keyword (parser->lexer, RID_CATCH)) cp_parser_cache_group (parser, CPP_CLOSE_BRACE, /*depth=*/0); last = parser->lexer->next_token; /* Save away the inline definition; we will process it when the class is complete. */ DECL_PENDING_INLINE_INFO (fn) = cp_token_cache_new (first, last); DECL_PENDING_INLINE_P (fn) = 1; /* We need to know that this was defined in the class, so that friend templates are handled correctly. */ DECL_INITIALIZED_IN_CLASS_P (fn) = 1; /* Add FN to the queue of functions to be parsed later. */ VEC_safe_push (tree, gc, unparsed_funs_with_definitions, fn); return fn; } /* Save the tokens that make up the in-class initializer for a non-static data member. Returns a DEFAULT_ARG. */ static tree cp_parser_save_nsdmi (cp_parser* parser) { return cp_parser_cache_defarg (parser, /*nsdmi=*/true); } /* Parse a template-argument-list, as well as the trailing ">" (but not the opening "<"). See cp_parser_template_argument_list for the return value. */ static tree cp_parser_enclosed_template_argument_list (cp_parser* parser) { tree arguments; tree saved_scope; tree saved_qualifying_scope; tree saved_object_scope; bool saved_greater_than_is_operator_p; int saved_unevaluated_operand; int saved_inhibit_evaluation_warnings; /* [temp.names] When parsing a template-id, the first non-nested `>' is taken as the end of the template-argument-list rather than a greater-than operator. */ saved_greater_than_is_operator_p = parser->greater_than_is_operator_p; parser->greater_than_is_operator_p = false; /* Parsing the argument list may modify SCOPE, so we save it here. 
*/ saved_scope = parser->scope; saved_qualifying_scope = parser->qualifying_scope; saved_object_scope = parser->object_scope; /* We need to evaluate the template arguments, even though this template-id may be nested within a "sizeof". */ saved_unevaluated_operand = cp_unevaluated_operand; cp_unevaluated_operand = 0; saved_inhibit_evaluation_warnings = c_inhibit_evaluation_warnings; c_inhibit_evaluation_warnings = 0; /* Parse the template-argument-list itself. */ if (cp_lexer_next_token_is (parser->lexer, CPP_GREATER) || cp_lexer_next_token_is (parser->lexer, CPP_RSHIFT)) arguments = NULL_TREE; else arguments = cp_parser_template_argument_list (parser); /* Look for the `>' that ends the template-argument-list. If we find a '>>' instead, it's probably just a typo. */ if (cp_lexer_next_token_is (parser->lexer, CPP_RSHIFT)) { if (cxx_dialect != cxx98) { /* In C++0x, a `>>' in a template argument list or cast expression is considered to be two separate `>' tokens. So, change the current token to a `>', but don't consume it: it will be consumed later when the outer template argument list (or cast expression) is parsed. Note that this replacement of `>' for `>>' is necessary even if we are parsing tentatively: in the tentative case, after calling cp_parser_enclosed_template_argument_list we will always throw away all of the template arguments and the first closing `>', either because the template argument list was erroneous or because we are replacing those tokens with a CPP_TEMPLATE_ID token. The second `>' (which will not have been thrown away) is needed either to close an outer template argument list or to complete a new-style cast. */ cp_token *token = cp_lexer_peek_token (parser->lexer); token->type = CPP_GREATER; } else if (!saved_greater_than_is_operator_p) { /* If we're in a nested template argument list, the '>>' has to be a typo for '> >'. 
We emit the error message, but we continue parsing and we push a '>' as next token, so that the argument list will be parsed correctly. Note that the global source location is still on the token before the '>>', so we need to say explicitly where we want it. */ cp_token *token = cp_lexer_peek_token (parser->lexer); error_at (token->location, "%<>>%> should be %<> >%> " "within a nested template argument list"); token->type = CPP_GREATER; } else { /* If this is not a nested template argument list, the '>>' is a typo for '>'. Emit an error message and continue. Same deal about the token location, but here we can get it right by consuming the '>>' before issuing the diagnostic. */ cp_token *token = cp_lexer_consume_token (parser->lexer); error_at (token->location, "spurious %<>>%>, use %<>%> to terminate " "a template argument list"); } } else cp_parser_skip_to_end_of_template_parameter_list (parser); /* The `>' token might be a greater-than operator again now. */ parser->greater_than_is_operator_p = saved_greater_than_is_operator_p; /* Restore the SAVED_SCOPE. */ parser->scope = saved_scope; parser->qualifying_scope = saved_qualifying_scope; parser->object_scope = saved_object_scope; cp_unevaluated_operand = saved_unevaluated_operand; c_inhibit_evaluation_warnings = saved_inhibit_evaluation_warnings; return arguments; } /* MEMBER_FUNCTION is a member function, or a friend. If default arguments, or the body of the function have not yet been parsed, parse them now. */ static void cp_parser_late_parsing_for_member (cp_parser* parser, tree member_function) { timevar_push (TV_PARSE_INMETH); /* If this member is a template, get the underlying FUNCTION_DECL. */ if (DECL_FUNCTION_TEMPLATE_P (member_function)) member_function = DECL_TEMPLATE_RESULT (member_function); /* There should not be any class definitions in progress at this point; the bodies of members are only parsed outside of all class definitions. 
*/ gcc_assert (parser->num_classes_being_defined == 0); /* While we're parsing the member functions we might encounter more classes. We want to handle them right away, but we don't want them getting mixed up with functions that are currently in the queue. */ push_unparsed_function_queues (parser); /* Make sure that any template parameters are in scope. */ maybe_begin_member_template_processing (member_function); /* If the body of the function has not yet been parsed, parse it now. */ if (DECL_PENDING_INLINE_P (member_function)) { tree function_scope; cp_token_cache *tokens; /* The function is no longer pending; we are processing it. */ tokens = DECL_PENDING_INLINE_INFO (member_function); DECL_PENDING_INLINE_INFO (member_function) = NULL; DECL_PENDING_INLINE_P (member_function) = 0; /* If this is a local class, enter the scope of the containing function. */ function_scope = current_function_decl; if (function_scope) push_function_context (); /* Push the body of the function onto the lexer stack. */ cp_parser_push_lexer_for_tokens (parser, tokens); /* Let the front end know that we going to be defining this function. */ start_preparsed_function (member_function, NULL_TREE, SF_PRE_PARSED | SF_INCLASS_INLINE); /* Don't do access checking if it is a templated function. */ if (processing_template_decl) push_deferring_access_checks (dk_no_check); /* Now, parse the body of the function. */ cp_parser_function_definition_after_declarator (parser, /*inline_p=*/true); if (processing_template_decl) pop_deferring_access_checks (); /* Leave the scope of the containing function. */ if (function_scope) pop_function_context (); cp_parser_pop_lexer (parser); } /* Remove any template parameters from the symbol table. */ maybe_end_member_template_processing (); /* Restore the queue. */ pop_unparsed_function_queues (parser); timevar_pop (TV_PARSE_INMETH); } /* If DECL contains any default args, remember it on the unparsed functions queue. 
   */

static void
cp_parser_save_default_args (cp_parser* parser, tree decl)
{
  tree probe;

  /* Queue DECL (with its enclosing class) as soon as we find the
     first parameter that has a default argument.  */
  for (probe = TYPE_ARG_TYPES (TREE_TYPE (decl));
       probe;
       probe = TREE_CHAIN (probe))
    if (TREE_PURPOSE (probe))
      {
	cp_default_arg_entry *entry
	  = VEC_safe_push (cp_default_arg_entry, gc,
			   unparsed_funs_with_default_args, NULL);
	entry->class_type = current_class_type;
	entry->decl = decl;
	break;
      }
}

/* DEFAULT_ARG contains the saved tokens for the initializer of DECL,
   which is either a FIELD_DECL or PARM_DECL.  Parse it and return
   the result.  For a PARM_DECL, PARMTYPE is the corresponding type
   from the parameter-type-list.  */

static tree
cp_parser_late_parse_one_default_arg (cp_parser *parser, tree decl,
				      tree default_arg, tree parmtype)
{
  cp_token_cache *tokens;
  tree parsed_arg;
  bool dummy;

  if (default_arg == error_mark_node)
    return error_mark_node;

  /* Push the saved tokens for the default argument onto the parser's
     lexer stack.  */
  tokens = DEFARG_TOKENS (default_arg);
  cp_parser_push_lexer_for_tokens (parser, tokens);

  start_lambda_scope (decl);

  /* Parse the default argument.  */
  parsed_arg = cp_parser_initializer (parser, &dummy, &dummy);
  if (BRACE_ENCLOSED_INITIALIZER_P (parsed_arg))
    maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);

  finish_lambda_scope ();

  if (!processing_template_decl)
    {
      /* In a non-template class, check conversions now.  In a template,
	 we'll wait and instantiate these as needed.  */
      if (TREE_CODE (decl) == PARM_DECL)
	parsed_arg = check_default_argument (parmtype, parsed_arg);
      else
	{
	  int flags = LOOKUP_IMPLICIT;
	  if (BRACE_ENCLOSED_INITIALIZER_P (parsed_arg)
	      && CONSTRUCTOR_IS_DIRECT_INIT (parsed_arg))
	    flags = LOOKUP_NORMAL;
	  parsed_arg = digest_init_flags (TREE_TYPE (decl), parsed_arg, flags);
	}
    }

  /* If the token stream has not been completely used up, then there
     was extra junk after the end of the default argument.  */
  if (!cp_lexer_next_token_is (parser->lexer, CPP_EOF))
    {
      if (TREE_CODE (decl) == PARM_DECL)
	cp_parser_error (parser, "expected %<,%>");
      else
	cp_parser_error (parser, "expected %<;%>");
    }

  /* Revert to the main lexer.  */
  cp_parser_pop_lexer (parser);

  return parsed_arg;
}

/* FIELD is a non-static data member with an initializer which we saved for
   later; parse it now.  */

static void
cp_parser_late_parsing_nsdmi (cp_parser *parser, tree field)
{
  tree def;

  push_unparsed_function_queues (parser);
  def = cp_parser_late_parse_one_default_arg (parser, field,
					      DECL_INITIAL (field),
					      NULL_TREE);
  pop_unparsed_function_queues (parser);

  DECL_INITIAL (field) = def;
}

/* FN is a FUNCTION_DECL which may contain a parameter with an
   unparsed DEFAULT_ARG.  Parse the default args now.  This function
   assumes that the current scope is the scope in which the default
   argument should be processed.  */

static void
cp_parser_late_parsing_default_args (cp_parser *parser, tree fn)
{
  bool saved_local_variables_forbidden_p;
  tree parm, parmdecl;

  /* While we're parsing the default args, we might (due to the
     statement expression extension) encounter more classes.  We want
     to handle them right away, but we don't want them getting mixed
     up with default args that are currently in the queue.  */
  push_unparsed_function_queues (parser);

  /* Local variable names (and the `this' keyword) may not appear
     in a default argument.  */
  saved_local_variables_forbidden_p = parser->local_variables_forbidden_p;
  parser->local_variables_forbidden_p = true;

  push_defarg_context (fn);

  /* Walk the parameter-type-list and the PARM_DECL chain in
     parallel.  */
  for (parm = TYPE_ARG_TYPES (TREE_TYPE (fn)),
	 parmdecl = DECL_ARGUMENTS (fn);
       parm && parm != void_list_node;
       parm = TREE_CHAIN (parm),
	 parmdecl = DECL_CHAIN (parmdecl))
    {
      tree default_arg = TREE_PURPOSE (parm);
      tree parsed_arg;
      VEC(tree,gc) *insts;
      tree copy;
      unsigned ix;

      if (!default_arg)
	continue;

      if (TREE_CODE (default_arg) != DEFAULT_ARG)
	/* This can happen for a friend declaration for a function
	   already declared with default arguments.  */
	continue;

      parsed_arg
	= cp_parser_late_parse_one_default_arg (parser, parmdecl,
						default_arg,
						TREE_VALUE (parm));
      if (parsed_arg == error_mark_node)
	{
	  continue;
	}

      TREE_PURPOSE (parm) = parsed_arg;

      /* Update any instantiations we've already created.  */
      for (insts = DEFARG_INSTANTIATIONS (default_arg), ix = 0;
	   VEC_iterate (tree, insts, ix, copy); ix++)
	TREE_PURPOSE (copy) = parsed_arg;
    }

  pop_defarg_context ();

  /* Make sure no default arg is missing.  */
  check_default_args (fn);

  /* Restore the state of local_variables_forbidden_p.  */
  parser->local_variables_forbidden_p = saved_local_variables_forbidden_p;

  /* Restore the queue.  */
  pop_unparsed_function_queues (parser);
}

/* Parse the operand of `sizeof' (or a similar operator).  Returns
   either a TYPE or an expression, depending on the form of the
   input.  The KEYWORD indicates which kind of expression we have
   encountered.  */

static tree
cp_parser_sizeof_operand (cp_parser* parser, enum rid keyword)
{
  tree expr = NULL_TREE;
  const char *saved_message;
  char *tmp;
  bool saved_integral_constant_expression_p;
  bool saved_non_integral_constant_expression_p;
  bool pack_expansion_p = false;

  /* Types cannot be defined in a `sizeof' expression.  Save away the
     old message.  */
  saved_message = parser->type_definition_forbidden_message;
  /* And create the new one.  */
  tmp = concat ("types may not be defined in %<",
		IDENTIFIER_POINTER (ridpointers[keyword]),
		"%> expressions", NULL);
  parser->type_definition_forbidden_message = tmp;

  /* The restrictions on constant-expressions do not apply inside
     sizeof expressions.  */
  saved_integral_constant_expression_p
    = parser->integral_constant_expression_p;
  saved_non_integral_constant_expression_p
    = parser->non_integral_constant_expression_p;
  parser->integral_constant_expression_p = false;

  /* If it's a `...', then we are computing the length of a parameter
     pack.  */
  if (keyword == RID_SIZEOF
      && cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
    {
      /* Consume the `...'.  */
      cp_lexer_consume_token (parser->lexer);
      maybe_warn_variadic_templates ();

      /* Note that this is an expansion.  */
      pack_expansion_p = true;
    }

  /* Do not actually evaluate the expression.  */
  ++cp_unevaluated_operand;
  ++c_inhibit_evaluation_warnings;
  /* If it's a `(', then we might be looking at the type-id
     construction.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      tree type;
      bool saved_in_type_id_in_expr_p;

      /* We can't be sure yet whether we're looking at a type-id or an
	 expression.  */
      cp_parser_parse_tentatively (parser);
      /* Consume the `('.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the type-id.  */
      saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
      parser->in_type_id_in_expr_p = true;
      type = cp_parser_type_id (parser);
      parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
      /* Now, look for the trailing `)'.  */
      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      /* If all went well, then we're done.  */
      if (cp_parser_parse_definitely (parser))
	{
	  cp_decl_specifier_seq decl_specs;

	  /* Build a trivial decl-specifier-seq.  */
	  clear_decl_specs (&decl_specs);
	  decl_specs.type = type;

	  /* Call grokdeclarator to figure out what type this is.  */
	  expr = grokdeclarator (NULL,
				 &decl_specs,
				 TYPENAME,
				 /*initialized=*/0,
				 /*attrlist=*/NULL);
	}
    }

  /* If the type-id production did not work out, then we must be
     looking at the unary-expression production.  */
  if (!expr)
    expr = cp_parser_unary_expression (parser, /*address_p=*/false,
				       /*cast_p=*/false, NULL);

  if (pack_expansion_p)
    /* Build a pack expansion.  */
    expr = make_pack_expansion (expr);

  /* Go back to evaluating expressions.  */
  --cp_unevaluated_operand;
  --c_inhibit_evaluation_warnings;

  /* Free the message we created.  */
  free (tmp);
  /* And restore the old one.  */
  parser->type_definition_forbidden_message = saved_message;
  parser->integral_constant_expression_p
    = saved_integral_constant_expression_p;
  parser->non_integral_constant_expression_p
    = saved_non_integral_constant_expression_p;

  return expr;
}

/* If the current declaration has no declarator, return true.  */

static bool
cp_parser_declares_only_class_p (cp_parser *parser)
{
  /* If the next token is a `;' or a `,' then there is no
     declarator.  */
  return (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)
	  || cp_lexer_next_token_is (parser->lexer, CPP_COMMA));
}

/* Update the DECL_SPECS to reflect the storage class indicated by
   KEYWORD.  */

static void
cp_parser_set_storage_class (cp_parser *parser,
			     cp_decl_specifier_seq *decl_specs,
			     enum rid keyword,
			     location_t location)
{
  cp_storage_class storage_class;

  if (parser->in_unbraced_linkage_specification_p)
    {
      error_at (location, "invalid use of %qD in linkage specification",
		ridpointers[keyword]);
      return;
    }
  else if (decl_specs->storage_class != sc_none)
    {
      decl_specs->conflicting_specifiers_p = true;
      return;
    }

  if ((keyword == RID_EXTERN || keyword == RID_STATIC)
      && decl_specs->specs[(int) ds_thread])
    {
      error_at (location, "%<__thread%> before %qD", ridpointers[keyword]);
      decl_specs->specs[(int) ds_thread] = 0;
    }

  switch (keyword)
    {
    case RID_AUTO:
      storage_class = sc_auto;
      break;
    case RID_REGISTER:
      storage_class = sc_register;
      break;
    case RID_STATIC:
      storage_class = sc_static;
      break;
    case RID_EXTERN:
      storage_class = sc_extern;
      break;
    case RID_MUTABLE:
      storage_class = sc_mutable;
      break;
    default:
      gcc_unreachable ();
    }
  decl_specs->storage_class = storage_class;

  /* A storage class specifier cannot be applied alongside a typedef
     specifier.  If there is a typedef specifier present then set
     conflicting_specifiers_p which will trigger an error later
     on in grokdeclarator.  */
  if (decl_specs->specs[(int)ds_typedef])
    decl_specs->conflicting_specifiers_p = true;
}

/* Update the DECL_SPECS to reflect the TYPE_SPEC.
   If TYPE_DEFINITION_P is true, the type is a class or enum
   definition.  */

static void
cp_parser_set_decl_spec_type (cp_decl_specifier_seq *decl_specs,
			      tree type_spec,
			      location_t location,
			      bool type_definition_p)
{
  decl_specs->any_specifiers_p = true;

  /* If the user tries to redeclare bool, char16_t, char32_t, or wchar_t
     (with, for example, in "typedef int wchar_t;") we remember that
     this is what happened.  In system headers, we ignore these
     declarations so that G++ can work with system headers that are not
     C++-safe.  */
  if (decl_specs->specs[(int) ds_typedef]
      && !type_definition_p
      && (type_spec == boolean_type_node
	  || type_spec == char16_type_node
	  || type_spec == char32_type_node
	  || type_spec == wchar_type_node)
      && (decl_specs->type
	  || decl_specs->specs[(int) ds_long]
	  || decl_specs->specs[(int) ds_short]
	  || decl_specs->specs[(int) ds_unsigned]
	  || decl_specs->specs[(int) ds_signed]))
    {
      decl_specs->redefined_builtin_type = type_spec;
      if (!decl_specs->type)
	{
	  decl_specs->type = type_spec;
	  decl_specs->type_definition_p = false;
	  decl_specs->type_location = location;
	}
    }
  else if (decl_specs->type)
    /* A second type-specifier; grokdeclarator will complain.  */
    decl_specs->multiple_types_p = true;
  else
    {
      decl_specs->type = type_spec;
      decl_specs->type_definition_p = type_definition_p;
      decl_specs->redefined_builtin_type = NULL_TREE;
      decl_specs->type_location = location;
    }
}

/* DECL_SPECIFIERS is the representation of a decl-specifier-seq.
   Returns TRUE iff `friend' appears among the DECL_SPECIFIERS.  */

static bool
cp_parser_friend_p (const cp_decl_specifier_seq *decl_specifiers)
{
  return decl_specifiers->specs[(int) ds_friend] != 0;
}

/* Issue an error message indicating that TOKEN_DESC was expected.
   If KEYWORD is true, it indicates this function is called by
   cp_parser_require_keyword and the required token can only be the
   indicated keyword.
   */

static void
cp_parser_required_error (cp_parser *parser,
			  required_token token_desc,
			  bool keyword)
{
  /* Keywords first: these diagnostics are valid whether or not the
     caller was specifically requiring a keyword.  */
  switch (token_desc)
    {
      case RT_NEW:
	cp_parser_error (parser, "expected %<new%>");
	return;
      case RT_DELETE:
	cp_parser_error (parser, "expected %<delete%>");
	return;
      case RT_RETURN:
	cp_parser_error (parser, "expected %<return%>");
	return;
      case RT_WHILE:
	cp_parser_error (parser, "expected %<while%>");
	return;
      case RT_EXTERN:
	cp_parser_error (parser, "expected %<extern%>");
	return;
      case RT_STATIC_ASSERT:
	cp_parser_error (parser, "expected %<static_assert%>");
	return;
      case RT_DECLTYPE:
	cp_parser_error (parser, "expected %<decltype%>");
	return;
      case RT_OPERATOR:
	cp_parser_error (parser, "expected %<operator%>");
	return;
      case RT_CLASS:
	cp_parser_error (parser, "expected %<class%>");
	return;
      case RT_TEMPLATE:
	cp_parser_error (parser, "expected %<template%>");
	return;
      case RT_NAMESPACE:
	cp_parser_error (parser, "expected %<namespace%>");
	return;
      case RT_USING:
	cp_parser_error (parser, "expected %<using%>");
	return;
      case RT_ASM:
	cp_parser_error (parser, "expected %<asm%>");
	return;
      case RT_TRY:
	cp_parser_error (parser, "expected %<try%>");
	return;
      case RT_CATCH:
	cp_parser_error (parser, "expected %<catch%>");
	return;
      case RT_THROW:
	cp_parser_error (parser, "expected %<throw%>");
	return;
      case RT_LABEL:
	cp_parser_error (parser, "expected %<__label__%>");
	return;
      case RT_AT_TRY:
	cp_parser_error (parser, "expected %<@try%>");
	return;
      case RT_AT_SYNCHRONIZED:
	cp_parser_error (parser, "expected %<@synchronized%>");
	return;
      case RT_AT_THROW:
	cp_parser_error (parser, "expected %<@throw%>");
	return;
      case RT_TRANSACTION_ATOMIC:
	cp_parser_error (parser, "expected %<__transaction_atomic%>");
	return;
      case RT_TRANSACTION_RELAXED:
	cp_parser_error (parser, "expected %<__transaction_relaxed%>");
	return;
      default:
	break;
    }

  /* Punctuators and other non-keyword tokens; these are only valid
     when the caller was not requiring a keyword.  */
  if (!keyword)
    {
      switch (token_desc)
	{
	  case RT_SEMICOLON:
	    cp_parser_error (parser, "expected %<;%>");
	    return;
	  case RT_OPEN_PAREN:
	    cp_parser_error (parser, "expected %<(%>");
	    return;
	  case RT_CLOSE_BRACE:
	    cp_parser_error (parser, "expected %<}%>");
	    return;
	  case RT_OPEN_BRACE:
	    cp_parser_error (parser, "expected %<{%>");
	    return;
	  case RT_CLOSE_SQUARE:
	    cp_parser_error (parser, "expected %<]%>");
	    return;
	  case RT_OPEN_SQUARE:
	    cp_parser_error (parser, "expected %<[%>");
	    return;
	  case RT_COMMA:
	    cp_parser_error (parser, "expected %<,%>");
	    return;
	  case RT_SCOPE:
	    cp_parser_error (parser, "expected %<::%>");
	    return;
	  case RT_LESS:
	    cp_parser_error (parser, "expected %<<%>");
	    return;
	  case RT_GREATER:
	    cp_parser_error (parser, "expected %<>%>");
	    return;
	  case RT_EQ:
	    cp_parser_error (parser, "expected %<=%>");
	    return;
	  case RT_ELLIPSIS:
	    cp_parser_error (parser, "expected %<...%>");
	    return;
	  case RT_MULT:
	    cp_parser_error (parser, "expected %<*%>");
	    return;
	  case RT_COMPL:
	    cp_parser_error (parser, "expected %<~%>");
	    return;
	  case RT_COLON:
	    cp_parser_error (parser, "expected %<:%>");
	    return;
	  case RT_COLON_SCOPE:
	    cp_parser_error (parser, "expected %<:%> or %<::%>");
	    return;
	  case RT_CLOSE_PAREN:
	    cp_parser_error (parser, "expected %<)%>");
	    return;
	  case RT_COMMA_CLOSE_PAREN:
	    cp_parser_error (parser, "expected %<,%> or %<)%>");
	    return;
	  case RT_PRAGMA_EOL:
	    cp_parser_error (parser, "expected end of line");
	    return;
	  case RT_NAME:
	    cp_parser_error (parser, "expected identifier");
	    return;
	  case RT_SELECT:
	    cp_parser_error (parser, "expected selection-statement");
	    return;
	  case RT_INTERATION:
	    cp_parser_error (parser, "expected iteration-statement");
	    return;
	  case RT_JUMP:
	    cp_parser_error (parser, "expected jump-statement");
	    return;
	  case RT_CLASS_KEY:
	    cp_parser_error (parser, "expected class-key");
	    return;
	  case RT_CLASS_TYPENAME_TEMPLATE:
	    cp_parser_error (parser,
		 "expected %<class%>, %<typename%>, or %<template%>");
	    return;
	  default:
	    gcc_unreachable ();
	}
    }
  else
    gcc_unreachable ();
}

/* If the next token is of the indicated TYPE, consume it.  Otherwise,
   issue an error message indicating that TOKEN_DESC was expected.

   Returns the token consumed, if the token had the appropriate type.
   Otherwise, returns NULL.  */

static cp_token *
cp_parser_require (cp_parser* parser,
		   enum cpp_ttype type,
		   required_token token_desc)
{
  if (cp_lexer_next_token_is (parser->lexer, type))
    return cp_lexer_consume_token (parser->lexer);
  else
    {
      /* Output the MESSAGE -- unless we're parsing tentatively.  */
      if (!cp_parser_simulate_error (parser))
	cp_parser_required_error (parser, token_desc, /*keyword=*/false);
      return NULL;
    }
}

/* An error message is produced if the next token is not '>'.
   All further tokens are skipped until the desired token is
   found or '{', '}', ';' or an unbalanced ')' or ']'.  */

static void
cp_parser_skip_to_end_of_template_parameter_list (cp_parser* parser)
{
  /* Current level of '< ... >'.  */
  unsigned level = 0;
  /* Ignore '<' and '>' nested inside '( ... )' or '[ ... ]'.  */
  unsigned nesting_depth = 0;

  /* If the next token is already the `>' we want, consume it and we
     are done; otherwise cp_parser_require has issued the error and we
     skip ahead looking for it.  */
  if (cp_parser_require (parser, CPP_GREATER, RT_GREATER))
    return;

  /* Skip tokens until the desired token is found.  */
  while (true)
    {
      /* Peek at the next token.  */
      switch (cp_lexer_peek_token (parser->lexer)->type)
	{
	case CPP_LESS:
	  if (!nesting_depth)
	    ++level;
	  break;

	case CPP_RSHIFT:
	  if (cxx_dialect == cxx98)
	    /* C++0x views the `>>' operator as two `>' tokens, but
	       C++98 does not.  */
	    break;
	  else if (!nesting_depth && level-- == 0)
	    {
	      /* We've hit a `>>' where the first `>' closes the
		 template argument list, and the second `>' is
		 spurious.  Just consume the `>>' and stop; we've
		 already produced at least one error.  */
	      cp_lexer_consume_token (parser->lexer);
	      return;
	    }
	  /* Fall through for C++0x, so we handle the second `>' in
	     the `>>'.  */

	case CPP_GREATER:
	  if (!nesting_depth && level-- == 0)
	    {
	      /* We've reached the token we want, consume it and stop.  */
	      cp_lexer_consume_token (parser->lexer);
	      return;
	    }
	  break;

	case CPP_OPEN_PAREN:
	case CPP_OPEN_SQUARE:
	  ++nesting_depth;
	  break;

	case CPP_CLOSE_PAREN:
	case CPP_CLOSE_SQUARE:
	  if (nesting_depth-- == 0)
	    return;
	  break;

	case CPP_EOF:
	case CPP_PRAGMA_EOL:
	case CPP_SEMICOLON:
	case CPP_OPEN_BRACE:
	case CPP_CLOSE_BRACE:
	  /* The '>' was probably forgotten, don't look further.  */
	  return;

	default:
	  break;
	}

      /* Consume this token.  */
      cp_lexer_consume_token (parser->lexer);
    }
}

/* If the next token is the indicated keyword, consume it.  Otherwise,
   issue an error message indicating that TOKEN_DESC was expected.

   Returns the token consumed, if the token had the appropriate type.
   Otherwise, returns NULL.  */

static cp_token *
cp_parser_require_keyword (cp_parser* parser,
			   enum rid keyword,
			   required_token token_desc)
{
  cp_token *token = cp_parser_require (parser, CPP_KEYWORD, token_desc);

  if (token && token->keyword != keyword)
    {
      cp_parser_required_error (parser, token_desc, /*keyword=*/true);
      return NULL;
    }

  return token;
}

/* Returns TRUE iff TOKEN is a token that can begin the body of a
   function-definition.  */

static bool
cp_parser_token_starts_function_definition_p (cp_token* token)
{
  return (/* An ordinary function-body begins with an `{'.  */
	  token->type == CPP_OPEN_BRACE
	  /* A ctor-initializer begins with a `:'.  */
	  || token->type == CPP_COLON
	  /* A function-try-block begins with `try'.  */
	  || token->keyword == RID_TRY
	  /* A function-transaction-block begins with `__transaction_atomic'
	     or `__transaction_relaxed'.  */
	  || token->keyword == RID_TRANSACTION_ATOMIC
	  || token->keyword == RID_TRANSACTION_RELAXED
	  /* The named return value extension begins with `return'.  */
	  || token->keyword == RID_RETURN);
}

/* Returns TRUE iff the next token is the ":" or "{" beginning a class
   definition.
*/ static bool cp_parser_next_token_starts_class_definition_p (cp_parser *parser) { cp_token *token; token = cp_lexer_peek_token (parser->lexer); return (token->type == CPP_OPEN_BRACE || token->type == CPP_COLON); } /* Returns TRUE iff the next token is the "," or ">" (or `>>', in C++0x) ending a template-argument. */ static bool cp_parser_next_token_ends_template_argument_p (cp_parser *parser) { cp_token *token; token = cp_lexer_peek_token (parser->lexer); return (token->type == CPP_COMMA || token->type == CPP_GREATER || token->type == CPP_ELLIPSIS || ((cxx_dialect != cxx98) && token->type == CPP_RSHIFT)); } /* Returns TRUE iff the n-th token is a "<", or the n-th is a "[" and the (n+1)-th is a ":" (which is a possible digraph typo for "< ::"). */ static bool cp_parser_nth_token_starts_template_argument_list_p (cp_parser * parser, size_t n) { cp_token *token; token = cp_lexer_peek_nth_token (parser->lexer, n); if (token->type == CPP_LESS) return true; /* Check for the sequence `<::' in the original code. It would be lexed as `[:', where `[' is a digraph, and there is no whitespace before `:'. */ if (token->type == CPP_OPEN_SQUARE && token->flags & DIGRAPH) { cp_token *token2; token2 = cp_lexer_peek_nth_token (parser->lexer, n+1); if (token2->type == CPP_COLON && !(token2->flags & PREV_WHITE)) return true; } return false; } /* Returns the kind of tag indicated by TOKEN, if it is a class-key, or none_type otherwise. */ static enum tag_types cp_parser_token_is_class_key (cp_token* token) { switch (token->keyword) { case RID_CLASS: return class_type; case RID_STRUCT: return record_type; case RID_UNION: return union_type; default: return none_type; } } /* Issue an error message if the CLASS_KEY does not match the TYPE. 
   */

static void
cp_parser_check_class_key (enum tag_types class_key, tree type)
{
  if (type == error_mark_node)
    return;
  /* Complain if, e.g., `struct' was used to name a union.  */
  if ((TREE_CODE (type) == UNION_TYPE) != (class_key == union_type))
    {
      permerror (input_location, "%qs tag used in naming %q#T",
		 class_key == union_type ? "union"
		 : class_key == record_type ? "struct" : "class",
		 type);
      inform (DECL_SOURCE_LOCATION (TYPE_NAME (type)),
	      "%q#T was previously declared here", type);
    }
}

/* Issue an error message if DECL is redeclared with different
   access than its original declaration [class.access.spec/3].
   This applies to nested classes and nested class templates.
   [class.mem/1].  */

static void
cp_parser_check_access_in_redeclaration (tree decl, location_t location)
{
  if (!decl
      || !CLASS_TYPE_P (TREE_TYPE (decl)))
    return;

  if ((TREE_PRIVATE (decl)
       != (current_access_specifier == access_private_node))
      || (TREE_PROTECTED (decl)
	  != (current_access_specifier == access_protected_node)))
    error_at (location, "%qD redeclared with different access", decl);
}

/* Look for the `template' keyword, as a syntactic disambiguator.
   Return TRUE iff it is present, in which case it will be
   consumed.  */

static bool
cp_parser_optional_template_keyword (cp_parser *parser)
{
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE))
    {
      /* The `template' keyword can only be used within templates;
	 outside templates the parser can always figure out what is a
	 template and what is not.  */
      if (!processing_template_decl)
	{
	  cp_token *token = cp_lexer_peek_token (parser->lexer);
	  error_at (token->location,
		    "%<template%> (as a disambiguator) is only allowed "
		    "within templates");
	  /* If this part of the token stream is rescanned, the same
	     error message would be generated.  So, we purge the token
	     from the stream.  */
	  cp_lexer_purge_token (parser->lexer);
	  return false;
	}
      else
	{
	  /* Consume the `template' keyword.  */
	  cp_lexer_consume_token (parser->lexer);
	  return true;
	}
    }

  return false;
}

/* The next token is a CPP_NESTED_NAME_SPECIFIER.
   Consume the token, set PARSER->SCOPE, and perform other related
   actions.  */

static void
cp_parser_pre_parsed_nested_name_specifier (cp_parser *parser)
{
  int i;
  struct tree_check *check_value;
  deferred_access_check *chk;
  VEC (deferred_access_check,gc) *checks;

  /* Get the stored value.  */
  check_value = cp_lexer_consume_token (parser->lexer)->u.tree_check_value;
  /* Perform any access checks that were deferred.  */
  checks = check_value->checks;
  if (checks)
    {
      FOR_EACH_VEC_ELT (deferred_access_check, checks, i, chk)
	perform_or_defer_access_check (chk->binfo,
				       chk->decl,
				       chk->diag_decl);
    }
  /* Set the scope from the stored value.  */
  parser->scope = check_value->value;
  parser->qualifying_scope = check_value->qualifying_scope;
  parser->object_scope = NULL_TREE;
}

/* Consume tokens up through a non-nested END token.  Returns TRUE if we
   encounter the end of a block before what we were looking for.  */

static bool
cp_parser_cache_group (cp_parser *parser,
		       enum cpp_ttype end,
		       unsigned depth)
{
  while (true)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);

      /* Abort a parenthesized expression if we encounter a
	 semicolon.  */
      if ((end == CPP_CLOSE_PAREN || depth == 0)
	  && token->type == CPP_SEMICOLON)
	return true;
      /* If we've reached the end of the file, stop.  */
      if (token->type == CPP_EOF
	  || (end != CPP_PRAGMA_EOL
	      && token->type == CPP_PRAGMA_EOL))
	return true;
      if (token->type == CPP_CLOSE_BRACE && depth == 0)
	/* We've hit the end of an enclosing block, so there's been some
	   kind of syntax error.  */
	return true;

      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
      /* See if it starts a new group.  */
      if (token->type == CPP_OPEN_BRACE)
	{
	  cp_parser_cache_group (parser, CPP_CLOSE_BRACE, depth + 1);
	  /* In theory this should probably check end == '}', but
	     cp_parser_save_member_function_body needs it to exit
	     after either '}' or ')' when called with ')'.  */
	  if (depth == 0)
	    return false;
	}
      else if (token->type == CPP_OPEN_PAREN)
	{
	  cp_parser_cache_group (parser, CPP_CLOSE_PAREN, depth + 1);
	  if (depth == 0 && end == CPP_CLOSE_PAREN)
	    return false;
	}
      else if (token->type == CPP_PRAGMA)
	cp_parser_cache_group (parser, CPP_PRAGMA_EOL, depth + 1);
      else if (token->type == end)
	return false;
    }
}

/* Like above, for caching a default argument or NSDMI.  Both of these are
   terminated by a non-nested comma, but it can be unclear whether or not a
   comma is nested in a template argument list unless we do more parsing.
   In order to handle this ambiguity, when we encounter a ',' after a '<'
   we try to parse what follows as a parameter-declaration-list (in the
   case of a default argument) or a member-declarator (in the case of an
   NSDMI).  If that succeeds, then we stop caching.  */

static tree
cp_parser_cache_defarg (cp_parser *parser, bool nsdmi)
{
  unsigned depth = 0;
  int maybe_template_id = 0;
  cp_token *first_token;
  cp_token *token;
  tree default_argument;

  /* Add tokens until we have processed the entire default
     argument.  We add the range [first_token, token).  */
  first_token = cp_lexer_peek_token (parser->lexer);
  if (first_token->type == CPP_OPEN_BRACE)
    {
      /* For list-initialization, this is straightforward.  */
      cp_parser_cache_group (parser, CPP_CLOSE_BRACE, /*depth=*/0);
      token = cp_lexer_peek_token (parser->lexer);
    }
  else while (true)
    {
      bool done = false;

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* What we do depends on what token we have.  */
      switch (token->type)
	{
	  /* In valid code, a default argument must be
	     immediately followed by a `,' `)', or `...'.  */
	case CPP_COMMA:
	  if (depth == 0 && maybe_template_id)
	    {
	      /* If we've seen a '<', we might be in a
		 template-argument-list.  Until Core issue 325 is
		 resolved, we don't know how this situation ought
		 to be handled, so try to DTRT.  We check whether
		 what comes after the comma is a valid parameter
		 declaration list.  If it is, then the comma ends
		 the default argument; otherwise the default
		 argument continues.  */
	      bool error = false;
	      tree t;

	      /* Set ITALP so cp_parser_parameter_declaration_list
		 doesn't decide to commit to this parse.  */
	      bool saved_italp = parser->in_template_argument_list_p;
	      parser->in_template_argument_list_p = true;

	      cp_parser_parse_tentatively (parser);
	      cp_lexer_consume_token (parser->lexer);

	      if (nsdmi)
		{
		  int ctor_dtor_or_conv_p;
		  cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
					&ctor_dtor_or_conv_p,
					/*parenthesized_p=*/NULL,
					/*member_p=*/true);
		}
	      else
		{
		  begin_scope (sk_function_parms, NULL_TREE);
		  cp_parser_parameter_declaration_list (parser, &error);
		  /* Pop the parameter bindings pushed by the tentative
		     parameter-list parse before leaving the scope.  */
		  for (t = current_binding_level->names; t; t = DECL_CHAIN (t))
		    pop_binding (DECL_NAME (t), t);
		  leave_scope ();
		}
	      if (!cp_parser_error_occurred (parser) && !error)
		done = true;
	      cp_parser_abort_tentative_parse (parser);

	      parser->in_template_argument_list_p = saved_italp;
	      break;
	    }
	  /* FALLTHRU when the comma does not end the argument.  */
	case CPP_CLOSE_PAREN:
	case CPP_ELLIPSIS:
	  /* If we run into a non-nested `;', `}', or `]',
	     then the code is invalid -- but the default
	     argument is certainly over.  */
	case CPP_SEMICOLON:
	case CPP_CLOSE_BRACE:
	case CPP_CLOSE_SQUARE:
	  if (depth == 0)
	    done = true;
	  /* Update DEPTH, if necessary.  */
	  else if (token->type == CPP_CLOSE_PAREN
		   || token->type == CPP_CLOSE_BRACE
		   || token->type == CPP_CLOSE_SQUARE)
	    --depth;
	  break;

	case CPP_OPEN_PAREN:
	case CPP_OPEN_SQUARE:
	case CPP_OPEN_BRACE:
	  ++depth;
	  break;

	case CPP_LESS:
	  if (depth == 0)
	    /* This might be the comparison operator, or it might
	       start a template argument list.  */
	    ++maybe_template_id;
	  break;

	case CPP_RSHIFT:
	  if (cxx_dialect == cxx98)
	    break;
	  /* Fall through for C++0x, which treats the `>>'
	     operator like two `>' tokens in certain cases.  */

	case CPP_GREATER:
	  if (depth == 0)
	    {
	      /* This might be an operator, or it might close a
		 template argument list.  But if a previous '<'
		 started a template argument list, this will have
		 closed it, so we can't be in one anymore.  */
	      maybe_template_id -= 1 + (token->type == CPP_RSHIFT);
	      if (maybe_template_id < 0)
		maybe_template_id = 0;
	    }
	  break;

	  /* If we run out of tokens, issue an error message.  */
	case CPP_EOF:
	case CPP_PRAGMA_EOL:
	  error_at (token->location, "file ends in default argument");
	  done = true;
	  break;

	case CPP_NAME:
	case CPP_SCOPE:
	  /* In these cases, we should look for template-ids.
	     For example, if the default argument is
	     `X<int, double>()', we need to do name lookup to
	     figure out whether or not `X' is a template; if
	     so, the `,' does not end the default argument.

	     That is not yet done.  */
	  break;

	default:
	  break;
	}

      /* If we've reached the end, stop.  */
      if (done)
	break;

      /* Add the token to the token block.  */
      token = cp_lexer_consume_token (parser->lexer);
    }

  /* Create a DEFAULT_ARG to represent the unparsed default
     argument.  */
  default_argument = make_node (DEFAULT_ARG);
  DEFARG_TOKENS (default_argument)
    = cp_token_cache_new (first_token, token);
  DEFARG_INSTANTIATIONS (default_argument) = NULL;

  return default_argument;
}

/* Begin parsing tentatively.  We always save tokens while parsing
   tentatively so that if the tentative parsing fails we can restore the
   tokens.  */

static void
cp_parser_parse_tentatively (cp_parser* parser)
{
  /* Enter a new parsing context.  */
  parser->context = cp_parser_context_new (parser->context);
  /* Begin saving tokens.  */
  cp_lexer_save_tokens (parser->lexer);
  /* In order to avoid repetitive access control error messages,
     access checks are queued up until we are no longer parsing
     tentatively.  */
  push_deferring_access_checks (dk_deferred);
}

/* Commit to the currently active tentative parse.  */

static void
cp_parser_commit_to_tentative_parse (cp_parser* parser)
{
  cp_parser_context *context;
  cp_lexer *lexer;

  /* Mark all of the levels as committed.
     */
  lexer = parser->lexer;
  for (context = parser->context; context->next; context = context->next)
    {
      if (context->status == CP_PARSER_STATUS_KIND_COMMITTED)
	break;
      context->status = CP_PARSER_STATUS_KIND_COMMITTED;
      /* Find the lexer that is actually saving tokens for this
	 context before committing them.  */
      while (!cp_lexer_saving_tokens (lexer))
	lexer = lexer->next;
      cp_lexer_commit_tokens (lexer);
    }
}

/* Abort the currently active tentative parse.  All consumed tokens
   will be rolled back, and no diagnostics will be issued.  */

static void
cp_parser_abort_tentative_parse (cp_parser* parser)
{
  gcc_assert (parser->context->status != CP_PARSER_STATUS_KIND_COMMITTED
	      || errorcount > 0);
  cp_parser_simulate_error (parser);
  /* Now, pretend that we want to see if the construct was
     successfully parsed.  */
  cp_parser_parse_definitely (parser);
}

/* Stop parsing tentatively.  If a parse error has occurred, restore the
   token stream.  Otherwise, commit to the tokens we have consumed.
   Returns true if no error occurred; false otherwise.  */

static bool
cp_parser_parse_definitely (cp_parser* parser)
{
  bool error_occurred;
  cp_parser_context *context;

  /* Remember whether or not an error occurred, since we are about to
     destroy that information.  */
  error_occurred = cp_parser_error_occurred (parser);
  /* Remove the topmost context from the stack.  */
  context = parser->context;
  parser->context = context->next;
  /* If no parse errors occurred, commit to the tentative parse.  */
  if (!error_occurred)
    {
      /* Commit to the tokens read tentatively, unless that was
	 already done.  */
      if (context->status != CP_PARSER_STATUS_KIND_COMMITTED)
	cp_lexer_commit_tokens (parser->lexer);

      pop_to_parent_deferring_access_checks ();
    }
  /* Otherwise, if errors occurred, roll back our state so that things
     are just as they were before we began the tentative parse.  */
  else
    {
      cp_lexer_rollback_tokens (parser->lexer);
      pop_deferring_access_checks ();
    }
  /* Add the context to the front of the free list.  */
  context->next = cp_parser_context_free_list;
  cp_parser_context_free_list = context;

  return !error_occurred;
}

/* Returns true if we are parsing tentatively and are not committed to
   this tentative parse.  */

static bool
cp_parser_uncommitted_to_tentative_parse_p (cp_parser* parser)
{
  return (cp_parser_parsing_tentatively (parser)
	  && parser->context->status != CP_PARSER_STATUS_KIND_COMMITTED);
}

/* Returns nonzero iff an error has occurred during the most recent
   tentative parse.  */

static bool
cp_parser_error_occurred (cp_parser* parser)
{
  return (cp_parser_parsing_tentatively (parser)
	  && parser->context->status == CP_PARSER_STATUS_KIND_ERROR);
}

/* Returns nonzero if GNU extensions are allowed.  */

static bool
cp_parser_allow_gnu_extensions_p (cp_parser* parser)
{
  return parser->allow_gnu_extensions_p;
}

/* Objective-C++ Productions */


/* Parse an Objective-C expression, which feeds into a primary-expression
   above.

   objc-expression:
     objc-message-expression
     objc-string-literal
     objc-encode-expression
     objc-protocol-expression
     objc-selector-expression

  Returns a tree representation of the expression.  */

static tree
cp_parser_objc_expression (cp_parser* parser)
{
  /* Try to figure out what kind of declaration is present.  */
  cp_token *kwd = cp_lexer_peek_token (parser->lexer);

  switch (kwd->type)
    {
    case CPP_OPEN_SQUARE:
      return cp_parser_objc_message_expression (parser);

    case CPP_OBJC_STRING:
      kwd = cp_lexer_consume_token (parser->lexer);
      return objc_build_string_object (kwd->u.value);

    case CPP_KEYWORD:
      switch (kwd->keyword)
	{
	case RID_AT_ENCODE:
	  return cp_parser_objc_encode_expression (parser);

	case RID_AT_PROTOCOL:
	  return cp_parser_objc_protocol_expression (parser);

	case RID_AT_SELECTOR:
	  return cp_parser_objc_selector_expression (parser);

	default:
	  break;
	}
      /* FALLTHRU: any other @-keyword is diagnosed below.  */
    default:
      error_at (kwd->location,
		"misplaced %<@%D%> Objective-C++ construct",
		kwd->u.value);
      cp_parser_skip_to_end_of_block_or_statement (parser);
    }

  return error_mark_node;
}

/* Parse an Objective-C message expression.
   objc-message-expression:
     [ objc-message-receiver objc-message-args ]

   Returns a representation of an Objective-C message.  */

static tree
cp_parser_objc_message_expression (cp_parser* parser)
{
  tree receiver, messageargs;

  cp_lexer_consume_token (parser->lexer);  /* Eat '['.  */
  receiver = cp_parser_objc_message_receiver (parser);
  messageargs = cp_parser_objc_message_args (parser);
  cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);

  return objc_build_message_expr (receiver, messageargs);
}

/* Parse an objc-message-receiver.

   objc-message-receiver:
     expression
     simple-type-specifier

  Returns a representation of the type or expression.  */

static tree
cp_parser_objc_message_receiver (cp_parser* parser)
{
  tree rcv;

  /* An Objective-C message receiver may be either (1) a type
     or (2) an expression.  Try the expression first; on failure
     fall back to a type (class) reference.  */
  cp_parser_parse_tentatively (parser);
  rcv = cp_parser_expression (parser, false, NULL);

  if (cp_parser_parse_definitely (parser))
    return rcv;

  rcv = cp_parser_simple_type_specifier (parser,
					 /*decl_specs=*/NULL,
					 CP_PARSER_FLAGS_NONE);
  return objc_get_class_reference (rcv);
}

/* Parse the arguments and selectors comprising an Objective-C message.

   objc-message-args:
     objc-selector
     objc-selector-args
     objc-selector-args , objc-comma-args

   objc-selector-args:
     objc-selector [opt] : assignment-expression
     objc-selector-args objc-selector [opt] : assignment-expression

   objc-comma-args:
     assignment-expression
     objc-comma-args , assignment-expression

   Returns a TREE_LIST, with TREE_PURPOSE containing a list of selector
   arguments and TREE_VALUE containing a list of comma arguments.  */

static tree
cp_parser_objc_message_args (cp_parser* parser)
{
  tree sel_args = NULL_TREE, addl_args = NULL_TREE;
  bool maybe_unary_selector_p = true;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  while (cp_parser_objc_selector_p (token->type) || token->type == CPP_COLON)
    {
      tree selector = NULL_TREE, arg;

      if (token->type != CPP_COLON)
	selector = cp_parser_objc_selector (parser);

      /* Detect if we have a unary selector.  */
      if (maybe_unary_selector_p
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_COLON))
	return build_tree_list (selector, NULL_TREE);

      maybe_unary_selector_p = false;
      cp_parser_require (parser, CPP_COLON, RT_COLON);
      arg = cp_parser_assignment_expression (parser, false, NULL);

      sel_args
	= chainon (sel_args,
		   build_tree_list (selector, arg));

      token = cp_lexer_peek_token (parser->lexer);
    }

  /* Handle non-selector arguments, if any.  */
  while (token->type == CPP_COMMA)
    {
      tree arg;

      cp_lexer_consume_token (parser->lexer);
      arg = cp_parser_assignment_expression (parser, false, NULL);

      addl_args
	= chainon (addl_args,
		   build_tree_list (NULL_TREE, arg));

      token = cp_lexer_peek_token (parser->lexer);
    }

  if (sel_args == NULL_TREE && addl_args == NULL_TREE)
    {
      cp_parser_error (parser, "objective-c++ message argument(s) are expected");
      return build_tree_list (error_mark_node, error_mark_node);
    }

  return build_tree_list (sel_args, addl_args);
}

/* Parse an Objective-C encode expression.

   objc-encode-expression:
     @encode objc-typename

   Returns an encoded representation of the type argument.  */

static tree
cp_parser_objc_encode_expression (cp_parser* parser)
{
  tree type;
  cp_token *token;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@encode'.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  token = cp_lexer_peek_token (parser->lexer);
  type = complete_type (cp_parser_type_id (parser));
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  if (!type)
    {
      error_at (token->location,
		"%<@encode%> must specify a type as an argument");
      return error_mark_node;
    }

  /* This happens if we find @encode(T) (where T is a template
     typename or something dependent on a template typename) when
     parsing a template.  In that case, we can't compile it
     immediately, but we rather create an AT_ENCODE_EXPR which will
     need to be instantiated when the template is used.  */
  if (dependent_type_p (type))
    {
      tree value = build_min (AT_ENCODE_EXPR, size_type_node, type);
      TREE_READONLY (value) = 1;
      return value;
    }

  return objc_build_encode_expr (type);
}

/* Parse an Objective-C @defs expression.  */

static tree
cp_parser_objc_defs_expression (cp_parser *parser)
{
  tree name;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@defs'.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  name = cp_parser_identifier (parser);
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  return objc_get_class_ivars (name);
}

/* Parse an Objective-C protocol expression.

  objc-protocol-expression:
    @protocol ( identifier )

  Returns a representation of the protocol expression.  */

static tree
cp_parser_objc_protocol_expression (cp_parser* parser)
{
  tree proto;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@protocol'.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  proto = cp_parser_identifier (parser);
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  return objc_build_protocol_expr (proto);
}

/* Parse an Objective-C selector expression.

   objc-selector-expression:
     @selector ( objc-method-signature )

   objc-method-signature:
     objc-selector
     objc-selector-seq

   objc-selector-seq:
     objc-selector :
     objc-selector-seq objc-selector :

  Returns a representation of the method selector.
   */

static tree
cp_parser_objc_selector_expression (cp_parser* parser)
{
  tree sel_seq = NULL_TREE;
  bool maybe_unary_selector_p = true;
  cp_token *token;
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@selector'.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  token = cp_lexer_peek_token (parser->lexer);

  while (cp_parser_objc_selector_p (token->type) || token->type == CPP_COLON
	 || token->type == CPP_SCOPE)
    {
      tree selector = NULL_TREE;

      /* NOTE(review): the `|| token->type == CPP_SCOPE' disjunct is
	 redundant -- a CPP_SCOPE token already satisfies
	 `token->type != CPP_COLON'.  Behavior is unchanged either way.  */
      if (token->type != CPP_COLON || token->type == CPP_SCOPE)
	selector = cp_parser_objc_selector (parser);

      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COLON)
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_SCOPE))
	{
	  /* Detect if we have a unary selector.  */
	  if (maybe_unary_selector_p)
	    {
	      sel_seq = selector;
	      goto finish_selector;
	    }
	  else
	    {
	      cp_parser_error (parser, "expected %<:%>");
	    }
	}
      maybe_unary_selector_p = false;
      token = cp_lexer_consume_token (parser->lexer);

      if (token->type == CPP_SCOPE)
	{
	  /* A `::' counts as two `:' -- append an extra empty slot.  */
	  sel_seq
	    = chainon (sel_seq,
		       build_tree_list (selector, NULL_TREE));
	  sel_seq
	    = chainon (sel_seq,
		       build_tree_list (NULL_TREE, NULL_TREE));
	}
      else
	sel_seq
	  = chainon (sel_seq,
		     build_tree_list (selector, NULL_TREE));

      token = cp_lexer_peek_token (parser->lexer);
    }

 finish_selector:
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  return objc_build_selector_expr (loc, sel_seq);
}

/* Parse a list of identifiers.

   objc-identifier-list:
     identifier
     objc-identifier-list , identifier

   Returns a TREE_LIST of identifier nodes.  */

static tree
cp_parser_objc_identifier_list (cp_parser* parser)
{
  tree identifier;
  tree list;
  cp_token *sep;

  identifier = cp_parser_identifier (parser);
  if (identifier == error_mark_node)
    return error_mark_node;

  list = build_tree_list (NULL_TREE, identifier);
  sep = cp_lexer_peek_token (parser->lexer);

  while (sep->type == CPP_COMMA)
    {
      cp_lexer_consume_token (parser->lexer);  /* Eat ','.  */
      identifier = cp_parser_identifier (parser);
      /* On error, return what has been collected so far.  */
      if (identifier == error_mark_node)
	return list;

      list = chainon (list, build_tree_list (NULL_TREE,
					     identifier));
      sep = cp_lexer_peek_token (parser->lexer);
    }

  return list;
}

/* Parse an Objective-C alias declaration.

  objc-alias-declaration:
    @compatibility_alias identifier identifier ;

  This function registers the alias mapping with the Objective-C front end.
  It returns nothing.  */

static void
cp_parser_objc_alias_declaration (cp_parser* parser)
{
  tree alias, orig;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@compatibility_alias'.  */
  alias = cp_parser_identifier (parser);
  orig = cp_parser_identifier (parser);
  objc_declare_alias (alias, orig);
  cp_parser_consume_semicolon_at_end_of_statement (parser);
}

/* Parse an Objective-C class forward-declaration.

   objc-class-declaration:
     @class objc-identifier-list ;

   The function registers the forward declarations with the Objective-C
   front end.  It returns nothing.  */

static void
cp_parser_objc_class_declaration (cp_parser* parser)
{
  cp_lexer_consume_token (parser->lexer);  /* Eat '@class'.  */
  while (true)
    {
      tree id;

      id = cp_parser_identifier (parser);
      if (id == error_mark_node)
	break;

      objc_declare_class (id);

      if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	cp_lexer_consume_token (parser->lexer);
      else
	break;
    }
  cp_parser_consume_semicolon_at_end_of_statement (parser);
}

/* Parse a list of Objective-C protocol references.

   objc-protocol-refs-opt:
     objc-protocol-refs [opt]

   objc-protocol-refs:
     < objc-identifier-list >

   Returns a TREE_LIST of identifiers, if any.  */

static tree
cp_parser_objc_protocol_refs_opt (cp_parser* parser)
{
  tree protorefs = NULL_TREE;

  if(cp_lexer_next_token_is (parser->lexer, CPP_LESS))
    {
      cp_lexer_consume_token (parser->lexer);  /* Eat '<'.  */
      protorefs = cp_parser_objc_identifier_list (parser);
      cp_parser_require (parser, CPP_GREATER, RT_GREATER);
    }

  return protorefs;
}

/* Parse a Objective-C visibility specification.
   */

static void
cp_parser_objc_visibility_spec (cp_parser* parser)
{
  cp_token *vis = cp_lexer_peek_token (parser->lexer);

  switch (vis->keyword)
    {
    case RID_AT_PRIVATE:
      objc_set_visibility (OBJC_IVAR_VIS_PRIVATE);
      break;
    case RID_AT_PROTECTED:
      objc_set_visibility (OBJC_IVAR_VIS_PROTECTED);
      break;
    case RID_AT_PUBLIC:
      objc_set_visibility (OBJC_IVAR_VIS_PUBLIC);
      break;
    case RID_AT_PACKAGE:
      objc_set_visibility (OBJC_IVAR_VIS_PACKAGE);
      break;
    default:
      /* Not a visibility keyword -- consume nothing.  */
      return;
    }

  /* Eat '@private'/'@protected'/'@public'.  */
  cp_lexer_consume_token (parser->lexer);
}

/* Parse an Objective-C method type.  Return 'true' if it is a class
   (+) method, and 'false' if it is an instance (-) method.  */

static inline bool
cp_parser_objc_method_type (cp_parser* parser)
{
  if (cp_lexer_consume_token (parser->lexer)->type == CPP_PLUS)
    return true;
  else
    return false;
}

/* Parse an Objective-C protocol qualifier.  */

static tree
cp_parser_objc_protocol_qualifiers (cp_parser* parser)
{
  tree quals = NULL_TREE, node;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  node = token->u.value;

  /* Collect consecutive in/out/inout/bycopy/byref/oneway qualifier
     keywords into a TREE_LIST (most recent first).  */
  while (node && TREE_CODE (node) == IDENTIFIER_NODE
	 && (node == ridpointers [(int) RID_IN]
	     || node == ridpointers [(int) RID_OUT]
	     || node == ridpointers [(int) RID_INOUT]
	     || node == ridpointers [(int) RID_BYCOPY]
	     || node == ridpointers [(int) RID_BYREF]
	     || node == ridpointers [(int) RID_ONEWAY]))
    {
      quals = tree_cons (NULL_TREE, node, quals);
      cp_lexer_consume_token (parser->lexer);
      token = cp_lexer_peek_token (parser->lexer);
      node = token->u.value;
    }

  return quals;
}

/* Parse an Objective-C typename.  */

static tree
cp_parser_objc_typename (cp_parser* parser)
{
  tree type_name = NULL_TREE;

  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      tree proto_quals, cp_type = NULL_TREE;

      cp_lexer_consume_token (parser->lexer);  /* Eat '('.  */
      proto_quals = cp_parser_objc_protocol_qualifiers (parser);

      /* An ObjC type name may consist of just protocol qualifiers, in which
	 case the type shall default to 'id'.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN))
	{
	  cp_type = cp_parser_type_id (parser);

	  /* If the type could not be parsed, an error has already
	     been produced.  For error recovery, behave as if it had
	     not been specified, which will use the default type
	     'id'.  */
	  if (cp_type == error_mark_node)
	    {
	      cp_type = NULL_TREE;
	      /* We need to skip to the closing parenthesis as
		 cp_parser_type_id() does not seem to do it for
		 us.  */
	      cp_parser_skip_to_closing_parenthesis (parser,
						     /*recovering=*/true,
						     /*or_comma=*/false,
						     /*consume_paren=*/false);
	    }
	}

      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      type_name = build_tree_list (proto_quals, cp_type);
    }

  return type_name;
}

/* Check to see if TYPE refers to an Objective-C selector name.  */

static bool
cp_parser_objc_selector_p (enum cpp_ttype type)
{
  return (type == CPP_NAME || type == CPP_KEYWORD
	  || type == CPP_AND_AND || type == CPP_AND_EQ || type == CPP_AND
	  || type == CPP_OR || type == CPP_COMPL || type == CPP_NOT
	  || type == CPP_NOT_EQ || type == CPP_OR_OR || type == CPP_OR_EQ
	  || type == CPP_XOR || type == CPP_XOR_EQ);
}

/* Parse an Objective-C selector.  */

static tree
cp_parser_objc_selector (cp_parser* parser)
{
  cp_token *token = cp_lexer_consume_token (parser->lexer);

  if (!cp_parser_objc_selector_p (token->type))
    {
      error_at (token->location, "invalid Objective-C++ selector name");
      return error_mark_node;
    }

  /* C++ operator names are allowed to appear in ObjC selectors; map
     each operator token to its alternative-token spelling.  */
  switch (token->type)
    {
    case CPP_AND_AND: return get_identifier ("and");
    case CPP_AND_EQ: return get_identifier ("and_eq");
    case CPP_AND: return get_identifier ("bitand");
    case CPP_OR: return get_identifier ("bitor");
    case CPP_COMPL: return get_identifier ("compl");
    case CPP_NOT: return get_identifier ("not");
    case CPP_NOT_EQ: return get_identifier ("not_eq");
    case CPP_OR_OR: return get_identifier ("or");
    case CPP_OR_EQ: return get_identifier ("or_eq");
    case CPP_XOR: return get_identifier ("xor");
    case CPP_XOR_EQ: return get_identifier ("xor_eq");
    default: return token->u.value;
    }
}

/* Parse an Objective-C params list.  */

static tree
cp_parser_objc_method_keyword_params (cp_parser* parser, tree* attributes)
{
  tree params = NULL_TREE;
  bool maybe_unary_selector_p = true;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  while (cp_parser_objc_selector_p (token->type) || token->type == CPP_COLON)
    {
      tree selector = NULL_TREE, type_name, identifier;
      tree parm_attr = NULL_TREE;

      if (token->keyword == RID_ATTRIBUTE)
	break;

      if (token->type != CPP_COLON)
	selector = cp_parser_objc_selector (parser);

      /* Detect if we have a unary selector.  */
      if (maybe_unary_selector_p
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_COLON))
	{
	  params = selector; /* Might be followed by attributes.  */
	  break;
	}

      maybe_unary_selector_p = false;
      if (!cp_parser_require (parser, CPP_COLON, RT_COLON))
	{
	  /* Something went quite wrong.  There should be a colon
	     here, but there is not.  Stop parsing parameters.  */
	  break;
	}
      type_name = cp_parser_objc_typename (parser);
      /* New ObjC allows attributes on parameters too.
	 */
      if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ATTRIBUTE))
	parm_attr = cp_parser_attributes_opt (parser);

      identifier = cp_parser_identifier (parser);

      params = chainon (params,
			objc_build_keyword_decl (selector,
						 type_name,
						 identifier,
						 parm_attr));

      token = cp_lexer_peek_token (parser->lexer);
    }

  if (params == NULL_TREE)
    {
      cp_parser_error (parser, "objective-c++ method declaration is expected");
      return error_mark_node;
    }

  /* We allow tail attributes for the method.  */
  if (token->keyword == RID_ATTRIBUTE)
    {
      *attributes = cp_parser_attributes_opt (parser);
      if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)
	  || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	return params;
      cp_parser_error (parser,
		       "method attributes must be specified at the end");
      return error_mark_node;
    }

  /* NOTE(review): this second NULL_TREE check is unreachable -- PARAMS
     was already verified non-null above.  Harmless, but dead code.  */
  if (params == NULL_TREE)
    {
      cp_parser_error (parser, "objective-c++ method declaration is expected");
      return error_mark_node;
    }
  return params;
}

/* Parse the non-keyword Objective-C params.  */

static tree
cp_parser_objc_method_tail_params_opt (cp_parser* parser, bool *ellipsisp,
				       tree* attributes)
{
  tree params = make_node (TREE_LIST);
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  *ellipsisp = false;  /* Initially, assume no ellipsis.  */

  while (token->type == CPP_COMMA)
    {
      cp_parameter_declarator *parmdecl;
      tree parm;

      cp_lexer_consume_token (parser->lexer);  /* Eat ','.  */
      token = cp_lexer_peek_token (parser->lexer);

      if (token->type == CPP_ELLIPSIS)
	{
	  cp_lexer_consume_token (parser->lexer);  /* Eat '...'.  */
	  *ellipsisp = true;
	  token = cp_lexer_peek_token (parser->lexer);
	  break;
	}

      /* TODO: parse attributes for tail parameters.  */
      parmdecl = cp_parser_parameter_declaration (parser, false, NULL);
      parm = grokdeclarator (parmdecl->declarator,
			     &parmdecl->decl_specifiers,
			     PARM, /*initialized=*/0,
			     /*attrlist=*/NULL);

      chainon (params, build_tree_list (NULL_TREE, parm));
      token = cp_lexer_peek_token (parser->lexer);
    }

  /* We allow tail attributes for the method.  */
  if (token->keyword == RID_ATTRIBUTE)
    {
      if (*attributes == NULL_TREE)
	{
	  *attributes = cp_parser_attributes_opt (parser);
	  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)
	      || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	    return params;
	}
      else
	/* We have an error, but parse the attributes, so that we can
	   carry on.  */
	*attributes = cp_parser_attributes_opt (parser);

      cp_parser_error (parser,
		       "method attributes must be specified at the end");
      return error_mark_node;
    }

  return params;
}

/* Parse a linkage specification, a pragma, an extra semicolon or a block.  */

static void
cp_parser_objc_interstitial_code (cp_parser* parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* If the next token is `extern' and the following token is a string
     literal, then we have a linkage specification.  */
  if (token->keyword == RID_EXTERN
      && cp_parser_is_pure_string_literal
	 (cp_lexer_peek_nth_token (parser->lexer, 2)))
    cp_parser_linkage_specification (parser);
  /* Handle #pragma, if any.  */
  else if (token->type == CPP_PRAGMA)
    cp_parser_pragma (parser, pragma_external);
  /* Allow stray semicolons.  */
  else if (token->type == CPP_SEMICOLON)
    cp_lexer_consume_token (parser->lexer);
  /* Mark methods as optional or required, when building protocols.  */
  else if (token->keyword == RID_AT_OPTIONAL)
    {
      cp_lexer_consume_token (parser->lexer);
      objc_set_method_opt (true);
    }
  else if (token->keyword == RID_AT_REQUIRED)
    {
      cp_lexer_consume_token (parser->lexer);
      objc_set_method_opt (false);
    }
  else if (token->keyword == RID_NAMESPACE)
    cp_parser_namespace_definition (parser);
  /* Other stray characters must generate errors.  */
  else if (token->type == CPP_OPEN_BRACE || token->type == CPP_CLOSE_BRACE)
    {
      cp_lexer_consume_token (parser->lexer);
      error ("stray %qs between Objective-C++ methods",
	     token->type == CPP_OPEN_BRACE ? "{" : "}");
    }
  /* Finally, try to parse a block-declaration, or a
     function-definition.  */
  else
    cp_parser_block_declaration (parser, /*statement_p=*/false);
}

/* Parse a method signature.  */

static tree
cp_parser_objc_method_signature (cp_parser* parser, tree* attributes)
{
  tree rettype, kwdparms, optparms;
  bool ellipsis = false;
  bool is_class_method;

  is_class_method = cp_parser_objc_method_type (parser);
  rettype = cp_parser_objc_typename (parser);
  *attributes = NULL_TREE;
  kwdparms = cp_parser_objc_method_keyword_params (parser, attributes);
  if (kwdparms == error_mark_node)
    return error_mark_node;
  optparms = cp_parser_objc_method_tail_params_opt (parser, &ellipsis,
						    attributes);
  if (optparms == error_mark_node)
    return error_mark_node;

  return objc_build_method_signature (is_class_method, rettype,
				      kwdparms, optparms, ellipsis);
}

/* Returns TRUE if prefix attributes are immediately followed by a
   method introducer (`+' or `-'), which is not allowed; in that case
   the attributes have been consumed.  Otherwise rewinds the token
   stream and returns FALSE.  */

static bool
cp_parser_objc_method_maybe_bad_prefix_attributes (cp_parser* parser)
{
  tree tattr;
  cp_lexer_save_tokens (parser->lexer);
  tattr = cp_parser_attributes_opt (parser);
  gcc_assert (tattr) ;

  /* If the attributes are followed by a method introducer, this is not
     allowed.  Dump the attributes and flag the situation.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_PLUS)
      || cp_lexer_next_token_is (parser->lexer, CPP_MINUS))
    return true;

  /* Otherwise, the attributes introduce some interstitial code, possibly so
     rewind to allow that check.  */
  cp_lexer_rollback_tokens (parser->lexer);
  return false;
}

/* Parse an Objective-C method prototype list.
*/ static void cp_parser_objc_method_prototype_list (cp_parser* parser) { cp_token *token = cp_lexer_peek_token (parser->lexer); while (token->keyword != RID_AT_END && token->type != CPP_EOF) { if (token->type == CPP_PLUS || token->type == CPP_MINUS) { tree attributes, sig; bool is_class_method; if (token->type == CPP_PLUS) is_class_method = true; else is_class_method = false; sig = cp_parser_objc_method_signature (parser, &attributes); if (sig == error_mark_node) { cp_parser_skip_to_end_of_block_or_statement (parser); token = cp_lexer_peek_token (parser->lexer); continue; } objc_add_method_declaration (is_class_method, sig, attributes); cp_parser_consume_semicolon_at_end_of_statement (parser); } else if (token->keyword == RID_AT_PROPERTY) cp_parser_objc_at_property_declaration (parser); else if (token->keyword == RID_ATTRIBUTE && cp_parser_objc_method_maybe_bad_prefix_attributes(parser)) warning_at (cp_lexer_peek_token (parser->lexer)->location, OPT_Wattributes, "prefix attributes are ignored for methods"); else /* Allow for interspersed non-ObjC++ code. */ cp_parser_objc_interstitial_code (parser); token = cp_lexer_peek_token (parser->lexer); } if (token->type != CPP_EOF) cp_lexer_consume_token (parser->lexer); /* Eat '@end'. */ else cp_parser_error (parser, "expected %<@end%>"); objc_finish_interface (); } /* Parse an Objective-C method definition list. 
   */

/* Parse an Objective-C method definition list: method definitions
   ('+'/'-' signature followed by a function body), @property,
   @synthesize and @dynamic declarations, and interspersed C++ code,
   up to '@end' or EOF.  Finishes with objc_finish_implementation.  */

static void
cp_parser_objc_method_definition_list (cp_parser* parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  while (token->keyword != RID_AT_END && token->type != CPP_EOF)
    {
      tree meth;

      if (token->type == CPP_PLUS || token->type == CPP_MINUS)
	{
	  cp_token *ptk;
	  tree sig, attribute;
	  bool is_class_method;
	  if (token->type == CPP_PLUS)
	    is_class_method = true;
	  else
	    is_class_method = false;
	  /* Defer access checks until we know whether a body follows;
	     they are performed just before parsing the body below.  */
	  push_deferring_access_checks (dk_deferred);
	  sig = cp_parser_objc_method_signature (parser, &attribute);
	  if (sig == error_mark_node)
	    {
	      cp_parser_skip_to_end_of_block_or_statement (parser);
	      token = cp_lexer_peek_token (parser->lexer);
	      continue;
	    }
	  objc_start_method_definition (is_class_method, sig, attribute,
					NULL_TREE);

	  /* For historical reasons, we accept an optional semicolon.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	    cp_lexer_consume_token (parser->lexer);

	  /* Only parse a function body if the next token does not
	     start another method or end the @implementation.  */
	  ptk = cp_lexer_peek_token (parser->lexer);
	  if (!(ptk->type == CPP_PLUS || ptk->type == CPP_MINUS
		|| ptk->type == CPP_EOF || ptk->keyword == RID_AT_END))
	    {
	      perform_deferred_access_checks ();
	      stop_deferring_access_checks ();
	      meth = cp_parser_function_definition_after_declarator (parser,
								     false);
	      pop_deferring_access_checks ();
	      objc_finish_method_definition (meth);
	    }
	  /* NOTE(review): on the path where no body follows (next
	     token is '+', '-', '@end' or EOF), the deferred-access-
	     check context pushed above does not appear to be popped
	     here — confirm whether a caller rebalances the stack.  */
	}
      /* The following case will be removed once @synthesize is
	 completely implemented.  */
      else if (token->keyword == RID_AT_PROPERTY)
	cp_parser_objc_at_property_declaration (parser);
      else if (token->keyword == RID_AT_SYNTHESIZE)
	cp_parser_objc_at_synthesize_declaration (parser);
      else if (token->keyword == RID_AT_DYNAMIC)
	cp_parser_objc_at_dynamic_declaration (parser);
      else if (token->keyword == RID_ATTRIBUTE
	       && cp_parser_objc_method_maybe_bad_prefix_attributes(parser))
	warning_at (token->location, OPT_Wattributes,
		    "prefix attributes are ignored for methods");
      else
	/* Allow for interspersed non-ObjC++ code.  */
	cp_parser_objc_interstitial_code (parser);

      token = cp_lexer_peek_token (parser->lexer);
    }

  if (token->type != CPP_EOF)
    cp_lexer_consume_token (parser->lexer);  /* Eat '@end'.  */
  else
    cp_parser_error (parser, "expected %<@end%>");

  objc_finish_implementation ();
}

/* Parse Objective-C ivars.

   Parses the optional brace-enclosed instance-variable block of an
   @interface/@implementation, handling visibility specifiers,
   bitfields, and attributes, and registering each ivar via
   objc_add_instance_variable.  Returns without consuming anything if
   no '{' is present.  */

static void
cp_parser_objc_class_ivars (cp_parser* parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  if (token->type != CPP_OPEN_BRACE)
    return;	/* No ivars specified.  */

  cp_lexer_consume_token (parser->lexer);  /* Eat '{'.  */
  token = cp_lexer_peek_token (parser->lexer);

  while (token->type != CPP_CLOSE_BRACE
	&& token->keyword != RID_AT_END && token->type != CPP_EOF)
    {
      cp_decl_specifier_seq declspecs;
      int decl_class_or_enum_p;
      tree prefix_attributes;

      cp_parser_objc_visibility_spec (parser);

      if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
	break;

      cp_parser_decl_specifier_seq (parser,
				    CP_PARSER_FLAGS_OPTIONAL,
				    &declspecs,
				    &decl_class_or_enum_p);

      /* Reject storage classes and typedef; ivars may not carry them.
	 Each is diagnosed and then cleared so parsing can continue.  */

      /* auto, register, static, extern, mutable.  */
      if (declspecs.storage_class != sc_none)
	{
	  cp_parser_error (parser, "invalid type for instance variable");
	  declspecs.storage_class = sc_none;
	}

      /* __thread.  */
      if (declspecs.specs[(int) ds_thread])
	{
	  cp_parser_error (parser, "invalid type for instance variable");
	  declspecs.specs[(int) ds_thread] = 0;
	}

      /* typedef.  */
      if (declspecs.specs[(int) ds_typedef])
	{
	  cp_parser_error (parser, "invalid type for instance variable");
	  declspecs.specs[(int) ds_typedef] = 0;
	}

      prefix_attributes = declspecs.attributes;
      declspecs.attributes = NULL_TREE;

      /* Keep going until we hit the `;' at the end of the
	 declaration.  */
      while (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	{
	  tree width = NULL_TREE, attributes, first_attribute, decl;
	  cp_declarator *declarator = NULL;
	  int ctor_dtor_or_conv_p;

	  /* Check for a (possibly unnamed) bitfield declaration.  */
	  token = cp_lexer_peek_token (parser->lexer);
	  if (token->type == CPP_COLON)
	    goto eat_colon;

	  if (token->type == CPP_NAME
	      && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
		  == CPP_COLON))
	    {
	      /* Get the name of the bitfield.  */
	      declarator = make_id_declarator (NULL_TREE,
					       cp_parser_identifier (parser),
					       sfk_none);

	     eat_colon:
	      cp_lexer_consume_token (parser->lexer);  /* Eat ':'.  */
	      /* Get the width of the bitfield.  */
	      width
		= cp_parser_constant_expression (parser,
						 /*allow_non_constant=*/false,
						 NULL);
	    }
	  else
	    {
	      /* Parse the declarator.  */
	      declarator
		= cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
					&ctor_dtor_or_conv_p,
					/*parenthesized_p=*/NULL,
					/*member_p=*/false);
	    }

	  /* Look for attributes that apply to the ivar.  */
	  attributes = cp_parser_attributes_opt (parser);
	  /* Remember which attributes are prefix attributes and
	     which are not.  */
	  first_attribute = attributes;
	  /* Combine the attributes.  */
	  attributes = chainon (prefix_attributes, attributes);

	  if (width)
	    /* Create the bitfield declaration.  */
	    decl = grokbitfield (declarator, &declspecs,
				 width,
				 attributes);
	  else
	    decl = grokfield (declarator, &declspecs,
			      NULL_TREE, /*init_const_expr_p=*/false,
			      NULL_TREE, attributes);

	  /* Add the instance variable.  */
	  if (decl != error_mark_node && decl != NULL_TREE)
	    objc_add_instance_variable (decl);

	  /* Reset PREFIX_ATTRIBUTES: detach the declarator-specific
	     attributes so the prefix ones can be reused for the next
	     declarator in this declaration.  */
	  while (attributes && TREE_CHAIN (attributes) != first_attribute)
	    attributes = TREE_CHAIN (attributes);
	  if (attributes)
	    TREE_CHAIN (attributes) = NULL_TREE;

	  token = cp_lexer_peek_token (parser->lexer);

	  if (token->type == CPP_COMMA)
	    {
	      cp_lexer_consume_token (parser->lexer);  /* Eat ','.  */
	      continue;
	    }
	  break;
	}

      cp_parser_consume_semicolon_at_end_of_statement (parser);
      token = cp_lexer_peek_token (parser->lexer);
    }

  if (token->keyword == RID_AT_END)
    cp_parser_error (parser, "expected %<}%>");

  /* Do not consume the RID_AT_END, so it will be read again as terminating
     the @interface of @implementation.  */
  if (token->keyword != RID_AT_END && token->type != CPP_EOF)
    cp_lexer_consume_token (parser->lexer);  /* Eat '}'.  */

  /* For historical reasons, we accept an optional semicolon.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
    cp_lexer_consume_token (parser->lexer);
}

/* Parse an Objective-C protocol declaration.

   Handles both forward declarations ("@protocol A, B;") and full
   definitions ("@protocol A <refs> ... @end").  ATTRIBUTES are the
   prefix attributes already collected by the caller.  */

static void
cp_parser_objc_protocol_declaration (cp_parser* parser, tree attributes)
{
  tree proto, protorefs;
  cp_token *tok;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@protocol'.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_NAME))
    {
      tok = cp_lexer_peek_token (parser->lexer);
      error_at (tok->location, "identifier expected after %<@protocol%>");
      cp_parser_consume_semicolon_at_end_of_statement (parser);
      return;
    }

  /* See if we have a forward declaration or a definition.  */
  tok = cp_lexer_peek_nth_token (parser->lexer, 2);

  /* Try a forward declaration first.  */
  if (tok->type == CPP_COMMA || tok->type == CPP_SEMICOLON)
    {
      while (true)
	{
	  tree id;

	  id = cp_parser_identifier (parser);
	  if (id == error_mark_node)
	    break;

	  objc_declare_protocol (id, attributes);

	  if(cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	    cp_lexer_consume_token (parser->lexer);
	  else
	    break;
	}
      cp_parser_consume_semicolon_at_end_of_statement (parser);
    }

  /* Ok, we got a full-fledged definition (or at least should).  */
  else
    {
      proto = cp_parser_identifier (parser);
      protorefs = cp_parser_objc_protocol_refs_opt (parser);
      objc_start_protocol (proto, protorefs, attributes);
      cp_parser_objc_method_prototype_list (parser);
    }
}

/* Parse an Objective-C superclass or category.

   On return *SUPER holds the superclass name (after ':'), *CATEG the
   category name (inside parentheses), either possibly NULL_TREE.
   *IS_CLASS_EXTENSION is set when an @interface has empty parentheses
   "()" — a class extension.  IFACE_P is true when called from an
   @interface (class extensions are only legal there).  */

static void
cp_parser_objc_superclass_or_category (cp_parser *parser,
				       bool iface_p,
				       tree *super,
				       tree *categ, bool *is_class_extension)
{
  cp_token *next = cp_lexer_peek_token (parser->lexer);

  *super = *categ = NULL_TREE;
  *is_class_extension = false;
  if (next->type == CPP_COLON)
    {
      cp_lexer_consume_token (parser->lexer);  /* Eat ':'.  */
      *super = cp_parser_identifier (parser);
    }
  else if (next->type == CPP_OPEN_PAREN)
    {
      cp_lexer_consume_token (parser->lexer);  /* Eat '('.  */

      /* If there is no category name, and this is an @interface, we
	 have a class extension.  */
      if (iface_p && cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN))
	{
	  *categ = NULL_TREE;
	  *is_class_extension = true;
	}
      else
	*categ = cp_parser_identifier (parser);

      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
    }
}

/* Parse an Objective-C class interface.

   Dispatches to category-interface or class-interface handling (the
   latter including ivar parsing), then parses the method prototype
   list up to '@end'.  ATTRIBUTES are prefix attributes collected by
   the caller.  */

static void
cp_parser_objc_class_interface (cp_parser* parser, tree attributes)
{
  tree name, super, categ, protos;
  bool is_class_extension;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@interface'.  */
  name = cp_parser_identifier (parser);
  if (name == error_mark_node)
    {
      /* It's hard to recover because even if valid @interface stuff
	 is to follow, we can't compile it (or validate it) if we
	 don't even know which class it refers to.  Let's assume this
	 was a stray '@interface' token in the stream and skip it.
      */
      return;
    }
  cp_parser_objc_superclass_or_category (parser, true, &super, &categ,
					 &is_class_extension);
  protos = cp_parser_objc_protocol_refs_opt (parser);

  /* We have either a class or a category on our hands.  */
  if (categ || is_class_extension)
    objc_start_category_interface (name, categ, protos, attributes);
  else
    {
      objc_start_class_interface (name, super, protos, attributes);
      /* Handle instance variable declarations, if any.  */
      cp_parser_objc_class_ivars (parser);
      objc_continue_interface ();
    }

  cp_parser_objc_method_prototype_list (parser);
}

/* Parse an Objective-C class implementation.

   Mirrors cp_parser_objc_class_interface but for @implementation:
   starts a class or category implementation, parses ivars for the
   class case, then parses the method definition list.  */

static void
cp_parser_objc_class_implementation (cp_parser* parser)
{
  tree name, super, categ;
  bool is_class_extension;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@implementation'.  */
  name = cp_parser_identifier (parser);
  if (name == error_mark_node)
    {
      /* It's hard to recover because even if valid @implementation
	 stuff is to follow, we can't compile it (or validate it) if
	 we don't even know which class it refers to.  Let's assume
	 this was a stray '@implementation' token in the stream and
	 skip it.
      */
      return;
    }
  cp_parser_objc_superclass_or_category (parser, false, &super, &categ,
					 &is_class_extension);

  /* We have either a class or a category on our hands.  */
  if (categ)
    objc_start_category_implementation (name, categ);
  else
    {
      objc_start_class_implementation (name, super);
      /* Handle instance variable declarations, if any.  */
      cp_parser_objc_class_ivars (parser);
      objc_continue_implementation ();
    }

  cp_parser_objc_method_definition_list (parser);
}

/* Consume the @end token and finish off the implementation.  */

static void
cp_parser_objc_end_implementation (cp_parser* parser)
{
  cp_lexer_consume_token (parser->lexer);  /* Eat '@end'.  */
  objc_finish_implementation ();
}

/* Parse an Objective-C declaration.

   Peeks at the '@' keyword to dispatch to the specific handler.
   ATTRIBUTES are prefix attributes, which are only meaningful for
   @protocol and @interface — other keywords diagnose or warn and
   drop them.  */

static void
cp_parser_objc_declaration (cp_parser* parser, tree attributes)
{
  /* Try to figure out what kind of declaration is present.  */
  cp_token *kwd = cp_lexer_peek_token (parser->lexer);

  if (attributes)
    switch (kwd->keyword)
      {
	case RID_AT_ALIAS:
	case RID_AT_CLASS:
	case RID_AT_END:
	  error_at (kwd->location, "attributes may not be specified before"
	            " the %<@%D%> Objective-C++ keyword",
		    kwd->u.value);
	  attributes = NULL;
	  break;
	case RID_AT_IMPLEMENTATION:
	  warning_at (kwd->location, OPT_Wattributes,
		      "prefix attributes are ignored before %<@%D%>",
		      kwd->u.value);
	  attributes = NULL;
	  /* Falls through to the common break.  */
	default:
	  break;
      }

  switch (kwd->keyword)
    {
    case RID_AT_ALIAS:
      cp_parser_objc_alias_declaration (parser);
      break;
    case RID_AT_CLASS:
      cp_parser_objc_class_declaration (parser);
      break;
    case RID_AT_PROTOCOL:
      cp_parser_objc_protocol_declaration (parser, attributes);
      break;
    case RID_AT_INTERFACE:
      cp_parser_objc_class_interface (parser, attributes);
      break;
    case RID_AT_IMPLEMENTATION:
      cp_parser_objc_class_implementation (parser);
      break;
    case RID_AT_END:
      cp_parser_objc_end_implementation (parser);
      break;
    default:
      error_at (kwd->location, "misplaced %<@%D%> Objective-C++ construct",
		kwd->u.value);
      cp_parser_skip_to_end_of_block_or_statement (parser);
    }
}

/* Parse an Objective-C try-catch-finally statement.

   objc-try-catch-finally-stmt:
     @try compound-statement objc-catch-clause-seq [opt]
       objc-finally-clause [opt]

   objc-catch-clause-seq:
     objc-catch-clause objc-catch-clause-seq [opt]

   objc-catch-clause:
     @catch ( objc-exception-declaration ) compound-statement

   objc-finally-clause:
     @finally compound-statement

   objc-exception-declaration:
     parameter-declaration
     '...'

   where '...' is to be interpreted literally, that is, it means
   CPP_ELLIPSIS.

   Returns NULL_TREE.

   PS: This function is identical to
   c_parser_objc_try_catch_finally_statement for C.  Keep them in sync.
   */

static tree
cp_parser_objc_try_catch_finally_statement (cp_parser *parser)
{
  location_t location;
  tree stmt;

  cp_parser_require_keyword (parser, RID_AT_TRY, RT_AT_TRY);
  location = cp_lexer_peek_token (parser->lexer)->location;
  objc_maybe_warn_exceptions (location);
  /* NB: The @try block needs to be wrapped in its own STATEMENT_LIST
     node, lest it get absorbed into the surrounding block.  */
  stmt = push_stmt_list ();
  cp_parser_compound_statement (parser, NULL, false, false);
  objc_begin_try_stmt (location, pop_stmt_list (stmt));

  while (cp_lexer_next_token_is_keyword (parser->lexer, RID_AT_CATCH))
    {
      cp_parameter_declarator *parm;
      tree parameter_declaration = error_mark_node;
      bool seen_open_paren = false;

      cp_lexer_consume_token (parser->lexer);  /* Eat '@catch'.  */
      if (cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
	seen_open_paren = true;
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	{
	  /* We have "@catch (...)" (where the '...' are literally
	     what is in the code).  Skip the '...'.
	     parameter_declaration is set to NULL_TREE, and
	     objc_being_catch_clauses() knows that that means
	     '...'.  */
	  cp_lexer_consume_token (parser->lexer);
	  parameter_declaration = NULL_TREE;
	}
      else
	{
	  /* We have "@catch (NSException *exception)" or something
	     like that.  Parse the parameter declaration.  */
	  parm = cp_parser_parameter_declaration (parser, false, NULL);
	  if (parm == NULL)
	    parameter_declaration = error_mark_node;
	  else
	    parameter_declaration = grokdeclarator (parm->declarator,
						    &parm->decl_specifiers,
						    PARM, /*initialized=*/0,
						    /*attrlist=*/NULL);
	}
      if (seen_open_paren)
	cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      else
	{
	  /* If there was no open parenthesis, we are recovering from
	     an error, and we are trying to figure out what mistake
	     the user has made.  */

	  /* If there is an immediate closing parenthesis, the user
	     probably forgot the opening one (ie, they typed "@catch
	     NSException *e)".  Parse the closing parenthesis and keep
	     going.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN))
	    cp_lexer_consume_token (parser->lexer);

	  /* If these is no immediate closing parenthesis, the user
	     probably doesn't know that parenthesis are required at
	     all (ie, they typed "@catch NSException *e").  So, just
	     forget about the closing parenthesis and keep going.  */
	}
      objc_begin_catch_clause (parameter_declaration);
      cp_parser_compound_statement (parser, NULL, false, false);
      objc_finish_catch_clause ();
    }
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_AT_FINALLY))
    {
      cp_lexer_consume_token (parser->lexer);  /* Eat '@finally'.  */
      location = cp_lexer_peek_token (parser->lexer)->location;
      /* NB: The @finally block needs to be wrapped in its own
	 STATEMENT_LIST node, lest it get absorbed into the
	 surrounding block.  */
      stmt = push_stmt_list ();
      cp_parser_compound_statement (parser, NULL, false, false);
      objc_build_finally_clause (location, pop_stmt_list (stmt));
    }

  return objc_finish_try_stmt ();
}

/* Parse an Objective-C synchronized statement.

   objc-synchronized-stmt:
     @synchronized ( expression ) compound-statement

   Returns NULL_TREE.  */

static tree
cp_parser_objc_synchronized_statement (cp_parser *parser)
{
  location_t location;
  tree lock, stmt;

  cp_parser_require_keyword (parser, RID_AT_SYNCHRONIZED, RT_AT_SYNCHRONIZED);

  location = cp_lexer_peek_token (parser->lexer)->location;
  objc_maybe_warn_exceptions (location);
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  lock = cp_parser_expression (parser, false, NULL);
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  /* NB: The @synchronized block needs to be wrapped in its own
     STATEMENT_LIST node, lest it get absorbed into the surrounding
     block.  */
  stmt = push_stmt_list ();
  cp_parser_compound_statement (parser, NULL, false, false);

  return objc_build_synchronized (location, lock, pop_stmt_list (stmt));
}

/* Parse an Objective-C throw statement.

   objc-throw-stmt:
     @throw assignment-expression [opt] ;

   Returns a constructed '@throw' statement.
   */

static tree
cp_parser_objc_throw_statement (cp_parser *parser)
{
  tree expr = NULL_TREE;
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;

  cp_parser_require_keyword (parser, RID_AT_THROW, RT_AT_THROW);
  /* A bare "@throw;" (rethrow) leaves EXPR as NULL_TREE.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    expr = cp_parser_expression (parser, /*cast_p=*/false, NULL);
  cp_parser_consume_semicolon_at_end_of_statement (parser);

  return objc_build_throw_stmt (loc, expr);
}

/* Parse an Objective-C statement.

   Dispatches on the '@' keyword to the @try/@synchronized/@throw
   handlers; any other '@' keyword here is diagnosed as misplaced.
   Returns the statement tree, or error_mark_node on a misplaced
   construct.  */

static tree
cp_parser_objc_statement (cp_parser * parser)
{
  /* Try to figure out what kind of declaration is present.  */
  cp_token *kwd = cp_lexer_peek_token (parser->lexer);

  switch (kwd->keyword)
    {
    case RID_AT_TRY:
      return cp_parser_objc_try_catch_finally_statement (parser);
    case RID_AT_SYNCHRONIZED:
      return cp_parser_objc_synchronized_statement (parser);
    case RID_AT_THROW:
      return cp_parser_objc_throw_statement (parser);
    default:
      error_at (kwd->location, "misplaced %<@%D%> Objective-C++ construct",
	       kwd->u.value);
      cp_parser_skip_to_end_of_block_or_statement (parser);
    }

  return error_mark_node;
}

/* If we are compiling ObjC++ and we see an __attribute__ we neeed to
   look ahead to see if an objc keyword follows the attributes.  This
   is to detect the use of prefix attributes on ObjC @interface and
   @protocol.

   On success *ATTRIB holds the parsed attributes and the tokens stay
   consumed (committed); otherwise the lexer is rolled back so the
   attributes can be re-parsed by the normal declaration path.  */

static bool
cp_parser_objc_valid_prefix_attributes (cp_parser* parser, tree *attrib)
{
  cp_lexer_save_tokens (parser->lexer);
  *attrib = cp_parser_attributes_opt (parser);
  /* Only called after peeking RID_ATTRIBUTE, so this must parse.  */
  gcc_assert (*attrib);
  if (OBJC_IS_AT_KEYWORD (cp_lexer_peek_token (parser->lexer)->keyword))
    {
      cp_lexer_commit_tokens (parser->lexer);
      return true;
    }
  cp_lexer_rollback_tokens (parser->lexer);
  return false;
}

/* This routine is a minimal replacement for
   c_parser_struct_declaration () used when parsing the list of
   types/names or ObjC++ properties.  For example, when parsing the
   code

   @property (readonly) int a, b, c;

   this function is responsible for parsing "int a, int b, int c" and
   returning the declarations as CHAIN of DECLs.
   TODO: Share this code with cp_parser_objc_class_ivars.  It's very
   similar parsing.  */

static tree
cp_parser_objc_struct_declaration (cp_parser *parser)
{
  tree decls = NULL_TREE;
  cp_decl_specifier_seq declspecs;
  int decl_class_or_enum_p;
  tree prefix_attributes;

  cp_parser_decl_specifier_seq (parser,
				CP_PARSER_FLAGS_NONE,
				&declspecs,
				&decl_class_or_enum_p);

  if (declspecs.type == error_mark_node)
    return error_mark_node;

  /* Reject storage classes and typedef, as in ivar parsing; diagnose
     and clear each so parsing can continue.  */

  /* auto, register, static, extern, mutable.  */
  if (declspecs.storage_class != sc_none)
    {
      cp_parser_error (parser, "invalid type for property");
      declspecs.storage_class = sc_none;
    }

  /* __thread.  */
  if (declspecs.specs[(int) ds_thread])
    {
      cp_parser_error (parser, "invalid type for property");
      declspecs.specs[(int) ds_thread] = 0;
    }

  /* typedef.  */
  if (declspecs.specs[(int) ds_typedef])
    {
      cp_parser_error (parser, "invalid type for property");
      declspecs.specs[(int) ds_typedef] = 0;
    }

  prefix_attributes = declspecs.attributes;
  declspecs.attributes = NULL_TREE;

  /* Keep going until we hit the `;' at the end of the declaration.  */
  while (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    {
      tree attributes, first_attribute, decl;
      cp_declarator *declarator;
      cp_token *token;

      /* Parse the declarator.  */
      declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
					 NULL, NULL, false);

      /* Look for attributes that apply to the ivar.  */
      attributes = cp_parser_attributes_opt (parser);
      /* Remember which attributes are prefix attributes and
	 which are not.  */
      first_attribute = attributes;
      /* Combine the attributes.  */
      attributes = chainon (prefix_attributes, attributes);

      decl = grokfield (declarator, &declspecs,
			NULL_TREE, /*init_const_expr_p=*/false,
			NULL_TREE, attributes);

      if (decl == error_mark_node || decl == NULL_TREE)
	return error_mark_node;

      /* Reset PREFIX_ATTRIBUTES.  */
      while (attributes && TREE_CHAIN (attributes) != first_attribute)
	attributes = TREE_CHAIN (attributes);
      if (attributes)
	TREE_CHAIN (attributes) = NULL_TREE;

      /* Chain the new DECL onto the front; the caller reverses.  */
      DECL_CHAIN (decl) = decls;
      decls = decl;

      token = cp_lexer_peek_token (parser->lexer);
      if (token->type == CPP_COMMA)
	{
	  cp_lexer_consume_token (parser->lexer);  /* Eat ','.  */
	  continue;
	}
      else
	break;
    }
  return decls;
}

/* Parse an Objective-C @property declaration.  The syntax is:

   objc-property-declaration:
     '@property' objc-property-attributes[opt] struct-declaration ;

   objc-property-attributes:
    '(' objc-property-attribute-list ')'

   objc-property-attribute-list:
     objc-property-attribute
     objc-property-attribute-list, objc-property-attribute

   objc-property-attribute
     'getter' = identifier
     'setter' = identifier
     'readonly'
     'readwrite'
     'assign'
     'retain'
     'copy'
     'nonatomic'

  For example:
    @property NSString *name;
    @property (readonly) id object;
    @property (retain, nonatomic, getter=getTheName) id name;
    @property int a, b, c;

   PS: This function is identical to
   c_parser_objc_at_property_declaration for C.  Keep them in sync.  */

static void
cp_parser_objc_at_property_declaration (cp_parser *parser)
{
  /* The following variables hold the attributes of the properties as
     parsed.  They are 'false' or 'NULL_TREE' if the attribute was not
     seen.  When we see an attribute, we set them to 'true' (if they
     are boolean properties) or to the identifier (if they have an
     argument, ie, for getter and setter).  Note that here we only
     parse the list of attributes, check the syntax and accumulate the
     attributes that we find.  objc_add_property_declaration() will
     then process the information.  */
  bool property_assign = false;
  bool property_copy = false;
  tree property_getter_ident = NULL_TREE;
  bool property_nonatomic = false;
  bool property_readonly = false;
  bool property_readwrite = false;
  bool property_retain = false;
  tree property_setter_ident = NULL_TREE;

  /* 'properties' is the list of properties that we read.  Usually a
     single one, but maybe more (eg, in "@property int a, b, c;" there
     are three).  */
  tree properties;
  location_t loc;

  loc = cp_lexer_peek_token (parser->lexer)->location;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@property'.  */

  /* Parse the optional attribute list...  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      /* Eat the '('.  */
      cp_lexer_consume_token (parser->lexer);

      while (true)
	{
	  bool syntax_error = false;
	  cp_token *token = cp_lexer_peek_token (parser->lexer);
	  enum rid keyword;

	  if (token->type != CPP_NAME)
	    {
	      cp_parser_error (parser, "expected identifier");
	      break;
	    }
	  keyword = C_RID_CODE (token->u.value);
	  cp_lexer_consume_token (parser->lexer);
	  switch (keyword)
	    {
	    case RID_ASSIGN:    property_assign = true;    break;
	    case RID_COPY:      property_copy = true;      break;
	    case RID_NONATOMIC: property_nonatomic = true; break;
	    case RID_READONLY:  property_readonly = true;  break;
	    case RID_READWRITE: property_readwrite = true; break;
	    case RID_RETAIN:    property_retain = true;    break;

	    case RID_GETTER:
	    case RID_SETTER:
	      /* Both take "= selector"; setter additionally requires
		 a trailing ':' in the selector name.  */
	      if (cp_lexer_next_token_is_not (parser->lexer, CPP_EQ))
		{
		  if (keyword == RID_GETTER)
		    cp_parser_error (parser,
				     "missing %<=%> (after %<getter%> attribute)");
		  else
		    cp_parser_error (parser,
				     "missing %<=%> (after %<setter%> attribute)");
		  syntax_error = true;
		  break;
		}
	      cp_lexer_consume_token (parser->lexer); /* eat the = */
	      if (!cp_parser_objc_selector_p
		  (cp_lexer_peek_token (parser->lexer)->type))
		{
		  cp_parser_error (parser, "expected identifier");
		  syntax_error = true;
		  break;
		}
	      if (keyword == RID_SETTER)
		{
		  if (property_setter_ident != NULL_TREE)
		    {
		      cp_parser_error (parser, "the %<setter%> attribute may only be specified once");
		      cp_lexer_consume_token (parser->lexer);
		    }
		  else
		    property_setter_ident = cp_parser_objc_selector (parser);
		  if (cp_lexer_next_token_is_not (parser->lexer, CPP_COLON))
		    cp_parser_error (parser, "setter name must terminate with %<:%>");
		  else
		    cp_lexer_consume_token (parser->lexer);
		}
	      else
		{
		  if (property_getter_ident != NULL_TREE)
		    {
		      cp_parser_error (parser, "the %<getter%> attribute may only be specified once");
		      cp_lexer_consume_token (parser->lexer);
		    }
		  else
		    property_getter_ident = cp_parser_objc_selector (parser);
		}
	      break;
	    default:
	      cp_parser_error (parser, "unknown property attribute");
	      syntax_error = true;
	      break;
	    }

	  if (syntax_error)
	    break;

	  if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	    cp_lexer_consume_token (parser->lexer);
	  else
	    break;
	}

      /* FIXME: "@property (setter, assign);" will generate a spurious
	 "error: expected ‘)’ before ‘,’ token".  This is because
	 cp_parser_require, unlike the C counterpart, will produce an
	 error even if we are in error recovery.  */
      if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
	{
	  cp_parser_skip_to_closing_parenthesis (parser,
						 /*recovering=*/true,
						 /*or_comma=*/false,
						 /*consume_paren=*/true);
	}
    }

  /* ... and the property declaration(s).  */
  properties = cp_parser_objc_struct_declaration (parser);

  if (properties == error_mark_node)
    {
      cp_parser_skip_to_end_of_statement (parser);
      /* If the next token is now a `;', consume it.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	cp_lexer_consume_token (parser->lexer);
      return;
    }

  if (properties == NULL_TREE)
    cp_parser_error (parser, "expected identifier");
  else
    {
      /* Comma-separated properties are chained together in reverse
	 order; add them one by one.  */
      properties = nreverse (properties);

      for (; properties; properties = TREE_CHAIN (properties))
	objc_add_property_declaration (loc, copy_node (properties),
				       property_readonly, property_readwrite,
				       property_assign, property_retain,
				       property_copy, property_nonatomic,
				       property_getter_ident,
				       property_setter_ident);
    }

  cp_parser_consume_semicolon_at_end_of_statement (parser);
}

/* Parse an Objective-C++ @synthesize declaration.
   The syntax is:

   objc-synthesize-declaration:
     @synthesize objc-synthesize-identifier-list ;

   objc-synthesize-identifier-list:
     objc-synthesize-identifier
     objc-synthesize-identifier-list, objc-synthesize-identifier

   objc-synthesize-identifier
     identifier
     identifier = identifier

  For example:
    @synthesize MyProperty;
    @synthesize OneProperty, AnotherProperty=MyIvar, YetAnotherProperty;

  PS: This function is identical to c_parser_objc_at_synthesize_declaration
  for C.  Keep them in sync.  */

static void
cp_parser_objc_at_synthesize_declaration (cp_parser *parser)
{
  tree list = NULL_TREE;
  location_t loc;
  loc = cp_lexer_peek_token (parser->lexer)->location;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@synthesize'.  */
  while (true)
    {
      tree property, ivar;
      property = cp_parser_identifier (parser);
      if (property == error_mark_node)
	{
	  cp_parser_consume_semicolon_at_end_of_statement (parser);
	  return;
	}
      if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
	{
	  cp_lexer_consume_token (parser->lexer);  /* Eat '='.  */
	  ivar = cp_parser_identifier (parser);
	  if (ivar == error_mark_node)
	    {
	      cp_parser_consume_semicolon_at_end_of_statement (parser);
	      return;
	    }
	}
      else
	ivar = NULL_TREE;
      /* Each entry is a TREE_LIST with the ivar name (or NULL_TREE)
	 as purpose and the property name as value.  */
      list = chainon (list, build_tree_list (ivar, property));
      if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	cp_lexer_consume_token (parser->lexer);
      else
	break;
    }
  cp_parser_consume_semicolon_at_end_of_statement (parser);
  objc_add_synthesize_declaration (loc, list);
}

/* Parse an Objective-C++ @dynamic declaration.  The syntax is:

   objc-dynamic-declaration:
     @dynamic identifier-list ;

   For example:
     @dynamic MyProperty;
     @dynamic MyProperty, AnotherProperty;

   PS: This function is identical to c_parser_objc_at_dynamic_declaration
   for C.  Keep them in sync.  */

static void
cp_parser_objc_at_dynamic_declaration (cp_parser *parser)
{
  tree list = NULL_TREE;
  location_t loc;
  loc = cp_lexer_peek_token (parser->lexer)->location;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@dynamic'.  */
  while (true)
    {
      tree property;
      property = cp_parser_identifier (parser);
      if (property == error_mark_node)
	{
	  cp_parser_consume_semicolon_at_end_of_statement (parser);
	  return;
	}
      list = chainon (list, build_tree_list (NULL, property));
      if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	cp_lexer_consume_token (parser->lexer);
      else
	break;
    }
  cp_parser_consume_semicolon_at_end_of_statement (parser);
  objc_add_dynamic_declaration (loc, list);
}

/* OpenMP 2.5 parsing routines.  */

/* Returns name of the next clause.
   If the clause is not recognized PRAGMA_OMP_CLAUSE_NONE is returned and
   the token is not consumed.  Otherwise appropriate pragma_omp_clause is
   returned and the token is consumed.  */

static pragma_omp_clause
cp_parser_omp_clause_name (cp_parser *parser)
{
  pragma_omp_clause result = PRAGMA_OMP_CLAUSE_NONE;

  /* 'if', 'default' and 'private' are C++ keywords, so they never
     arrive as CPP_NAME tokens and must be checked separately.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_IF))
    result = PRAGMA_OMP_CLAUSE_IF;
  else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_DEFAULT))
    result = PRAGMA_OMP_CLAUSE_DEFAULT;
  else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_PRIVATE))
    result = PRAGMA_OMP_CLAUSE_PRIVATE;
  else if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);

      /* Dispatch on the first character to keep the strcmp chains
	 short.  */
      switch (p[0])
	{
	case 'c':
	  if (!strcmp ("collapse", p))
	    result = PRAGMA_OMP_CLAUSE_COLLAPSE;
	  else if (!strcmp ("copyin", p))
	    result = PRAGMA_OMP_CLAUSE_COPYIN;
	  else if (!strcmp ("copyprivate", p))
	    result = PRAGMA_OMP_CLAUSE_COPYPRIVATE;
	  break;
	case 'f':
	  if (!strcmp ("final", p))
	    result = PRAGMA_OMP_CLAUSE_FINAL;
	  else if (!strcmp ("firstprivate", p))
	    result = PRAGMA_OMP_CLAUSE_FIRSTPRIVATE;
	  break;
	case 'l':
	  if (!strcmp ("lastprivate", p))
	    result = PRAGMA_OMP_CLAUSE_LASTPRIVATE;
	  break;
	case 'm':
	  if (!strcmp ("mergeable", p))
	    result = PRAGMA_OMP_CLAUSE_MERGEABLE;
	  break;
	case 'n':
	  if (!strcmp ("nowait", p))
	    result = PRAGMA_OMP_CLAUSE_NOWAIT;
	  else if (!strcmp ("num_threads", p))
	    result = PRAGMA_OMP_CLAUSE_NUM_THREADS;
	  break;
	case 'o':
	  if (!strcmp ("ordered", p))
	    result = PRAGMA_OMP_CLAUSE_ORDERED;
	  break;
	case 'r':
	  if (!strcmp ("reduction", p))
	    result = PRAGMA_OMP_CLAUSE_REDUCTION;
	  break;
	case 's':
	  if (!strcmp ("schedule", p))
	    result = PRAGMA_OMP_CLAUSE_SCHEDULE;
	  else if (!strcmp ("shared", p))
	    result = PRAGMA_OMP_CLAUSE_SHARED;
	  break;
	case 'u':
	  if (!strcmp ("untied", p))
	    result = PRAGMA_OMP_CLAUSE_UNTIED;
	  break;
	}
    }

  if (result != PRAGMA_OMP_CLAUSE_NONE)
    cp_lexer_consume_token (parser->lexer);

  return result;
}

/* Validate that a clause of the given type does not already exist.

   Emits "too many %qs clauses" at LOCATION if a clause with CODE is
   already present in CLAUSES; NAME is the user-visible clause name
   used in the diagnostic.  */

static void
check_no_duplicate_clause (tree clauses, enum omp_clause_code code,
			   const char *name, location_t location)
{
  tree c;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == code)
      {
	error_at (location, "too many %qs clauses", name);
	break;
      }
}

/* OpenMP 2.5:
   variable-list:
     identifier
     variable-list , identifier

   In addition, we match a closing parenthesis.  An opening parenthesis
   will have been consumed by the caller.

   If KIND is nonzero, create the appropriate node and install the decl
   in OMP_CLAUSE_DECL and add the node to the head of the list.

   If KIND is zero, create a TREE_LIST with the decl in TREE_PURPOSE;
   return the list created.
   */

static tree
cp_parser_omp_var_list_no_open (cp_parser *parser, enum omp_clause_code kind,
				tree list)
{
  cp_token *token;
  while (1)
    {
      tree name, decl;

      token = cp_lexer_peek_token (parser->lexer);
      name = cp_parser_id_expression (parser, /*template_p=*/false,
				      /*check_dependency_p=*/true,
				      /*template_p=*/NULL,
				      /*declarator_p=*/false,
				      /*optional_p=*/false);
      if (name == error_mark_node)
	goto skip_comma;

      decl = cp_parser_lookup_name_simple (parser, name, token->location);
      if (decl == error_mark_node)
	cp_parser_name_lookup_error (parser, name, decl, NLE_NULL,
				     token->location);
      else if (kind != 0)
	{
	  /* Build a clause node of the requested kind and push it
	     onto the head of LIST.  */
	  tree u = build_omp_clause (token->location, kind);
	  OMP_CLAUSE_DECL (u) = decl;
	  OMP_CLAUSE_CHAIN (u) = list;
	  list = u;
	}
      else
	list = tree_cons (decl, NULL_TREE, list);

    get_comma:
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      cp_lexer_consume_token (parser->lexer);
    }

  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    {
      int ending;

      /* Try to resync to an unnested comma.  Copied from
	 cp_parser_parenthesized_expression_list.  */
    skip_comma:
      ending = cp_parser_skip_to_closing_parenthesis (parser,
						      /*recovering=*/true,
						      /*or_comma=*/true,
						      /*consume_paren=*/true);
      /* Negative result means we stopped at a comma, not the closing
	 paren: resume scanning the list.  */
      if (ending < 0)
	goto get_comma;
    }

  return list;
}

/* Similarly, but expect leading and trailing parenthesis.  This is
   a very common case for omp clauses.
*/ static tree cp_parser_omp_var_list (cp_parser *parser, enum omp_clause_code kind, tree list) { if (cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return cp_parser_omp_var_list_no_open (parser, kind, list); return list; } /* OpenMP 3.0: collapse ( constant-expression ) */ static tree cp_parser_omp_clause_collapse (cp_parser *parser, tree list, location_t location) { tree c, num; location_t loc; HOST_WIDE_INT n; loc = cp_lexer_peek_token (parser->lexer)->location; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; num = cp_parser_constant_expression (parser, false, NULL); if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); if (num == error_mark_node) return list; num = fold_non_dependent_expr (num); if (!INTEGRAL_TYPE_P (TREE_TYPE (num)) || !host_integerp (num, 0) || (n = tree_low_cst (num, 0)) <= 0 || (int) n != n) { error_at (loc, "collapse argument needs positive constant integer expression"); return list; } check_no_duplicate_clause (list, OMP_CLAUSE_COLLAPSE, "collapse", location); c = build_omp_clause (loc, OMP_CLAUSE_COLLAPSE); OMP_CLAUSE_CHAIN (c) = list; OMP_CLAUSE_COLLAPSE_EXPR (c) = num; return c; } /* OpenMP 2.5: default ( shared | none ) */ static tree cp_parser_omp_clause_default (cp_parser *parser, tree list, location_t location) { enum omp_clause_default_kind kind = OMP_CLAUSE_DEFAULT_UNSPECIFIED; tree c; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) { tree id = cp_lexer_peek_token (parser->lexer)->u.value; const char *p = IDENTIFIER_POINTER (id); switch (p[0]) { case 'n': if (strcmp ("none", p) != 0) goto invalid_kind; kind = OMP_CLAUSE_DEFAULT_NONE; break; case 's': if (strcmp ("shared", p) != 0) goto invalid_kind; kind = OMP_CLAUSE_DEFAULT_SHARED; break; default: goto invalid_kind; } 
cp_lexer_consume_token (parser->lexer); } else { invalid_kind: cp_parser_error (parser, "expected %<none%> or %<shared%>"); } if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); if (kind == OMP_CLAUSE_DEFAULT_UNSPECIFIED) return list; check_no_duplicate_clause (list, OMP_CLAUSE_DEFAULT, "default", location); c = build_omp_clause (location, OMP_CLAUSE_DEFAULT); OMP_CLAUSE_CHAIN (c) = list; OMP_CLAUSE_DEFAULT_KIND (c) = kind; return c; } /* OpenMP 3.1: final ( expression ) */ static tree cp_parser_omp_clause_final (cp_parser *parser, tree list, location_t location) { tree t, c; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; t = cp_parser_condition (parser); if (t == error_mark_node || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); check_no_duplicate_clause (list, OMP_CLAUSE_FINAL, "final", location); c = build_omp_clause (location, OMP_CLAUSE_FINAL); OMP_CLAUSE_FINAL_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 2.5: if ( expression ) */ static tree cp_parser_omp_clause_if (cp_parser *parser, tree list, location_t location) { tree t, c; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; t = cp_parser_condition (parser); if (t == error_mark_node || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); check_no_duplicate_clause (list, OMP_CLAUSE_IF, "if", location); c = build_omp_clause (location, OMP_CLAUSE_IF); OMP_CLAUSE_IF_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 3.1: mergeable */ static tree cp_parser_omp_clause_mergeable (cp_parser *parser ATTRIBUTE_UNUSED, tree list, location_t location) { tree 
c; check_no_duplicate_clause (list, OMP_CLAUSE_MERGEABLE, "mergeable", location); c = build_omp_clause (location, OMP_CLAUSE_MERGEABLE); OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 2.5: nowait */ static tree cp_parser_omp_clause_nowait (cp_parser *parser ATTRIBUTE_UNUSED, tree list, location_t location) { tree c; check_no_duplicate_clause (list, OMP_CLAUSE_NOWAIT, "nowait", location); c = build_omp_clause (location, OMP_CLAUSE_NOWAIT); OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 2.5: num_threads ( expression ) */ static tree cp_parser_omp_clause_num_threads (cp_parser *parser, tree list, location_t location) { tree t, c; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; t = cp_parser_expression (parser, false, NULL); if (t == error_mark_node || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); check_no_duplicate_clause (list, OMP_CLAUSE_NUM_THREADS, "num_threads", location); c = build_omp_clause (location, OMP_CLAUSE_NUM_THREADS); OMP_CLAUSE_NUM_THREADS_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 2.5: ordered */ static tree cp_parser_omp_clause_ordered (cp_parser *parser ATTRIBUTE_UNUSED, tree list, location_t location) { tree c; check_no_duplicate_clause (list, OMP_CLAUSE_ORDERED, "ordered", location); c = build_omp_clause (location, OMP_CLAUSE_ORDERED); OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 2.5: reduction ( reduction-operator : variable-list ) reduction-operator: One of: + * - & ^ | && || OpenMP 3.1: reduction-operator: One of: + * - & ^ | && || min max */ static tree cp_parser_omp_clause_reduction (cp_parser *parser, tree list) { enum tree_code code; tree nlist, c; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; switch (cp_lexer_peek_token (parser->lexer)->type) { case CPP_PLUS: code = PLUS_EXPR; break; case CPP_MULT: code = MULT_EXPR; 
break; case CPP_MINUS: code = MINUS_EXPR; break; case CPP_AND: code = BIT_AND_EXPR; break; case CPP_XOR: code = BIT_XOR_EXPR; break; case CPP_OR: code = BIT_IOR_EXPR; break; case CPP_AND_AND: code = TRUTH_ANDIF_EXPR; break; case CPP_OR_OR: code = TRUTH_ORIF_EXPR; break; case CPP_NAME: { tree id = cp_lexer_peek_token (parser->lexer)->u.value; const char *p = IDENTIFIER_POINTER (id); if (strcmp (p, "min") == 0) { code = MIN_EXPR; break; } if (strcmp (p, "max") == 0) { code = MAX_EXPR; break; } } /* FALLTHROUGH */ default: cp_parser_error (parser, "expected %<+%>, %<*%>, %<-%>, %<&%>, %<^%>, " "%<|%>, %<&&%>, %<||%>, %<min%> or %<max%>"); resync_fail: cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); return list; } cp_lexer_consume_token (parser->lexer); if (!cp_parser_require (parser, CPP_COLON, RT_COLON)) goto resync_fail; nlist = cp_parser_omp_var_list_no_open (parser, OMP_CLAUSE_REDUCTION, list); for (c = nlist; c != list; c = OMP_CLAUSE_CHAIN (c)) OMP_CLAUSE_REDUCTION_CODE (c) = code; return nlist; } /* OpenMP 2.5: schedule ( schedule-kind ) schedule ( schedule-kind , expression ) schedule-kind: static | dynamic | guided | runtime | auto */ static tree cp_parser_omp_clause_schedule (cp_parser *parser, tree list, location_t location) { tree c, t; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; c = build_omp_clause (location, OMP_CLAUSE_SCHEDULE); if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) { tree id = cp_lexer_peek_token (parser->lexer)->u.value; const char *p = IDENTIFIER_POINTER (id); switch (p[0]) { case 'd': if (strcmp ("dynamic", p) != 0) goto invalid_kind; OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_DYNAMIC; break; case 'g': if (strcmp ("guided", p) != 0) goto invalid_kind; OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_GUIDED; break; case 'r': if (strcmp ("runtime", p) != 0) goto invalid_kind; OMP_CLAUSE_SCHEDULE_KIND (c) = 
OMP_CLAUSE_SCHEDULE_RUNTIME; break; default: goto invalid_kind; } } else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_STATIC)) OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_STATIC; else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_AUTO)) OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_AUTO; else goto invalid_kind; cp_lexer_consume_token (parser->lexer); if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) { cp_token *token; cp_lexer_consume_token (parser->lexer); token = cp_lexer_peek_token (parser->lexer); t = cp_parser_assignment_expression (parser, false, NULL); if (t == error_mark_node) goto resync_fail; else if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_RUNTIME) error_at (token->location, "schedule %<runtime%> does not take " "a %<chunk_size%> parameter"); else if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_AUTO) error_at (token->location, "schedule %<auto%> does not take " "a %<chunk_size%> parameter"); else OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) = t; if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) goto resync_fail; } else if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_COMMA_CLOSE_PAREN)) goto resync_fail; check_no_duplicate_clause (list, OMP_CLAUSE_SCHEDULE, "schedule", location); OMP_CLAUSE_CHAIN (c) = list; return c; invalid_kind: cp_parser_error (parser, "invalid schedule kind"); resync_fail: cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); return list; } /* OpenMP 3.0: untied */ static tree cp_parser_omp_clause_untied (cp_parser *parser ATTRIBUTE_UNUSED, tree list, location_t location) { tree c; check_no_duplicate_clause (list, OMP_CLAUSE_UNTIED, "untied", location); c = build_omp_clause (location, OMP_CLAUSE_UNTIED); OMP_CLAUSE_CHAIN (c) = list; return c; } /* Parse all OpenMP clauses. The set clauses allowed by the directive is a bitmask in MASK. 
Return the list of clauses found; the result of clause default goes in *pdefault. */ static tree cp_parser_omp_all_clauses (cp_parser *parser, unsigned int mask, const char *where, cp_token *pragma_tok) { tree clauses = NULL; bool first = true; cp_token *token = NULL; while (cp_lexer_next_token_is_not (parser->lexer, CPP_PRAGMA_EOL)) { pragma_omp_clause c_kind; const char *c_name; tree prev = clauses; if (!first && cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) cp_lexer_consume_token (parser->lexer); token = cp_lexer_peek_token (parser->lexer); c_kind = cp_parser_omp_clause_name (parser); first = false; switch (c_kind) { case PRAGMA_OMP_CLAUSE_COLLAPSE: clauses = cp_parser_omp_clause_collapse (parser, clauses, token->location); c_name = "collapse"; break; case PRAGMA_OMP_CLAUSE_COPYIN: clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_COPYIN, clauses); c_name = "copyin"; break; case PRAGMA_OMP_CLAUSE_COPYPRIVATE: clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_COPYPRIVATE, clauses); c_name = "copyprivate"; break; case PRAGMA_OMP_CLAUSE_DEFAULT: clauses = cp_parser_omp_clause_default (parser, clauses, token->location); c_name = "default"; break; case PRAGMA_OMP_CLAUSE_FINAL: clauses = cp_parser_omp_clause_final (parser, clauses, token->location); c_name = "final"; break; case PRAGMA_OMP_CLAUSE_FIRSTPRIVATE: clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_FIRSTPRIVATE, clauses); c_name = "firstprivate"; break; case PRAGMA_OMP_CLAUSE_IF: clauses = cp_parser_omp_clause_if (parser, clauses, token->location); c_name = "if"; break; case PRAGMA_OMP_CLAUSE_LASTPRIVATE: clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_LASTPRIVATE, clauses); c_name = "lastprivate"; break; case PRAGMA_OMP_CLAUSE_MERGEABLE: clauses = cp_parser_omp_clause_mergeable (parser, clauses, token->location); c_name = "mergeable"; break; case PRAGMA_OMP_CLAUSE_NOWAIT: clauses = cp_parser_omp_clause_nowait (parser, clauses, token->location); c_name = "nowait"; break; case 
PRAGMA_OMP_CLAUSE_NUM_THREADS: clauses = cp_parser_omp_clause_num_threads (parser, clauses, token->location); c_name = "num_threads"; break; case PRAGMA_OMP_CLAUSE_ORDERED: clauses = cp_parser_omp_clause_ordered (parser, clauses, token->location); c_name = "ordered"; break; case PRAGMA_OMP_CLAUSE_PRIVATE: clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_PRIVATE, clauses); c_name = "private"; break; case PRAGMA_OMP_CLAUSE_REDUCTION: clauses = cp_parser_omp_clause_reduction (parser, clauses); c_name = "reduction"; break; case PRAGMA_OMP_CLAUSE_SCHEDULE: clauses = cp_parser_omp_clause_schedule (parser, clauses, token->location); c_name = "schedule"; break; case PRAGMA_OMP_CLAUSE_SHARED: clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_SHARED, clauses); c_name = "shared"; break; case PRAGMA_OMP_CLAUSE_UNTIED: clauses = cp_parser_omp_clause_untied (parser, clauses, token->location); c_name = "nowait"; break; default: cp_parser_error (parser, "expected %<#pragma omp%> clause"); goto saw_error; } if (((mask >> c_kind) & 1) == 0) { /* Remove the invalid clause(s) from the list to avoid confusing the rest of the compiler. */ clauses = prev; error_at (token->location, "%qs is not valid for %qs", c_name, where); } } saw_error: cp_parser_skip_to_pragma_eol (parser, pragma_tok); return finish_omp_clauses (clauses); } /* OpenMP 2.5: structured-block: statement In practice, we're also interested in adding the statement to an outer node. So it is convenient if we work around the fact that cp_parser_statement calls add_stmt. */ static unsigned cp_parser_begin_omp_structured_block (cp_parser *parser) { unsigned save = parser->in_statement; /* Only move the values to IN_OMP_BLOCK if they weren't false. 
This preserves the "not within loop or switch" style error messages for nonsense cases like void foo() { #pragma omp single break; } */ if (parser->in_statement) parser->in_statement = IN_OMP_BLOCK; return save; } static void cp_parser_end_omp_structured_block (cp_parser *parser, unsigned save) { parser->in_statement = save; } static tree cp_parser_omp_structured_block (cp_parser *parser) { tree stmt = begin_omp_structured_block (); unsigned int save = cp_parser_begin_omp_structured_block (parser); cp_parser_statement (parser, NULL_TREE, false, NULL); cp_parser_end_omp_structured_block (parser, save); return finish_omp_structured_block (stmt); } /* OpenMP 2.5: # pragma omp atomic new-line expression-stmt expression-stmt: x binop= expr | x++ | ++x | x-- | --x binop: +, *, -, /, &, ^, |, <<, >> where x is an lvalue expression with scalar type. OpenMP 3.1: # pragma omp atomic new-line update-stmt # pragma omp atomic read new-line read-stmt # pragma omp atomic write new-line write-stmt # pragma omp atomic update new-line update-stmt # pragma omp atomic capture new-line capture-stmt # pragma omp atomic capture new-line capture-block read-stmt: v = x write-stmt: x = expr update-stmt: expression-stmt | x = x binop expr capture-stmt: v = x binop= expr | v = x++ | v = ++x | v = x-- | v = --x capture-block: { v = x; update-stmt; } | { update-stmt; v = x; } where x and v are lvalue expressions with scalar type. 
*/

static void
cp_parser_omp_atomic (cp_parser *parser, cp_token *pragma_tok)
{
  tree lhs = NULL_TREE, rhs = NULL_TREE, v = NULL_TREE, lhs1 = NULL_TREE;
  tree rhs1 = NULL_TREE, orig_lhs;
  /* code selects the atomic flavor; opcode the binary operation.
     NOP_EXPR for code temporarily encodes "atomic write" until it is
     canonicalized to OMP_ATOMIC below.  */
  enum tree_code code = OMP_ATOMIC, opcode = NOP_EXPR;
  bool structured_block = false;

  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);

      if (!strcmp (p, "read"))
	code = OMP_ATOMIC_READ;
      else if (!strcmp (p, "write"))
	code = NOP_EXPR;
      else if (!strcmp (p, "update"))
	code = OMP_ATOMIC;
      else if (!strcmp (p, "capture"))
	code = OMP_ATOMIC_CAPTURE_NEW;
      else
	p = NULL;
      if (p)
	cp_lexer_consume_token (parser->lexer);
    }
  cp_parser_require_pragma_eol (parser, pragma_tok);

  switch (code)
    {
    case OMP_ATOMIC_READ:
    case NOP_EXPR: /* atomic write */
      v = cp_parser_unary_expression (parser, /*address_p=*/false,
				      /*cast_p=*/false, NULL);
      if (v == error_mark_node)
	goto saw_error;
      if (!cp_parser_require (parser, CPP_EQ, RT_EQ))
	goto saw_error;
      if (code == NOP_EXPR)
	lhs = cp_parser_expression (parser, /*cast_p=*/false, NULL);
      else
	lhs = cp_parser_unary_expression (parser, /*address_p=*/false,
					  /*cast_p=*/false, NULL);
      if (lhs == error_mark_node)
	goto saw_error;
      if (code == NOP_EXPR)
	{
	  /* atomic write is represented by OMP_ATOMIC with NOP_EXPR
	     opcode.  */
	  code = OMP_ATOMIC;
	  rhs = lhs;
	  lhs = v;
	  v = NULL_TREE;
	}
      goto done;
    case OMP_ATOMIC_CAPTURE_NEW:
      if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	{
	  /* capture-block form: { v = x; update; } or { update; v = x; }.  */
	  cp_lexer_consume_token (parser->lexer);
	  structured_block = true;
	}
      else
	{
	  /* capture-stmt form: parse "v =" before the update.  */
	  v = cp_parser_unary_expression (parser, /*address_p=*/false,
					  /*cast_p=*/false, NULL);
	  if (v == error_mark_node)
	    goto saw_error;
	  if (!cp_parser_require (parser, CPP_EQ, RT_EQ))
	    goto saw_error;
	}
    default:
      break;
    }

restart:
  lhs = cp_parser_unary_expression (parser, /*address_p=*/false,
				    /*cast_p=*/false, NULL);
  orig_lhs = lhs;
  switch (TREE_CODE (lhs))
    {
    case ERROR_MARK:
      goto saw_error;

    case POSTINCREMENT_EXPR:
      if (code == OMP_ATOMIC_CAPTURE_NEW && !structured_block)
	code = OMP_ATOMIC_CAPTURE_OLD;
      /* FALLTHROUGH */
    case PREINCREMENT_EXPR:
      lhs = TREE_OPERAND (lhs, 0);
      opcode = PLUS_EXPR;
      rhs = integer_one_node;
      break;

    case POSTDECREMENT_EXPR:
      if (code == OMP_ATOMIC_CAPTURE_NEW && !structured_block)
	code = OMP_ATOMIC_CAPTURE_OLD;
      /* FALLTHROUGH */
    case PREDECREMENT_EXPR:
      lhs = TREE_OPERAND (lhs, 0);
      opcode = MINUS_EXPR;
      rhs = integer_one_node;
      break;

    case COMPOUND_EXPR:
      if (TREE_CODE (TREE_OPERAND (lhs, 0)) == SAVE_EXPR
	  && TREE_CODE (TREE_OPERAND (lhs, 1)) == COMPOUND_EXPR
	  && TREE_CODE (TREE_OPERAND (TREE_OPERAND (lhs, 1), 0)) == MODIFY_EXPR
	  && TREE_OPERAND (TREE_OPERAND (lhs, 1), 1) == TREE_OPERAND (lhs, 0)
	  && TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND
					 (TREE_OPERAND (lhs, 1), 0), 0)))
	     == BOOLEAN_TYPE)
	/* Undo effects of boolean_increment for post {in,de}crement.  */
	lhs = TREE_OPERAND (TREE_OPERAND (lhs, 1), 0);
      /* FALLTHRU */
    case MODIFY_EXPR:
      if (TREE_CODE (lhs) == MODIFY_EXPR
	  && TREE_CODE (TREE_TYPE (TREE_OPERAND (lhs, 0))) == BOOLEAN_TYPE)
	{
	  /* Undo effects of boolean_increment.  */
	  if (integer_onep (TREE_OPERAND (lhs, 1)))
	    {
	      /* This is pre or post increment.  */
	      rhs = TREE_OPERAND (lhs, 1);
	      lhs = TREE_OPERAND (lhs, 0);
	      opcode = NOP_EXPR;
	      if (code == OMP_ATOMIC_CAPTURE_NEW
		  && !structured_block
		  && TREE_CODE (orig_lhs) == COMPOUND_EXPR)
		code = OMP_ATOMIC_CAPTURE_OLD;
	      break;
	    }
	}
      /* FALLTHRU */
    default:
      /* General "x binop= expr" and "x = x binop expr" forms; dispatch
	 on the operator token following the lhs.  */
      switch (cp_lexer_peek_token (parser->lexer)->type)
	{
	case CPP_MULT_EQ:
	  opcode = MULT_EXPR;
	  break;
	case CPP_DIV_EQ:
	  opcode = TRUNC_DIV_EXPR;
	  break;
	case CPP_PLUS_EQ:
	  opcode = PLUS_EXPR;
	  break;
	case CPP_MINUS_EQ:
	  opcode = MINUS_EXPR;
	  break;
	case CPP_LSHIFT_EQ:
	  opcode = LSHIFT_EXPR;
	  break;
	case CPP_RSHIFT_EQ:
	  opcode = RSHIFT_EXPR;
	  break;
	case CPP_AND_EQ:
	  opcode = BIT_AND_EXPR;
	  break;
	case CPP_OR_EQ:
	  opcode = BIT_IOR_EXPR;
	  break;
	case CPP_XOR_EQ:
	  opcode = BIT_XOR_EXPR;
	  break;
	case CPP_EQ:
	  if (structured_block || code == OMP_ATOMIC)
	    {
	      enum cp_parser_prec oprec;
	      cp_token *token;
	      cp_lexer_consume_token (parser->lexer);
	      rhs1 = cp_parser_unary_expression (parser, /*address_p=*/false,
						 /*cast_p=*/false, NULL);
	      if (rhs1 == error_mark_node)
		goto saw_error;
	      token = cp_lexer_peek_token (parser->lexer);
	      switch (token->type)
		{
		case CPP_SEMICOLON:
		  if (code == OMP_ATOMIC_CAPTURE_NEW)
		    {
		      /* "v = x;" seen first inside a capture block:
			 reinterpret as capture-old and reparse the
			 update statement.  */
		      code = OMP_ATOMIC_CAPTURE_OLD;
		      v = lhs;
		      lhs = NULL_TREE;
		      lhs1 = rhs1;
		      rhs1 = NULL_TREE;
		      cp_lexer_consume_token (parser->lexer);
		      goto restart;
		    }
		  cp_parser_error (parser,
				   "invalid form of %<#pragma omp atomic%>");
		  goto saw_error;
		case CPP_MULT:
		  opcode = MULT_EXPR;
		  break;
		case CPP_DIV:
		  opcode = TRUNC_DIV_EXPR;
		  break;
		case CPP_PLUS:
		  opcode = PLUS_EXPR;
		  break;
		case CPP_MINUS:
		  opcode = MINUS_EXPR;
		  break;
		case CPP_LSHIFT:
		  opcode = LSHIFT_EXPR;
		  break;
		case CPP_RSHIFT:
		  opcode = RSHIFT_EXPR;
		  break;
		case CPP_AND:
		  opcode = BIT_AND_EXPR;
		  break;
		case CPP_OR:
		  opcode = BIT_IOR_EXPR;
		  break;
		case CPP_XOR:
		  opcode = BIT_XOR_EXPR;
		  break;
		default:
		  cp_parser_error (parser,
				   "invalid operator for %<#pragma omp atomic%>");
		  goto saw_error;
		}
	      oprec = TOKEN_PRECEDENCE (token);
	      gcc_assert (oprec != PREC_NOT_OPERATOR);
	      if (commutative_tree_code (opcode))
		oprec = (enum cp_parser_prec) (oprec - 1);
	      cp_lexer_consume_token (parser->lexer);
	      rhs = cp_parser_binary_expression (parser, false, false,
						 oprec, NULL);
	      if (rhs == error_mark_node)
		goto saw_error;
	      goto stmt_done;
	    }
	  /* FALLTHROUGH */
	default:
	  cp_parser_error (parser,
			   "invalid operator for %<#pragma omp atomic%>");
	  goto saw_error;
	}
      cp_lexer_consume_token (parser->lexer);

      rhs = cp_parser_expression (parser, false, NULL);
      if (rhs == error_mark_node)
	goto saw_error;
      break;
    }
stmt_done:
  if (structured_block && code == OMP_ATOMIC_CAPTURE_NEW)
    {
      /* Capture block with the update first: now parse "v = x;".  */
      if (!cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON))
	goto saw_error;
      v = cp_parser_unary_expression (parser, /*address_p=*/false,
				      /*cast_p=*/false, NULL);
      if (v == error_mark_node)
	goto saw_error;
      if (!cp_parser_require (parser, CPP_EQ, RT_EQ))
	goto saw_error;
      lhs1 = cp_parser_unary_expression (parser, /*address_p=*/false,
					 /*cast_p=*/false, NULL);
      if (lhs1 == error_mark_node)
	goto saw_error;
    }
  if (structured_block)
    {
      cp_parser_consume_semicolon_at_end_of_statement (parser);
      cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
    }
done:
  finish_omp_atomic (code, opcode, lhs, rhs, v, lhs1, rhs1);
  if (!structured_block)
    cp_parser_consume_semicolon_at_end_of_statement (parser);
  return;

 saw_error:
  cp_parser_skip_to_end_of_block_or_statement (parser);
  if (structured_block)
    {
      if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
	cp_lexer_consume_token (parser->lexer);
      else if (code == OMP_ATOMIC_CAPTURE_NEW)
	{
	  cp_parser_skip_to_end_of_block_or_statement (parser);
	  if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
	    cp_lexer_consume_token (parser->lexer);
	}
    }
}

/* OpenMP 2.5:
   # pragma omp barrier new-line  */

static void
cp_parser_omp_barrier (cp_parser *parser, cp_token *pragma_tok)
{
  cp_parser_require_pragma_eol (parser, pragma_tok);
  finish_omp_barrier ();
}

/* OpenMP 2.5:
   # pragma omp critical [(name)] new-line
     structured-block  */

static tree
cp_parser_omp_critical (cp_parser *parser, cp_token *pragma_tok)
{
  tree stmt, name = NULL;

  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      cp_lexer_consume_token (parser->lexer);

      name = cp_parser_identifier (parser);

      if (name == error_mark_node
	  || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
	cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					       /*or_comma=*/false,
					       /*consume_paren=*/true);
      if (name == error_mark_node)
	name = NULL;
    }
  cp_parser_require_pragma_eol (parser, pragma_tok);

  stmt = cp_parser_omp_structured_block (parser);
  return c_finish_omp_critical (input_location, stmt, name);
}

/* OpenMP 2.5:
   # pragma omp flush flush-vars[opt] new-line

   flush-vars:
     ( variable-list ) */

static void
cp_parser_omp_flush (cp_parser *parser, cp_token *pragma_tok)
{
  /* The variable list is parsed for syntax only and discarded;
     OMP_CLAUSE_ERROR means no clause nodes are built.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    (void) cp_parser_omp_var_list (parser, OMP_CLAUSE_ERROR, NULL);
  cp_parser_require_pragma_eol (parser, pragma_tok);
  finish_omp_flush ();
}

/* Helper function, to parse omp for condition expression.  */

static tree
cp_parser_omp_for_cond (cp_parser *parser, tree decl)
{
  tree cond = cp_parser_binary_expression (parser, false, true,
					   PREC_NOT_OPERATOR, NULL);
  if (cond == error_mark_node
      || cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    {
      cp_parser_skip_to_end_of_statement (parser);
      return error_mark_node;
    }

  /* Only relational comparisons are valid OpenMP loop conditions.  */
  switch (TREE_CODE (cond))
    {
    case GT_EXPR:
    case GE_EXPR:
    case LT_EXPR:
    case LE_EXPR:
      break;
    default:
      return error_mark_node;
    }

  /* If decl is an iterator, preserve LHS and RHS of the relational
     expr until finish_omp_for.  */
  if (decl
      && (type_dependent_expression_p (decl)
	  || CLASS_TYPE_P (TREE_TYPE (decl))))
    return cond;

  return build_x_binary_op (TREE_CODE (cond),
			    TREE_OPERAND (cond, 0), ERROR_MARK,
			    TREE_OPERAND (cond, 1), ERROR_MARK,
			    /*overload=*/NULL, tf_warning_or_error);
}

/* Helper function, to parse omp for increment expression.
*/ static tree cp_parser_omp_for_incr (cp_parser *parser, tree decl) { cp_token *token = cp_lexer_peek_token (parser->lexer); enum tree_code op; tree lhs, rhs; cp_id_kind idk; bool decl_first; if (token->type == CPP_PLUS_PLUS || token->type == CPP_MINUS_MINUS) { op = (token->type == CPP_PLUS_PLUS ? PREINCREMENT_EXPR : PREDECREMENT_EXPR); cp_lexer_consume_token (parser->lexer); lhs = cp_parser_cast_expression (parser, false, false, NULL); if (lhs != decl) return error_mark_node; return build2 (op, TREE_TYPE (decl), decl, NULL_TREE); } lhs = cp_parser_primary_expression (parser, false, false, false, &idk); if (lhs != decl) return error_mark_node; token = cp_lexer_peek_token (parser->lexer); if (token->type == CPP_PLUS_PLUS || token->type == CPP_MINUS_MINUS) { op = (token->type == CPP_PLUS_PLUS ? POSTINCREMENT_EXPR : POSTDECREMENT_EXPR); cp_lexer_consume_token (parser->lexer); return build2 (op, TREE_TYPE (decl), decl, NULL_TREE); } op = cp_parser_assignment_operator_opt (parser); if (op == ERROR_MARK) return error_mark_node; if (op != NOP_EXPR) { rhs = cp_parser_assignment_expression (parser, false, NULL); rhs = build2 (op, TREE_TYPE (decl), decl, rhs); return build2 (MODIFY_EXPR, TREE_TYPE (decl), decl, rhs); } lhs = cp_parser_binary_expression (parser, false, false, PREC_ADDITIVE_EXPRESSION, NULL); token = cp_lexer_peek_token (parser->lexer); decl_first = lhs == decl; if (decl_first) lhs = NULL_TREE; if (token->type != CPP_PLUS && token->type != CPP_MINUS) return error_mark_node; do { op = token->type == CPP_PLUS ? 
PLUS_EXPR : MINUS_EXPR; cp_lexer_consume_token (parser->lexer); rhs = cp_parser_binary_expression (parser, false, false, PREC_ADDITIVE_EXPRESSION, NULL); token = cp_lexer_peek_token (parser->lexer); if (token->type == CPP_PLUS || token->type == CPP_MINUS || decl_first) { if (lhs == NULL_TREE) { if (op == PLUS_EXPR) lhs = rhs; else lhs = build_x_unary_op (NEGATE_EXPR, rhs, tf_warning_or_error); } else lhs = build_x_binary_op (op, lhs, ERROR_MARK, rhs, ERROR_MARK, NULL, tf_warning_or_error); } } while (token->type == CPP_PLUS || token->type == CPP_MINUS); if (!decl_first) { if (rhs != decl || op == MINUS_EXPR) return error_mark_node; rhs = build2 (op, TREE_TYPE (decl), lhs, decl); } else rhs = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, lhs); return build2 (MODIFY_EXPR, TREE_TYPE (decl), decl, rhs); } /* Parse the restricted form of the for statement allowed by OpenMP. */ static tree cp_parser_omp_for_loop (cp_parser *parser, tree clauses, tree *par_clauses) { tree init, cond, incr, body, decl, pre_body = NULL_TREE, ret; tree real_decl, initv, condv, incrv, declv; tree this_pre_body, cl; location_t loc_first; bool collapse_err = false; int i, collapse = 1, nbraces = 0; VEC(tree,gc) *for_block = make_tree_vector (); for (cl = clauses; cl; cl = OMP_CLAUSE_CHAIN (cl)) if (OMP_CLAUSE_CODE (cl) == OMP_CLAUSE_COLLAPSE) collapse = tree_low_cst (OMP_CLAUSE_COLLAPSE_EXPR (cl), 0); gcc_assert (collapse >= 1); declv = make_tree_vec (collapse); initv = make_tree_vec (collapse); condv = make_tree_vec (collapse); incrv = make_tree_vec (collapse); loc_first = cp_lexer_peek_token (parser->lexer)->location; for (i = 0; i < collapse; i++) { int bracecount = 0; bool add_private_clause = false; location_t loc; if (!cp_lexer_next_token_is_keyword (parser->lexer, RID_FOR)) { cp_parser_error (parser, "for statement expected"); return NULL; } loc = cp_lexer_consume_token (parser->lexer)->location; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return NULL; init = decl = 
real_decl = NULL; this_pre_body = push_stmt_list (); if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)) { /* See 2.5.1 (in OpenMP 3.0, similar wording is in 2.5 standard too): init-expr: var = lb integer-type var = lb random-access-iterator-type var = lb pointer-type var = lb */ cp_decl_specifier_seq type_specifiers; /* First, try to parse as an initialized declaration. See cp_parser_condition, from whence the bulk of this is copied. */ cp_parser_parse_tentatively (parser); cp_parser_type_specifier_seq (parser, /*is_declaration=*/true, /*is_trailing_return=*/false, &type_specifiers); if (cp_parser_parse_definitely (parser)) { /* If parsing a type specifier seq succeeded, then this MUST be a initialized declaration. */ tree asm_specification, attributes; cp_declarator *declarator; declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED, /*ctor_dtor_or_conv_p=*/NULL, /*parenthesized_p=*/NULL, /*member_p=*/false); attributes = cp_parser_attributes_opt (parser); asm_specification = cp_parser_asm_specification_opt (parser); if (declarator == cp_error_declarator) cp_parser_skip_to_end_of_statement (parser); else { tree pushed_scope, auto_node; decl = start_decl (declarator, &type_specifiers, SD_INITIALIZED, attributes, /*prefix_attributes=*/NULL_TREE, &pushed_scope); auto_node = type_uses_auto (TREE_TYPE (decl)); if (cp_lexer_next_token_is_not (parser->lexer, CPP_EQ)) { if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)) error ("parenthesized initialization is not allowed in " "OpenMP %<for%> loop"); else /* Trigger an error. 
*/ cp_parser_require (parser, CPP_EQ, RT_EQ); init = error_mark_node; cp_parser_skip_to_end_of_statement (parser); } else if (CLASS_TYPE_P (TREE_TYPE (decl)) || type_dependent_expression_p (decl) || auto_node) { bool is_direct_init, is_non_constant_init; init = cp_parser_initializer (parser, &is_direct_init, &is_non_constant_init); if (auto_node) { TREE_TYPE (decl) = do_auto_deduction (TREE_TYPE (decl), init, auto_node); if (!CLASS_TYPE_P (TREE_TYPE (decl)) && !type_dependent_expression_p (decl)) goto non_class; } cp_finish_decl (decl, init, !is_non_constant_init, asm_specification, LOOKUP_ONLYCONVERTING); if (CLASS_TYPE_P (TREE_TYPE (decl))) { VEC_safe_push (tree, gc, for_block, this_pre_body); init = NULL_TREE; } else init = pop_stmt_list (this_pre_body); this_pre_body = NULL_TREE; } else { /* Consume '='. */ cp_lexer_consume_token (parser->lexer); init = cp_parser_assignment_expression (parser, false, NULL); non_class: if (TREE_CODE (TREE_TYPE (decl)) == REFERENCE_TYPE) init = error_mark_node; else cp_finish_decl (decl, NULL_TREE, /*init_const_expr_p=*/false, asm_specification, LOOKUP_ONLYCONVERTING); } if (pushed_scope) pop_scope (pushed_scope); } } else { cp_id_kind idk; /* If parsing a type specifier sequence failed, then this MUST be a simple expression. 
*/ cp_parser_parse_tentatively (parser); decl = cp_parser_primary_expression (parser, false, false, false, &idk); if (!cp_parser_error_occurred (parser) && decl && DECL_P (decl) && CLASS_TYPE_P (TREE_TYPE (decl))) { tree rhs; cp_parser_parse_definitely (parser); cp_parser_require (parser, CPP_EQ, RT_EQ); rhs = cp_parser_assignment_expression (parser, false, NULL); finish_expr_stmt (build_x_modify_expr (decl, NOP_EXPR, rhs, tf_warning_or_error)); add_private_clause = true; } else { decl = NULL; cp_parser_abort_tentative_parse (parser); init = cp_parser_expression (parser, false, NULL); if (init) { if (TREE_CODE (init) == MODIFY_EXPR || TREE_CODE (init) == MODOP_EXPR) real_decl = TREE_OPERAND (init, 0); } } } } cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON); if (this_pre_body) { this_pre_body = pop_stmt_list (this_pre_body); if (pre_body) { tree t = pre_body; pre_body = push_stmt_list (); add_stmt (t); add_stmt (this_pre_body); pre_body = pop_stmt_list (pre_body); } else pre_body = this_pre_body; } if (decl) real_decl = decl; if (par_clauses != NULL && real_decl != NULL_TREE) { tree *c; for (c = par_clauses; *c ; ) if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_FIRSTPRIVATE && OMP_CLAUSE_DECL (*c) == real_decl) { error_at (loc, "iteration variable %qD" " should not be firstprivate", real_decl); *c = OMP_CLAUSE_CHAIN (*c); } else if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_LASTPRIVATE && OMP_CLAUSE_DECL (*c) == real_decl) { /* Add lastprivate (decl) clause to OMP_FOR_CLAUSES, change it to shared (decl) in OMP_PARALLEL_CLAUSES. 
*/ tree l = build_omp_clause (loc, OMP_CLAUSE_LASTPRIVATE); OMP_CLAUSE_DECL (l) = real_decl; OMP_CLAUSE_CHAIN (l) = clauses; CP_OMP_CLAUSE_INFO (l) = CP_OMP_CLAUSE_INFO (*c); clauses = l; OMP_CLAUSE_SET_CODE (*c, OMP_CLAUSE_SHARED); CP_OMP_CLAUSE_INFO (*c) = NULL; add_private_clause = false; } else { if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_PRIVATE && OMP_CLAUSE_DECL (*c) == real_decl) add_private_clause = false; c = &OMP_CLAUSE_CHAIN (*c); } } if (add_private_clause) { tree c; for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c)) { if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE) && OMP_CLAUSE_DECL (c) == decl) break; else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE && OMP_CLAUSE_DECL (c) == decl) error_at (loc, "iteration variable %qD " "should not be firstprivate", decl); else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION && OMP_CLAUSE_DECL (c) == decl) error_at (loc, "iteration variable %qD should not be reduction", decl); } if (c == NULL) { c = build_omp_clause (loc, OMP_CLAUSE_PRIVATE); OMP_CLAUSE_DECL (c) = decl; c = finish_omp_clauses (c); if (c) { OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; } } } cond = NULL; if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)) cond = cp_parser_omp_for_cond (parser, decl); cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON); incr = NULL; if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN)) { /* If decl is an iterator, preserve the operator on decl until finish_omp_for. 
*/ if (real_decl && ((processing_template_decl && !POINTER_TYPE_P (TREE_TYPE (real_decl))) || CLASS_TYPE_P (TREE_TYPE (real_decl)))) incr = cp_parser_omp_for_incr (parser, real_decl); else incr = cp_parser_expression (parser, false, NULL); } if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); TREE_VEC_ELT (declv, i) = decl; TREE_VEC_ELT (initv, i) = init; TREE_VEC_ELT (condv, i) = cond; TREE_VEC_ELT (incrv, i) = incr; if (i == collapse - 1) break; /* FIXME: OpenMP 3.0 draft isn't very clear on what exactly is allowed in between the collapsed for loops to be still considered perfectly nested. Hopefully the final version clarifies this. For now handle (multiple) {'s and empty statements. */ cp_parser_parse_tentatively (parser); do { if (cp_lexer_next_token_is_keyword (parser->lexer, RID_FOR)) break; else if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) { cp_lexer_consume_token (parser->lexer); bracecount++; } else if (bracecount && cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)) cp_lexer_consume_token (parser->lexer); else { loc = cp_lexer_peek_token (parser->lexer)->location; error_at (loc, "not enough collapsed for loops"); collapse_err = true; cp_parser_abort_tentative_parse (parser); declv = NULL_TREE; break; } } while (1); if (declv) { cp_parser_parse_definitely (parser); nbraces += bracecount; } } /* Note that we saved the original contents of this flag when we entered the structured block, and so we don't need to re-save it here. */ parser->in_statement = IN_OMP_FOR; /* Note that the grammar doesn't call for a structured block here, though the loop as a whole is a structured block. 
*/
  /* Parse the loop body.  Note that the grammar doesn't call for a
     structured block here, though the loop as a whole is one.  */
  body = push_stmt_list ();
  cp_parser_statement (parser, NULL_TREE, false, NULL);
  body = pop_stmt_list (body);

  /* declv is NULL_TREE if a "not enough collapsed for loops" error was
     raised above; in that case no OMP_FOR tree is built.  */
  if (declv == NULL_TREE)
    ret = NULL_TREE;
  else
    ret = finish_omp_for (loc_first, declv, initv, condv, incrv, body,
                          pre_body, clauses);

  /* Consume the closing braces (and stray semicolons) matching the braces
     seen between the collapsed loops.  */
  while (nbraces)
    {
      if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
        {
          cp_lexer_consume_token (parser->lexer);
          nbraces--;
        }
      else if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
        cp_lexer_consume_token (parser->lexer);
      else
        {
          if (!collapse_err)
            {
              error_at (cp_lexer_peek_token (parser->lexer)->location,
                        "collapsed loops not perfectly nested");
            }
          collapse_err = true;
          /* Skip ahead so we don't emit a diagnostic for every token.  */
          cp_parser_statement_seq_opt (parser, NULL);
          if (cp_lexer_next_token_is (parser->lexer, CPP_EOF))
            break;
        }
    }

  /* Pop the statement lists pushed for the declaration statements of the
     collapsed loops.  */
  while (!VEC_empty (tree, for_block))
    add_stmt (pop_stmt_list (VEC_pop (tree, for_block)));
  release_tree_vector (for_block);

  return ret;
}

/* OpenMP 2.5:
   #pragma omp for for-clause[optseq] new-line
     for-loop  */

#define OMP_FOR_CLAUSE_MASK                             \
        ( (1u << PRAGMA_OMP_CLAUSE_PRIVATE)             \
        | (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)        \
        | (1u << PRAGMA_OMP_CLAUSE_LASTPRIVATE)         \
        | (1u << PRAGMA_OMP_CLAUSE_REDUCTION)           \
        | (1u << PRAGMA_OMP_CLAUSE_ORDERED)             \
        | (1u << PRAGMA_OMP_CLAUSE_SCHEDULE)            \
        | (1u << PRAGMA_OMP_CLAUSE_NOWAIT)              \
        | (1u << PRAGMA_OMP_CLAUSE_COLLAPSE))

/* Parse a "#pragma omp for": the clauses first, then the loop nest inside
   its own OMP structured block.  */

static tree
cp_parser_omp_for (cp_parser *parser, cp_token *pragma_tok)
{
  tree clauses, sb, ret;
  unsigned int save;

  clauses = cp_parser_omp_all_clauses (parser, OMP_FOR_CLAUSE_MASK,
                                       "#pragma omp for", pragma_tok);

  sb = begin_omp_structured_block ();
  save = cp_parser_begin_omp_structured_block (parser);

  ret = cp_parser_omp_for_loop (parser, clauses, NULL);

  cp_parser_end_omp_structured_block (parser, save);
  add_stmt (finish_omp_structured_block (sb));

  return ret;
}

/* OpenMP 2.5:
   # pragma omp master new-line
     structured-block  */

static tree
cp_parser_omp_master (cp_parser *parser, cp_token *pragma_tok)
{
  cp_parser_require_pragma_eol (parser, pragma_tok);
  return c_finish_omp_master (input_location,
                              cp_parser_omp_structured_block (parser));
}

/* OpenMP 2.5:
   # pragma omp ordered new-line
     structured-block  */

static tree
cp_parser_omp_ordered (cp_parser *parser, cp_token *pragma_tok)
{
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;
  cp_parser_require_pragma_eol (parser, pragma_tok);
  return c_finish_omp_ordered (loc, cp_parser_omp_structured_block (parser));
}

/* OpenMP 2.5:

   section-scope:
     { section-sequence }

   section-sequence:
     section-directive[opt] structured-block
     section-sequence section-directive structured-block  */

static tree
cp_parser_omp_sections_scope (cp_parser *parser)
{
  tree stmt, substmt;
  bool error_suppress = false;
  cp_token *tok;

  if (!cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE))
    return NULL_TREE;

  stmt = push_stmt_list ();

  /* The first section-directive is optional: statements before the first
     "#pragma omp section" form an implicit first section.  */
  if (cp_lexer_peek_token (parser->lexer)->pragma_kind != PRAGMA_OMP_SECTION)
    {
      unsigned save;

      substmt = begin_omp_structured_block ();
      save = cp_parser_begin_omp_structured_block (parser);

      while (1)
        {
          cp_parser_statement (parser, NULL_TREE, false, NULL);

          tok = cp_lexer_peek_token (parser->lexer);
          if (tok->pragma_kind == PRAGMA_OMP_SECTION)
            break;
          if (tok->type == CPP_CLOSE_BRACE)
            break;
          if (tok->type == CPP_EOF)
            break;
        }

      cp_parser_end_omp_structured_block (parser, save);
      substmt = finish_omp_structured_block (substmt);
      substmt = build1 (OMP_SECTION, void_type_node, substmt);
      add_stmt (substmt);
    }

  /* Each remaining section must be introduced by its own directive.  */
  while (1)
    {
      tok = cp_lexer_peek_token (parser->lexer);
      if (tok->type == CPP_CLOSE_BRACE)
        break;
      if (tok->type == CPP_EOF)
        break;

      if (tok->pragma_kind == PRAGMA_OMP_SECTION)
        {
          cp_lexer_consume_token (parser->lexer);
          cp_parser_require_pragma_eol (parser, tok);
          /* A valid directive re-arms the "missing directive" diagnostic.  */
          error_suppress = false;
        }
      else if (!error_suppress)
        {
          cp_parser_error (parser, "expected %<#pragma omp section%> or %<}%>");
          error_suppress = true;
        }

      substmt = cp_parser_omp_structured_block (parser);
      substmt = build1 (OMP_SECTION, void_type_node, substmt);
      add_stmt (substmt);
    }
  cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);

  substmt =
pop_stmt_list (stmt);

  /* Wrap the collected OMP_SECTION statements in an OMP_SECTIONS node.  */
  stmt = make_node (OMP_SECTIONS);
  TREE_TYPE (stmt) = void_type_node;
  OMP_SECTIONS_BODY (stmt) = substmt;

  add_stmt (stmt);
  return stmt;
}

/* OpenMP 2.5:
   # pragma omp sections sections-clause[optseq] newline
     sections-scope  */

#define OMP_SECTIONS_CLAUSE_MASK                        \
        ( (1u << PRAGMA_OMP_CLAUSE_PRIVATE)             \
        | (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)        \
        | (1u << PRAGMA_OMP_CLAUSE_LASTPRIVATE)         \
        | (1u << PRAGMA_OMP_CLAUSE_REDUCTION)           \
        | (1u << PRAGMA_OMP_CLAUSE_NOWAIT))

/* Parse a "#pragma omp sections": the clauses, then the brace-delimited
   scope holding the individual sections.  */

static tree
cp_parser_omp_sections (cp_parser *parser, cp_token *pragma_tok)
{
  tree clauses, ret;

  clauses = cp_parser_omp_all_clauses (parser, OMP_SECTIONS_CLAUSE_MASK,
                                       "#pragma omp sections", pragma_tok);

  ret = cp_parser_omp_sections_scope (parser);
  if (ret)
    OMP_SECTIONS_CLAUSES (ret) = clauses;

  return ret;
}

/* OpenMP 2.5:
   # pragma parallel parallel-clause new-line
   # pragma parallel for parallel-for-clause new-line
   # pragma parallel sections parallel-sections-clause new-line  */

#define OMP_PARALLEL_CLAUSE_MASK                        \
        ( (1u << PRAGMA_OMP_CLAUSE_IF)                  \
        | (1u << PRAGMA_OMP_CLAUSE_PRIVATE)             \
        | (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)        \
        | (1u << PRAGMA_OMP_CLAUSE_DEFAULT)             \
        | (1u << PRAGMA_OMP_CLAUSE_SHARED)              \
        | (1u << PRAGMA_OMP_CLAUSE_COPYIN)              \
        | (1u << PRAGMA_OMP_CLAUSE_REDUCTION)           \
        | (1u << PRAGMA_OMP_CLAUSE_NUM_THREADS))

/* Parse a "#pragma omp parallel", including the combined forms
   "parallel for" and "parallel sections", whose clause masks are merged
   in (minus "nowait", which is invalid on the combined forms).  */

static tree
cp_parser_omp_parallel (cp_parser *parser, cp_token *pragma_tok)
{
  enum pragma_kind p_kind = PRAGMA_OMP_PARALLEL;
  const char *p_name = "#pragma omp parallel";
  tree stmt, clauses, par_clause, ws_clause, block;
  unsigned int mask = OMP_PARALLEL_CLAUSE_MASK;
  unsigned int save;
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;

  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_FOR))
    {
      /* Combined "parallel for" construct.  */
      cp_lexer_consume_token (parser->lexer);
      p_kind = PRAGMA_OMP_PARALLEL_FOR;
      p_name = "#pragma omp parallel for";
      mask |= OMP_FOR_CLAUSE_MASK;
      mask &= ~(1u << PRAGMA_OMP_CLAUSE_NOWAIT);
    }
  else if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      /* "sections" is not a keyword; compare the identifier's spelling.  */
      tree id = cp_lexer_peek_token
*/

/* Dispatch an OpenMP statement pragma to the parser for the individual
   construct, then attach the pragma's location to the resulting tree.  */

static void
cp_parser_omp_construct (cp_parser *parser, cp_token *pragma_tok)
{
  tree stmt;

  switch (pragma_tok->pragma_kind)
    {
    case PRAGMA_OMP_ATOMIC:
      /* Atomic is handled entirely inside its own parser; no tree to
         attach a location to here.  */
      cp_parser_omp_atomic (parser, pragma_tok);
      return;
    case PRAGMA_OMP_CRITICAL:
      stmt = cp_parser_omp_critical (parser, pragma_tok);
      break;
    case PRAGMA_OMP_FOR:
      stmt = cp_parser_omp_for (parser, pragma_tok);
      break;
    case PRAGMA_OMP_MASTER:
      stmt = cp_parser_omp_master (parser, pragma_tok);
      break;
    case PRAGMA_OMP_ORDERED:
      stmt = cp_parser_omp_ordered (parser, pragma_tok);
      break;
    case PRAGMA_OMP_PARALLEL:
      stmt = cp_parser_omp_parallel (parser, pragma_tok);
      break;
    case PRAGMA_OMP_SECTIONS:
      stmt = cp_parser_omp_sections (parser, pragma_tok);
      break;
    case PRAGMA_OMP_SINGLE:
      stmt = cp_parser_omp_single (parser, pragma_tok);
      break;
    case PRAGMA_OMP_TASK:
      stmt = cp_parser_omp_task (parser, pragma_tok);
      break;
    default:
      gcc_unreachable ();
    }

  if (stmt)
    SET_EXPR_LOCATION (stmt, pragma_tok->location);
}

/* Transactional Memory parsing routines.  */

/* Parse a transaction attribute.

   txn-attribute:
        attribute
        [ [ identifier ] ]

   ??? Simplify this when C++0x bracket attributes are
   implemented properly.  */

static tree
cp_parser_txn_attribute_opt (cp_parser *parser)
{
  cp_token *token;
  tree attr_name, attr = NULL;

  /* GNU-style __attribute__ syntax is accepted as-is.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ATTRIBUTE))
    return cp_parser_attributes_opt (parser);

  if (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_SQUARE))
    return NULL_TREE;
  cp_lexer_consume_token (parser->lexer);
  if (!cp_parser_require (parser, CPP_OPEN_SQUARE, RT_OPEN_SQUARE))
    goto error1;

  token = cp_lexer_peek_token (parser->lexer);
  if (token->type == CPP_NAME || token->type == CPP_KEYWORD)
    {
      token = cp_lexer_consume_token (parser->lexer);

      attr_name = (token->type == CPP_KEYWORD
                   /* For keywords, use the canonical spelling,
                      not the parsed identifier.  */
                   ? ridpointers[(int) token->keyword]
                   : token->u.value);
      attr = build_tree_list (attr_name, NULL_TREE);
    }
  else
    cp_parser_error (parser, "expected identifier");

  /* First of the two closing square brackets of "[[ ident ]]"; the second
     is consumed after the error1 label so both paths require it.  */
  cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
 error1:
  cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
  return attr;
}

/* Parse a __transaction_atomic or __transaction_relaxed statement.

   transaction-statement:
     __transaction_atomic txn-attribute[opt] txn-noexcept-spec[opt]
       compound-statement
     __transaction_relaxed txn-noexcept-spec[opt] compound-statement
*/

static tree
cp_parser_transaction (cp_parser *parser, enum rid keyword)
{
  unsigned char old_in = parser->in_transaction;
  unsigned char this_in = 1, new_in;
  cp_token *token;
  tree stmt, attrs, noex;

  gcc_assert (keyword == RID_TRANSACTION_ATOMIC
      || keyword == RID_TRANSACTION_RELAXED);
  token = cp_parser_require_keyword (parser, keyword,
      (keyword == RID_TRANSACTION_ATOMIC ? RT_TRANSACTION_ATOMIC
          : RT_TRANSACTION_RELAXED));
  gcc_assert (token != NULL);

  if (keyword == RID_TRANSACTION_RELAXED)
    this_in |= TM_STMT_ATTR_RELAXED;
  else
    {
      /* Only the atomic form may carry a txn-attribute.  */
      attrs = cp_parser_txn_attribute_opt (parser);
      if (attrs)
        this_in |= parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER);
    }

  /* Parse a noexcept specification.  */
  noex = cp_parser_noexcept_specification_opt (parser, true, NULL, true);

  /* Keep track if we're in the lexical scope of an outer transaction.  */
  new_in = this_in | (old_in & TM_STMT_ATTR_OUTER);

  stmt = begin_transaction_stmt (token->location, NULL, this_in);

  parser->in_transaction = new_in;
  cp_parser_compound_statement (parser, NULL, false, false);
  parser->in_transaction = old_in;

  finish_transaction_stmt (stmt, NULL, this_in, noex);

  return stmt;
}

/* Parse a __transaction_atomic or __transaction_relaxed expression.
   transaction-expression:
     __transaction_atomic txn-noexcept-spec[opt] ( expression )
     __transaction_relaxed txn-noexcept-spec[opt] ( expression )
*/

static tree
cp_parser_transaction_expression (cp_parser *parser, enum rid keyword)
{
  unsigned char old_in = parser->in_transaction;
  unsigned char this_in = 1;
  cp_token *token;
  tree expr, noex;
  bool noex_expr;

  gcc_assert (keyword == RID_TRANSACTION_ATOMIC
      || keyword == RID_TRANSACTION_RELAXED);

  if (!flag_tm)
    error (keyword == RID_TRANSACTION_RELAXED
           ? G_("%<__transaction_relaxed%> without transactional memory "
                "support enabled")
           : G_("%<__transaction_atomic%> without transactional memory "
                "support enabled"));

  token = cp_parser_require_keyword (parser, keyword,
      (keyword == RID_TRANSACTION_ATOMIC ? RT_TRANSACTION_ATOMIC
          : RT_TRANSACTION_RELAXED));
  gcc_assert (token != NULL);

  if (keyword == RID_TRANSACTION_RELAXED)
    this_in |= TM_STMT_ATTR_RELAXED;

  /* Set this early.  This might mean that we allow transaction_cancel in
     an expression that we find out later actually has to be a constexpr.
     However, we expect that cxx_constant_value will be able to deal with
     this; also, if the noexcept has no constexpr, then what we parse next
     really is a transaction's body.  */
  parser->in_transaction = this_in;

  /* Parse a noexcept specification.  */
  noex = cp_parser_noexcept_specification_opt (parser, false, &noex_expr,
                                               true);

  if (!noex || !noex_expr
      || cp_lexer_peek_token (parser->lexer)->type == CPP_OPEN_PAREN)
    {
      cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);

      expr = cp_parser_expression (parser, /*cast_p=*/false, NULL);
      finish_parenthesized_expr (expr);

      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
    }
  else
    {
      /* The only expression that is available got parsed for the noexcept
         already.  noexcept is true then.  */
      expr = noex;
      noex = boolean_true_node;
    }

  expr = build_transaction_expr (token->location, expr, this_in, noex);
  parser->in_transaction = old_in;

  if (cp_parser_non_integral_constant_expression (parser, NIC_TRANSACTION))
    return error_mark_node;

  return (flag_tm ? expr : error_mark_node);
}

/* Parse a function-transaction-block.

   function-transaction-block:
     __transaction_atomic txn-attribute[opt] ctor-initializer[opt]
         function-body
     __transaction_atomic txn-attribute[opt] function-try-block
     __transaction_relaxed ctor-initializer[opt] function-body
     __transaction_relaxed function-try-block
*/

static bool
cp_parser_function_transaction (cp_parser *parser, enum rid keyword)
{
  unsigned char old_in = parser->in_transaction;
  unsigned char new_in = 1;
  tree compound_stmt, stmt, attrs;
  bool ctor_initializer_p;
  cp_token *token;

  gcc_assert (keyword == RID_TRANSACTION_ATOMIC
      || keyword == RID_TRANSACTION_RELAXED);
  token = cp_parser_require_keyword (parser, keyword,
      (keyword == RID_TRANSACTION_ATOMIC ? RT_TRANSACTION_ATOMIC
          : RT_TRANSACTION_RELAXED));
  gcc_assert (token != NULL);

  if (keyword == RID_TRANSACTION_RELAXED)
    new_in |= TM_STMT_ATTR_RELAXED;
  else
    {
      /* Only the atomic form may carry a txn-attribute.  */
      attrs = cp_parser_txn_attribute_opt (parser);
      if (attrs)
        new_in |= parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER);
    }

  stmt = begin_transaction_stmt (token->location, &compound_stmt, new_in);

  parser->in_transaction = new_in;

  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TRY))
    ctor_initializer_p = cp_parser_function_try_block (parser);
  else
    ctor_initializer_p
      = cp_parser_ctor_initializer_opt_and_function_body (parser);

  parser->in_transaction = old_in;

  finish_transaction_stmt (stmt, compound_stmt, new_in, NULL_TREE);

  return ctor_initializer_p;
}

/* Parse a __transaction_cancel statement.

   cancel-statement:
     __transaction_cancel txn-attribute[opt] ;
     __transaction_cancel txn-attribute[opt] throw-expression ;

   ??? Cancel and throw is not yet implemented.
*/

/* Parse "__transaction_cancel [[outer]] ;" and build the abort call,
   after verifying the surrounding transaction context allows it.  */

static tree
cp_parser_transaction_cancel (cp_parser *parser)
{
  cp_token *token;
  bool is_outer = false;
  tree stmt, attrs;

  token = cp_parser_require_keyword (parser, RID_TRANSACTION_CANCEL,
                                     RT_TRANSACTION_CANCEL);
  gcc_assert (token != NULL);

  attrs = cp_parser_txn_attribute_opt (parser);
  if (attrs)
    is_outer = (parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER) != 0);

  /* ??? Parse cancel-and-throw here.  */

  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

  if (!flag_tm)
    {
      error_at (token->location, "%<__transaction_cancel%> without "
                "transactional memory support enabled");
      return error_mark_node;
    }
  else if (parser->in_transaction & TM_STMT_ATTR_RELAXED)
    {
      error_at (token->location, "%<__transaction_cancel%> within a "
                "%<__transaction_relaxed%>");
      return error_mark_node;
    }
  else if (is_outer)
    {
      /* An outer cancel must be lexically inside an outer atomic
         transaction or a function marked may-cancel-outer.  */
      if ((parser->in_transaction & TM_STMT_ATTR_OUTER) == 0
          && !is_tm_may_cancel_outer (current_function_decl))
        {
          error_at (token->location, "outer %<__transaction_cancel%> not "
                    "within outer %<__transaction_atomic%>");
          error_at (token->location,
                    "  or a %<transaction_may_cancel_outer%> function");
          return error_mark_node;
        }
    }
  else if (parser->in_transaction == 0)
    {
      error_at (token->location, "%<__transaction_cancel%> not within "
                "%<__transaction_atomic%>");
      return error_mark_node;
    }

  stmt = build_tm_abort_call (token->location, is_outer);
  add_stmt (stmt);
  finish_stmt ();

  return stmt;
}

/* The parser.  */

static GTY (()) cp_parser *the_parser;

/* Special handling for the first token or line in the file.  The first
   thing in the file might be #pragma GCC pch_preprocess, which loads a
   PCH file, which is a GC collection point.  So we need to handle this
   first pragma without benefit of an existing lexer structure.

   Always returns one token to the caller in *FIRST_TOKEN.  This is
   either the true first token of the file, or the first token after
   the initial pragma.  */

static void
cp_parser_initial_pragma (cp_token *first_token)
{
  tree name = NULL;

  cp_lexer_get_preprocessor_token (NULL, first_token);
  if (first_token->pragma_kind != PRAGMA_GCC_PCH_PREPROCESS)
    return;

  cp_lexer_get_preprocessor_token (NULL, first_token);
  if (first_token->type == CPP_STRING)
    {
      name = first_token->u.value;

      cp_lexer_get_preprocessor_token (NULL, first_token);
      if (first_token->type != CPP_PRAGMA_EOL)
        error_at (first_token->location,
                  "junk at end of %<#pragma GCC pch_preprocess%>");
    }
  else
    error_at (first_token->location, "expected string literal");

  /* Skip to the end of the pragma.  */
  while (first_token->type != CPP_PRAGMA_EOL && first_token->type != CPP_EOF)
    cp_lexer_get_preprocessor_token (NULL, first_token);

  /* Now actually load the PCH file.  */
  if (name)
    c_common_pch_pragma (parse_in, TREE_STRING_POINTER (name));

  /* Read one more token to return to our caller.  We have to do this
     after reading the PCH file in, since its pointers have to be
     live.  */
  cp_lexer_get_preprocessor_token (NULL, first_token);
}

/* Normal parsing of a pragma token.  Here we can (and must) use the
   regular lexer.
*/

/* Dispatch a pragma in statement or declaration context.  Returns true
   if the pragma produced a statement the caller must account for, false
   otherwise (in which case the rest of the pragma line is skipped).  */

static bool
cp_parser_pragma (cp_parser *parser, enum pragma_context context)
{
  cp_token *pragma_tok;
  unsigned int id;

  pragma_tok = cp_lexer_consume_token (parser->lexer);
  gcc_assert (pragma_tok->type == CPP_PRAGMA);
  /* Tell the lexer not to merge the pragma-end-of-line token away.  */
  parser->lexer->in_pragma = true;

  id = pragma_tok->pragma_kind;
  switch (id)
    {
    case PRAGMA_GCC_PCH_PREPROCESS:
      /* Only valid as the very first thing in the file; see
         cp_parser_initial_pragma.  */
      error_at (pragma_tok->location,
                "%<#pragma GCC pch_preprocess%> must be first");
      break;

    case PRAGMA_OMP_BARRIER:
      switch (context)
        {
        case pragma_compound:
          cp_parser_omp_barrier (parser, pragma_tok);
          return false;
        case pragma_stmt:
          error_at (pragma_tok->location, "%<#pragma omp barrier%> may only be "
                    "used in compound statements");
          break;
        default:
          goto bad_stmt;
        }
      break;

    case PRAGMA_OMP_FLUSH:
      switch (context)
        {
        case pragma_compound:
          cp_parser_omp_flush (parser, pragma_tok);
          return false;
        case pragma_stmt:
          error_at (pragma_tok->location, "%<#pragma omp flush%> may only be "
                    "used in compound statements");
          break;
        default:
          goto bad_stmt;
        }
      break;

    case PRAGMA_OMP_TASKWAIT:
      switch (context)
        {
        case pragma_compound:
          cp_parser_omp_taskwait (parser, pragma_tok);
          return false;
        case pragma_stmt:
          error_at (pragma_tok->location,
                    "%<#pragma omp taskwait%> may only be "
                    "used in compound statements");
          break;
        default:
          goto bad_stmt;
        }
      break;

    case PRAGMA_OMP_TASKYIELD:
      switch (context)
        {
        case pragma_compound:
          cp_parser_omp_taskyield (parser, pragma_tok);
          return false;
        case pragma_stmt:
          error_at (pragma_tok->location,
                    "%<#pragma omp taskyield%> may only be "
                    "used in compound statements");
          break;
        default:
          goto bad_stmt;
        }
      break;

    case PRAGMA_OMP_THREADPRIVATE:
      cp_parser_omp_threadprivate (parser, pragma_tok);
      return false;

    case PRAGMA_OMP_ATOMIC:
    case PRAGMA_OMP_CRITICAL:
    case PRAGMA_OMP_FOR:
    case PRAGMA_OMP_MASTER:
    case PRAGMA_OMP_ORDERED:
    case PRAGMA_OMP_PARALLEL:
    case PRAGMA_OMP_SECTIONS:
    case PRAGMA_OMP_SINGLE:
    case PRAGMA_OMP_TASK:
      if (context == pragma_external)
        goto bad_stmt;
      cp_parser_omp_construct (parser, pragma_tok);
      return true;

    case PRAGMA_OMP_SECTION:
      /* Section directives are only parsed inside the sections scope.  */
      error_at (pragma_tok->location,
                "%<#pragma omp section%> may only be used in "
                "%<#pragma omp sections%> construct");
      break;

    default:
      /* Anything else is an externally registered pragma handler.  */
      gcc_assert (id >= PRAGMA_FIRST_EXTERNAL);
      c_invoke_pragma_handler (id);
      break;

    bad_stmt:
      cp_parser_error (parser, "expected declaration specifiers");
      break;
    }

  cp_parser_skip_to_pragma_eol (parser, pragma_tok);
  return false;
}

/* The interface the pragma parsers have to the lexer.  */

enum cpp_ttype
pragma_lex (tree *value)
{
  cp_token *tok;
  enum cpp_ttype ret;

  tok = cp_lexer_peek_token (the_parser->lexer);

  ret = tok->type;
  *value = tok->u.value;

  if (ret == CPP_PRAGMA_EOL || ret == CPP_EOF)
    ret = CPP_EOF;
  else if (ret == CPP_STRING)
    /* String literals are consumed (and concatenated) by the call.  */
    *value = cp_parser_string_literal (the_parser, false, false);
  else
    {
      cp_lexer_consume_token (the_parser->lexer);
      if (ret == CPP_KEYWORD)
        ret = CPP_NAME;
    }

  return ret;
}


/* External interface.  */

/* Parse one entire translation unit.  */

void
c_parse_file (void)
{
  static bool already_called = false;

  if (already_called)
    {
      sorry ("inter-module optimizations not implemented for C++");
      return;
    }
  already_called = true;

  the_parser = cp_parser_new ();
  push_deferring_access_checks (flag_access_control
                                ? dk_no_deferred : dk_no_check);
  cp_parser_translation_unit (the_parser);
  the_parser = NULL;
}

#include "gt-cp-parser.h"
mult_impl_gsl.h
#ifndef _MULT_IMPL_GSL_H
#define _MULT_IMPL_GSL_H

CPS_END_NAMESPACE
#include<alg/a2a/gsl_wrapper.h>
CPS_START_NAMESPACE

//Implementations for meson field contractions

//Helper class wrapping the GSL-based implementation of the meson-field
//matrix product.  A class (rather than a free function) is used to avoid
//an ambiguous overload when the meson field befriends mult.
template<typename mf_Policies, template <typename> class lA2AfieldL, template <typename> class lA2AfieldR, template <typename> class rA2AfieldL, template <typename> class rA2AfieldR >
class _mult_impl{ //necessary to avoid an annoying ambigous overload when mesonfield friends mult
public:
  //GSL wrapper specialized to the scalar (float or double) underlying the complex type
  typedef gsl_wrapper<typename mf_Policies::ScalarComplexType::value_type> gw;

  //Matrix product of meson field pairs
  //out(t1,t4) = l(t1,t2) * r(t3,t4) (The stored timeslices are only used to unpack TimePackedIndex so it doesn't matter if t2 and t3 are thrown away; their indices are contracted over hence the times are not needed)

  //Return the divisor of 'of' closest to base_divisor (ties resolved
  //towards the smaller candidate).  Falls back to 'of' itself if no
  //divisor is found in either direction.  Used to pick block counts that
  //divide the matrix dimensions exactly.
  inline static int nearest_divisor(const int of, const int base_divisor){
    //printf("nearest_divisor of %d, base_divisor %d\n", of, base_divisor); fflush(stdout);
    assert(base_divisor > 0);
    if(of % base_divisor == 0) return base_divisor;

    //Scan downwards for the nearest divisor below base_divisor
    int nearest_below = base_divisor;
    bool no_nearest_below = false;
    while(of % nearest_below != 0){
      --nearest_below;
      if(nearest_below == 0){ no_nearest_below = true; break; }
    }
    //Scan upwards for the nearest divisor above base_divisor
    int nearest_above = base_divisor;
    bool no_nearest_above = false;
    while(of % nearest_above !=0){
      ++nearest_above;
      if(nearest_above == of){ no_nearest_above = true; break; }
    }
    if(no_nearest_above && no_nearest_below) return of;
    if(no_nearest_below) return nearest_above;
    if(no_nearest_above) return nearest_below;

    int sep_above = nearest_above - base_divisor;
    int sep_below = base_divisor - nearest_below;
    return sep_above < sep_below ? nearest_above : nearest_below;
  }

  //Compute out = l * r as a blocked complex matrix product using GSL BLAS
  //(gemm on submatrix blocks).  The contracted mode index j is first
  //compacted via ModeContractionIndices so only modes common to both
  //fields are multiplied.  If node_local is false, work is distributed
  //over nodes (via getNodeWork) and the result is summed over nodes at
  //the end; otherwise each node does all the work with its own threads.
  static void mult(A2AmesonField<mf_Policies,lA2AfieldL,rA2AfieldR> &out, const A2AmesonField<mf_Policies,lA2AfieldL,lA2AfieldR> &l, const A2AmesonField<mf_Policies,rA2AfieldL,rA2AfieldR> &r, const bool node_local){
    typedef typename mf_Policies::ScalarComplexType ScalarComplexType;
    typedef typename ScalarComplexType::value_type mf_Float;

    //NOTE(review): this assert is satisfied unless out aliases BOTH l and
    //r; if the intent is to forbid out aliasing either operand it should
    //use && rather than || -- confirm before changing, as a fix would
    //newly abort callers that alias a single operand.
    assert( (void*)&out != (void*)&l || (void*)&out != (void*)&r );

    if(! l.getColParams().paramsEqual( r.getRowParams() ) ){
      if(!UniqueID()){
	printf("mult(): Illegal matrix product: underlying vector parameters must match\n"); fflush(stdout);
	std::cout << "left-column: " << l.getColParams().print() << "\n";
	std::cout << "right-row: " << r.getRowParams().print() << "\n";
	std::cout.flush();
      }
      exit(-1);
    }

    out.setup(l.getRowParams(),r.getColParams(), l.tl, r.tr ); //zeroes output, so safe to re-use

    int ni = l.getNrows();
    int nk = r.getNcols();

    //Build the map from the compacted contraction index j to the mode
    //indices of the left and right fields
    typedef typename A2AmesonField<mf_Policies,lA2AfieldL,lA2AfieldR>::RightDilutionType LeftDilutionType;
    typedef typename A2AmesonField<mf_Policies,rA2AfieldL,rA2AfieldR>::LeftDilutionType RightDilutionType;

    ModeContractionIndices<LeftDilutionType,RightDilutionType> j_ind2(l.getColParams()); //these maps could be cached somewhere

    modeIndexSet lmodeparams; lmodeparams.time = l.tr;
    modeIndexSet rmodeparams; rmodeparams.time = r.tl;

    int nj = j_ind2.getNindices(lmodeparams,rmodeparams);

    int jlmap[nj], jrmap[nj];
    for(int j = 0; j < nj; j++)
      j_ind2.getBothIndices(jlmap[j],jrmap[j],j,lmodeparams,rmodeparams);

    //Try a blocked matrix multiply
    //Because ni, nj are different and not necessarily multiples of a common blocking we need to dynamically choose the block size
    int nodes = 1;
    for(int i=0;i<5;i++) nodes *= GJP.Nodes(i);

    int compute_elements = omp_get_max_threads() * ( node_local ? 1 : nodes );

    //Want the total number of blocks to be close to the number of compute elements = (number of nodes)*(number of threads)
    //We shouldn't just take the cubed-root though because quite often the number of indices differs substantially
    //We want ni0 * nj0 * nk0 = nodes
    //and the ratios to be approximately the same between the number of blocks and the number of indices
    //Take ratios wrt smallest so these are always >=1
    int smallest = ni;
    if(nj < smallest) smallest = nj;
    if(nk < smallest) smallest = nk;

    int ratios[3] = {ni/smallest, nj/smallest, nk/smallest};

    int base = (int)pow( compute_elements/ratios[0]/ratios[1]/ratios[2], 1/3.); //compute_element
    if(!base) ++base;

    //Round each block count to an exact divisor of its dimension
    int ni0 = nearest_divisor(ni, ratios[0]*base);
    int nj0 = nearest_divisor(nj, ratios[1]*base);
    int nk0 = nearest_divisor(nk, ratios[2]*base);

    assert(ni % ni0 == 0);
    assert(nj % nj0 == 0);
    assert(nk % nk0 == 0);

    //Block sizes along each dimension
    int bi = ni/ni0;
    int bj = nj/nj0;
    int bk = nk/nk0;

    //parallelize ijk
    int work = ni0 * nj0 * nk0;
    int node_work, node_off; bool do_work;
    getNodeWork(work,node_work,node_off,do_work,node_local);

    //if(!UniqueID()) printf("mult sizes %d %d %d block sizes %d %d %d, num blocks %d %d %d. Work %d, node_work %d\n",ni,nj,nk,bi,bj,bk,ni0,nj0,nk0,work,node_work);

    if(do_work){
      Float t1 = dclock();
      //complex mult re = re*re - im*im, im = re*im + im*re //6 flops
      //complex add 2 flops
      Float flops_total = Float(ni)*Float(nk)*Float(nj)*8.;

      //Reorder the contracted dimension of both fields so the compacted j
      //index is contiguous for the BLAS call
      A2AmesonField<mf_Policies,lA2AfieldL,lA2AfieldR> lreord;
      A2AmesonField<mf_Policies,rA2AfieldL,rA2AfieldR> rreord;
#ifndef MEMTEST_MODE
      r.rowReorder(rreord,jrmap,nj);
      l.colReorder(lreord,jlmap,nj);
#endif

      //Copy the reordered fields into GSL complex matrices
      typename gw::matrix_complex *lreord_gsl = gw::matrix_complex_alloc(ni,nj);
      typename gw::matrix_complex *rreord_gsl = gw::matrix_complex_alloc(nj,nk);

#ifndef MEMTEST_MODE
#pragma omp parallel for
      for(int i=0;i<ni;i++)
	for(int j=0;j<nj;j++){
	  const ScalarComplexType & el = lreord(i, j);
	  mf_Float *el_gsl = (mf_Float*)gw::matrix_complex_ptr(lreord_gsl,i,j);
	  *(el_gsl++) = std::real(el);
	  *(el_gsl) = std::imag(el);
	}
#pragma omp parallel for
      for(int j=0;j<nj;j++)
	for(int k=0;k<nk;k++){
	  const ScalarComplexType & el = rreord(j, k);
	  mf_Float *el_gsl = (mf_Float*)gw::matrix_complex_ptr(rreord_gsl,j,k);
	  *(el_gsl++) = std::real(el);
	  *(el_gsl) = std::imag(el);
	}
#endif

      static const int lcol_stride = 1;
      int rrow_stride = rreord.getNcols();

      //Each flattened work item i0j0k0 identifies one (i0,j0,k0) block
      //triple; decompose the index and scale up to element offsets
#pragma omp parallel for
      for(int i0j0k0 = node_off; i0j0k0 < node_off + node_work; ++i0j0k0){
	int rem = i0j0k0;
	int k0 = rem % nk0; rem /= nk0;
	int j0 = rem % nj0; rem /= nj0;
	int i0 = rem;
	i0 *= bi; j0 *= bj; k0 *= bk;

	typename gw::complex tmp;
	typename gw::matrix_complex *tmp_out = gw::matrix_complex_alloc(bi,bk);

	//Submatrix views into the reordered operands (no copies)
	typename gw::matrix_complex_const_view ijblock_view = gw::matrix_complex_const_submatrix(lreord_gsl,i0,j0,bi,bj);
	typename gw::matrix_complex_const_view jkblock_view = gw::matrix_complex_const_submatrix(rreord_gsl,j0,k0,bj,bk);

	const typename gw::matrix_complex *const ijblock = &ijblock_view.matrix; //gw::matrix_complex_alloc(bi,bj);
	const typename gw::matrix_complex *const jkblock = &jkblock_view.matrix; //gw::matrix_complex_alloc(bj,bk);

	typename gw::complex one;  GSL_SET_COMPLEX(&one,1.0,0.0);
	typename gw::complex zero; GSL_SET_COMPLEX(&zero,0.0,0.0);

#ifndef MEMTEST_MODE
	gw::matrix_complex_set_zero(tmp_out);
	//tmp_out = 1 * ijblock * jkblock + 0 * tmp_out
	gw::blas_gemm(CblasNoTrans, CblasNoTrans, one, ijblock, jkblock, zero, tmp_out);

	//Accumulate the block result into out.  Different j0 blocks write
	//the same (i,k) entries, hence the atomic updates.
	for(int i=0;i<bi;i++)
	  for(int k=0;k<bk;k++){
	    mf_Float const* el = (mf_Float const*)gw::matrix_complex_ptr(tmp_out,i,k);
	    mf_Float(&out_el)[2] = reinterpret_cast<mf_Float(&)[2]>(out(i0+i,k0+k));
#pragma omp atomic
	    out_el[0] += *(el++);
#pragma omp atomic
	    out_el[1] += *(el);
	  }
#endif
	gw::matrix_complex_free(tmp_out);
      }

      Float t2 = dclock();
      Float flops_per_sec = flops_total/(t2-t1);
      //if(!UniqueID()) printf("node mult flops/s %g (time %f total flops %g)\n",flops_per_sec,t2-t1,flops_total);

      gw::matrix_complex_free(lreord_gsl);
      gw::matrix_complex_free(rreord_gsl);
    }

    //Combine the partial results from all nodes when the work was
    //distributed
    Float time = -dclock();
    if(!node_local) out.nodeSum();
    time += dclock();
    //if(!UniqueID()) printf("mult comms time %g s\n",time);
  }
};

#endif
Jacobi2D-NaiveParallel-OMP_static.test.c
/****************************************************************************** * Jacobi2D benchmark * Basic parallelisation with OpenMP * * Usage: * make omp * export OMP_NUM_THREADS=8 * bin/Jacobi2D-NaiveParallel-OMP \ * `cat src/Jacobi2D-NaiveParallel-OMP.perfexecopts` * For a run on 8 threads ******************************************************************************/ #include <stdio.h> #include <omp.h> #include <time.h> #include <stdlib.h> #include <getopt.h> #include <stdbool.h> #include <ctype.h> #include <math.h> #include <assert.h> #define STENCIL(read,write,x,y) space[write][x][y] = \ ( space[read][x-1][y] +\ space[read][x][y] +\ space[read][x+1][y] +\ space[read][x][y+1] +\ space[read][x][y-1] )/5; #include "util.h" // main // Stages // 1 - command line parsing // 2 - data allocation and initialization // 3 - jacobi 1D timed within an openmp loop // 4 - output and optional verification int main( int argc, char* argv[] ){ // rather than calling fflush setbuf(stdout, NULL); // 1 - command line parsing Params cmdLineArgs; parseCmdLineArgs(&cmdLineArgs,argc,argv); // 2 - data allocation and initialization int lowerBound = 1; int upperBound = lowerBound + cmdLineArgs.problemSize - 1; double** space[2]; int i; // allocate x axis space[0] = (double**)malloc((cmdLineArgs.problemSize + 2) * sizeof(double*)); space[1] = (double**)malloc((cmdLineArgs.problemSize + 2) * sizeof(double*)); if( space[0] == NULL || space[1] == NULL ){ printf( "Could not allocate x axis of space array\n" ); exit(0); } // allocate y axis for( i = 0; i < cmdLineArgs.problemSize + 2; ++i ){ space[0][i]=(double*)malloc((cmdLineArgs.problemSize + 2) * sizeof(double)); space[1][i]=(double*)malloc((cmdLineArgs.problemSize + 2) * sizeof(double)); if( space[0][i] == NULL || space[1][i] == NULL ){ printf( "Could not allocate y axis of space array\n" ); exit(0); } } // use global seed to seed the random number gen (will be constant) srand(cmdLineArgs.globalSeed); // first touch for openmp int x, 
y; #pragma omp parallel for private( x, y ) collapse(2) schedule(static) for( x = lowerBound; x <= upperBound; ++x ){ for( y = lowerBound; y <= upperBound; ++y ){ space[0][x][y] = 0; } } // seed the space. for( x = lowerBound; x <= upperBound; ++x ){ for( y = lowerBound; y <= upperBound; ++y ){ space[0][x][y] = rand() / (double)rand(); } } // set halo values (sanity) for( i = 0; i < cmdLineArgs.problemSize + 2; ++i){ space[0][i][0] = 0; space[1][i][0] = 0; space[0][i][cmdLineArgs.problemSize + 1] = 0; space[1][i][cmdLineArgs.problemSize + 1] = 0; space[0][0][i] = 0; space[1][0][i] = 0; space[0][cmdLineArgs.problemSize + 1][i] = 0; space[1][cmdLineArgs.problemSize + 1][i] = 0; } // 3 - jacobi 2D timed within an openmp loop double start_time = omp_get_wtime(); int t,read=0,write=1; for( t = 1; t <= cmdLineArgs.T; ++t ){ #pragma omp parallel for private( x, y ) collapse(2) schedule(static) for( x = lowerBound; x <= upperBound; ++x ){ for( y = lowerBound; y <= upperBound; ++y ){ STENCIL( read, write, x, y); } } read = write; write = 1 - write; } double end_time = omp_get_wtime(); double time = (end_time - start_time); // 4 - output and optional verification if( cmdLineArgs.printtime ){ /* printf( "Threads: %d, P: %d, ",cmdLineArgs.cores, cmdLineArgs.problemSize); */ printf( "Time: %f", time ); } if( cmdLineArgs.verify ){ if(!verifyResultJacobi2D(space[cmdLineArgs.T & 1],cmdLineArgs.problemSize, cmdLineArgs.globalSeed,cmdLineArgs.T )){ fprintf(stderr,"FAILURE\n"); }else{ fprintf(stderr,"SUCCESS\n"); } } }
dgemm.c
/* Copyright (c) 2013, Intel Corporation Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /********************************************************************************* NAME: dgemm PURPOSE: This program tests the efficiency with which a dense matrix dense multiplication is carried out USAGE: The program takes as input the number of threads, the matrix order, the number of times the matrix-matrix multiplication is carried out, and, optionally, a tile size for matrix blocking <progname> <# threads> <# iterations> <matrix order> [<tile size>] The output consists of diagnostics to make sure the algorithm worked, and of timing statistics. FUNCTIONS CALLED: Other than OpenMP or standard C functions, the following functions are used in this program: wtime() bail_out() HISTORY: Written by Rob Van der Wijngaart, September 2006. Made array dimensioning dynamic, October 2007 Allowed arbitrary block size, November 2007 Removed reverse-engineered MKL source code option, November 2007 Changed from row- to column-major storage order, November 2007 Stored blocks of B in transpose form, November 2007 ***********************************************************************************/ #include <par-res-kern_general.h> #include <par-res-kern_omp.h> #ifdef MKL #include <mkl_cblas.h> #endif #ifndef DEFAULTBLOCK #define DEFAULTBLOCK 32 #endif #ifndef BOFFSET #define BOFFSET 12 #endif #define AA_arr(i,j) AA[(i)+(block+BOFFSET)*(j)] #define BB_arr(i,j) BB[(i)+(block+BOFFSET)*(j)] #define CC_arr(i,j) CC[(i)+(block+BOFFSET)*(j)] #define A_arr(i,j) A[(i)+(order)*(j)] #define B_arr(i,j) B[(i)+(order)*(j)] #define C_arr(i,j) C[(i)+(order)*(j)] #define forder (1.0*order) main(int argc, char **argv){ int iter, i,ii,j,jj,k,kk,ig,jg,kg; /* dummies */ int iterations; /* number of times the multiplication is done */ double dgemm_time, /* timing parameters */ avgtime; double checksum = 0.0, /* checksum of result */ ref_checksum; double epsilon = 1.e-8; /* error tolerance */ int nthread_input, /* thread parameters */ nthread; int num_error=0; /* flag that signals that requested and 
obtained numbers of threads are the same */ static double *A, *B, *C; /* input (A,B) and output (C) matrices */ int order; /* number of rows and columns of matrices */ int block; /* tile size of matrices */ #ifndef MKL if (argc != 4 && argc != 5) { printf("Usage: %s <# threads> <# iterations> <matrix order> [tile size]\n",*argv); #else if (argc != 4) { printf("Usage: %s <# threads> <# iterations> <matrix order>\n",*argv); #endif exit(EXIT_FAILURE); } /* Take number of threads to request from command line */ nthread_input = atoi(*++argv); if ((nthread_input < 1) || (nthread_input > MAX_THREADS)) { printf("ERROR: Invalid number of threads: %d\n", nthread_input); exit(EXIT_FAILURE); } omp_set_num_threads(nthread_input); iterations = atoi(*++argv); if (iterations < 1){ printf("ERROR: Iterations must be positive : %d \n", iterations); exit(EXIT_FAILURE); } order = atoi(*++argv); if (order < 1) { printf("ERROR: Matrix order must be positive: %d\n", order); exit(EXIT_FAILURE); } A = (double *) malloc(order*order*sizeof(double)); B = (double *) malloc(order*order*sizeof(double)); C = (double *) malloc(order*order*sizeof(double)); if (!A || !B || !C) { printf("ERROR: Could not allocate space for global matrices\n"); exit(EXIT_FAILURE); } ref_checksum = (0.25*forder*forder*forder*(forder-1.0)*(forder-1.0)); #pragma omp parallel for private(i,j) for(j = 0; j < order; j++) for(i = 0; i < order; i++) { A_arr(i,j) = B_arr(i,j) = (double) j; C_arr(i,j) = 0.0; } printf("OpenMP Dense matrix-matrix multiplication\n"); #ifndef MKL if (argc == 5) { block = atoi(*++argv); } else block = DEFAULTBLOCK; #pragma omp parallel private (i,j,k,ii,jj,kk,ig,jg,kg,iter) { double *AA, *BB, *CC; if (block > 0) { /* matrix blocks for local temporary copies */ AA = (double *) malloc(block*(block+BOFFSET)*3*sizeof(double)); if (!AA) { num_error = 1; printf("Could not allocate space for matrix tiles on thread %d\n", omp_get_thread_num()); } bail_out(num_error); BB = AA + block*(block+BOFFSET); CC = BB 
+ block*(block+BOFFSET); } #pragma omp master { nthread = omp_get_num_threads(); if (nthread != nthread_input) { num_error = 1; printf("ERROR: number of requested threads %d does not equal ", nthread_input); printf("number of spawned threads %d\n", nthread); } else { printf("Matrix order = %d\n", order); printf("Number of threads = %d\n", nthread_input); if (block>0) printf("Blocking factor = %d\n", block); else printf("No blocking\n"); printf("Number of iterations = %d\n", iterations); } } bail_out(num_error); for (iter=0; iter<=iterations; iter++) { if (iter==1) { #pragma omp barrier #pragma omp master { dgemm_time = wtime(); } } if (block > 0) { #pragma omp for for(jj = 0; jj < order; jj+=block){ for(kk = 0; kk < order; kk+=block) { for (jg=jj,j=0; jg<MIN(jj+block,order); j++,jg++) for (kg=kk,k=0; kg<MIN(kk+block,order); k++,kg++) BB_arr(j,k) = B_arr(kg,jg); for(ii = 0; ii < order; ii+=block){ for (kg=kk,k=0; kg<MIN(kk+block,order); k++,kg++) for (ig=ii,i=0; ig<MIN(ii+block,order); i++,ig++) AA_arr(i,k) = A_arr(ig,kg); for (jg=jj,j=0; jg<MIN(jj+block,order); j++,jg++) for (ig=ii,i=0; ig<MIN(ii+block,order); i++,ig++) CC_arr(i,j) = 0.0; for (kg=kk,k=0; kg<MIN(kk+block,order); k++,kg++) for (jg=jj,j=0; jg<MIN(jj+block,order); j++,jg++) for (ig=ii,i=0; ig<MIN(ii+block,order); i++,ig++) CC_arr(i,j) += AA_arr(i,k)*BB_arr(j,k); for (jg=jj,j=0; jg<MIN(jj+block,order); j++,jg++) for (ig=ii,i=0; ig<MIN(ii+block,order); i++,ig++) C_arr(ig,jg) += CC_arr(i,j); } } } } else { #pragma omp for for (jg=0; jg<order; jg++) for (kg=0; kg<order; kg++) for (ig=0; ig<order; ig++) C_arr(ig,jg) += A_arr(ig,kg)*B_arr(kg,jg); } } /* end of iterations */ #pragma omp barrier #pragma omp master { dgemm_time = wtime() - dgemm_time; } } /* end of parallel region */ #else printf("Matrix size = %dx%d\n", order, order); printf("Number of threads = %d\n", nthread_input); printf("Using Math Kernel Library\n"); printf("Number of iterations = %d\n", iterations); for (iter=0; iter<=iterations; 
iter++) { if (iter==1) dgemm_time = wtime(); cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, order, order, order, 1.0, &(A_arr(0,0)), order, &(B_arr(0,0)), order, 1.0, &(C_arr(0,0)), order); } dgemm_time = wtime()-dgemm_time; #endif for(checksum=0.0,j = 0; j < order; j++) for(i = 0; i < order; i++) checksum += C_arr(i,j); /* verification test */ ref_checksum *= (iterations+1); if (ABS((checksum - ref_checksum)/ref_checksum) > epsilon) { printf("ERROR: Checksum = %lf, Reference checksum = %lf\n", checksum, ref_checksum); exit(EXIT_FAILURE); } else { printf("Solution validates\n"); #ifdef VERBOSE printf("Reference checksum = %lf, checksum = %lf\n", ref_checksum, checksum); #endif } double nflops = 2.0*forder*forder*forder; avgtime = dgemm_time/iterations; printf("Rate (MFlops/s): %lf Avg time (s): %lf\n", 1.0E-06 *nflops/avgtime, avgtime); exit(EXIT_SUCCESS); }
XT_genSinogram.c
/* ============================================================================ * Copyright (c) 2013 K. Aditya Mohan (Purdue University) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this * list of conditions and the following disclaimer in the documentation and/or * other materials provided with the distribution. * * Neither the name of K. Aditya Mohan, Purdue * University, nor the names of its contributors may be used * to endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 *
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */

/*#include <iostream>*/
/*#include "TiffUtilities.h"*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "XT_Structures.h"
#include "XT_Constants.h"
#include "allocate.h"
#include <math.h>
#include "XT_IOMisc.h"
#include "XT_AMatrix.h"
#include "XT_Profile.h"
#include "randlib.h"
#include "XT_Init.h"
#include "XT_Debug.h"
#include <fftw3.h>
#include "XT_CmplxArith.h"
#include "XT_FresnelTran.h"
#include "XT_MPIIO.h"

/*generates projection data from phantom*/
/*
 * ForwardProject: simulates detector measurements from magnitude/phase
 * phantom files.  Pipeline, as implemented below:
 *   1. allocate per-angle FFTW buffers/plans and projection accumulators;
 *   2. read this node's slab of the magnitude and phase phantoms from
 *      MAG_PHANTOM_FILEPATH / PHASE_PHANTOM_FILEPATH (offset by node_rank);
 *   3. forward-project both volumes (A-matrix times voxel line response)
 *      into projs_real / projs_imag, one projection angle per OpenMP thread;
 *   4. form the complex transmitted field sqrt(EXPECTED_COUNT_MEASUREMENT)
 *      * exp(-projs_real - i*projs_imag), propagate it with
 *      compute_FresnelTran, box-average |field|^2 down to proj_rows x
 *      proj_cols, and add noise via normal();
 *   5. write TIFF (optional) and shared binary outputs, then free everything.
 *
 * Parameters:
 *   SinogramPtr/ScannedObjectPtr/TomoInputsPtr - geometry, phantom dims and
 *        run settings (also receives DetectorResponse/ZLineResponse buffers);
 *   proj_rows, proj_cols - detector grid of the generated measurements;
 *   measurements - out, N_p*proj_rows*proj_cols noisy intensities;
 *   brights - out, proj_rows*proj_cols flat field (EXPECTED_COUNT_MEASUREMENT).
 * Returns 0 on success, -1 on a TIFF write failure (via the error label).
 */
int32_t ForwardProject (Sinogram* SinogramPtr, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr, int32_t proj_rows, int32_t proj_cols, float *measurements, float *brights)
{
	FILE *fp;
	long int stream_offset, size, result;
	int32_t i, j, k, m, n, idx, t, slice, r_subsmpl, t_subsmpl, r_idx, t_idx, r_origidx, t_origidx;
	Real_t measurement_avg = 0, magpixel, phasepixel, val, expval, real, imag, measure_buf;
	uint8_t AvgNumXElements, AvgNumZElements;
	char phantom_file[1000];
	int dimTiff[4];

	/* scratch buffer for converting float outputs to Real_arr_t before writing */
	Real_arr_t* tifarray = (Real_arr_t*)get_spc(SinogramPtr->N_p*SinogramPtr->N_t*SinogramPtr->N_r, sizeof(Real_arr_t));
	/* phantom volumes as read from disk (float) and Real_arr_t copies for output */
	float*** magobject = (float***)multialloc(sizeof(float), 3, ScannedObjectPtr->N_z, ScannedObjectPtr->N_y, ScannedObjectPtr->N_x);
	float*** phaseobject = (float***)multialloc(sizeof(float), 3, ScannedObjectPtr->N_z, ScannedObjectPtr->N_y, ScannedObjectPtr->N_x);
	Real_arr_t*** realmagobject = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, ScannedObjectPtr->N_z, ScannedObjectPtr->N_y, ScannedObjectPtr->N_x);
	Real_arr_t*** realphaseobject = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, ScannedObjectPtr->N_z, ScannedObjectPtr->N_y, ScannedObjectPtr->N_x);
	/* accumulated line integrals: projs_real from the magnitude phantom,
	   projs_imag from the phase phantom */
	Real_arr_t*** projs_real = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, SinogramPtr->N_p, SinogramPtr->N_t, SinogramPtr->N_r);
	Real_arr_t*** projs_imag = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, SinogramPtr->N_p, SinogramPtr->N_t, SinogramPtr->N_r);
	/* |field| before and after the Fresnel transform, kept for diagnostics output */
	Real_arr_t*** fftforw_space = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, SinogramPtr->N_p, SinogramPtr->N_t, SinogramPtr->N_r);
	Real_arr_t*** fftback_space = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, SinogramPtr->N_p, SinogramPtr->N_t, SinogramPtr->N_r);
/*	Real_arr_t*** fftforw_freq = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, SinogramPtr->N_p, SinogramPtr->N_t, SinogramPtr->N_r);
	Real_arr_t*** fftback_freq = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, SinogramPtr->N_p, SinogramPtr->N_t, SinogramPtr->N_r);*/

	fftw_complex **fftforw_arr, **fftback_arr;
	fftw_plan *fftforw_plan, *fftback_plan;

	fftforw_arr = (fftw_complex**)get_spc(SinogramPtr->N_p, sizeof(fftw_complex*));
	fftback_arr = (fftw_complex**)get_spc(SinogramPtr->N_p, sizeof(fftw_complex*));
	fftforw_plan = (fftw_plan*)get_spc(SinogramPtr->N_p, sizeof(fftw_plan));
	fftback_plan = (fftw_plan*)get_spc(SinogramPtr->N_p, sizeof(fftw_plan));
	/* one in-place forward and backward 2-D FFT plan per projection angle; the
	   N_r x N_t buffers are indexed as [j*N_t + slice] (r-major) below */
	for (i = 0; i < SinogramPtr->N_p; i++)
	{
		fftforw_arr[i] = (fftw_complex*) fftw_malloc(sizeof(fftw_complex)*SinogramPtr->N_t*SinogramPtr->N_r);
		fftback_arr[i] = (fftw_complex*) fftw_malloc(sizeof(fftw_complex)*SinogramPtr->N_t*SinogramPtr->N_r);
		fftforw_plan[i] = fftw_plan_dft_2d(SinogramPtr->N_r, SinogramPtr->N_t, fftforw_arr[i], fftforw_arr[i], FFTW_FORWARD, FFTW_ESTIMATE);
		fftback_plan[i] = fftw_plan_dft_2d(SinogramPtr->N_r, SinogramPtr->N_t, fftback_arr[i], fftback_arr[i], FFTW_BACKWARD, FFTW_ESTIMATE);
	}

	memset(&(projs_real[0][0][0]), 0, SinogramPtr->N_p*SinogramPtr->N_t*SinogramPtr->N_r*sizeof(Real_arr_t));
	memset(&(projs_imag[0][0][0]), 0, SinogramPtr->N_p*SinogramPtr->N_t*SinogramPtr->N_r*sizeof(Real_arr_t));
	memset(&(measurements[0]), 0, SinogramPtr->N_p*proj_rows*proj_cols*sizeof(float));
	memset(&(brights[0]), 0, proj_cols*proj_rows*sizeof(float));

	/*AvgNumXElements over estimates the total number of entries in a single column of A matrix when indexed by both voxel and angle*/
	AvgNumXElements = (uint8_t)ceil(3*ScannedObjectPtr->delta_xy/(SinogramPtr->delta_r));
	SinogramPtr->DetectorResponse = (Real_arr_t **)multialloc(sizeof(Real_arr_t), 2, SinogramPtr->N_p, DETECTOR_RESPONSE_BINS+1);
	SinogramPtr->ZLineResponse = (Real_arr_t *)get_spc(DETECTOR_RESPONSE_BINS + 1, sizeof(Real_arr_t));
	DetectorResponseProfile (SinogramPtr, ScannedObjectPtr, TomoInputsPtr);
	ZLineResponseProfile (SinogramPtr, ScannedObjectPtr, TomoInputsPtr);

	/* per-z-slice detector footprint along t (the +2 allows boundary overlap) */
	AvgNumZElements = (uint8_t)((ScannedObjectPtr->delta_z/SinogramPtr->delta_t) + 2);
	AMatrixCol* VoxelLineResponse = (AMatrixCol*)get_spc(ScannedObjectPtr->N_z,sizeof(AMatrixCol));
	for (t = 0; t < ScannedObjectPtr->N_z; t++){
		VoxelLineResponse[t].values = (Real_t*)get_spc(AvgNumZElements, sizeof(Real_t));
		VoxelLineResponse[t].index = (int32_t*)get_spc(AvgNumZElements, sizeof(int32_t));
	}
	storeVoxelLineResponse(VoxelLineResponse, ScannedObjectPtr, SinogramPtr);

	/* integer box-averaging factors from phantom resolution down to the
	   requested detector grid; assumes exact divisibility -- TODO confirm */
	r_subsmpl = PHANTOM_XY_SIZE/proj_cols;
	t_subsmpl = PHANTOM_Z_SIZE/proj_rows;

	/* read this node's slab of the magnitude phantom */
	sprintf(phantom_file, "%s", MAG_PHANTOM_FILEPATH);
	fp = fopen (phantom_file, "rb");
	check_error(fp==NULL, TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Error in reading file %s\n", phantom_file);
	size = (long int)ScannedObjectPtr->N_z*(long int)ScannedObjectPtr->N_y*(long int)ScannedObjectPtr->N_x;
	check_info(TomoInputsPtr->node_rank==0,TomoInputsPtr->debug_file_ptr, "Forward projecting mag phantom ...\n");
/*	stream_offset = (long int)PHANTOM_OFFSET*(long int)ScannedObjectPtr->N_z*(long int)ScannedObjectPtr->N_y*(long int)ScannedObjectPtr->N_x*(long int)TomoInputsPtr->node_num; */
	stream_offset = (long int)ScannedObjectPtr->N_z*(long int)ScannedObjectPtr->N_y*(long int)ScannedObjectPtr->N_x*(long int)TomoInputsPtr->node_rank;
	result = fseek (fp, stream_offset*sizeof(float), SEEK_SET);
	check_error(result != 0, TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "ERROR: Error in seeking file %s, stream_offset = %ld\n",phantom_file,stream_offset);
	result = fread (&(magobject[0][0][0]), sizeof(float), size, fp);
	check_error(result != size, TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "ERROR: Reading file %s, Number of elements read does not match required, number of elements read=%ld, stream_offset=%ld, size=%ld\n",phantom_file,result,stream_offset,size);
	fclose(fp);

	/* read this node's slab of the phase phantom (same layout) */
	sprintf(phantom_file, "%s", PHASE_PHANTOM_FILEPATH);
	fp = fopen (phantom_file, "rb");
	check_error(fp==NULL, TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Error in reading file %s\n", phantom_file);
	size = (long int)ScannedObjectPtr->N_z*(long int)ScannedObjectPtr->N_y*(long int)ScannedObjectPtr->N_x;
	check_info(TomoInputsPtr->node_rank==0,TomoInputsPtr->debug_file_ptr, "Forward projecting phase phantom ...\n");
	stream_offset = (long int)ScannedObjectPtr->N_z*(long int)ScannedObjectPtr->N_y*(long int)ScannedObjectPtr->N_x*(long int)TomoInputsPtr->node_rank;
	result = fseek (fp, stream_offset*sizeof(float), SEEK_SET);
	check_error(result != 0, TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "ERROR: Error in seeking file %s, stream_offset = %ld\n",phantom_file,stream_offset);
	result = fread (&(phaseobject[0][0][0]), sizeof(float), size, fp);
	check_error(result != size, TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "ERROR: Reading file %s, Number of elements read does not match required, number of elements read=%ld, stream_offset=%ld, size=%ld\n",phantom_file,result,stream_offset,size);
	fclose(fp);

	/* forward projection: each thread owns one angle i, so the writes into
	   projs_real[i]/projs_imag[i] do not race across threads */
	#pragma omp parallel for private(i,j,k,slice,magpixel,phasepixel,idx,val,m,n)
	for (i=0; i<SinogramPtr->N_p; i++){
		AMatrixCol AMatrix;
		AMatrix.values = (Real_t*)get_spc((int32_t)AvgNumXElements,sizeof(Real_t));
		AMatrix.index = (int32_t*)get_spc((int32_t)AvgNumXElements,sizeof(int32_t));
		for (j=0; j<ScannedObjectPtr->N_y; j++)
		for (k=0; k<ScannedObjectPtr->N_x; k++){
			/* footprint of voxel column (j,k) on the r axis at angle i */
			calcAMatrixColumnforAngle(SinogramPtr, ScannedObjectPtr, SinogramPtr->DetectorResponse, &AMatrix, j, k, i, SinogramPtr->Light_Wavenumber);
			for (slice=0; slice<ScannedObjectPtr->N_z; slice++){
				magpixel = (Real_t)(magobject[slice][j][k]);
	/*			if (magpixel < 0)
					magpixel = 0;
				else
					magpixel = (ABSORP_COEF_2 - ABSORP_COEF_1)*magpixel + ABSORP_COEF_1;*/
				realmagobject[slice][j][k] = (Real_arr_t)magpixel;
				phasepixel = (Real_t)(phaseobject[slice][j][k]);
	/*			if (phasepixel < 0)
					phasepixel = 0;
				else
					phasepixel = (REF_IND_DEC_2 - REF_IND_DEC_1)*phasepixel + REF_IND_DEC_1;*/
				realphaseobject[slice][j][k] = (Real_arr_t)phasepixel;
				/*phasepixel = 0;*/
				/*IMPORTANT: Always make sure phantom has no negative values.*/
				/* scatter voxel contribution over its r (AMatrix) and
				   t (VoxelLineResponse) detector footprints */
				for (m=0; m<AMatrix.count; m++){
					idx=AMatrix.index[m];
					val=AMatrix.values[m];
					for (n=0; n<VoxelLineResponse[slice].count; n++)
					{
						projs_real[i][VoxelLineResponse[slice].index[n]][idx] += magpixel*val*VoxelLineResponse[slice].values[n];
						projs_imag[i][VoxelLineResponse[slice].index[n]][idx] += phasepixel*val*VoxelLineResponse[slice].values[n];
					}
				}
			}
		}
		free(AMatrix.values);
		free(AMatrix.index);
	}

	/* optional TIFF dumps of the noiseless projections and phantom copies */
	if (TomoInputsPtr->Write2Tiff == 1)
	{
		dimTiff[0] = 1; dimTiff[1] = SinogramPtr->N_p; dimTiff[2] = SinogramPtr->N_t; dimTiff[3] = SinogramPtr->N_r;
		if (WriteMultiDimArray2Tiff ("SimMagProjs", dimTiff, 0, 1, 2, 3, &(projs_real[0][0][0]), 0, 0, 1, TomoInputsPtr->debug_file_ptr)) {goto error;}
		if (WriteMultiDimArray2Tiff ("SimPhaseProjs", dimTiff, 0, 1, 2, 3, &(projs_imag[0][0][0]), 0, 0, 1, TomoInputsPtr->debug_file_ptr)) {goto error;}
		dimTiff[0] = 1; dimTiff[1] = ScannedObjectPtr->N_z; dimTiff[2] = ScannedObjectPtr->N_y; dimTiff[3] = ScannedObjectPtr->N_x;
		if (WriteMultiDimArray2Tiff ("mag_phantom", dimTiff, 0, 1, 2, 3, &(realmagobject[0][0][0]), 0, 0, 1, TomoInputsPtr->debug_file_ptr)) {goto error;}
		if (WriteMultiDimArray2Tiff ("phase_phantom", dimTiff, 0, 1, 2, 3, &(realphaseobject[0][0][0]), 0, 0, 1, TomoInputsPtr->debug_file_ptr)) {goto error;}
	}

	/* shared binary dumps, offset by node_rank so every node writes its slab */
	size = SinogramPtr->N_p*SinogramPtr->N_t*SinogramPtr->N_r;
	write_SharedBinFile_At ("SimMagProjs", &(projs_real[0][0][0]), TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr);
	write_SharedBinFile_At ("SimPhaseProjs", &(projs_imag[0][0][0]), TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr);
	size = ScannedObjectPtr->N_z*ScannedObjectPtr->N_y*ScannedObjectPtr->N_x;
	write_SharedBinFile_At ("mag_phantom", &(realmagobject[0][0][0]), TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr);
	write_SharedBinFile_At ("phase_phantom", &(realphaseobject[0][0][0]), TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr);

	/* flat field: constant expected photon count everywhere */
	measurement_avg = 0;
	for (slice=0; slice < proj_rows; slice++)
	for (j=0; j < proj_cols; j++)
		brights[slice*proj_cols + j] = EXPECTED_COUNT_MEASUREMENT;

	check_info(TomoInputsPtr->node_rank==0,TomoInputsPtr->debug_file_ptr, "The expected count is %d\n", EXPECTED_COUNT_MEASUREMENT);
/*	#pragma omp parallel for private(slice, j, expval, real, imag, idx) reduction(+:measurement_avg)*/
	for (i=0; i < SinogramPtr->N_p; i++)
	{
		/* build the complex transmitted field for angle i:
		   sqrt(EXPECTED_COUNT_MEASUREMENT) * exp(-projs_real) * exp(-i*projs_imag) */
		for (slice=0; slice < SinogramPtr->N_t; slice++)
		for (j=0; j < SinogramPtr->N_r; j++)
		{
			expval = exp(-projs_real[i][slice][j]);
			/*fftarr[j*SinogramPtr->N_t + slice][0] = EXPECTED_COUNT_MEASUREMENT*expval*cos(-projs_imag[i][slice][j]);
			fftarr[j*SinogramPtr->N_t + slice][1] = EXPECTED_COUNT_MEASUREMENT*expval*sin(-projs_imag[i][slice][j]);*/
			cmplx_mult (&real, &imag, expval*cos(-projs_imag[i][slice][j]), expval*sin(-projs_imag[i][slice][j]), sqrt(EXPECTED_COUNT_MEASUREMENT), (Real_t)(0));
			fftforw_arr[i][j*SinogramPtr->N_t + slice][0] = real;
			fftforw_arr[i][j*SinogramPtr->N_t + slice][1] = imag;
			/*weights[2*idx] = (val + sqrt(fabs(val))*normal());*/
			/*weights[2*idx+1] = (val + sqrt(fabs(val))*normal());*/
			fftforw_space[i][slice][j] = sqrt(real*real + imag*imag);
		}
/*		fftw_execute(fftforw_plan);
		for (slice=0; slice < SinogramPtr->N_t; slice++)
		for (j=0; j < SinogramPtr->N_r; j++)
			fftforw_freq[i][slice][j] = sqrt(fftforw_arr[j*SinogramPtr->N_t + slice][0]*fftforw_arr[j*SinogramPtr->N_t + slice][0] + fftforw_arr[j*SinogramPtr->N_t + slice][1]*fftforw_arr[j*SinogramPtr->N_t + slice][1]);*/
		/* free-space propagation from object to detector; result in fftback_arr[i] */
		compute_FresnelTran (SinogramPtr->N_r, SinogramPtr->N_t, SinogramPtr->delta_r, SinogramPtr->delta_t, fftforw_arr[i], &(fftforw_plan[i]), fftback_arr[i], &(fftback_plan[i]), SinogramPtr->Light_Wavelength, SinogramPtr->Obj2Det_Distance, SinogramPtr->Freq_Window);
/*		for (slice=0; slice < SinogramPtr->N_t; slice++)
		for (j=0; j < SinogramPtr->N_r; j++)
			fftback_freq[i][slice][j] = sqrt(fftback_arr[j*SinogramPtr->N_t + slice][0]*fftback_arr[j*SinogramPtr->N_t + slice][0] + fftback_arr[j*SinogramPtr->N_t + slice][1]*fftback_arr[j*SinogramPtr->N_t + slice][1]);
		fftw_execute(fftback_plan);*/
		/* box-average the propagated intensity |field|^2 down to the detector
		   grid, then add signal-dependent noise via normal() */
		for (slice=0; slice < proj_rows; slice++)
		for (j=0; j < proj_cols; j++)
		{
			idx = i*proj_rows*proj_cols + slice*proj_cols + j;
			for (t_idx = 0; t_idx < t_subsmpl; t_idx++)
			for (r_idx = 0; r_idx < r_subsmpl; r_idx++)
			{
				t_origidx = slice*t_subsmpl + t_idx;
				r_origidx = j*r_subsmpl + r_idx;
				measure_buf = (fftback_arr[i][r_origidx*SinogramPtr->N_t + t_origidx][0]*fftback_arr[i][r_origidx*SinogramPtr->N_t + t_origidx][0] + fftback_arr[i][r_origidx*SinogramPtr->N_t + t_origidx][1]*fftback_arr[i][r_origidx*SinogramPtr->N_t + t_origidx][1]);
				measurements[idx] += measure_buf;
				fftback_space[i][t_origidx][r_origidx] = sqrt(measure_buf);
				/*measurements[idx] = sqrt(fabs(measurements[idx]));*/
				/*weights[idx] = 1.0/measurements[idx];
				weight_avg += weights[idx];*/
			}
			measurements[idx] /= (t_subsmpl*r_subsmpl);
			measurements[idx] = fabs(measurements[idx] + sqrt(fabs(measurements[idx]))*normal());
			measurement_avg += measurements[idx];
		}
	}

	/* optional TIFF + shared binary dumps of the noisy measurements and fields */
	if (TomoInputsPtr->Write2Tiff == 1)
	{
		size = SinogramPtr->N_p*proj_rows*proj_cols;
		dimTiff[0] = 1; dimTiff[1] = SinogramPtr->N_p; dimTiff[2] = proj_rows; dimTiff[3] = proj_cols;
/*		for (i = 0; i < size; i++) tifarray[i] = measurements[2*i];
		if (WriteMultiDimArray2Tiff ("measurements_real", dimTiff, 0, 2, 1, 3, &(tifarray[0]), 0, 0, 1, TomoInputsPtr->debug_file_ptr)) {goto error;}
		for (i = 0; i < size; i++) tifarray[i] = measurements[2*i+1];
		if (WriteMultiDimArray2Tiff ("measurements_imag", dimTiff, 0, 2, 1, 3, &(tifarray[0]), 0, 0, 1, TomoInputsPtr->debug_file_ptr)) {goto error;}*/
		for (i = 0; i < size; i++) tifarray[i] = measurements[i];
		if (WriteMultiDimArray2Tiff ("measurements", dimTiff, 0, 1, 2, 3, &(tifarray[0]), 0, 0, 1, TomoInputsPtr->debug_file_ptr)) {goto error;}
		write_SharedBinFile_At ("measurements", &(tifarray[0]), TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr);
/*		for (i = 0; i < size; i++) tifarray[i] = weights[i];
		if (WriteMultiDimArray2Tiff ("weights", dimTiff, 0, 2, 1, 3, &(tifarray[0]), 0, 0, 1, TomoInputsPtr->debug_file_ptr)) {goto error;}
		write_SharedBinFile_At ("weights", &(tifarray[0]), TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr);*/
		size = proj_rows*proj_cols;
		dimTiff[0] = 1; dimTiff[1] = 1; dimTiff[2] = proj_rows; dimTiff[3] = proj_cols;
		for (i = 0; i < size; i++) tifarray[i] = brights[i];
		if (WriteMultiDimArray2Tiff ("brights", dimTiff, 0, 1, 2, 3, &(tifarray[0]), 0, 0, 1, TomoInputsPtr->debug_file_ptr)) {goto error;}
		write_SharedBinFile_At ("brights", &(tifarray[0]), TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr);
		dimTiff[0] = 1; dimTiff[1] = SinogramPtr->N_p; dimTiff[2] = SinogramPtr->N_t; dimTiff[3] = SinogramPtr->N_r;
		size = SinogramPtr->N_p*SinogramPtr->N_t*SinogramPtr->N_r;
		if (WriteMultiDimArray2Tiff ("fftforw_space", dimTiff, 0, 1, 2, 3, &(fftforw_space[0][0][0]), 0, 0, 1, TomoInputsPtr->debug_file_ptr)) {goto error;}
		write_SharedBinFile_At ("fftforw_space", &(fftforw_space[0][0][0]), TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr);
		if (WriteMultiDimArray2Tiff ("fftback_space", dimTiff, 0, 1, 2, 3, &(fftback_space[0][0][0]), 0, 0, 1, TomoInputsPtr->debug_file_ptr)) {goto error;}
		write_SharedBinFile_At ("fftback_space", &(fftback_space[0][0][0]), TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr);
/*		if (WriteMultiDimArray2Tiff ("fftforw_freq", dimTiff, 0, 1, 2, 3, &(fftforw_freq[0][0][0]), 0, 0, 1, TomoInputsPtr->debug_file_ptr)) {goto error;}
		write_SharedBinFile_At ("fftforw_freq", &(fftforw_freq[0][0][0]), TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr);
		if (WriteMultiDimArray2Tiff ("fftback_freq", dimTiff, 0, 1, 2, 3, &(fftback_freq[0][0][0]), 0, 0, 1, TomoInputsPtr->debug_file_ptr)) {goto error;}
		write_SharedBinFile_At ("fftback_freq", &(fftback_freq[0][0][0]), TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr);*/
	}

	measurement_avg /= (SinogramPtr->N_p*proj_cols*proj_rows);
	printf("genSinogramFromPhantom: The average of all measurement data with/without noise is %f\n", measurement_avg);

	/* cleanup
	   NOTE(review): only VoxelLineResponse[0].values/.index are freed here;
	   the per-slice arrays allocated for t>0 leak -- confirm and fix upstream.
	   NOTE(review): fftforw_plan/fftback_plan arrays themselves (from get_spc)
	   are also not freed. */
	free(VoxelLineResponse->values);
	free(VoxelLineResponse->index);
	multifree(SinogramPtr->DetectorResponse,2);
	free(SinogramPtr->ZLineResponse);
	free(VoxelLineResponse);
	multifree(magobject,3);
	multifree(phaseobject,3);
	multifree(realmagobject,3);
	multifree(realphaseobject,3);
	multifree(projs_real,3);
	multifree(projs_imag,3);
	multifree(fftforw_space, 3);
	multifree(fftback_space, 3);
	free(tifarray);
	for (i = 0; i < SinogramPtr->N_p; i++)
	{
		fftw_destroy_plan(fftforw_plan[i]);
		fftw_destroy_plan(fftback_plan[i]);
		fftw_free(fftforw_arr[i]);
		fftw_free(fftback_arr[i]);
	}
	free(fftforw_arr);
	free(fftback_arr);
/*	multifree(fftforw_freq, 3);
	multifree(fftback_freq, 3); */
	return (0);
error:
	/* TIFF write failed; release what the success path would have released
	   NOTE(review): realmagobject/realphaseobject, fftforw_space/fftback_space
	   and the FFTW buffers/plans are not freed on this path -- leak on error */
	free(VoxelLineResponse->values);
	free(VoxelLineResponse->index);
	multifree(SinogramPtr->DetectorResponse,2);
	free(SinogramPtr->ZLineResponse);
	free(VoxelLineResponse);
	multifree(magobject,3);
	multifree(phaseobject,3);
	multifree(projs_real,3);
	multifree(projs_imag,3);
	free(tifarray);
	return (-1);
}
idasFoodWeb_kry_omp.c
/* * ----------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds and Ting Yan @ SMU * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2019, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * Example program for IDAS: Food web problem, OpenMP, GMRES, * user-supplied preconditioner * * This example program uses SUNLinSol_SPGMR as the linear * solver, and IDACalcIC for initial condition calculation. * * The mathematical problem solved in this example is a DAE system * that arises from a system of partial differential equations after * spatial discretization. The PDE system is a food web population * model, with predator-prey interaction and diffusion on the unit * square in two dimensions. The dependent variable vector is: * * 1 2 ns * c = (c , c , ..., c ) , ns = 2 * np * * and the PDE's are as follows: * * i i i * dc /dt = d(i)*(c + c ) + R (x,y,c) (i = 1,...,np) * xx yy i * * i i * 0 = d(i)*(c + c ) + R (x,y,c) (i = np+1,...,ns) * xx yy i * * where the reaction terms R are: * * i ns j * R (x,y,c) = c * (b(i) + sum a(i,j)*c ) * i j=1 * * The number of species is ns = 2 * np, with the first np being * prey and the last np being predators. The coefficients a(i,j), * b(i), d(i) are: * * a(i,i) = -AA (all i) * a(i,j) = -GG (i <= np , j > np) * a(i,j) = EE (i > np, j <= np) * all other a(i,j) = 0 * b(i) = BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i <= np) * b(i) =-BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i > np) * d(i) = DPREY (i <= np) * d(i) = DPRED (i > np) * * The various scalar parameters required are set using '#define' * statements or directly in routine InitUserData. 
In this program, * np = 1, ns = 2. The boundary conditions are homogeneous Neumann: * normal derivative = 0. * * A polynomial in x and y is used to set the initial values of the * first np variables (the prey variables) at each x,y location, * while initial values for the remaining (predator) variables are * set to a flat value, which is corrected by IDACalcIC. * * The PDEs are discretized by central differencing on a MX by MY * mesh. * * The DAE system is solved by IDAS using the SUNLinSol_SPGMR linear solver. * Output is printed at t = 0, .001, .01, .1, .4, .7, 1. * * Optionally, we can set the number of threads from environment * variable or command line. To check the current value for number * of threads from environment: * % echo $OMP_NUM_THREADS * * Execution: * * To use the default value for the number of threads from * the OMP_NUM_THREADS environment value: * % ./idasFoodWeb_kry_omp * To specify the number of threads at the command line, use * % ./idasFoodWeb_kry_omp num_threads * where num_threads is the desired number of threads. * * ----------------------------------------------------------------- * References: * [1] Peter N. Brown and Alan C. Hindmarsh, * Reduced Storage Matrix Methods in Stiff ODE systems, Journal * of Applied Mathematics and Computation, Vol. 31 (May 1989), * pp. 40-91. * * [2] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold, * Using Krylov Methods in the Solution of Large-Scale * Differential-Algebraic Systems, SIAM J. Sci. Comput., 15 * (1994), pp. 1467-1488. * * [3] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold, * Consistent Initial Condition Calculation for Differential- * Algebraic Systems, SIAM J. Sci. Comput., 19 (1998), * pp. 1495-1512. 
* ----------------------------------------------------------------- */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <idas/idas.h> #include <sunlinsol/sunlinsol_spgmr.h> #include <nvector/nvector_openmp.h> #include <sundials/sundials_dense.h> #include <sundials/sundials_types.h> #ifdef _OPENMP #include <omp.h> #endif /* helpful macros */ #ifndef MAX #define MAX(A, B) ((A) > (B) ? (A) : (B)) #endif /* Problem Constants. */ #define NPREY 1 /* No. of prey (= no. of predators). */ #define NUM_SPECIES 2*NPREY #define PI RCONST(3.1415926535898) #define FOURPI (RCONST(4.0)*PI) #define MX 20 /* MX = number of x mesh points */ #define MY 20 /* MY = number of y mesh points */ #define NSMX (NUM_SPECIES * MX) #define NEQ (NUM_SPECIES*MX*MY) #define AA RCONST(1.0) /* Coefficient in above eqns. for a */ #define EE RCONST(10000.) /* Coefficient in above eqns. for a */ #define GG RCONST(0.5e-6) /* Coefficient in above eqns. for a */ #define BB RCONST(1.0) /* Coefficient in above eqns. for b */ #define DPREY RCONST(1.0) /* Coefficient in above eqns. for d */ #define DPRED RCONST(0.05) /* Coefficient in above eqns. for d */ #define ALPHA RCONST(50.) /* Coefficient alpha in above eqns. */ #define BETA RCONST(1000.) /* Coefficient beta in above eqns. */ #define AX RCONST(1.0) /* Total range of x variable */ #define AY RCONST(1.0) /* Total range of y variable */ #define RTOL RCONST(1.e-5) /* Relative tolerance */ #define ATOL RCONST(1.e-5) /* Absolute tolerance */ #define NOUT 6 /* Number of output times */ #define TMULT RCONST(10.0) /* Multiplier for tout values */ #define TADD RCONST(0.3) /* Increment for tout values */ #define ZERO RCONST(0.) #define ONE RCONST(1.0) /* * User-defined vector and accessor macro: IJ_Vptr. * IJ_Vptr is defined in order to express the underlying 3-D structure of * the dependent variable vector from its underlying 1-D storage (an N_Vector). 
 * IJ_Vptr(vv,i,j) returns a pointer to the location in vv corresponding to
 * species index is = 0, x-index ix = i, and y-index jy = j.
 */

#define IJ_Vptr(vv,i,j) (&NV_Ith_OMP(vv, (i)*NUM_SPECIES + (j)*NSMX))

/* Type: UserData.  Contains problem constants, workspace, etc. */

typedef struct {
  sunindextype Neq, ns, np, mx, my;  /* system size; species counts; mesh dims  */
  realtype dx, dy, **acoef;          /* mesh spacings; ns x ns interaction matrix */
  realtype cox[NUM_SPECIES], coy[NUM_SPECIES], bcoef[NUM_SPECIES];
  realtype **PP[MX][MY];             /* preconditioner: one ns x ns block per grid point */
  sunindextype *pivot[MX][MY];       /* LU pivot array for each PP block */
  N_Vector rates;                    /* reaction rates R(x,y,c) at all grid points */
  N_Vector ewt;                      /* workspace for the IDA error weights (used by Precond) */
  void *ida_mem;                     /* IDA memory, queried by Precond for step data */
  int nthreads;                      /* OpenMP thread count used by parallel loops */
} *UserData;

/* Prototypes for functions called by the IDA Solver. */

static int resweb(realtype time, N_Vector cc, N_Vector cp,
                  N_Vector resval, void *user_data);

static int Precond(realtype tt, N_Vector cc, N_Vector cp,
                   N_Vector rr, realtype cj, void *user_data);

static int PSolve(realtype tt, N_Vector cc, N_Vector cp,
                  N_Vector rr, N_Vector rvec, N_Vector zvec,
                  realtype cj, realtype delta, void *user_data);

/* Prototypes for private Helper Functions. */

static void InitUserData(UserData webdata);
static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id,
                               UserData webdata);
static void PrintHeader(int maxl, realtype rtol, realtype atol);
static void PrintOutput(void *ida_mem, N_Vector c, realtype t);
static void PrintFinalStats(void *ida_mem);
static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate,
                 UserData webdata);
static void WebRates(realtype xx, realtype yy, realtype *cxy,
                     realtype *ratesxy, UserData webdata);
static realtype dotprod(sunindextype size, realtype *x1, realtype *x2);
static int check_retval(void *returnvalue, char *funcname, int opt);

/*
 *--------------------------------------------------------------------
 * MAIN PROGRAM
 *--------------------------------------------------------------------
 */

int main(int argc, char *argv[])
{
  void *ida_mem;
  SUNLinearSolver LS;
  UserData webdata;
  N_Vector cc, cp, id;
  int iout, jx, jy, retval;
  int maxl;
  realtype rtol, atol, t0, tout, tret;
  int num_threads;

  ida_mem = NULL;
  LS = NULL;
  webdata = NULL;
  cc = cp = id = NULL;

  /* Set the number of threads to use */
  num_threads = 1;                       /* default value */
#ifdef _OPENMP
  num_threads = omp_get_max_threads();   /* overwrite with OMP_NUM_THREADS */
#endif
  if (argc > 1)                          /* command line overrides environment */
    num_threads = (int) strtol(argv[1], NULL, 0);

  /* Allocate and initialize user data block webdata. */
  webdata = (UserData) malloc(sizeof *webdata);
  webdata->rates = N_VNew_OpenMP(NEQ, num_threads);
  webdata->acoef = newDenseMat(NUM_SPECIES, NUM_SPECIES);
  webdata->ewt = N_VNew_OpenMP(NEQ, num_threads);
  /* One dense ns x ns preconditioner block and pivot array per mesh point. */
  for (jx = 0; jx < MX; jx++) {
    for (jy = 0; jy < MY; jy++) {
      (webdata->pivot)[jx][jy] = newIndexArray(NUM_SPECIES);
      (webdata->PP)[jx][jy] = newDenseMat(NUM_SPECIES, NUM_SPECIES);
    }
  }
  webdata->nthreads = num_threads;

  InitUserData(webdata);

  /* Allocate N-vectors and initialize cc, cp, and id. */
  cc = N_VNew_OpenMP(NEQ, num_threads);
  if(check_retval((void *)cc, "N_VNew_OpenMP", 0)) return(1);
  cp = N_VNew_OpenMP(NEQ, num_threads);
  if(check_retval((void *)cp, "N_VNew_OpenMP", 0)) return(1);
  id = N_VNew_OpenMP(NEQ, num_threads);
  if(check_retval((void *)id, "N_VNew_OpenMP", 0)) return(1);

  SetInitialProfiles(cc, cp, id, webdata);

  /* Set remaining inputs to IDAMalloc. */
  t0 = ZERO;
  rtol = RTOL;
  atol = ATOL;

  /* Call IDACreate and IDAMalloc to initialize IDA. */
  ida_mem = IDACreate();
  if(check_retval((void *)ida_mem, "IDACreate", 0)) return(1);

  retval = IDASetUserData(ida_mem, webdata);
  if(check_retval(&retval, "IDASetUserData", 1)) return(1);

  /* id flags each component differential (1) / algebraic (0); needed by
     IDACalcIC below (values loaded in SetInitialProfiles). */
  retval = IDASetId(ida_mem, id);
  if(check_retval(&retval, "IDASetId", 1)) return(1);

  retval = IDAInit(ida_mem, resweb, t0, cc, cp);
  if(check_retval(&retval, "IDAInit", 1)) return(1);

  retval = IDASStolerances(ida_mem, rtol, atol);
  if(check_retval(&retval, "IDASStolerances", 1)) return(1);

  /* Stashed so Precond can query IDA for error weights / current step. */
  webdata->ida_mem = ida_mem;

  /* Create SUNLinSol_SPGMR linear solver, attach to IDA, and set
     preconditioning routines. */
  maxl = 16;                                  /* max dimension of the Krylov subspace */
  LS = SUNLinSol_SPGMR(cc, PREC_LEFT, maxl);  /* IDA only allows left preconditioning */
  if(check_retval((void *)LS, "SUNLinSol_SPGMR", 0)) return(1);

  retval = IDASetLinearSolver(ida_mem, LS, NULL);
  if(check_retval(&retval, "IDASetLinearSolver", 1)) return(1);

  retval = IDASetPreconditioner(ida_mem, Precond, PSolve);
  if(check_retval(&retval, "IDASetPreconditioner", 1)) return(1);

  /* Call IDACalcIC (with default options) to correct the initial values. */
  tout = RCONST(0.001);
  retval = IDACalcIC(ida_mem, IDA_YA_YDP_INIT, tout);
  if(check_retval(&retval, "IDACalcIC", 1)) return(1);

  /* Print heading, basic parameters, and initial values. */
  PrintHeader(maxl, rtol, atol);
  PrintOutput(ida_mem, cc, ZERO);

  /* Loop over iout, call IDASolve (normal mode), print selected output. */
  for (iout = 1; iout <= NOUT; iout++) {
    retval = IDASolve(ida_mem, tout, &tret, cc, cp, IDA_NORMAL);
    if(check_retval(&retval, "IDASolve", 1)) return(retval);
    PrintOutput(ida_mem, cc, tret);
    /* Output times: multiply by 10 for the first outputs, then add 0.3. */
    if (iout < 3) tout *= TMULT;
    else          tout += TADD;
  }

  /* Print final statistics and free memory. */
  PrintFinalStats(ida_mem);
  printf("num_threads = %i\n\n", num_threads);

  /* Free memory */
  IDAFree(&ida_mem);
  SUNLinSolFree(LS);

  N_VDestroy(cc);
  N_VDestroy(cp);
  N_VDestroy(id);

  destroyMat(webdata->acoef);
  N_VDestroy(webdata->rates);
  N_VDestroy(webdata->ewt);
  for (jx = 0; jx < MX; jx++) {
    for (jy = 0; jy < MY; jy++) {
      destroyArray((webdata->pivot)[jx][jy]);
      destroyMat((webdata->PP)[jx][jy]);
    }
  }
  free(webdata);

  return(0);
}

/* Define lines for readability in later routines */

#define acoef  (webdata->acoef)
#define bcoef  (webdata->bcoef)
#define cox    (webdata->cox)
#define coy    (webdata->coy)

/*
 *--------------------------------------------------------------------
 * FUNCTIONS CALLED BY IDA
 *--------------------------------------------------------------------
 */

/*
 * resweb: System residual function for predator-prey system.
 * This routine calls Fweb to get all the right-hand sides of the
 * equations, then loads the residual vector accordingly,
 * using cp in the case of prey species.
 */

static int resweb(realtype tt, N_Vector cc, N_Vector cp,
                  N_Vector res, void *user_data)
{
  sunindextype jx, jy, is, yloc, loc, np;
  realtype *resv, *cpv;
  UserData webdata;

  webdata = (UserData)user_data;

  cpv = NV_DATA_OMP(cp);
  resv = NV_DATA_OMP(res);
  np = webdata->np;

  /* Call Fweb to set res to vector of right-hand sides. */
  Fweb(tt, cc, res, webdata);

  /* Loop over all grid points, setting residual values appropriately
     for differential or algebraic components. */
#pragma omp parallel for default(shared) private(jy, jx, is, yloc, loc) schedule(static) num_threads(webdata->nthreads)
  for (jy = 0; jy < MY; jy++) {
    yloc = NSMX * jy;
    for (jx = 0; jx < MX; jx++) {
      loc = yloc + NUM_SPECIES * jx;
      for (is = 0; is < NUM_SPECIES; is++) {
        if (is < np)
          /* prey (differential): F = c' - rhs */
          resv[loc+is] = cpv[loc+is] - resv[loc+is];
        else
          /* predator (algebraic): F = -rhs */
          resv[loc+is] = -resv[loc+is];
      }
    }
  }

  return(0);
}

/*
 * Precond: Preconditioner setup routine.
 * For each grid point, approximates the ns x ns block of the Jacobian of
 * the reaction terms by finite-difference perturbation of WebRates, adds
 * cj on the diagonal for the differential component, and LU-factors the
 * block in place for later use by PSolve.
 */

static int Precond(realtype tt, N_Vector cc, N_Vector cp, N_Vector rr,
                   realtype cj, void *user_data)
{
  int retval;
  sunindextype ret;
  realtype uround, xx, yy, del_x, del_y;
  realtype **Pxy, *ratesxy, *Pxycol, *cxy, *cpxy, *ewtxy, cctmp;
  realtype inc, fac, sqru, perturb_rates[NUM_SPECIES];
  int is, js, jx, jy;
  void *ida_mem;
  N_Vector ewt;
  realtype hh;
  UserData webdata;

  webdata = (UserData) user_data;
  del_x = webdata->dx;
  del_y = webdata->dy;

  uround = UNIT_ROUNDOFF;
  sqru = sqrt(uround);   /* perturbation scale for difference quotients */

  /* Current error weights and step size come from the IDA memory. */
  ida_mem = webdata->ida_mem;
  ewt = webdata->ewt;
  retval = IDAGetErrWeights(ida_mem, ewt);
  if(check_retval(&retval, "IDAGetErrWeights", 1)) return(1);
  retval = IDAGetCurrentStep(ida_mem, &hh);
  if(check_retval(&retval, "IDAGetCurrentStep", 1)) return(1);

  for (jy = 0; jy < MY; jy++) {
    yy = jy * del_y;

    for (jx = 0; jx < MX; jx++) {
      xx = jx * del_x;
      Pxy = (webdata->PP)[jx][jy];
      cxy = IJ_Vptr(cc, jx, jy);
      cpxy = IJ_Vptr(cp, jx, jy);
      ewtxy = IJ_Vptr(ewt, jx, jy);
      ratesxy = IJ_Vptr((webdata->rates), jx, jy);

      for (js = 0; js < NUM_SPECIES; js++) {
        /* Increment scaled by solution value, step size, and error weight. */
        inc = sqru*(MAX(fabs(cxy[js]), MAX(hh*fabs(cpxy[js]), ONE/ewtxy[js])));
        cctmp = cxy[js];
        cxy[js] += inc;
        fac = -ONE/inc;

        /* Rates with component js perturbed; column = difference quotient. */
        WebRates(xx, yy, cxy, perturb_rates, webdata);

        Pxycol = Pxy[js];
        for (is = 0; is < NUM_SPECIES; is++)
          Pxycol[is] = (perturb_rates[is] - ratesxy[is])*fac;

        /* Add cj on the diagonal for the differential (prey) component.
           NOTE(review): the literal 1 hard-codes np (np == NPREY == 1 here);
           the generic form would be js < np — confirm against upstream. */
        if (js < 1)
          Pxycol[js] += cj;

        cxy[js] = cctmp;   /* restore the perturbed component */
      }

      /* LU-factor the block in place; fail if it is singular. */
      ret = denseGETRF(Pxy, NUM_SPECIES, NUM_SPECIES, (webdata->pivot)[jx][jy]);
      if (ret != 0) return(1);
    }
  }

  return(0);
}

/*
 * PSolve: Preconditioner solve routine.
 * Applies the block-diagonal preconditioner: copies rvec into zvec, then
 * back-solves each grid point's LU-factored block from Precond.
 */

static int PSolve(realtype tt, N_Vector cc, N_Vector cp, N_Vector rr,
                  N_Vector rvec, N_Vector zvec, realtype cj,
                  realtype dalta, void *user_data)  /* 'dalta' (sic): tolerance delta, unused */
{
  realtype **Pxy, *zxy;
  sunindextype *pivot;
  int jx, jy;
  UserData webdata;

  webdata = (UserData) user_data;

  N_VScale(ONE, rvec, zvec);

  /* Blocks are independent, so both mesh loops parallelize cleanly. */
#pragma omp parallel for collapse(2) default(shared) private(jx, jy, zxy, Pxy, pivot) schedule(static) num_threads(webdata->nthreads)
  for (jx = 0; jx < MX; jx++) {
    for (jy = 0; jy < MY; jy++) {
      zxy = IJ_Vptr(zvec, jx, jy);
      Pxy = (webdata->PP)[jx][jy];
      pivot = (webdata->pivot)[jx][jy];
      denseGETRS(Pxy, NUM_SPECIES, pivot, zxy);
    }
  }

  return(0);
}

/*
 *--------------------------------------------------------------------
 * PRIVATE FUNCTIONS
 *--------------------------------------------------------------------
 */

/*
 * InitUserData: Load problem constants in webdata (of type UserData).
 */

static void InitUserData(UserData webdata)
{
  sunindextype i, j, np;
  realtype *a1,*a2, *a3, *a4, dx2, dy2;

  webdata->mx = MX;
  webdata->my = MY;
  webdata->ns = NUM_SPECIES;
  webdata->np = NPREY;
  webdata->dx = AX/(MX-1);
  webdata->dy = AY/(MY-1);
  webdata->Neq= NEQ;

  /* Set up the coefficients a and b, and others found in the equations. */
  np = webdata->np;
  dx2 = (webdata->dx)*(webdata->dx);
  dy2 = (webdata->dy)*(webdata->dy);

  for (i = 0; i < np; i++) {
    /* Pointers into the four np x np quadrants of acoef. */
    a1 = &(acoef[i][np]);      /* upper-right: prey rows, predator cols */
    a2 = &(acoef[i+np][0]);    /* lower-left: predator rows, prey cols  */
    a3 = &(acoef[i][0]);       /* upper-left: prey rows, prey cols      */
    a4 = &(acoef[i+np][np]);   /* lower-right: predator rows, predator cols */

    /* Fill in the portion of acoef in the four quadrants, row by row. */
    for (j = 0; j < np; j++) {
      *a1++ = -GG;
      *a2++ = EE;
      *a3++ = ZERO;
      *a4++ = ZERO;
    }

    /* Reset the diagonal elements of acoef to -AA. */
    acoef[i][i] = -AA;
    acoef[i+np][i+np] = -AA;

    /* Set coefficients for b and diffusion terms. */
    bcoef[i] = BB;
    bcoef[i+np] = -BB;
    cox[i] = DPREY/dx2;
    cox[i+np] = DPRED/dx2;
    coy[i] = DPREY/dy2;
    coy[i+np] = DPRED/dy2;
  }
}

/*
 * SetInitialProfiles: Set initial conditions in cc, cp, and id.
 * A polynomial profile is used for the prey cc values, and a constant
 * (1.0e5) is loaded as the initial guess for the predator cc values.
 * The id values are set to 1 for the prey and 0 for the predators.
 * The prey cp values are set according to the given system, and
 * the predator cp values are set to zero.
 */

static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id,
                               UserData webdata)
{
  sunindextype loc, yloc, is, jx, jy, np;
  realtype xx, yy, xyfactor;
  realtype *ccv, *cpv, *idv;

  ccv = NV_DATA_OMP(cc);
  cpv = NV_DATA_OMP(cp);
  idv = NV_DATA_OMP(id);
  np = webdata->np;

  /* Loop over grid, load cc values and id values. */
  for (jy = 0; jy < MY; jy++) {
    yy = jy * webdata->dy;
    yloc = NSMX * jy;
    for (jx = 0; jx < MX; jx++) {
      xx = jx * webdata->dx;
      /* Polynomial factor, zero on the domain boundary. */
      xyfactor = RCONST(16.0)*xx*(ONE-xx)*yy*(ONE-yy);
      xyfactor *= xyfactor;
      loc = yloc + NUM_SPECIES*jx;
      for (is = 0; is < NUM_SPECIES; is++) {
        if (is < np) {
          /* prey: polynomial profile, differential component (id = 1) */
          ccv[loc+is] = RCONST(10.0) + (realtype)(is+1) * xyfactor;
          idv[loc+is] = ONE;
        }
        else {
          /* predator: flat guess, algebraic component (id = 0);
             corrected later by IDACalcIC */
          ccv[loc+is] = RCONST(1.0e5);
          idv[loc+is] = ZERO;
        }
      }
    }
  }

  /* Set c' for the prey by calling the function Fweb. */
  Fweb(ZERO, cc, cp, webdata);

  /* Set c' for predators to 0. */
  for (jy = 0; jy < MY; jy++) {
    yloc = NSMX * jy;
    for (jx = 0; jx < MX; jx++) {
      loc = yloc + NUM_SPECIES * jx;
      for (is = np; is < NUM_SPECIES; is++) {
        cpv[loc+is] = ZERO;
      }
    }
  }
}

/*
 * Print first lines of output (problem description)
 */

static void PrintHeader(int maxl, realtype rtol, realtype atol)
{
  printf("\nidasFoodWeb_kry_omp: Predator-prey DAE OpenMP example problem using Krylov solver for IDAS \n\n");
  printf("Number of species ns: %d", NUM_SPECIES);
  printf(" Mesh dimensions: %d x %d", MX, MY);
  printf(" System size: %d\n", NEQ);
#if defined(SUNDIALS_EXTENDED_PRECISION)
  printf("Tolerance parameters: rtol = %Lg atol = %Lg\n", rtol, atol);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
  printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol);
#else
  printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol);
#endif
  printf("Linear solver: SUNLinSol_SPGMR, maxl = %d\n", maxl);
  printf("CalcIC called to correct initial predator concentrations.\n\n");
  printf("-----------------------------------------------------------\n");
  printf(" t bottom-left top-right");
  printf(" | nst k h\n");
  printf("-----------------------------------------------------------\n\n");
}

/*
 * PrintOutput: Print output values at output time t = tt.
 * Selected run statistics are printed. Then values of the concentrations
 * are printed for the bottom left and top right grid points only.
 */

static void PrintOutput(void *ida_mem, N_Vector c, realtype t)
{
  int i, kused, retval;
  long int nst;
  realtype *c_bl, *c_tr, hused;

  /* Last method order, step count, and last step size from IDA. */
  retval = IDAGetLastOrder(ida_mem, &kused);
  check_retval(&retval, "IDAGetLastOrder", 1);
  retval = IDAGetNumSteps(ida_mem, &nst);
  check_retval(&retval, "IDAGetNumSteps", 1);
  retval = IDAGetLastStep(ida_mem, &hused);
  check_retval(&retval, "IDAGetLastStep", 1);

  /* Sample the concentrations at the bottom-left and top-right corners. */
  c_bl = IJ_Vptr(c,0,0);
  c_tr = IJ_Vptr(c,MX-1,MY-1);

#if defined(SUNDIALS_EXTENDED_PRECISION)
  printf("%8.2Le %12.4Le %12.4Le | %3ld %1d %12.4Le\n",
         t, c_bl[0], c_tr[0], nst, kused, hused);
  for (i=1;i<NUM_SPECIES;i++)
    printf(" %12.4Le %12.4Le |\n",c_bl[i],c_tr[i]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
  printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n",
         t, c_bl[0], c_tr[0], nst, kused, hused);
  for (i=1;i<NUM_SPECIES;i++)
    printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]);
#else
  printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n",
         t, c_bl[0], c_tr[0], nst, kused, hused);
  for (i=1;i<NUM_SPECIES;i++)
    printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]);
#endif

  printf("\n");
}

/*
 * PrintFinalStats: Print final run data contained in iopt.
 */

static void PrintFinalStats(void *ida_mem)
{
  long int nst, nre, sli, netf, nps, npevals, nrevalsLS;
  int retval;

  retval = IDAGetNumSteps(ida_mem, &nst);
  check_retval(&retval, "IDAGetNumSteps", 1);
  retval = IDAGetNumLinIters(ida_mem, &sli);
  check_retval(&retval, "IDAGetNumLinIters", 1);
  retval = IDAGetNumResEvals(ida_mem, &nre);
  check_retval(&retval, "IDAGetNumResEvals", 1);
  retval = IDAGetNumErrTestFails(ida_mem, &netf);
  check_retval(&retval, "IDAGetNumErrTestFails", 1);
  retval = IDAGetNumPrecSolves(ida_mem, &nps);
  check_retval(&retval, "IDAGetNumPrecSolves", 1);
  retval = IDAGetNumPrecEvals(ida_mem, &npevals);
  check_retval(&retval, "IDAGetNumPrecEvals", 1);
  retval = IDAGetNumLinResEvals(ida_mem, &nrevalsLS);
  check_retval(&retval, "IDAGetNumLinResEvals", 1);

  printf("-----------------------------------------------------------\n");
  printf("Final run statistics: \n\n");
  printf("Number of steps = %ld\n", nst);
  printf("Number of residual evaluations = %ld\n", nre);
  printf("Number of Preconditioner evaluations = %ld\n", npevals);
  printf("Number of linear iterations = %ld\n", sli);
  printf("Number of error test failures = %ld\n", netf);
  printf("Number of precond solve fun called = %ld\n", nps);
}

/*
 * Fweb: Rate function for the food-web problem.
 * This routine computes the right-hand sides of the system equations,
 * consisting of the diffusion term and interaction term.
 * The interaction term is computed by the function WebRates.
 */

static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate,
                 UserData webdata)
{
  sunindextype jx, jy, is, idyu, idyl, idxu, idxl;
  realtype xx, yy, *cxy, *ratesxy, *cratexy, dcyli, dcyui, dcxli, dcxui;

  /* Loop over grid points, evaluate interaction vector (length ns),
     form diffusion difference terms, and load crate. */
  for (jy = 0; jy < MY; jy++) {
    yy = (webdata->dy) * jy ;
    /* Homogeneous Neumann BC: reflect the stencil offset at y boundaries. */
    idyu = (jy!=MY-1) ? NSMX : -NSMX;
    idyl = (jy!= 0 ) ? NSMX : -NSMX;

    for (jx = 0; jx < MX; jx++) {
      xx = (webdata->dx) * jx;
      /* Same reflection at the x boundaries. */
      idxu = (jx!= MX-1) ? NUM_SPECIES : -NUM_SPECIES;
      idxl = (jx!= 0 ) ? NUM_SPECIES : -NUM_SPECIES;
      cxy = IJ_Vptr(cc,jx,jy);
      ratesxy = IJ_Vptr(webdata->rates,jx,jy);
      cratexy = IJ_Vptr(crate,jx,jy);

      /* Get interaction vector at this grid point. */
      WebRates(xx, yy, cxy, ratesxy, webdata);

      /* Loop over species, do differencing, load crate segment. */
#pragma omp parallel for default(shared) private(is, dcyli, dcyui, dcxli, dcxui) schedule(static) num_threads(webdata->nthreads)
      for (is = 0; is < NUM_SPECIES; is++) {

        /* Differencing in y. */
        dcyli = *(cxy+is) - *(cxy - idyl + is) ;
        dcyui = *(cxy + idyu + is) - *(cxy+is);

        /* Differencing in x. */
        dcxli = *(cxy+is) - *(cxy - idxl + is);
        dcxui = *(cxy + idxu +is) - *(cxy+is);

        /* Compute the crate values at (xx,yy). */
        cratexy[is] = coy[is] * (dcyui - dcyli) +
                      cox[is] * (dcxui - dcxli) + ratesxy[is];

      } /* End is loop */
    } /* End of jx loop */
  } /* End of jy loop */
}

/*
 * WebRates: Evaluate reaction rates at a given spatial point.
 * At a given (x,y), evaluate the array of ns reaction terms R.
 */

static void WebRates(realtype xx, realtype yy, realtype *cxy,
                     realtype *ratesxy, UserData webdata)
{
  int is;
  realtype fac;

  /* ratesxy[i] = sum_j a(i,j) * c_j */
  for (is = 0; is < NUM_SPECIES; is++)
    ratesxy[is] = dotprod(NUM_SPECIES, cxy, acoef[is]);

  /* Spatially varying factor of b(i). */
  fac = ONE + ALPHA*xx*yy + BETA*sin(FOURPI*xx)*sin(FOURPI*yy);

  /* R_i = c_i * (b_i * fac + (A c)_i) */
  for (is = 0; is < NUM_SPECIES; is++)
    ratesxy[is] = cxy[is]*( bcoef[is]*fac + ratesxy[is] );
}

/*
 * dotprod: dot product routine for realtype arrays, for use by WebRates.
 */

static realtype dotprod(sunindextype size, realtype *x1, realtype *x2)
{
  sunindextype i;
  realtype *xx1, *xx2, temp = ZERO;

  xx1 = x1;
  xx2 = x2;
  for (i = 0; i < size; i++)
    temp += (*xx1++) * (*xx2++);

  return(temp);
}

/*
 * Check function return value...
 *   opt == 0 means SUNDIALS function allocates memory so check if
 *            returned NULL pointer
 *   opt == 1 means SUNDIALS function returns an integer value so check if
 *            retval < 0
 *   opt == 2 means function allocates memory so check if returned
 *            NULL pointer
 */

static int check_retval(void *returnvalue, char *funcname, int opt)
{
  int *retval;

  if (opt == 0 && returnvalue == NULL) {
    /* Check if SUNDIALS function returned NULL pointer - no memory allocated */
    fprintf(stderr,
            "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
            funcname);
    return(1);
  } else if (opt == 1) {
    /* Check if retval < 0 */
    retval = (int *) returnvalue;
    if (*retval < 0) {
      fprintf(stderr,
              "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n",
              funcname, *retval);
      return(1);
    }
  } else if (opt == 2 && returnvalue == NULL) {
    /* Check if function returned NULL pointer - no memory allocated */
    fprintf(stderr,
            "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
            funcname);
    return(1);
  }

  return(0);
}
e5.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/**
 * a) #pragma omp parallel: designates a block that is executed by multiple
 *    threads. When a master thread reaches the block, a team of threads is
 *    created and each thread executes a duplicate of the code.
 *    #pragma omp for: can be used inside a parallel region. Designates that
 *    the iterations of the following loop should be executed in parallel
 *    by the team of threads
 *    #pragma omp parallel for: convenience method that behaves identically
 *    to a parallel directive followed immediately by a for directive
 *
 * b) The problem of data/loop dependency exists when loops that access shared
 *    data are executed in parallel.
 *    In the program there exists a `read-after-write hazard` since the array
 *    must first be written to at position x[i-1] before it can be read and
 *    the next factorial computed.
 **/

void factorial(int x[], int n);

/* Driver: compute the first 8 factorials in parallel and print them. */
int main () {
  int arr[8];
  factorial(arr, 8);
  return 0;
}

/*
 * factorial: fill x[0..n-1] with x[i] = i! using a parallel loop.
 *
 * The loop carries a read-after-write dependency: x[i] needs x[i-1].
 * ALL writes to x therefore go inside the single `ordered` region, which
 * is executed exactly once per iteration, in iteration order.  (Placing
 * only the else-branch in the ordered region — as an earlier revision
 * did — leaves the writes x[0] = x[1] = 1 unsynchronized with the read
 * of x[1] by iteration 2, a data race.)
 */
void factorial(int x[], int n) {
  int i, j, thread_id;

  #pragma omp parallel for ordered num_threads(4) \
  default(none) private(i, j, thread_id) shared(n, x) schedule(static, 1)
  for (i = 0; i < n; i++) {
    /* Executed in iteration order, one iteration at a time. */
    #pragma omp ordered
    {
      if (i < 2)
        x[i] = 1;            /* 0! = 1! = 1 */
      else
        x[i] = x[i-1] * i;   /* i! = (i-1)! * i */
    }
    /* x[i] was written by this thread above, so reading it here is safe. */
    thread_id = omp_get_thread_num();
    printf("Thread id # %d computed factorial(%d) = %d \n", thread_id, i, x[i]);
  }

  for (j = 0; j < n; j++)
    printf("%d\t", x[j]);
  printf("\n");
}
mssql12_fmt_plug.c
/* Modified in August, 2012 by Dhiru Kholia (dhiru at openwall.com) for MS SQL 2012
 *
 * This software is Copyright (c) 2010 bartavelle, <bartavelle at bandecon.com>,
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without modification, are permitted.
 *
 * Modified by Mathieu Perrin (mathieu at tpfh.org) 09/06
 * Microsoft MS-SQL05 password cracker
 *
 * UTF-8 support by magnum 2011, same terms as above
 *
 * Creating MS SQL 2012 hashes:
 *
 * sqlcmd -L
 * sqlcmd -S <server> -U sa -P <password>
 * 1> select pwdencrypt("openwall")
 * 2> go
 *
 * Dumping hashes from MS SQL server 2012:
 *
 * sqlcmd -S <server> -U sa -P <password>
 * 1> select * from sys.sql_logins
 * 2> go
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_mssql12;
#elif FMT_REGISTERS_H
john_register_one(&fmt_mssql12);
#else

#include <string.h>

#include "arch.h"
//#undef _OPENMP
//#undef SIMD_COEF_32
//#undef SIMD_COEF_64
//#undef SIMD_PARA_SHA512

/*
 * Only effective for SIMD.
 * Undef to disable reversing steps for benchmarking.
 */
#define REVERSE_STEPS

#include "misc.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "unicode.h"
#include "sha2.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#include "memdbg.h"

#ifdef _OPENMP
#include <omp.h>
#ifdef SIMD_COEF_64
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 1024 // tuned K8-dual HT
#endif
#endif
#endif

#define FORMAT_LABEL "mssql12"
#define FORMAT_NAME "MS SQL 2012/2014"
#define ALGORITHM_NAME "SHA512 " SHA512_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
/* Plaintext is stored as UTF-16, hence the /2. */
#define PLAINTEXT_LENGTH ((111 - SALT_SIZE) / 2)
/* "0x0200" + 8 salt hex digits + 128 digest hex digits = 142.
   NOTE(review): expression is unparenthesized; safe in the comparisons
   used here, but would misbind inside arithmetic. */
#define CIPHERTEXT_LENGTH 54 + 44 * 2
#define BINARY_SIZE 8
#define DIGEST_SIZE 64
#define BINARY_ALIGN 8
#define SALT_SIZE 4
#define SALT_ALIGN 4

#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif

#ifndef SHA_BUF_SIZ
#define SHA_BUF_SIZ 16
#endif

/* Known hash/plaintext pairs used by the self-test and benchmark. */
static struct fmt_tests tests[] = {
	{"0x0200F733058A07892C5CACE899768F89965F6BD1DED7955FE89E1C9A10E27849B0B213B5CE92CC9347ECCB34C3EFADAF2FD99BFFECD8D9150DD6AACB5D409A9D2652A4E0AF16", "Password1!"},
	{"0x0200AB3E1F9028A739EEF62ABF672427276A32D5EDD349E638E7F2CD81DAA247CFE20EE4E3B0A30B2D0AE3C3FA010E61752F1BF45E045041F1B988C083C7F118527E3E5F0562", "openwall"},
	/* hashes from https://hashcat.net/forum */
	{"0x02006BF4AB05873FF0C8A4AFD1DC5912CBFDEF62E0520A3353B04E1184F05C873C9C76BBADDEAAC1E9948C7B6ABFFD62BFEFD7139F17F6AFE10BE0FEE7A178644623067C2423", "carlos"},
	{"0x0200935819BA20F1C7289CFF2F8FF9F0E40DA5E6D04986F988CFE6603DA0D2BC0160776614763198967D603FBD8C103151A15E70D18E7B494C7F13F16804A7A4EB206084E632", "test"},
	{"0x0200570AC969EF7C6CCB3312E8BEDE1D635EB852C06496957F0FA845B20FCD1C7C457474A5B948B68C47C2CB704D08978871F532C9EB11199BB5F56A06AC915C3799DB8A64C1", "test1"},
	{"0x0200A56045DBCD848E297FA8D06E7579D62B7129928CA0BC5D232A7320972EF5A5455C01411B8D3A7FF3D18A55058A12FAEE5DA410AFE6CE61FF5C39E5FF57CD3EDD57DB1C3B", "test2"},
	{"0x020059799F1B6D897BE2C5A76D3FFDC52B308190E82FA01F2FA51129B4863A7EE21B3FF6FE9F7850976045237805F338DD36DC9345B429F47A402614C6F2F2B02C56DF14C4F4", "Paul"},
	{"0x0200881E2999DD8E3583695F405696257B99559953705A34D774C15AC1D42699BB77BC56DB5F657751335C1B350890E643790553B60329CAE7A2E7D3C04CF8856C4DB0058723", "DBAmaster"},
	{"0x0200D648446E70180A6DFB6DF14DB38623EBFE490FE445751900FD5DC45A2B5D20D7AFFE8C6FFC2890BAE1AF34430A21F2F1E4DE50E25757FDB4789716D8D85C6985A00BC454", "database"},
	{"0x02008AC3B9DC7B67EF9D3C1D25D8007A4B957D5BD61D71E5E9DA08D9F8F012EDDAD168E1CADD93D4627433FBFEE8BCF6CBB42D5B9A31886FC5FF7F970B164F4B5815E03D6DE7", "jhl9mqe5"},
	{"0x020094C4D05A082DB1362B1A972C5D5F1C04C527090A7427E93C13AFEC705A011D8980E994FA647C7D44E25A427246218E25674571DB1710E49C713FB17129549C29E303086A", "coldfusion"},
	{"0x0200B9BD5C85918D9BEE84417957618FBA1CB80B71E81550FAE09AD027B4089017CD6461D8EC9509873C2D5096CDBE8F16E4EFA9035C35F9F4917CE58DB99DC6836CEA7483A7", "sql2005"},
	{NULL}
};

/* Salt of the currently loaded hash (set_salt). */
static unsigned char cursalt[SALT_SIZE];

#ifdef SIMD_COEF_64
static ARCH_WORD_64 (*saved_key)[SHA_BUF_SIZ];
static ARCH_WORD_64 (*crypt_out);
static int max_keys;
static int new_keys;   /* keys changed since last crypt: re-append salt */
#else
static char (*saved_key)[(PLAINTEXT_LENGTH + 1) * 2 + SALT_SIZE];
static ARCH_WORD_64 (*crypt_out)[DIGEST_SIZE / 8];
static int *saved_len;
#endif

/* valid: accept only "0x0200" + upper-case hex of the expected length. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	int i;

	if (strlen(ciphertext) != CIPHERTEXT_LENGTH)
		return 0;
	if (strncmp(ciphertext, "0x0200", 6))
		return 0;
	for (i = 6; i < CIPHERTEXT_LENGTH; i++) {
		if (!((('0' <= ciphertext[i])&&(ciphertext[i] <= '9')) ||
		      //(('a' <= ciphertext[i])&&(ciphertext[i] <= 'f')) ||
		      (('A' <= ciphertext[i])&&(ciphertext[i] <= 'F'))))
			return 0;
	}
	return 1;
}

/* set_salt: install a 4-byte salt; SIMD buffers must be re-salted. */
static void set_salt(void *salt)
{
	memcpy(cursalt, salt, SALT_SIZE);
#ifdef SIMD_COEF_64
	new_keys = 1;
#endif
}

/* get_salt: decode the 8 hex digits after "0x0200" into 4 raw bytes. */
static void *get_salt(char *ciphertext)
{
	static unsigned char *out2;
	int l;

	if (!out2)
		out2 = mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD);

	for (l = 0;l<SALT_SIZE;l++) {
		out2[l] = atoi16[ARCH_INDEX(ciphertext[l*2+6])]*16 +
			atoi16[ARCH_INDEX(ciphertext[l*2+7])];
	}
	return out2;
}

static void set_key_enc(char *_key, int index);

/* init: size key/output buffers (scaled for OpenMP) and pick the
   set_key variant matching the configured target encoding. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	int omp_t = omp_get_max_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_64
	saved_key = mem_calloc_align(self->params.max_keys_per_crypt,
	                             sizeof(*saved_key), MEM_ALIGN_SIMD);
	crypt_out = mem_calloc_align(self->params.max_keys_per_crypt,
	                             8 * sizeof(ARCH_WORD_64), MEM_ALIGN_SIMD);
	max_keys = self->params.max_keys_per_crypt;
#else
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_len));
#endif
	if (options.target_enc == UTF_8)
		self->params.plaintext_length = MIN(125, PLAINTEXT_LENGTH * 3);
	if (options.target_enc != ISO_8859_1 && options.target_enc != ASCII)
		self->methods.set_key = set_key_enc;
}

/* done: release everything allocated in init. */
static void done(void)
{
#ifndef SIMD_COEF_64
	MEM_FREE(saved_len);
#endif
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}

#ifdef SIMD_COEF_64
static void clear_keys(void)
{
	memset(saved_key, 0, sizeof(*saved_key) * max_keys);
}
#endif

/* set_key: store the candidate as UTF-16 (fast 8859-1/ASCII path). */
static void set_key(char *_key, int index)
{
#ifndef SIMD_COEF_64
	/* ASCII or ISO-8859-1 to UCS-2 */
	UTF8 *s = (UTF8*)_key;
	UTF16 *d = (UTF16*)saved_key[index];

	for (saved_len[index] = 0; s[saved_len[index]]; saved_len[index]++)
#if ARCH_LITTLE_ENDIAN
		d[saved_len[index]] = s[saved_len[index]];
#else
		d[saved_len[index]] = s[saved_len[index]] << 8;
#endif
	d[saved_len[index]] = 0;
	saved_len[index] <<= 1;   /* length in bytes, not characters */
#else
	ARCH_WORD_64 *keybuffer = saved_key[index];
	unsigned short *w16 = (unsigned short*)keybuffer;
	UTF8 *key = (UTF8*)_key;
	int len = 0;

	/* Widen each byte to a 16-bit unit, copying the terminating 0 too.
	   NOTE(review): byte order of the halfwords depends on host
	   endianness here — presumably matched by SIMDSHA512body's
	   SSEi_FLAT_IN handling; confirm on big-endian targets. */
	while ((*w16++ = *key++))
		len++;
	/* Final SHA-512 block word 15 = total message length in BITS
	   (UTF-16 key bytes + 4 salt bytes). */
	keybuffer[15] = ((len << 1) + SALT_SIZE) << 3;
	new_keys = 1;
#endif
}

/* set_key_enc: same as set_key but converts from the configured
   (non-8859-1) encoding to UTF-16. */
static void set_key_enc(char *_key, int index)
{
#ifndef SIMD_COEF_64
	/* Any encoding -> UTF-16 */
	saved_len[index] = enc_to_utf16((UTF16*)saved_key[index],
	                                PLAINTEXT_LENGTH,
	                                (unsigned char*)_key, strlen(_key));
	if (saved_len[index] < 0)   /* truncated: recompute actual length */
		saved_len[index] = strlen16((UTF16*)saved_key[index]);
	saved_len[index] <<= 1;
#else
	ARCH_WORD_64 *keybuffer = saved_key[index];
	UTF16 *w16 = (UTF16*)keybuffer;
	UTF8 *key = (UTF8*)_key;
	int len;

	len = enc_to_utf16(w16, PLAINTEXT_LENGTH, key, strlen(_key));
	if (len < 0)
		len = strlen16(w16);
	keybuffer[15] = ((len << 1) + SALT_SIZE) << 3;
	new_keys = 1;
#endif
}

/* get_key: convert the stored UTF-16 key back to the target encoding. */
static char *get_key(int index)
{
#ifndef SIMD_COEF_64
	((UTF16*)saved_key[index])[saved_len[index]>>1] = 0;
	return (char*)utf16_to_enc((UTF16*)saved_key[index]);
#else
	ARCH_WORD_64 *keybuffer = saved_key[index];
	UTF16 *w16 = (UTF16*)keybuffer;
	static UTF16 out[PLAINTEXT_LENGTH + 1];
	unsigned int i, len;

	/* Recover the key length from the stored bit length. */
	len = ((keybuffer[15] >> 3) - SALT_SIZE) >> 1;

	for(i = 0; i < len; i++)
		out[i] = w16[i];
	out[i] = 0;

	return (char*)utf16_to_enc(out);
#endif
}

/* get_binary: decode the 128 digest hex digits (offset 14) to raw bytes;
   for SIMD also byte-swap and partially reverse the last SHA-512 steps. */
static void *get_binary(char *ciphertext)
{
	static ARCH_WORD_64 out[SHA_BUF_SIZ];
	char *realcipher = (char*)out;
	int i;

	for (i = 0;i<DIGEST_SIZE;i++)
		realcipher[i] = atoi16[ARCH_INDEX(ciphertext[i*2+14])]*16 +
			atoi16[ARCH_INDEX(ciphertext[i*2+15])];
#ifdef SIMD_COEF_64
	alter_endianity_to_BE64 (realcipher, DIGEST_SIZE/8);
#ifdef REVERSE_STEPS
	sha512_reverse(out);
#endif
#endif
	return (void *)realcipher;
}

/* Index of lane `index` within the interleaved SIMD output layout. */
#define BASE_IDX (((unsigned int)index&(SIMD_COEF_64-1))+(unsigned int)index/SIMD_COEF_64*8*SIMD_COEF_64)

#ifndef REVERSE_STEPS
#undef SSEi_REVERSE_STEPS
#define SSEi_REVERSE_STEPS 0
#endif

/* crypt_all: hash SHA512(UTF16(key) . salt) for `count` candidates,
   MAX_KEYS_PER_CRYPT lanes at a time. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
#if defined(_OPENMP) || PLAINTEXT_LENGTH > 1
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
#ifdef SIMD_COEF_64
		/* Append salt + 0x80 padding byte only when keys/salt changed. */
		if (new_keys) {
			int i;

			for (i = 0; i < MAX_KEYS_PER_CRYPT; i++) {
				ARCH_WORD_64 *keybuffer = saved_key[index + i];
				unsigned char *wucp = (unsigned char*)keybuffer;
				int j, len = (keybuffer[15] >> 3) - SALT_SIZE;

				if (len >= 0)
				for (j = 0; j < SALT_SIZE; j++)
					wucp[len + j] = cursalt[j];
				wucp[len + 4] = 0x80;   /* SHA-512 padding start (4 == SALT_SIZE) */
			}
		}
		SIMDSHA512body(&saved_key[index], &crypt_out[BASE_IDX], NULL,
		               SSEi_REVERSE_STEPS | SSEi_FLAT_IN);
#else
		SHA512_CTX ctx;

		memcpy(saved_key[index]+saved_len[index], cursalt, SALT_SIZE);
		SHA512_Init(&ctx );
		SHA512_Update(&ctx, saved_key[index], saved_len[index]+SALT_SIZE );
		SHA512_Final((unsigned char *)crypt_out[index], &ctx);
#endif
	}
#ifdef SIMD_COEF_64
	new_keys = 0;
#endif
	return count;
}

/* Same interleaved-lane index as BASE_IDX, for hash table lookups. */
#define HASH_IDX (((unsigned int)index&(SIMD_COEF_64-1))+(unsigned int)index/SIMD_COEF_64*8*SIMD_COEF_64)

#ifdef SIMD_COEF_64
static int get_hash_0 (int index) { return crypt_out[HASH_IDX] & PH_MASK_0; }
static int get_hash_1 (int index) { return crypt_out[HASH_IDX] & PH_MASK_1; }
static int get_hash_2 (int index) { return crypt_out[HASH_IDX] & PH_MASK_2; }
static int get_hash_3 (int index) { return crypt_out[HASH_IDX] & PH_MASK_3; }
static int get_hash_4 (int index) { return crypt_out[HASH_IDX] & PH_MASK_4; }
static int get_hash_5 (int index) { return crypt_out[HASH_IDX] & PH_MASK_5; }
static int get_hash_6 (int index) { return crypt_out[HASH_IDX] & PH_MASK_6; }
#else
static int get_hash_0(int index) { return (crypt_out[index])[0] & PH_MASK_0; }
static int get_hash_1(int index) { return (crypt_out[index])[0] & PH_MASK_1; }
static int get_hash_2(int index) { return (crypt_out[index])[0] & PH_MASK_2; }
static int get_hash_3(int index) { return (crypt_out[index])[0] & PH_MASK_3; }
static int get_hash_4(int index) { return (crypt_out[index])[0] & PH_MASK_4; }
static int get_hash_5(int index) { return (crypt_out[index])[0] & PH_MASK_5; }
static int get_hash_6(int index) { return (crypt_out[index])[0] & PH_MASK_6; }
#endif

static int binary_hash_0(void
*binary) { return ((ARCH_WORD_64*)binary)[0] & PH_MASK_0; } static int binary_hash_1(void *binary) { return ((ARCH_WORD_64*)binary)[0] & PH_MASK_1; } static int binary_hash_2(void *binary) { return ((ARCH_WORD_64*)binary)[0] & PH_MASK_2; } static int binary_hash_3(void *binary) { return ((ARCH_WORD_64*)binary)[0] & PH_MASK_3; } static int binary_hash_4(void *binary) { return ((ARCH_WORD_64*)binary)[0] & PH_MASK_4; } static int binary_hash_5(void *binary) { return ((ARCH_WORD_64*)binary)[0] & PH_MASK_5; } static int binary_hash_6(void *binary) { return ((ARCH_WORD_64*)binary)[0] & PH_MASK_6; } static int cmp_all(void *binary, int count) { unsigned int index; for (index = 0; index < count; index++) #ifdef SIMD_COEF_64 if (((ARCH_WORD_64*)binary)[0] == crypt_out[HASH_IDX]) return 1; #else if ( ((ARCH_WORD_64*)binary)[0] == crypt_out[index][0] ) return 1; #endif return 0; } static int cmp_one(void *binary, int index) { #ifdef SIMD_COEF_64 return (((ARCH_WORD_64*)binary)[0] == crypt_out[HASH_IDX]); #else return !memcmp(binary, crypt_out[index], BINARY_SIZE); #endif } static int cmp_exact(char *source, int index) { ARCH_WORD_64 *binary = get_binary(source); #if SIMD_COEF_64 char *key = get_key(index); UTF16 wkey[PLAINTEXT_LENGTH]; SHA512_CTX ctx; ARCH_WORD_64 crypt_out[DIGEST_SIZE / sizeof(ARCH_WORD_64)]; int len; len = enc_to_utf16(wkey, PLAINTEXT_LENGTH, (UTF8*)key, strlen(key)); if (len < 0) len = strlen16(wkey); len *= 2; SHA512_Init(&ctx); SHA512_Update(&ctx, wkey, len); SHA512_Update(&ctx, cursalt, SALT_SIZE); SHA512_Final((unsigned char*)crypt_out, &ctx); alter_endianity_to_BE64(crypt_out, DIGEST_SIZE/8); #ifdef REVERSE_STEPS sha512_reverse(crypt_out); #endif return !memcmp(binary, crypt_out, DIGEST_SIZE); #else return !memcmp(binary, crypt_out[index], DIGEST_SIZE); #endif } static int salt_hash(void *salt) { // The >> 8 gave much better distribution on a huge set I analysed // although that was mssql05 return (*((ARCH_WORD_32 *)salt) >> 8) & (SALT_HASH_SIZE - 1); 
} struct fmt_main fmt_mssql12 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_OMP, { NULL }, { NULL }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { NULL }, fmt_default_source, { binary_hash_0, binary_hash_1, binary_hash_2, binary_hash_3, binary_hash_4, binary_hash_5, binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, #ifdef SIMD_COEF_64 clear_keys, #else fmt_default_clear_keys, #endif crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
GB_unaryop__abs_fp64_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com  See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_fp64_fp32
// op(A') function:  GB_tran__abs_fp64_fp32

// C type:   double
// A type:   float
// cast:     double cij = (double) aij
// unaryop:  cij = fabs (aij)

// type of the A matrix entries
#define GB_ATYPE \
    float

// type of the C matrix entries
#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = fabs (x) ;

// casting
#define GB_CASTING(z, aij) \
    double z = (double) aij ;

// cij = op (cast (aij)): fetch aij, typecast float -> double, apply fabs
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_FP64 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise kernel over anz entries, parallelized with a static OpenMP
// schedule.  Cx and Ax may alias because each iteration touches only index p.
// Returns GrB_NO_VALUE when this specialization is compiled out (GB_DISABLE),
// in which case the caller falls back to the generic worker.
GrB_Info GB_unop__abs_fp64_fp32
(
    double *Cx,       // Cx and Ax may be aliased
    float *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose worker is included textually below; the GB_* macros defined
// above specialize its generic body for this type/operator combination.
GrB_Info GB_tran__abs_fp64_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
gt_gtf.c
#include "gt_gtf.h" GT_INLINE gt_gtf_entry* gt_gtf_entry_new(const uint64_t start, const uint64_t end, const gt_strand strand, gt_string* const type){ gt_gtf_entry* entry = malloc(sizeof(gt_gtf_entry)); entry->uid = 0; entry->start = start; entry->end = end; entry->num_children = 0; entry->type = type; entry->strand = strand; entry->gene_type = NULL; entry->gene_id = NULL; entry->transcript_id = NULL; entry->length = 0; return entry; } GT_INLINE void gt_gtf_entry_delete(gt_gtf_entry* const entry){ free(entry); } GT_INLINE gt_gtf_ref* gt_gtf_ref_new(void){ gt_gtf_ref* ref = malloc(sizeof(gt_gtf_ref)); ref->entries = gt_vector_new(GTF_DEFAULT_ENTRIES, sizeof(gt_gtf_entry*)); return ref; } GT_INLINE void gt_gtf_ref_delete(gt_gtf_ref* const ref){ register uint64_t s = gt_vector_get_used(ref->entries); register uint64_t i = 0; for(i=0; i<s; i++){ gt_gtf_entry_delete( (gt_vector_get_elm(ref->entries, i, gt_gtf_entry))); } gt_vector_delete(ref->entries); free(ref); } GT_INLINE gt_gtf* gt_gtf_new(void){ gt_gtf* gtf = malloc(sizeof(gt_gtf)); gtf->refs = gt_shash_new(); gtf->types = gt_shash_new(); gtf->gene_ids = gt_shash_new(); gtf->transcript_ids = gt_shash_new(); gtf->gene_types = gt_shash_new(); gtf->genes = gt_shash_new(); gtf->transcripts = gt_shash_new(); return gtf; } GT_INLINE void gt_gtf_delete(gt_gtf* const gtf){ gt_shash_delete(gtf->refs, true); gt_shash_delete(gtf->types, true); gt_shash_delete(gtf->gene_ids, true); gt_shash_delete(gtf->transcript_ids, true); gt_shash_delete(gtf->gene_types, true); gt_shash_delete(gtf->genes, false); gt_shash_delete(gtf->transcripts, false); free(gtf); } GT_INLINE gt_gtf_hits* gt_gtf_hits_new(void){ gt_gtf_hits* hits = malloc(sizeof(gt_gtf_hits)); hits->exon_hits = gt_vector_new(16, sizeof(gt_gtf_hit*)); hits->num_genes = 0; hits->num_protein_coding =0; hits->num_paired_genes =0; return hits; } GT_INLINE void gt_gtf_hits_delete(gt_gtf_hits* const hits){ gt_gtf_hits_clear(hits); gt_vector_delete(hits->exon_hits); free(hits); } 
GT_INLINE void gt_gtf_hits_clear(gt_gtf_hits* const hits){ uint64_t i = 0; for(i=0; i<gt_vector_get_used(hits->exon_hits); i++){ gt_gtf_hit* hit = *gt_vector_get_elm(hits->exon_hits, i, gt_gtf_hit*); gt_gtf_hit_delete(hit); } hits->num_genes = 0; hits->num_protein_coding =0; hits->num_paired_genes =0; hits->junction_hit_ration = 0.0; gt_vector_clear(hits->exon_hits); } GT_INLINE gt_gtf_count_parms* gt_gtf_count_params_new(bool coverage){ gt_gtf_count_parms* p = gt_malloc_(1, sizeof(gt_gtf_count_parms), false, false); p->num_maps = 0; p->exon_overlap = 0; p->unweighted_counts = true; p->single_pair_counts = false; p->num_junctions = 0; p->count_bases = false; p->num_annotated_junctions = 0; if(coverage){ p->single_transcript_coverage = GT_GTF_INIT_COVERAGE(); p->gene_body_coverage = GT_GTF_INIT_COVERAGE(); }else{ p->single_transcript_coverage = NULL; p->gene_body_coverage = NULL; } return p; } GT_INLINE void gt_gtf_count_params_delete(gt_gtf_count_parms* params){ if(params->single_transcript_coverage != NULL){ free(params->single_transcript_coverage); } if(params->gene_body_coverage != NULL){ free(params->gene_body_coverage); } free(params); } GT_INLINE gt_string* gt_gtf_get_type(const gt_gtf* const gtf, char* const type){ if(!gt_gtf_contains_type(gtf, type)){ gt_string* s = gt_string_set_new(type); gt_shash_insert_string(gtf->types, type, s); } return gt_shash_get(gtf->types, type, gt_string); } GT_INLINE bool gt_gtf_contains_type(const gt_gtf* const gtf, char* const name){ return gt_shash_is_contained(gtf->types, name); } GT_INLINE gt_gtf_ref* gt_gtf_get_ref(const gt_gtf* const gtf, char* const name){ if(!gt_gtf_contains_ref(gtf, name)){ gt_gtf_ref* rr = gt_gtf_ref_new(); gt_shash_insert(gtf->refs, name, rr, gt_gtf_ref*); } return gt_shash_get(gtf->refs, name, gt_gtf_ref); } GT_INLINE bool gt_gtf_contains_ref(const gt_gtf* const gtf, char* const name){ return gt_shash_is_contained(gtf->refs, name); } GT_INLINE gt_string* gt_gtf_get_gene_id(const gt_gtf* const gtf, 
char* const name){ if(!gt_gtf_contains_gene_id(gtf, name)){ gt_string* const gene_id = gt_string_set_new(name); gt_shash_insert(gtf->gene_ids, name, gene_id, gt_string*); } return gt_shash_get(gtf->gene_ids, name, gt_string); } GT_INLINE bool gt_gtf_contains_gene_id(const gt_gtf* const gtf, char* const name){ return gt_shash_is_contained(gtf->gene_ids, name); } GT_INLINE gt_string* gt_gtf_get_transcript_id(const gt_gtf* const gtf, char* const name){ if(!gt_gtf_contains_transcript_id(gtf, name)){ gt_string* const gene_id = gt_string_set_new(name); gt_shash_insert(gtf->transcript_ids, name, gene_id, gt_string*); } return gt_shash_get(gtf->transcript_ids, name, gt_string); } GT_INLINE bool gt_gtf_contains_transcript_id(const gt_gtf* const gtf, char* const name){ return gt_shash_is_contained(gtf->transcript_ids, name); } GT_INLINE gt_string* gt_gtf_get_gene_type(const gt_gtf* const gtf, char* const name){ if(!gt_gtf_contains_gene_type(gtf, name)){ gt_string* const gene_type = gt_string_set_new(name); gt_shash_insert(gtf->gene_types, name, gene_type, gt_string*); } return gt_shash_get(gtf->gene_types, name, gt_string); } GT_INLINE bool gt_gtf_contains_gene_type(const gt_gtf* const gtf, char* const name){ return gt_shash_is_contained(gtf->gene_types, name); } GT_INLINE gt_gtf_entry* gt_gtf_get_gene_by_id(const gt_gtf* const gtf, char* const key){ if(gt_shash_is_contained(gtf->genes, key)){ return gt_shash_get_element(gtf->genes, key); } return NULL; } GT_INLINE gt_gtf_entry* gt_gtf_get_transcript_by_id(const gt_gtf* const gtf, char* const key){ if(gt_shash_is_contained(gtf->transcripts, key)){ return gt_shash_get_element(gtf->transcripts, key); } return NULL; } /** * Comparator that compares two gtf_entries by starting position */ GT_INLINE int gt_gtf_sort_by_start_cmp_(const gt_gtf_entry** a, const gt_gtf_entry** b){ uint64_t p1 = (*a)->start; uint64_t p2 = (*b)->start; return p1 < p2 ? -1 : (p1>p2 ? 
1 : gt_string_cmp( (*a)->type, (*b)->type )); } /** * Comparator that compares two gtf_entries by ending position */ GT_INLINE int gt_gtf_sort_by_end_cmp_(const gt_gtf_entry** a, const gt_gtf_entry** b){ uint64_t p1 = (*a)->end; uint64_t p2 = (*b)->end; return p1 < p2 ? -1 : (p1>p2 ? 1 : gt_string_cmp( (*a)->type, (*b)->type )); } /** * Sort vector of gt_gtf_entries by starting position */ GT_INLINE void gt_gtf_sort_by_start(gt_vector* entries) { qsort(gt_vector_get_mem(entries, gt_gtf_entry*), gt_vector_get_used(entries), sizeof(gt_gtf_entry**), (int (*)(const void *,const void *))gt_gtf_sort_by_start_cmp_); } /** * Sort vector of gt_gtf_entries by ending position */ GT_INLINE void gt_gtf_sort_by_end( gt_vector* entries) { qsort(gt_vector_get_mem(entries, gt_gtf_entry*), gt_vector_get_used(entries), sizeof(gt_gtf_entry**), (int (*)(const void *,const void *))gt_gtf_sort_by_end_cmp_); } GT_INLINE gt_gtf_node* gt_gtf_create_node(gt_vector* entries){ const uint64_t len = gt_vector_get_used(entries); if(len == 0){ return NULL; } gt_gtf_node* const node = malloc(sizeof(gt_gtf_node)); const gt_gtf_entry* mid = *gt_vector_get_elm(entries, len/2, gt_gtf_entry*); node->midpoint = mid->start + ((mid->end - mid->start)/2); node->entries_by_end = gt_vector_new(16, sizeof(gt_gtf_entry*)); node->entries_by_start = gt_vector_new(16, sizeof(gt_gtf_entry*)); gt_vector* to_left = gt_vector_new(16, sizeof(gt_gtf_entry*)); gt_vector* to_right = gt_vector_new(16, sizeof(gt_gtf_entry*)); GT_VECTOR_ITERATE(entries, element, counter, gt_gtf_entry*){ if((*element)->end < node->midpoint){ gt_vector_insert(to_left, (*element), gt_gtf_entry*); }else if((*element)->start > node->midpoint){ gt_vector_insert(to_right, (*element), gt_gtf_entry*); }else{ gt_vector_insert(node->entries_by_end, (*element), gt_gtf_entry*); gt_vector_insert(node->entries_by_start, (*element), gt_gtf_entry*); } } // sort the start and end lists gt_gtf_sort_by_start(node->entries_by_start); 
gt_gtf_sort_by_end(node->entries_by_end); // delete incoming entry list gt_vector_delete(entries); if(gt_vector_get_used(to_left) > 0){ // create left node node->left = gt_gtf_create_node(to_left); }else{ node->left = NULL; gt_vector_delete(to_left); } if(gt_vector_get_used(to_right) > 0){ // create right node node->right = gt_gtf_create_node(to_right); }else{ node->right = NULL; gt_vector_delete(to_right); } return node; } /* * Read next tab separated field from line or return NULL if no such field exists */ GT_INLINE char* gt_gtf_read_gtf_field_(char** line){ char* current = *line; GT_READ_UNTIL(line, **line=='\t'); if(GT_IS_EOL(line)) return NULL; **line = EOS; GT_NEXT_CHAR(line); return current; } GT_INLINE gt_status gt_gtf_read_attributes_(char** line, gt_shash* attrs){ gt_shash_clear(attrs, false); while(!GT_IS_EOL(line)){ while(**line == ' ') GT_NEXT_CHAR(line); if(**line == EOL || **line == EOS) return GT_STATUS_OK; // get the attribute name char* name = *line; GT_READ_UNTIL(line, **line==' ') if(GT_IS_EOL(line)){ gt_error_msg("Error parsing GTF attributes. 
Expected space but found end of line"); return GT_GTF_INVALID_LINE; } **line = EOS; GT_NEXT_CHAR(line); // skip to attribute start while(**line == ' ') GT_NEXT_CHAR(line); // remove starting quote if(**line == '"') GT_NEXT_CHAR(line); char* attr = *line; // skip until the closing ; while(**line != ';') GT_NEXT_CHAR(line); if(GT_IS_EOL(line)) return GT_GTF_INVALID_LINE; // remove trailing quotes and add EOS if(*(*line-1) == '"') *(*line-1) = EOS; else **line = EOS; GT_NEXT_CHAR(line); // add attribute if(gt_shash_is_contained(attrs, name)){ gt_shash_remove(attrs, name, false); } gt_shash_insert(attrs, name, attr, char*); if(gt_shash_is_contained(attrs, "gene_id") && gt_shash_is_contained(attrs, "gene_type") && gt_shash_is_contained(attrs, "transcript_id")){ return GT_STATUS_OK; } } return GT_STATUS_OK; } /** * Parse a single GTF line */ GT_INLINE gt_status gt_gtf_read_line(char* line, gt_gtf* const gtf, uint64_t counter, gt_shash* attrs){ // skip comments if(line[0] == '#'){ return GT_STATUS_OK; } char* ref = NULL; char* type = NULL; uint64_t start = 0; uint64_t end = 0; gt_strand strand = UNKNOWN; char* current = line; ref = gt_gtf_read_gtf_field_(&line); if(ref == NULL){ gt_error_msg("Unable to parse name: '%s'", line); return GT_GTF_INVALID_LINE; } // SKIP source current = gt_gtf_read_gtf_field_(&line); if(current == NULL){ gt_error_msg("Unable to parse source: '%s'", line); return GT_GTF_INVALID_LINE; } // type type = gt_gtf_read_gtf_field_(&line); if(type == NULL){ gt_error_msg("Unable to parse type: '%s'", line); return GT_GTF_INVALID_LINE; } // start current = gt_gtf_read_gtf_field_(&line); if(current == NULL){ gt_error_msg("Unable to parse start: '%s'", line); return GT_GTF_INVALID_LINE; } start = atol(current); // end current = gt_gtf_read_gtf_field_(&line); if(current == NULL){ gt_error_msg("Unable to parse end: '%s'", line); return GT_GTF_INVALID_LINE; } end = atol(current); // SKIP score current = gt_gtf_read_gtf_field_(&line); if(current == NULL){ 
gt_error_msg("Unable to parse score: '%s'", line); return GT_GTF_INVALID_LINE; } // strand current = gt_gtf_read_gtf_field_(&line); if(current == NULL) return GT_GTF_INVALID_LINE; if(current == NULL){ gt_error_msg("Unable to parse strand: '%s'", line); return GT_GTF_INVALID_LINE; } if(*current == '+'){ strand = FORWARD; }else if(*current == '-'){ strand = REVERSE; } // SIKP last thing where i can not remember what it was current = gt_gtf_read_gtf_field_(&line); if(current == NULL){ gt_error_msg("Unable to parse last: '%s'", line); return GT_GTF_INVALID_LINE; } // WARNING >>> the attribute parser stops after // the currently used feels are found. If you want // to add a field, also update the attribute parser if(gt_gtf_read_attributes_(&line, attrs) != GT_STATUS_OK){ gt_error_msg("Unable to parse attributes: '%s'", line); return GT_GTF_INVALID_ATTRIBUTES; } // get the type or create it gt_string* tp = gt_gtf_get_type(gtf, type); gt_gtf_entry* e = gt_gtf_entry_new(start, end, strand, tp); e->uid = counter; if(gt_shash_is_contained(attrs, "gene_id")){ e->gene_id = gt_gtf_get_gene_id(gtf, gt_shash_get(attrs, "gene_id", char)); } if(gt_shash_is_contained(attrs, "gene_type")){ e->gene_type = gt_gtf_get_gene_type(gtf, gt_shash_get(attrs, "gene_type", char)); } if(gt_shash_is_contained(attrs, "transcript_id")){ e->transcript_id = gt_gtf_get_transcript_id(gtf, gt_shash_get(attrs, "transcript_id", char)); } // get the ref or create it gt_gtf_ref* gtref = gt_gtf_get_ref(gtf, ref); gt_vector_insert(gtref->entries, e, gt_gtf_entry*); if(strcmp(e->type->buffer, "gene") == 0){ gt_shash_insert(gtf->genes, e->gene_id->buffer, e, gt_gtf_entry*); } if(strcmp(e->type->buffer, "transcript") == 0){ gt_shash_insert(gtf->transcripts, e->transcript_id->buffer, e, gt_gtf_entry*); } return GT_STATUS_OK; } bool gt_gtf_hits_junction(gt_map* map, gt_gtf_entry* e){ uint64_t rs = gt_map_get_begin_mapping_position(map); uint64_t re = gt_map_get_end_mapping_position(map); bool hit = (rs==e->start) 
|| (rs==e->end) || (re == e->end) || (re == e->start); return hit; } GT_INLINE uint64_t gt_gtf_get_map_begin(gt_map* const map){ return gt_map_get_begin_mapping_position(map) + gt_map_get_left_trim_length(map); } GT_INLINE uint64_t gt_gtf_get_map_end(gt_map* const map){ return gt_map_get_end_mapping_position(map); } /** * Iterate over the map blocks and count exon-exon junctions that are annotated */ GT_INLINE uint64_t gt_gtf_count_junction(const gt_gtf* const gtf, gt_map* const map){ uint64_t blocks = gt_map_get_num_blocks(map); if(blocks <= 1) return 0; // single block map uint64_t num_junctions = 0; char* seq_name = gt_map_get_seq_name(map); gt_vector* hits = gt_vector_new(16, sizeof(gt_gtf_entry*)); gt_shash* last_hits = NULL; GT_MAP_ITERATE(map, block){ uint64_t start = gt_map_get_begin_mapping_position(block); uint64_t end = gt_map_get_end_mapping_position(block); if(last_hits != NULL){ // there was a block before, check if we found an annotated junction gt_gtf_search(gtf, hits, seq_name, start, start, true); GT_VECTOR_ITERATE(hits, e, c, gt_gtf_entry*){ gt_gtf_entry* hit = *e; if(hit->transcript_id != NULL && hit->type != NULL && strcmp(hit->type->buffer, "exon") == 0){ if(gt_shash_is_contained(last_hits, hit->transcript_id->buffer)){ num_junctions++; break; } } } } if(last_hits == NULL) last_hits = gt_shash_new(); else gt_shash_clear(last_hits, true); // search for the overlaps with the end of the block gt_gtf_search(gtf, hits, seq_name, end, end, true); GT_VECTOR_ITERATE(hits, e, c, gt_gtf_entry*){ gt_gtf_entry* hit = *e; if(hit->transcript_id != NULL && hit->type != NULL && strcmp(hit->type->buffer, "exon") == 0){ gt_gtf_count_(last_hits, hit->transcript_id->buffer); } } } gt_vector_delete(hits); gt_shash_delete(last_hits, true); return num_junctions; } void gt_gtf_print_entry_(FILE* target, gt_gtf_entry* e, gt_map* map){ if(map != NULL){ gt_output_map_fprint_map(target, map, NULL); fprintf(target, " ==> "); } if(e->type != NULL){ fprintf(target, "%s : 
%"PRIu64" - %"PRIu64" (%c)", e->type->buffer, e->start, e->end, (e->strand==FORWARD?'+':'-') ); } if(e->gene_id != NULL){ fprintf(target, " GID:%s", e->gene_id->buffer); } if(e->transcript_id != NULL){ fprintf(target, " TID:%s", e->transcript_id->buffer); } if(e->type != NULL){ fprintf(target, " [%s]", e->type->buffer); } if(e->gene_type != NULL){ fprintf(target, " [%s]", e->gene_type->buffer); } fprintf(target, " [#transcripts: %"PRIu64"]", e->num_children); if(map != NULL && gt_gtf_hits_junction(map, e)){ fprintf(target, " [Hits JS]"); } fprintf(target, "\n"); } GT_INLINE gt_gtf_hit* gt_gtf_hit_new(void){ gt_gtf_hit* hit = malloc(sizeof(gt_gtf_hit)); hit->exon_overlap = 0.0; hit->intron_length = 0.0; hit->is_protein_coding = false; hit->junction_hits = 0.0; hit->map = NULL; hit->num_junctions = 0; hit->pairs_transcript = false; hit->pairs_splits = false; hit->pairs_gene = false; hit->num_junctions_hits =0; hit->num_template_blocks = 0; hit->transcripts = NULL; hit->genes = NULL; hit->hits_exon = false; return hit; } GT_INLINE void gt_gtf_hit_delete(gt_gtf_hit* hit){ if(hit->transcripts != NULL){ gt_shash_delete(hit->transcripts, true); } if(hit->genes != NULL){ gt_shash_delete(hit->genes, true); } free(hit); } GT_INLINE gt_status gt_gtf_reload_buffer(gt_buffered_input_file* const buffered_fasta_input) { GT_BUFFERED_INPUT_FILE_CHECK(buffered_fasta_input); // Dump buffer if BOF it attached to input, and get new out block (always FIRST) gt_buffered_input_file_dump_attached_buffers(buffered_fasta_input->attached_buffered_output_file); // Read new input block const uint64_t read_lines = gt_buffered_input_file_get_block(buffered_fasta_input, GT_NUM_LINES_50K); if (gt_expect_false(read_lines==0)) return GT_INPUT_FILE_EOF; // Assign block ID gt_buffered_input_file_set_id_attached_buffers(buffered_fasta_input->attached_buffered_output_file,buffered_fasta_input->block_id); return GT_STATUS_OK; } GT_INLINE gt_status gt_gtf_get_line(gt_buffered_input_file* const 
buffered_input, gt_string* const line) { GT_BUFFERED_INPUT_FILE_CHECK(buffered_input); GT_STRING_CHECK(line); gt_status error_code; // Check the end_of_block. Reload buffer if needed if (gt_buffered_input_file_eob(buffered_input)) { if ((error_code=gt_gtf_reload_buffer(buffered_input))!=GT_IMP_OK) return error_code; } // Prepare the template char* const line_start = buffered_input->cursor; gt_string_clear(line); GT_INPUT_FILE_SKIP_LINE(buffered_input); gt_string_set_nstring_static(line, line_start, (buffered_input->cursor - line_start)); return GT_IMP_OK; } GT_INLINE uint64_t gt_gtf_merge_(const gt_gtf* const target, gt_gtf* source, uint64_t counter){ // get the type or create it GT_SHASH_BEGIN_KEY_ITERATE(source->refs, key){ gt_gtf_ref* source_ref = gt_gtf_get_ref(source, key); gt_gtf_ref* target_ref = gt_gtf_get_ref(target, key); GT_VECTOR_ITERATE(source_ref->entries, value, c, gt_gtf_entry*){ gt_gtf_entry* e = *value; e->uid = counter++; if(e->gene_id != NULL){ e->gene_id = gt_gtf_get_gene_id(target, gt_string_get_string(e->gene_id)); } if(e->transcript_id != NULL){ e->transcript_id = gt_gtf_get_transcript_id(target, gt_string_get_string(e->transcript_id)); } if(e->type != NULL)e->type = gt_gtf_get_type(target, gt_string_get_string(e->type)); if(e->gene_type != NULL)e->gene_type = gt_gtf_get_gene_type(target, gt_string_get_string(e->gene_type)); gt_vector_insert(target_ref->entries, e, gt_gtf_entry*); if(strcmp(e->type->buffer, GT_GTF_TYPE_GENE) == 0 && !gt_shash_is_contained(target->genes, e->gene_id->buffer)){ gt_shash_insert(target->genes, e->gene_id->buffer, e, gt_gtf_entry*); } if(strcmp(e->type->buffer, GT_GTF_TYPE_TRANSCRIPT) == 0 && !gt_shash_is_contained(target->transcripts, e->transcript_id->buffer)){ gt_shash_insert(target->transcripts, e->transcript_id->buffer, e, gt_gtf_entry*); } } }GT_SHASH_END_ITERATE; return counter; } GT_INLINE gt_gtf* gt_gtf_read_from_stream(FILE* input, uint64_t threads){ gt_input_file* input_file = 
gt_input_stream_open(input); return gt_gtf_read(input_file, threads); } GT_INLINE gt_gtf* gt_gtf_read_from_file(char* input, uint64_t threads){ gt_input_file* input_file = gt_input_file_open(input, false); return gt_gtf_read(input_file, threads); } GT_INLINE gt_gtf* gt_gtf_read(gt_input_file* input_file, const uint64_t threads){ GT_NULL_CHECK(input_file); GT_ZERO_CHECK(threads); uint64_t counter = 0; uint64_t i = 0; gt_gtf* const gtf = gt_gtf_new(); gt_gtf** gtfs = gt_calloc(threads-1, gt_gtf*, true); for(i=0; i<threads-1; i++){ gtfs[i] = gt_gtf_new(); } #ifdef HAVE_OPENMP #pragma omp parallel num_threads(threads) #endif { #ifdef HAVE_OPENMP uint64_t tid = omp_get_thread_num(); #else uint64_t tid=0; #endif gt_buffered_input_file* buffered_input = gt_buffered_input_file_new(input_file); gt_string* buffered_line = gt_string_new(GTF_MAX_LINE_LENGTH); gt_gtf* thread_gtf; if(tid == 0){ thread_gtf = gtf; }else{ thread_gtf = gtfs[tid-1]; } gt_shash* attrs = gt_shash_new(); while(gt_gtf_get_line(buffered_input, buffered_line)){ if(gt_gtf_read_line(buffered_line->buffer, thread_gtf, buffered_input->current_line_num, attrs) != GT_STATUS_OK){ // raise error gt_fatal_error_msg("Failed to parse GTF line '%s'", buffered_line->buffer); } counter++; } gt_shash_delete(attrs, false); gt_buffered_input_file_close(buffered_input); gt_string_delete(buffered_line); } gt_input_file_close(input_file); counter = 0; // merge all the thread gtfs into a single one for(i=0; i<threads-1; i++){ counter = gt_gtf_merge_(gtf, gtfs[i], counter); gt_gtf_delete(gtfs[i]); } free(gtfs); gt_string* const exon_t = gt_string_set_new("exon"); gt_string* const transcript_t = gt_string_set_new("transcript"); gt_string* const intron_t = gt_string_set_new("intron"); // sort the refs GT_SHASH_BEGIN_ELEMENT_ITERATE(gtf->refs,shash_element,gt_gtf_ref) { // sort by start position gt_gtf_sort_by_start(shash_element->entries); uint64_t size = gt_vector_get_used(shash_element->entries); uint64_t i = 0; gt_shash* 
last_exons = gt_shash_new(); gt_shash* exons_counts = gt_shash_new(); for(i=0; i<size; i++){ gt_gtf_entry* entry = *gt_vector_get_elm(shash_element->entries, i, gt_gtf_entry*); if(entry->type != NULL && gt_string_equals(exon_t, entry->type)){ gt_string* transcript_id = entry->transcript_id; if(transcript_id != NULL){ // set exon id and count the exon for the transcript entry->num_children = gt_gtf_get_count_(exons_counts, transcript_id->buffer); gt_gtf_count_(exons_counts, transcript_id->buffer); if(!gt_shash_is_contained(last_exons, gt_string_get_string(transcript_id))){ gt_shash_insert(last_exons, gt_string_get_string(transcript_id), entry, gt_gtf_entry*); }else{ gt_gtf_entry* prev_exon = gt_shash_get_element(last_exons, gt_string_get_string(transcript_id)); gt_gtf_entry* intron = gt_gtf_entry_new(prev_exon->end+1, entry->start-1, prev_exon->strand, intron_t); intron->transcript_id = transcript_id; intron->gene_id = prev_exon->gene_id; intron->uid = counter++; gt_vector_insert(shash_element->entries, intron, gt_gtf_entry*); gt_shash_remove(last_exons, gt_string_get_string(transcript_id),false); gt_shash_insert(last_exons, gt_string_get_string(transcript_id), entry, gt_gtf_entry*); } // add exon counts gt_gtf_entry* transcript = gt_gtf_get_transcript_by_id(gtf, gt_string_get_string(entry->transcript_id)); if(transcript != NULL){ transcript->num_children++; entry->length = transcript->length; transcript->length += (entry->end - entry->start) + 1; } } }else if(entry->type != NULL && gt_string_equals(transcript_t, entry->type)){ // sum transcript counts for gene id if(entry->gene_id != NULL){ gt_gtf_entry* gene = gt_gtf_get_gene_by_id(gtf, gt_string_get_string(entry->gene_id)); gene->num_children++; } } } gt_shash_delete(last_exons, false); gt_shash_delete(exons_counts, true); // create a interval tree node for each ref shash_element->node = gt_gtf_create_node(shash_element->entries); } GT_SHASH_END_ITERATE return gtf; } /* * Binary search for start position */ 
GT_INLINE uint64_t gt_gtf_bin_search(gt_vector* const entries, const uint64_t t, const uint64_t end){ uint64_t used = gt_vector_get_used(entries); uint64_t l = 0; uint64_t h = used - 1; uint64_t m = 0; register gt_gtf_entry* e = *gt_vector_get_elm(entries, h, gt_gtf_entry*); while(l < h ){ m = (l + h) / 2; e = *gt_vector_get_elm(entries, m, gt_gtf_entry*); if(e->start < t){ l = m + 1; }else{ h = m; } } e = *gt_vector_get_elm(entries, l, gt_gtf_entry*); if (h == l){ return l; }else{ return m; } } GT_INLINE void gt_gtf_search_node_(gt_gtf_node* node, const uint64_t start, const uint64_t end, gt_vector* const target){ if(node == NULL) return; // add overlapping intervals from this node GT_VECTOR_ITERATE(node->entries_by_start, element, counter, gt_gtf_entry*){ if((*element)->start > end){ break; } gt_gtf_entry* e = *element; //if((*element)->start <= start && (*element)->end >= end){ if((start < e->end && end > e->start) || (start >= e->start && end <=e->end) || (start < e->end && end >= e->end) || (start < e->start && end > e->end)){ gt_vector_insert(target, (*element), gt_gtf_entry*); } } if(end < node->midpoint || start < node->midpoint){ // search left tree gt_gtf_search_node_(node->left, start, end, target); } if (start > node->midpoint || end > node->midpoint){ gt_gtf_search_node_(node->right, start, end, target); } } GT_INLINE uint64_t gt_gtf_search(const gt_gtf* const gtf, gt_vector* const target, char* const ref, const uint64_t start, const uint64_t end, const bool clear_target){ if(clear_target)gt_vector_clear(target); // make sure the target ref is contained if (! 
gt_shash_is_contained(gtf->refs, ref)){ return 0; } const gt_gtf_ref* const source_ref = gt_gtf_get_ref(gtf, ref); gt_gtf_search_node_(source_ref->node, start, end, target); return gt_vector_get_used(target); } GT_INLINE void gt_gtf_count_(gt_shash* const table, char* const element){ if(!gt_shash_is_contained(table, element)){ uint64_t* v = gt_malloc_uint64(); *v = 1; gt_shash_insert(table, element, v, uint64_t); }else{ uint64_t* v = gt_shash_get(table,element,uint64_t); ++(*v); } } GT_INLINE void gt_gtf_count_custom_(gt_shash* const table, char* const element, uint64_t c){ if(!gt_shash_is_contained(table, element)){ uint64_t* v = gt_malloc_uint64(); *v = c; gt_shash_insert(table, element, v, uint64_t); }else{ uint64_t* v = gt_shash_get(table,element,uint64_t); *v += c; } } GT_INLINE void gt_gtf_count_sum_(gt_shash* const table, char* const element, uint64_t value){ if(!gt_shash_is_contained(table, element)){ uint64_t* v = gt_malloc_uint64(); *v = value; gt_shash_insert(table, element, v, uint64_t); }else{ uint64_t* v = gt_shash_get(table,element,uint64_t); *v += value; } } GT_INLINE void gt_gtf_count_weight_(gt_shash* const table, char* const element, double weight){ if(!gt_shash_is_contained(table, element)){ double* v = malloc(sizeof(double*)); *v = weight; gt_shash_insert(table, element, v, double); }else{ double* v = gt_shash_get(table,element,double); *v += weight; } } GT_INLINE uint64_t gt_gtf_get_count_(gt_shash* const table, char* const element){ if(!gt_shash_is_contained(table, element)){ return 0; } uint64_t* v = gt_shash_get(table,element,uint64_t); return *v; } GT_INLINE float gt_gtf_get_count_weight(gt_shash* const table, char* const element){ if(!gt_shash_is_contained(table, element)){ return 0.0; } double* v = gt_shash_get(table,element,double); return *v; } GT_INLINE void gt_gtf_create_hit(gt_vector* search_hits, gt_shash* all_genes, gt_gtf_hits* hits, gt_gtf_hit* template_hit){ template_hit->transcripts = gt_shash_new(); template_hit->genes = 
gt_shash_new(); template_hit->is_protein_coding = false; template_hit->hits_exon = false; bool counted_protein = false; // set gene count GT_SHASH_BEGIN_ITERATE(all_genes, gene_id, c, uint64_t){ gt_gtf_count_sum_(template_hit->genes, gene_id, *c); }GT_SHASH_END_ITERATE; GT_VECTOR_ITERATE(search_hits, v, c, gt_gtf_entry*){ gt_gtf_entry* e = *v; // count transcript if(e->transcript_id != NULL){ gt_gtf_count_(template_hit->transcripts, gt_string_get_string(e->transcript_id)); } if(!template_hit->hits_exon && strcmp(e->type->buffer, "exon") == 0){ template_hit->hits_exon = true; } if(!counted_protein && e->gene_type != NULL){ template_hit->is_protein_coding |= (strcmp(e->gene_type->buffer, "protein_coding") == 0); hits->num_protein_coding++; counted_protein = true; } } template_hit->pairs_gene = (gt_shash_get_num_elements(all_genes) > 1); // single gene template_hit->pairs_transcript = (gt_shash_get_num_elements(template_hit->transcripts) == 1); // single gene hits->num_paired_genes += (template_hit->pairs_gene ? 1 : 0); gt_vector_insert(hits->exon_hits, template_hit, gt_gtf_hit*); } GT_INLINE void gt_gtf_search_template_hits(const gt_gtf* const gtf, gt_gtf_hits* const hits, gt_template* const template_src){ gt_vector* const search_hits = gt_vector_new(32, sizeof(gt_gtf_entry*)); // reset the hits gt_gtf_hits_clear(hits); gt_shash* all_genes = gt_shash_new(); // process paired alignment GT_TEMPLATE_ITERATE_MMAP__ATTR_(template_src,mmap,mmap_attr) { gt_gtf_hit* template_hit = gt_gtf_hit_new(); template_hit->num_template_blocks = gt_template_get_num_blocks(template_src); template_hit->mmap = mmap; template_hit->map = NULL; template_hit->map_attributes = mmap_attr; template_hit->num_junctions = (gt_map_get_num_blocks(mmap[0]) + gt_map_get_num_blocks(mmap[1])) - 2; template_hit->num_junctions_hits = gt_gtf_count_junction(gtf, mmap[0]) + gt_gtf_count_junction(gtf, mmap[1]); double junction_ratio = template_hit->num_junctions == 0 ? 
-1.0 : (double)template_hit->num_junctions_hits/(double)template_hit->num_junctions; if(junction_ratio > 0 && junction_ratio > hits->junction_hit_ration) hits->junction_hit_ration = junction_ratio; gt_shash_clear(all_genes, true); gt_gtf_count_map(gtf, mmap[0], mmap[1], NULL, all_genes, NULL, NULL); gt_gtf_search_map(gtf, search_hits, mmap[0], true); gt_gtf_search_map(gtf, search_hits, mmap[1], false); gt_gtf_create_hit(search_hits, all_genes, hits, template_hit); hits->num_genes += gt_shash_get_num_elements(all_genes); } gt_shash_delete(all_genes, true); gt_vector_delete(search_hits); } GT_INLINE void gt_gtf_search_alignment_hits(const gt_gtf* const gtf, gt_gtf_hits* const hits, gt_alignment* const alignment){ gt_vector* const search_hits = gt_vector_new(32, sizeof(gt_gtf_entry*)); // reset the hits gt_gtf_hits_clear(hits); gt_shash* all_genes = gt_shash_new(); // process paired alignment GT_ALIGNMENT_ITERATE(alignment, map){ gt_gtf_hit* template_hit = gt_gtf_hit_new(); template_hit->map = map; template_hit->mmap = NULL; template_hit->num_junctions = gt_map_get_num_blocks(map) - 1; template_hit->num_junctions_hits = gt_gtf_count_junction(gtf, map); template_hit->num_template_blocks = 1; double junction_ratio = template_hit->num_junctions == 0 ? 
-1.0 : (double)template_hit->num_junctions_hits/(double)template_hit->num_junctions; if(junction_ratio > 0 && junction_ratio > hits->junction_hit_ration) hits->junction_hit_ration = junction_ratio; gt_shash_clear(all_genes, false); gt_gtf_count_map(gtf, map, NULL, NULL, all_genes, NULL, NULL); gt_gtf_search_map(gtf, search_hits, map, true); gt_gtf_create_hit(search_hits, all_genes, hits, template_hit); hits->num_genes += gt_shash_get_num_elements(all_genes); } gt_shash_delete(all_genes, false); gt_vector_delete(search_hits); } GT_INLINE void gt_gtf_count_add_(gt_shash* const source, gt_shash* const target){ GT_SHASH_BEGIN_ITERATE(source, key, value, uint64_t){ if(!gt_shash_is_contained(target, key)){ uint64_t* v = gt_malloc_uint64(); *v = *value; gt_shash_insert(target, key, v, uint64_t); }else{ uint64_t* v = gt_shash_get(target,key,uint64_t); *v += (*value); } }GT_SHASH_END_ITERATE; } GT_INLINE void gt_gtf_add_coverage(uint64_t* store, const uint64_t transcript_length, const uint64_t bucket){ // add to all store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_ALL, bucket)] += 1; if(transcript_length <= 150){ store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_150, bucket)] += 1; } if(transcript_length > 150 && transcript_length <= 250){ store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_250, bucket)] += 1; } if(transcript_length > 250 && transcript_length <= 500){ store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_500, bucket)] += 1; } if(transcript_length > 500 && transcript_length <= 1000){ store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_1000, bucket)] += 1; } if(transcript_length > 1000 && transcript_length <= 2500){ store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_2500, bucket)] += 1; } if(transcript_length > 2500 && transcript_length <= 5000){ store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_5000, bucket)] += 1; } if(transcript_length > 5000 && transcript_length <= 7500){ 
store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_7500, bucket)] += 1; } if(transcript_length > 7500 && transcript_length <= 10000){ store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_10000, bucket)] += 1; } if(transcript_length > 10000 && transcript_length <= 15000){ store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_15000, bucket)] += 1; } if(transcript_length > 15000 && transcript_length <= 20000){ store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_20000, bucket)] += 1; } } GT_INLINE void gt_gtf_count_coverage_(const gt_gtf* const gtf, gt_map* const map, char* gene_id, gt_gtf_count_parms* params){ // get coordinates uint64_t start = gt_gtf_get_map_begin(map); uint64_t end = gt_gtf_get_map_end(map); if(start > end){ return; // happens for (1)>123*... trim followed by split } uint64_t map_length = (end-start)+1; if(map_length <= 1){ // count only maps with at least 2 bases in length return; } // store the search hits and search gt_vector* const hits = gt_vector_new(32, sizeof(gt_gtf_entry*)); gt_gtf_search(gtf, hits, gt_map_get_seq_name(map), start, end, true); GT_VECTOR_ITERATE(hits, e, i, gt_gtf_entry*){ gt_gtf_entry* hit = *e; if(hit->transcript_id == NULL) continue; // no transcript id if(hit->type == NULL || strcmp("exon", hit->type->buffer) != 0) continue; // no exon or no type if(gene_id != NULL && (hit->gene_id == NULL || strcmp(hit->gene_id->buffer, gene_id) != 0)) continue; // we are looking for a specific gene_id gt_gtf_entry* transcript = gt_gtf_get_transcript_by_id(gtf, hit->transcript_id->buffer); if(transcript == NULL || transcript->length <= 100){ continue; } if(hit->gene_id == NULL) continue; // no gene id on the hit gt_gtf_entry* gene = gt_gtf_get_gene_by_id(gtf,hit->gene_id->buffer); if(gene == NULL) continue; // no gene found if(gene_id != NULL && strcmp(gene_id, gene->gene_id->buffer) != 0) continue; // we are looking for a specific hit uint64_t exon_length = (hit->end - hit->start) + 1; int64_t rel_start = start - 
hit->start; int64_t rel_end = (rel_start + map_length) - 1; if(rel_start < 0){ rel_start = 0; } if(rel_end > exon_length){ rel_end = exon_length; } if(rel_start >= 0 && rel_end <= exon_length){ // contained in range // count for exon count uint64_t start_bucket = (((rel_start/(double)exon_length) * 100.0) + 0.5) - 1; uint64_t end_bucket = (((rel_end/(double)exon_length) * 100.0) + 0.5) - 1; uint64_t s = 0; if(start_bucket < 100 && end_bucket >= start_bucket && end_bucket < 100){ // handle reverse strand and flip coordinates if(hit->strand == REVERSE){ uint64_t tmp = start_bucket; start_bucket = (GT_GTF_COVERAGE_BUCKETS - 1) - end_bucket; end_bucket = (GT_GTF_COVERAGE_BUCKETS - 1) - tmp; } // scale up // count for global count and make exon coordinates relative to transcript // coordinate range uint64_t hit_start_on_transcript = hit->length; if(hit->strand == REVERSE){ // flip the bucket start if this is a gene on reverse strand // the exon start/end is already flipped // so we just flip the order of the exons here hit_start_on_transcript = (transcript->length - hit_start_on_transcript) - exon_length; } uint64_t trans_start_bucket = ((((double)hit_start_on_transcript / (double)transcript->length) * 100.0) + 0.5) - 1; double scale = (double)exon_length / (double) transcript->length; start_bucket = (scale * (double)start_bucket) + trans_start_bucket; end_bucket = (scale * (double)end_bucket) + trans_start_bucket; if(start_bucket < 100 && end_bucket >= start_bucket && end_bucket < 100){ for(s=start_bucket;s<=end_bucket; s++){ //fprintf(stderr, ">>>GLOBAL COUNT %s : %"PRIu64" S/E: %"PRIu64" %"PRIu64" (%"PRIu64") Exon: %"PRIu64" %"PRIu64"\n", transcript->transcript_id->buffer, s, start, end, map_length, hit->start, hit->end); // count gene body coverage gt_gtf_add_coverage(params->gene_body_coverage, transcript->length, s); // count single transcript if( gene->num_children == 1){ gt_gtf_add_coverage(params->single_transcript_coverage, transcript->length, s); } } } }else{ 
gt_fatal_error_msg("Coverage overlap out of range %"PRIu64" %"PRIu64, start_bucket, end_bucket); } } } gt_vector_delete(hits); } /** * This counts a single continuous block and takes the. Note that we do not perform any checks on * splits/pairs here and simply count for this single continuous map * * @param gt_gtf* gtf the gtf reference * @param gt_map* continuous map block * @param gt_shash* type_counts the type counts, i.e exon/intron etc * @param gt_shash* gene_counts the gene counts with the gene_id's hit by the map. * @param gt_shash* exon_counts the exon counts with the gene_id's hit by the map. * @param gt_shash* junction_counts the number of annotated junctions that are hit per gene * @param float* overlap float pointer that is set to the maximum exon overlap of this block * @return uint64_t num_gene_exons number of unique gene_ids hit by exons */ GT_INLINE uint64_t gt_gtf_count_map_(const gt_gtf* const gtf, gt_map* const map, gt_shash* const type_counts, gt_shash* const gene_counts, gt_shash* const exon_counts, gt_shash* const junction_counts, float* overlap, uint64_t total_map_length, gt_gtf_count_parms* params){ // get coordinates uint64_t start = gt_gtf_get_map_begin(map); uint64_t end = gt_gtf_get_map_end(map); if(start > end){ gt_gtf_count_(type_counts, GT_GTF_TYPE_EMPTY_BLOCK); return 0; // happens for (1)>123*... 
where map starts with trim followed by split } uint64_t map_length = (end-start)+1; // store the search hits and search gt_vector* const hits = gt_vector_new(32, sizeof(gt_gtf_entry*)); gt_gtf_search(gtf, hits, gt_map_get_seq_name(map), start, end, true); // we do a complete local count for this block // and then merge the local count with the global count // to be able to resolve genes/gene_types that are // through wither the pair information or split information, // assuming that the counts for the other pair and/or the other split // are already contained in the globally presented count maps gt_shash* const local_type_counts = gt_shash_new(); gt_shash* local_gene_counts = gt_shash_new(); gt_shash* local_exon_gene_counts = gt_shash_new(); float max_overlap = 0.0; GT_VECTOR_ITERATE(hits, e, i, gt_gtf_entry*){ gt_gtf_entry* hit = *e; // count type gt_gtf_count_(local_type_counts, gt_string_get_string(hit->type)); // count gene id if(hit->gene_id != NULL){ gt_gtf_count_(local_gene_counts, gt_string_get_string(hit->gene_id)); } // count gene_id from exons if(hit->type != NULL && hit->gene_id != NULL && strcmp("exon", hit->type->buffer) == 0){ if(gt_gtf_hits_junction(map, hit)){ gt_gtf_count_(junction_counts, gt_string_get_string(hit->gene_id)); } gt_gtf_count_(local_exon_gene_counts, gt_string_get_string(hit->gene_id)); gt_gtf_count_(exon_counts, gt_string_get_string(hit->gene_id)); int64_t o = ((hit->end < end ? hit-> end : end) - (hit->start > start ? hit->start : start)) + 1; float block_overlap = o <= 0 ? 
0.0 : ((float)o)/((float)(map_length)); if(block_overlap > max_overlap) max_overlap = block_overlap; if(block_overlap > 1.0){ gt_fatal_error_msg("Block overlap > 1.0\nMap : %"PRIu64" %"PRIu64" (%"PRIu64")\nExon :%"PRIu64" %"PRIu64" ", start, end, map_length, hit->start, hit->end); } } } *overlap += (max_overlap * ( (float)map_length / (float) total_map_length)); if(*overlap > 1.000001){ gt_output_map_fprint_map(stderr, map, NULL); fprintf(stderr, "\n"); gt_fatal_error_msg("Block overlap > 1.0 :: %.10f\nMap length : %"PRIu64" Total length: %"PRIu64" max overlap: %.10f", *overlap, map_length, total_map_length, max_overlap); } uint64_t num_gene_hit_exons = gt_shash_get_num_elements(local_exon_gene_counts); // count types and merge them with the global // counts. NOTE that the order matters here, so // we: // 1. check for NA hits where nothing is found // 2. count exon hits // 3. count intron hits // 4. count unknown if the hit was neither an intron nor exon hit // all counting steps are exclusive, thats why the order matters! 
if(gt_vector_get_used(hits) == 0){ // count 'NA' type if we did not hit anything gt_gtf_count_(type_counts, GT_GTF_TYPE_NA); }else if(gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_EXON) > 0){ gt_gtf_count_(type_counts, GT_GTF_TYPE_EXON); }else if(gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_INTRON) > 0){ gt_gtf_count_(type_counts, GT_GTF_TYPE_INTRON); }else{ gt_gtf_count_(type_counts, GT_GTF_TYPE_UNKNOWN); } // make gene counts based on exon hits if we found at least one if(num_gene_hit_exons > 0){ GT_SHASH_BEGIN_KEY_ITERATE(local_exon_gene_counts, key){ gt_gtf_count_(gene_counts, key); }GT_SHASH_END_ITERATE; }else{ // add all gene counts GT_SHASH_BEGIN_KEY_ITERATE(local_gene_counts, key){ gt_gtf_count_(gene_counts, key); }GT_SHASH_END_ITERATE; } // if(params->single_transcript_coverage != NULL){ // gt_gtf_count_coverage_(gtf, map, NULL, params); // } gt_shash_delete(local_gene_counts, true); gt_shash_delete(local_type_counts, true); gt_shash_delete(local_exon_gene_counts, true); gt_vector_delete(hits); return num_gene_hit_exons; } GT_INLINE uint64_t gt_gtf_join_(gt_string* buf, char* base, bool multi_gene, uint64_t blocks){ if(blocks == 0) return 0; uint64_t i = 0; uint64_t len = strlen(base); for(i=0; i<blocks; i++){ gt_string_right_append_string(buf, base, len); if(multi_gene){ gt_string_right_append_string(buf, "_mg", 3); } if(i<blocks-1){ gt_string_append_char(buf, '^'); } } return blocks; } GT_INLINE double gt_gtf_count_get_sum_(gt_shash* table){ double v = 0; GT_SHASH_BEGIN_ELEMENT_ITERATE(table, value, uint64_t){ v += *value; }GT_SHASH_END_ITERATE; return v; } GT_INLINE uint64_t gt_gtf_get_map_length(gt_map* const maps){ uint64_t map_length = 0; GT_MAP_ITERATE(maps, map){ // get coordinates uint64_t start = gt_gtf_get_map_begin(map); uint64_t end = gt_gtf_get_map_end(map); if(start > end){ continue; // happens for wired thigs like (1)>231*... 
where the map start with a trim followed by a split } map_length += (end-start)+1; } return map_length; } /** * Count a map. This respects split maps and unifies gene_id's based on the * the split. If the both sides of the split match multiple gene_ids but there is * a common gene_id on both side, only that id is counted. Otherwise a count is set * for all gene_ids. * In addition to the counts, if a pattern string is given, it is filled with the type * pattern with respect to split maps. For example: * * exon -> exon * exon and intron (split map) -> exon^intron * exon in multiple genes -> exon_mg * * The function returns the number of gene_ids hit by the map. * * The first map has to be specified, but the second one is options. If it is set, * the second map block is also checked and counted. * * * @param gt_gtf* gtf the gtf reference * @param gt_map* map1 the first map * @param gt_map* map2 the scond map * @param gt_shash* type_counts the type counts * @param gt_shash* gene_counts the gene counts * @param gt_string pattern the pattern string filled based on the types * @return uint64_t num_gene_hits the number of gene_ids hit by the map */ GT_INLINE uint64_t gt_gtf_count_map(const gt_gtf* const gtf, gt_map* const map1, gt_map* const map2, gt_shash* const pattern_counts, gt_shash* const gene_counts, gt_string* pattern, gt_gtf_count_parms* params){ // clear patterns if(pattern != NULL)gt_string_clear(pattern); // get number of blocks and ensure we have at least one uint64_t blocks = gt_map_get_num_blocks(map1); if(map2 != NULL){ blocks += gt_map_get_num_blocks(map2); } if(blocks == 0) return 0; // local counts for all blocks // and store the number of multi gene exon hits for each block // in addition we create the base pattern per block here gt_shash* const local_type_counts = gt_shash_new(); gt_shash* local_gene_counts = gt_shash_new(); gt_shash* local_gene_counts_1 = gt_shash_new(); gt_shash* local_gene_counts_2 = gt_shash_new(); gt_shash* local_junction_counts_1 
= gt_shash_new(); gt_shash* local_junction_counts_2 = gt_shash_new(); gt_shash* local_exon_counts_1 = gt_shash_new(); gt_shash* local_exon_counts_2 = gt_shash_new(); uint64_t* const local_exon_gene_hits = malloc(blocks * sizeof(uint64_t)); gt_vector* const local_type_patterns = gt_vector_new(2, sizeof(char*)); uint64_t exons, introns, unknown, not_annotated, empty_blocks; exons = introns = unknown = not_annotated = empty_blocks =0; uint64_t i = 0; float block_1_overlap = 0.0; float block_2_overlap = 0.0; uint64_t map_1_length = gt_gtf_get_map_length(map1); GT_MAP_ITERATE(map1, map_block){ local_exon_gene_hits[i++] = gt_gtf_count_map_(gtf, map_block, local_type_counts, local_gene_counts_1, local_exon_counts_1,local_junction_counts_1, &block_1_overlap, map_1_length, params); uint64_t _exons = exons + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_EXON); uint64_t _introns = introns + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_INTRON); uint64_t _unknown = unknown + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_UNKNOWN); uint64_t _not_annotated = not_annotated + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_NA); uint64_t _empty_block = empty_blocks + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_EMPTY_BLOCK); // add the pattern string based in the count value that changed if(_exons > exons) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_EXON, char*); if(_introns > introns) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_INTRON, char*); if(_unknown > unknown) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_UNKNOWN, char*); if(_not_annotated > not_annotated) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_NA, char*); if(_empty_block > empty_blocks) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_EMPTY_BLOCK, char*); exons = _exons; introns = _introns; unknown = _unknown; not_annotated = _not_annotated; empty_blocks = _empty_block; } // if we hit more than one gene, // try to unify the gene by checking the other blocks for // overlaps. 
If we find genes that are covered by all the // blocks we count only them. if(gt_shash_get_num_elements(local_gene_counts_1) > 1){ gt_shash* merged_counts = gt_shash_new(); uint64_t blocks1 = gt_map_get_num_blocks(map1); // search for the best junction hit uint64_t hits_junctions = 0; GT_SHASH_BEGIN_ITERATE(local_gene_counts_1, gene_id, count, uint64_t){ uint64_t m = gt_gtf_get_count_(local_junction_counts_1,gene_id); if(*count == blocks1 && m > 0){ if(m > hits_junctions) hits_junctions = m; } }GT_SHASH_END_ITERATE; GT_SHASH_BEGIN_ITERATE(local_gene_counts_1, gene_id, count, uint64_t){ if(*count == blocks1 && (hits_junctions == 0 || gt_gtf_get_count_(local_junction_counts_1,gene_id) == hits_junctions)){ gt_gtf_count_sum_(merged_counts, gene_id, blocks1); } }GT_SHASH_END_ITERATE; // if we found some unique ids that are covered by both // we flip over to the merged counts gt_shash_delete(local_gene_counts_1, true); local_gene_counts_1 = merged_counts; // we fliped so we reset the exon gene hit counts to ones as well if(gt_shash_get_num_elements(merged_counts) > 0){ for(i=0;i<blocks1;i++){ if(local_exon_gene_hits[i] > 0) local_exon_gene_hits[i] = 1; } } } if(map2 != NULL){ uint64_t map_2_length = gt_gtf_get_map_length(map2); GT_MAP_ITERATE(map2, map_block){ local_exon_gene_hits[i++] = gt_gtf_count_map_(gtf, map_block, local_type_counts, local_gene_counts_2, local_exon_counts_2, local_junction_counts_2, &block_2_overlap, map_2_length, params); uint64_t _exons = exons + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_EXON); uint64_t _introns = introns + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_INTRON); uint64_t _unknown = unknown + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_UNKNOWN); uint64_t _not_annotated = not_annotated + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_NA); uint64_t _empty_block = empty_blocks + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_EMPTY_BLOCK); // add the pattern string based in the count value that changed if(_exons > 
exons) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_EXON, char*); if(_introns > introns) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_INTRON, char*); if(_unknown > unknown) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_UNKNOWN, char*); if(_not_annotated > not_annotated) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_NA, char*); if(_empty_block > empty_blocks) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_EMPTY_BLOCK, char*); exons = _exons; introns = _introns; unknown = _unknown; not_annotated = _not_annotated; empty_blocks = _empty_block; } // unify the gene counts based on the number of blocks. // the gene_counts are reduced to either the ones that are found in // all blocks or they are kept as they are if(gt_shash_get_num_elements(local_gene_counts_2) > 1){ gt_shash* merged_counts = gt_shash_new(); uint64_t blocks2 = gt_map_get_num_blocks(map2); // search for the best junction hit uint64_t hits_junctions = 0; GT_SHASH_BEGIN_ITERATE(local_gene_counts_2, gene_id, count, uint64_t){ uint64_t m = gt_gtf_get_count_(local_junction_counts_2,gene_id); if(*count == blocks2 && m > 0){ if(m > hits_junctions) hits_junctions = m; } }GT_SHASH_END_ITERATE; GT_SHASH_BEGIN_ITERATE(local_gene_counts_2, gene_id, count, uint64_t){ if(*count == blocks2 && (hits_junctions == 0 || gt_gtf_get_count_(local_junction_counts_2,gene_id) == hits_junctions)){ gt_gtf_count_sum_(merged_counts, gene_id, blocks2); } }GT_SHASH_END_ITERATE; // if we found some unique ids that are covered by both // we flip over to the merged counts gt_shash_delete(local_gene_counts_2, true); local_gene_counts_2 = merged_counts; if(gt_shash_get_num_elements(merged_counts) > 0){ uint64_t blocks1 = gt_map_get_num_blocks(map1); // we flipped so we reset the exon gene hit counts to ones as well for(i=blocks1;i<(blocks1+blocks2);i++){ if(local_exon_gene_hits[i] > 0) local_exon_gene_hits[i] = 1; } } } } /** * Merge everything into a single merged map */ gt_shash* merged_counts = gt_shash_new(); 
uint64_t blocks1 = gt_map_get_num_blocks(map1); uint64_t blocks2 = 0; if(map2 != NULL){ blocks2 = gt_map_get_num_blocks(map2); } float overlap = (block_1_overlap + block_2_overlap) / (float) (map2==NULL?1.0:2.0); uint64_t map2_hits = map2 != NULL ? gt_shash_get_num_elements(local_gene_counts_2) : 0; GT_SHASH_BEGIN_ITERATE(local_gene_counts_1, gene_id, count, uint64_t){ if( (gt_shash_is_contained(local_gene_counts_2, gene_id) || map2_hits == 0) && (params == NULL || params->exon_overlap <= 0.0 || overlap >= params->exon_overlap)){ uint64_t nv =*count + gt_gtf_get_count_(local_gene_counts_2, gene_id); gt_gtf_count_sum_(merged_counts, gene_id, nv); if(overlap > 1.000001){ gt_fatal_error_msg("Exon Overlap %.10f > 1.0 from %.10f %.10f!", overlap, block_1_overlap, block_2_overlap); } } }GT_SHASH_END_ITERATE; uint64_t unique_genes_between_pairs = gt_shash_get_num_elements(merged_counts); // we found unique genes through the pair, so we can use // the merged map to do the final counts if(unique_genes_between_pairs > 0){ // we flip the exon gene hit counts in case if(unique_genes_between_pairs == 1){ for(i=0;i<blocks;i++){ if(local_exon_gene_hits[i] > 0) local_exon_gene_hits[i] = 1; } } // merge the gene counts weighted to a single map GT_SHASH_BEGIN_KEY_ITERATE(merged_counts, gene_id){ double v = 0.0; if(gt_shash_is_contained(local_exon_counts_1, gene_id) || ((params == NULL || params->exon_overlap <= 0.0) && gt_shash_is_contained(local_gene_counts_1, gene_id))){ v+= 1.0; } if(gt_shash_is_contained(local_exon_counts_2, gene_id) || ((params == NULL || params->exon_overlap <= 0.0 )&& gt_shash_is_contained(local_gene_counts_2, gene_id))){ v+=1.0; } if(v > 0.0) gt_gtf_count_weight_(local_gene_counts, gene_id, v); }GT_SHASH_END_ITERATE; } // get the number of hits of this map uint64_t num_gene_hits = gt_shash_get_num_elements(local_gene_counts); if(pattern_counts != NULL){ // now iterate the blocks and construct final pattern for(i=0; i<blocks; i++){ char* p = 
*(gt_vector_get_elm(local_type_patterns, i, char*)); if(strcmp(p, GT_GTF_TYPE_EMPTY_BLOCK) == 0) continue; // for exons check that in case we have a single gene hit, its exons, in case of a multi-gene hit, append _mg if // the multi gene hit comes from the current block gt_gtf_join_(pattern, p, (strcmp("exon",p) == 0) ? ((num_gene_hits == 1) ? false : (local_exon_gene_hits[i] > 1)) : false, 1); // add paired end spacer if(map2 != NULL && i == (blocks1-1)){ gt_string_append_char(pattern, '|'); }else{ if(i<blocks-1){ gt_string_append_char(pattern, '^'); } } } gt_string_append_eos(pattern); // count global type based on the constructed pattern gt_gtf_count_(pattern_counts, gt_string_get_string(pattern)); } if(params != NULL && params->num_maps == 1){ // count junctions for single mapping reads if(blocks1 > 1){ params->num_junctions += blocks1 - 1; params->num_annotated_junctions += gt_gtf_count_junction(gtf, map1); } if(blocks2 > 1){ params->num_junctions += blocks2 - 1; params->num_annotated_junctions += gt_gtf_count_junction(gtf, map2); } } if(gene_counts != NULL){ // count the gene ids GT_SHASH_BEGIN_ITERATE(local_gene_counts, key, e, double){ if(gt_shash_is_contained(gene_counts, key)){ double current = gt_gtf_get_count_weight(gene_counts, key); if(current < *e){ // set to max count gt_gtf_count_weight_(gene_counts, key, (*e)-current); } }else{ gt_gtf_count_weight_(gene_counts, key, *e); } }GT_SHASH_END_ITERATE; } if(params != NULL && params->single_transcript_coverage != NULL){ // do coverage counts for merged genes GT_SHASH_BEGIN_KEY_ITERATE(local_gene_counts, key){ // count map1 GT_MAP_ITERATE(map1, map_block){ gt_gtf_count_coverage_(gtf, map_block, key, params); } if(map2 != NULL){ GT_MAP_ITERATE(map2, map_block){ gt_gtf_count_coverage_(gtf, map_block, key, params); } } }GT_SHASH_END_ITERATE; } // cleanup gt_vector_delete(local_type_patterns); gt_shash_delete(local_gene_counts, true); // cleanup gt_shash_delete(local_gene_counts_1, true); 
gt_shash_delete(local_gene_counts_2, true); gt_shash_delete(local_exon_counts_1, true); gt_shash_delete(local_exon_counts_2, true); gt_shash_delete(local_junction_counts_1, true); gt_shash_delete(local_junction_counts_2, true); gt_shash_delete(local_type_counts, true); gt_shash_delete(merged_counts, true); free(local_exon_gene_hits); return gt_shash_get_num_elements(gene_counts); } GT_INLINE uint64_t gt_gtf_count_alignment(gt_gtf* const gtf, gt_alignment* const alignment, gt_shash* const pattern_count, gt_shash* const gene_counts, gt_gtf_count_parms* params){ uint64_t hits = 0; gt_string* pattern = gt_string_new(16); params->num_maps = gt_alignment_get_num_maps(alignment); GT_ALIGNMENT_ITERATE(alignment,map) { hits = gt_gtf_count_map(gtf, map, NULL, pattern_count, gene_counts, pattern, params); gt_string_clear(pattern); } gt_string_delete(pattern); return hits; } GT_INLINE uint64_t gt_gtf_count_template(gt_gtf* const gtf, gt_template* const template, gt_shash* const pattern_count, gt_shash* const gene_counts, gt_gtf_count_parms* params){ uint64_t hits = 0; gt_string* pattern = gt_string_new(16); params->num_maps = gt_template_get_num_mmaps(template); GT_TEMPLATE_ITERATE_MMAP__ATTR_(template,mmap,mmap_attr) { hits = gt_gtf_count_map(gtf, mmap[0], mmap[1], pattern_count, gene_counts, pattern, params); gt_string_clear(pattern); } gt_string_delete(pattern); return hits; } GT_INLINE void gt_gtf_search_map(const gt_gtf* const gtf, gt_vector* const hits, gt_map* const map, const bool clean_target){ GT_MAP_ITERATE(map, block){ uint64_t start = gt_map_get_begin_mapping_position(map); uint64_t end = gt_map_get_end_mapping_position(map); gt_gtf_search(gtf, hits, gt_map_get_seq_name(map), start, end, clean_target); } } GT_INLINE void gt_gtf_search_alignment(const gt_gtf* const gtf, gt_vector* const hits, gt_alignment* const alignment){ GT_ALIGNMENT_ITERATE(alignment, map){ gt_gtf_search_map(gtf, hits, map, true); } } GT_INLINE void gt_gtf_search_template(const gt_gtf* const 
gtf, gt_vector* const hits, gt_template* const template){ GT_TEMPLATE_IF_REDUCES_TO_ALIGNMENT(template, alignment){ gt_gtf_search_alignment(gtf,hits, alignment); }GT_TEMPLATE_END_REDUCTION__RETURN; gt_gtf_search_alignment(gtf,hits, gt_template_get_block(template, 0)); gt_gtf_search_alignment(gtf,hits, gt_template_get_block(template, 1)); }
mandelbrot.h
//============================================================================== // // Copyright 2018 The InsideLoop Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // //============================================================================== //////////////////////////////////////////////////////////////////////////////// // The Mandelbrot set is the set of all complex numbers c such that the sequence // defined by // - z_0 = 0 // - For all n in N, z_(n+1) = z_n^2 + c // is bounded. One can prove that if there exists a n in N such that |z_n| > 2, // the sequence (z_n) is not bounded. // Therefore, given c in C, we compute z_0, ..., z_n up to the first n such // that |z_n| > 2 or n >= depth (we use a depth of 50 here) and we store this // value n. If this value is < depth, we know for sure that c is not in the // Mandelbrot set. If this value is equal to depth, it is likely that it is // in the Mandelbrot set. // // The following program computes the values n for every z = x + i y with // x_left <= x <= x_right and y_bottom <= y <= y_top. //////////////////////////////////////////////////////////////////////////////// // Opportunities for parallelization: // - Thread level // One can assign different chunks of the rectangle to different cores. // Usually, the first core computes points for y_top >= y >= y_1, the second // core computes points for y_1 >= y >= y_2, etc. 
// - Vector level
//   To compute the value n for a given point z, a while loop has to be made.
//   The number of times this loop is executed depends upon z. Therefore, there
//   is no easy vectorization for this kind of loop. However, the Intel compiler
//   can vectorize such a loop: suppose that we have 4 points and we want to
//   compute the values n for each of these points: we iterate the while loop
//   until all the points satisfy the exit condition, but we store the values
//   for which the exit condition has been satisfied for all the points. All
//   this transformation is handled explicitly by the compiler using a
//   #pragma omp simd (in OpenMP) before the x-loop. Note that close points
//   should generally have close values for n. Therefore, the extra amount of
//   work done by the vectorized loop should not be a penalty.
////////////////////////////////////////////////////////////////////////////////
// To run, compile with -std=c++11 -Ofast -xHost -openmp -DNDEBUG
//
// - Load imbalance:
//   Without the schedule(dynamic) clause for the OpenMP threads, the fastest
//   is TBB for threads (with OpenMP for vectorization). It is faster
//   than plain OpenMP for threads (and vectorization) without schedule
//   clause because there is load imbalance in the outer for loop: for y
//   close to y_top or y_bottom, the complex number z goes out of the circle
//   of radius 2 very quickly. We get the following timings:
//   - 275 milliseconds for TBB/OpenMP
//   - 351 milliseconds for OpenMP (without schedule clause)/OpenMP
//   But if we change the OpenMP clause to (#pragma omp parallel for
//   schedule(dynamic)), the runtime comes down to 260 milliseconds, a bit
//   faster than TBB/OpenMP.
#ifndef IL_MANDELBROT_H #define IL_MANDELBROT_H #include <iostream> #include <il/Array2D.h> #include <il/benchmark/tools/memory/memory.h> #include <il/benchmark/tools/timer/Timer.h> #ifdef IL_TBB #include <tbb/tbb.h> #endif #ifdef IL_CILK #include <cilk/cilk.h> #endif namespace il { // const float x_left = -1.0; // const float x_right = 2.0; // const float y_bottom = -1.5; // const float y_top = 1.5; // // const il::int_t nx = 10000; // const il::int_t ny = 10000; // // const int depth = 50; // Mandelbrot set: No threads, no vectorization // double time_mandelbrot_serial_serial(float x_left, float x_right, float y_bottom, float y_top, il::int_t depth, il::int_t nx, il::int_t ny, bool warm_cache) { const float dx{(x_right - x_left) / nx}; const float dy{(y_top - y_bottom) / ny}; il::Array2D<int> v{nx, ny}; il::commit_memory(il::io, v); if (warm_cache) { il::warm_cache(il::io, v); } il::Timer timer{}; for (il::int_t ky = 0; ky < ny; ++ky) { float y{y_top - ky * dy}; for (il::int_t kx = 0; kx < nx; ++kx) { float x{x_left + kx * dx}; float z_re = 0.0; float z_im = 0.0; int count = 0; while (count < depth) { if (z_re * z_re + z_im * z_im > 4.0) { break; } float old_z_re{z_re}; z_re = z_re * z_re - z_im * z_im + x; z_im = 2 * old_z_re * z_im + y; ++count; } v(kx, ky) = count; } } timer.Stop(); il::escape(v.data()); return timer.elapsed(); } // Mandelbrot set: OpenMP for threads, no vectorisation // double time_mandelbrot_openmp_serial(float x_left, float x_right, float y_bottom, float y_top, il::int_t depth, il::int_t nx, il::int_t ny, bool warm_cache) { const float dx{(x_right - x_left) / nx}; const float dy{(y_top - y_bottom) / ny}; il::Array2D<int> v{nx, ny}; il::commit_memory(il::io, v); if (warm_cache) { il::warm_cache(il::io, v); } il::Timer timer{}; #pragma omp parallel for schedule(dynamic) for (il::int_t ky = 0; ky < ny; ++ky) { auto y = float{y_top - ky * dy}; for (il::int_t kx = 0; kx < nx; ++kx) { float x{x_left + kx * dx}; float z_re = 0.0; float z_im = 0.0; 
int count = 0; while (count < depth) { if (z_re * z_re + z_im * z_im > 4.0) { break; } float old_z_re{z_re}; z_re = z_re * z_re - z_im * z_im + x; z_im = 2 * old_z_re * z_im + y; ++count; } v(kx, ky) = count; } } timer.Stop(); il::escape(v.data()); return timer.elapsed(); } // Mandelbrot set: OpenMP for threads, OpenMP for vectorisation // double time_mandelbrot_openmp_openmp(float x_left, float x_right, float y_bottom, float y_top, il::int_t depth, il::int_t nx, il::int_t ny, bool warm_cache) { const float dx{(x_right - x_left) / nx}; const float dy{(y_top - y_bottom) / ny}; il::Array2D<int> v{nx, ny}; il::commit_memory(il::io, v); if (warm_cache) { il::warm_cache(il::io, v); } il::Timer timer{}; #pragma omp parallel for schedule(dynamic) for (il::int_t ky = 0; ky < ny; ++ky) { float y{y_top - ky * dy}; #pragma omp simd for (il::int_t kx = 0; kx < nx; ++kx) { float x{x_left + kx * dx}; float z_re = 0.0; float z_im = 0.0; int count = 0; while (count < depth) { if (z_re * z_re + z_im * z_im > 4.0) { break; } float old_z_re{z_re}; z_re = z_re * z_re - z_im * z_im + x; z_im = 2 * old_z_re * z_im + y; ++count; } v(kx, ky) = count; } } timer.Stop(); il::escape(v.data()); return timer.elapsed(); } // Mandelbrot set: TBB for threads, no vectorisation // #ifdef IL_TBB double time_mandelbrot_tbb_serial(float x_left, float x_right, float y_bottom, float y_top, il::int_t depth, il::int_t nx, il::int_t ny, bool warm_cache) { const float dx{(x_right - x_left) / nx}; const float dy{(y_top - y_bottom) / ny}; il::Array2D<int> v{nx, ny}; il::commit_memory(il::io, v); if (warm_cache) { il::warm_cache(il::io, v); } il::SimpleTimer timer{}; tbb::parallel_for(tbb::blocked_range<il::int_t>(0, ny), [=, &v](const tbb::blocked_range<il::int_t>& range) { for (il::int_t ky{range.begin()}; ky < range.end(); ++ky) { float y{y_top - ky * dy}; for (il::int_t kx = 0; kx < nx; ++kx) { float x{x_left + kx * dx}; float z_re = 0.0; float z_im = 0.0; int count = 0; while (count < depth) { if (z_re * 
z_re + z_im * z_im > 4.0) { break; } float old_z_re{z_re}; z_re = z_re * z_re - z_im * z_im + x; z_im = 2 * old_z_re * z_im + y; ++count; } v(kx, ky) = count; } } }); timer.Stop(); il::escape(v.data()); return timer.elapsed(); } #endif // Mandelbrot set: TBB for threads, OpenMP for vectorisation // #ifdef IL_TBB double time_mandelbrot_tbb_openmp(float x_left, float x_right, float y_bottom, float y_top, il::int_t depth, il::int_t nx, il::int_t ny, bool warm_cache) { const float dx{(x_right - x_left) / nx}; const float dy{(y_top - y_bottom) / ny}; il::Array2D<int> v{nx, ny}; il::commit_memory(il::io, v); if (warm_cache) { il::warm_cache(il::io, v); } il::SimpleTimer timer{}; tbb::parallel_for(tbb::blocked_range<il::int_t>(0, ny), [=, &v](const tbb::blocked_range<il::int_t>& range) { for (il::int_t ky{range.begin()}; ky < range.end(); ++ky) { float y{y_top - ky * dy}; #pragma omp simd for (il::int_t kx = 0; kx < nx; ++kx) { float x{x_left + kx * dx}; float z_re = 0.0; float z_im = 0.0; int count = 0; while (count < depth) { if (z_re * z_re + z_im * z_im > 4.0) { break; } float old_z_re{z_re}; z_re = z_re * z_re - z_im * z_im + x; z_im = 2 * old_z_re * z_im + y; ++count; } v(kx, ky) = count; } } }); timer.Stop(); il::escape(v.data()); return timer.elapsed(); } #endif // Mandelbrot set: Cilk for threads, no vectorisation // #ifdef IL_CILK double time_mandelbrot_cilk_serial(float x_left, float x_right, float y_bottom, float y_top, il::int_t depth, il::int_t nx, il::int_t ny, bool warm_cache) { const float dx{(x_right - x_left) / nx}; const float dy{(y_top - y_bottom) / ny}; il::Array2D<int> v{nx, ny}; il::commit_memory(il::io, v); if (warm_cache) { il::warm_cache(il::io, v); } il::SimpleTimer timer{}; cilk_for(il::int_t ky = 0; ky < ny; ++ky) { float y{y_top - ky * dy}; for (il::int_t kx = 0; kx < nx; ++kx) { float x{x_left + kx * dx}; float z_re = 0.0; float z_im = 0.0; int count = 0; while (count < depth) { if (z_re * z_re + z_im * z_im > 4.0) { break; } float 
old_z_re{z_re}; z_re = z_re * z_re - z_im * z_im + x; z_im = 2 * old_z_re * z_im + y; ++count; } v(kx, ky) = count; } } timer.Stop(); il::escape(v.data()); return timer.elapsed(); } #endif // Mandelbrot set: Cilk for threads, OpenMP for vectorisation // #ifdef IL_CILK double time_mandelbrot_cilk_openmp(float x_left, float x_right, float y_bottom, float y_top, il::int_t depth, il::int_t nx, il::int_t ny, bool warm_cache) { const float dx{(x_right - x_left) / nx}; const float dy{(y_top - y_bottom) / ny}; il::Array2D<int> v{nx, ny}; il::commit_memory(il::io, v); if (warm_cache) { il::warm_cache(il::io, v); } il::SimpleTimer timer{}; cilk_for(il::int_t ky = 0; ky < ny; ++ky) { float y{y_top - ky * dy}; #pragma omp simd for (il::int_t kx = 0; kx < nx; ++kx) { float x{x_left + kx * dx}; float z_re = 0.0; float z_im = 0.0; int count = 0; while (count < depth) { if (z_re * z_re + z_im * z_im > 4.0) { break; } float old_z_re{z_re}; z_re = z_re * z_re - z_im * z_im + x; z_im = 2 * old_z_re * z_im + y; ++count; } v(kx, ky) = count; } } timer.Stop(); il::escape(v.data()); return timer.elapsed(); } #endif // Mandelbrot set: Cilk for threads, Cilk for vectorisation // #ifdef IL_CILK double time_mandelbrot_cilk_cilk(float x_left, float x_right, float y_bottom, float y_top, il::int_t depth, il::int_t nx, il::int_t ny, bool warm_cache) { const float dx{(x_right - x_left) / nx}; const float dy{(y_top - y_bottom) / ny}; il::Array2D<int> v{nx, ny}; il::commit_memory(il::io, v); if (warm_cache) { il::warm_cache(il::io, v); } il::SimpleTimer timer{}; cilk_for(il::int_t ky = 0; ky < ny; ++ky) { float y{y_top - ky * dy}; #pragma simd for (il::int_t kx = 0; kx < nx; ++kx) { float x{x_left + kx * dx}; float z_re = 0.0; float z_im = 0.0; int count = 0; while (count < depth) { if (z_re * z_re + z_im * z_im > 4.0) { break; } float old_z_re{z_re}; z_re = z_re * z_re - z_im * z_im + x; z_im = 2 * old_z_re * z_im + y; ++count; } v(kx, ky) = count; x += dx; } y -= dy; } timer.Stop(); 
il::escape(v.data()); return timer.elapsed(); } #endif #endif // IL_MANDELBROT_H
GB_unop__identity_fc32_bool.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_fc32_bool)
// op(A') function:  GB (_unop_tran__identity_fc32_bool)

// C type:   GxB_FC32_t
// A type:   bool
// cast:     GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop:  cij = aij

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting: bool -> single-precision complex (imaginary part is zero)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    bool aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (false here: bool is typecast to GxB_FC32_t, so a plain memcpy cannot be
// used and the cast loop below is taken instead)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Casts each entry of the bool array Ax into the complex array Cx, in
// parallel over nthreads OpenMP threads.  Returns GrB_NO_VALUE when this
// kernel is compiled out via GB_DISABLE (caller falls back to the generic
// kernel).

GrB_Info GB (_unop_apply__identity_fc32_bool)
(
    GxB_FC32_t *Cx,         // Cx and Ax may be aliased
    const bool *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        // full/sparse case: every position 0..anz-1 holds an entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (bool), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            bool aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        // (only positions flagged in Ab are live entries)
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            bool aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body is supplied by GB_unop_transpose.c, which expands
// the GB_CAST_OP macro defined above for each entry.

GrB_Info GB (_unop_tran__identity_fc32_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
omp-matmat-schedule.c
/***************************************************************************** Example : omp-matmat-schedule.c Objective : OpenMP program to demonstrate the use of OpenMP schedule clause ( LOOP SCHEDULING & PARTITIONING ). Threads share the iteration of the loop according to the chunk size. This example demonstrates the use of PARALLEL for Directive and Schedule clauses Input : a) Number of threads b) Chunk Size c) size of Matrices(i.e Size of Matrix A and Matrix B) ie in terms of CLASS where CLASS A :1024; CLASS B: 2048 and CLASS C: 4096 Output : Each thread computes the matrix matrix multiplication and master prints the time taken for the computation while using different method and different scheduling. Created : Aug 2011. Author : RarchK *********************************************************************************/ #include <stdio.h> #include <sys/time.h> #include <omp.h> #include <stdlib.h> /* Main Program */ main(int argc,char **argv) { int i,j, k,Noofthreads; float **Matrix_A, **Matrix_B, **Result, **Checkoutput; struct timeval TimeValue_Start; struct timezone TimeZone_Start; struct timeval TimeValue_Final; struct timezone TimeZone_Final; long time_start, time_end; double time_overhead1,time_overhead2,time_overhead3,time_overhead4; int chunksize; int CLASS_SIZE,MATRIX_SIZE; char *CLASS; printf("\n\t\t---------------------------------------------------------------------------"); printf("\n\t\t Email : RarchK"); printf("\n\t\t---------------------------------------------------------------------------"); printf("\n\t\t Objective : Loop Scheduling and Partitioning (Dense Matrix Computation) \n "); printf("\n\t\t Matrix into Matrix Multiplication using "); printf("\n\t\t OpenMP PARALLEL for directive and Schedule clause"); printf("\n\t\t..........................................................................\n"); /* Checking for command line arguments */ if( argc != 4 ){ printf("\t\t Very Few Arguments\n "); printf("\t\t Syntax : exec <Threads> <chunksize> 
<CLASS>\n"); printf("\t\t Where : CLASS = A or B or C\n"); exit(-1); } Noofthreads=atoi(argv[1]); if ((Noofthreads!=1) && (Noofthreads!=2) && (Noofthreads!=4) && (Noofthreads!=8) && (Noofthreads!= 16) ) { printf("\n Number of threads should be 1,2,4,8 or 16 for the execution of program. \n\n"); exit(-1); } CLASS = argv[3]; if( strcmp(CLASS, "A" )==0){ CLASS_SIZE = 1024; } else if( strcmp(CLASS, "B" )==0){ CLASS_SIZE = 2048; } else if( strcmp(CLASS, "C" )==0){ CLASS_SIZE = 4096; } else { printf("\n\t\t Error : value of CLASS must be A or B or C \n"); exit(-1); } chunksize=atoi(argv[2]); MATRIX_SIZE = CLASS_SIZE ; printf("\n\t\t Threads : %d ",Noofthreads); printf("\n\t\t Chunk Size : %d ",chunksize); printf("\n\t\t Matrix A Size : %d ",MATRIX_SIZE ); printf("\n\t\t Matrix B Size : %d ",MATRIX_SIZE); /* Dynamic memory allocation and initialization of Matrix_A Elements */ Matrix_A = (float **) malloc(sizeof(float *) * MATRIX_SIZE); for (i = 0; i < MATRIX_SIZE ; i++) { Matrix_A[i] = (float *) malloc(sizeof(float) * MATRIX_SIZE); for (j = 0; j < MATRIX_SIZE; j++) Matrix_A[i][j] = i + j; } /* Matrix_B Elements */ Matrix_B = (float **) malloc(sizeof(float *) * MATRIX_SIZE); for (i = 0; i < MATRIX_SIZE; i++) { Matrix_B[i] = (float *) malloc(sizeof(float) * MATRIX_SIZE); for (j = 0; j < MATRIX_SIZE; j++) Matrix_B[i][j] = i + j; } /* Dynamic Memory Allocation */ Result = (float **) malloc(sizeof(float *) * MATRIX_SIZE); Checkoutput = (float **) malloc(sizeof(float *) * MATRIX_SIZE); for (i = 0; i < MATRIX_SIZE; i++) { Result[i] = (float *) malloc(sizeof(float) * MATRIX_SIZE); Checkoutput[i] = (float *) malloc(sizeof(float) * MATRIX_SIZE); for (j = 0; j < MATRIX_SIZE; j++) { Result[i][j] = 0.0; Checkoutput[i][j] = 0.0; } } /* ................................................................................. This section of Parallelized the for loop without shedule clause. 
....................................................................................*/ gettimeofday(&TimeValue_Start, &TimeZone_Start); omp_set_num_threads(Noofthreads); /* OpenMP Parallel For Directive : Fork a team of threads giving them their own copies of variables */ #pragma omp parallel for private(j,k) for (i = 0; i < MATRIX_SIZE; i = i + 1) for (j = 0; j < MATRIX_SIZE; j = j + 1) for (k = 0; k < MATRIX_SIZE; k = k + 1) Result[i][j] = Result[i][j] + Matrix_A[i][k] * Matrix_B[k][j]; /* All threads join master thread and disband */ gettimeofday(&TimeValue_Final, &TimeZone_Final); /* Calculate the time taken for the computation */ time_start = TimeValue_Start.tv_sec * 1000000 + TimeValue_Start.tv_usec; time_end = TimeValue_Final.tv_sec * 1000000 + TimeValue_Final.tv_usec; time_overhead1 = (time_end - time_start)/1000000.0; printf("\n\n\t\t Matrix into Matrix Multiplication using Parallel for directive without schedule clause ...Done "); /*........................................................................................ This section of Parallelized the for loop with schedule (static,chunksize) clause. Threads share the iteration of the loop according to the chunk size. Loop iterations are divided into pieces of size chunk and then statically assigned to threads. 
........................................................................................*/ gettimeofday(&TimeValue_Start, &TimeZone_Start); /* OpenMP Parallel For Directive : Fork a team of threads giving them their own copies of variables */ #pragma omp parallel for private(j,k) schedule(static,chunksize) for (i = 0; i < MATRIX_SIZE; i = i + 1) for (j = 0; j < MATRIX_SIZE; j = j + 1) for (k = 0; k < MATRIX_SIZE; k = k + 1) Result[i][j] = Result[i][j] + Matrix_A[i][k] * Matrix_B[k][j]; /* All threads join master thread and disband */ gettimeofday(&TimeValue_Final, &TimeZone_Final); /* Calculate the time taken for the computation */ time_start = TimeValue_Start.tv_sec * 1000000 + TimeValue_Start.tv_usec; time_end = TimeValue_Final.tv_sec * 1000000 + TimeValue_Final.tv_usec; time_overhead2 = (time_end - time_start)/1000000.0; printf("\n\t\t Matrix into Matrix Multiplication using Parallel for directive with schedule(static,chunksize) clause ......Done"); /*........................................................................................ This section of Parallelized the for loop with schedule (dynamic,chunksize) clause. Loop iterations are divided into pieces of size chunk, and dynamically scheduled among the threads; when a thread finishes one chunk, it is dynamically assigned another. 
........................................................................................*/ gettimeofday(&TimeValue_Start, &TimeZone_Start); /* OpenMP Parallel For Directive : Fork a team of threads giving them their own copies of variables */ #pragma omp parallel for private(j,k) schedule(dynamic,chunksize) for (i = 0; i < MATRIX_SIZE; i = i + 1) for (j = 0; j < MATRIX_SIZE; j = j + 1) for (k = 0; k < MATRIX_SIZE; k = k + 1) Result[i][j] = Result[i][j] + Matrix_A[i][k] * Matrix_B[k][j]; /* All threads join master thread and disband */ gettimeofday(&TimeValue_Final, &TimeZone_Final); /* Calculate the time taken for the computation */ time_start = TimeValue_Start.tv_sec * 1000000 + TimeValue_Start.tv_usec; time_end = TimeValue_Final.tv_sec * 1000000 + TimeValue_Final.tv_usec; time_overhead3 = (time_end - time_start)/1000000.0; printf("\n\t\t Matrix into Matrix Multiplication using Parallel for directive with schedule(dynamic,chunksize) clause ......Done "); /*........................................................................................ This section serially do the Matrix Matrix computation . 
........................................................................................*/ gettimeofday(&TimeValue_Start, &TimeZone_Start); /* Serial Computation */ for (i = 0; i < MATRIX_SIZE; i = i + 1) for (j = 0; j < MATRIX_SIZE; j = j + 1) for (k = 0; k < MATRIX_SIZE; k = k + 1) Checkoutput[i][j] = Checkoutput[i][j] + Matrix_A[i][k] * Matrix_B[k][j]; gettimeofday(&TimeValue_Final, &TimeZone_Final); /* Calculate the time taken for the computation */ time_start = TimeValue_Start.tv_sec * 1000000 + TimeValue_Start.tv_usec; time_end = TimeValue_Final.tv_sec * 1000000 + TimeValue_Final.tv_usec; time_overhead4 = (time_end - time_start)/1000000.0; printf("\n\t\t Matrix into Matrix Multiplication by serial .....................................Done \n"); printf("\n\n\t\t Time in Seconds ( without schedule clause ) : %lf Seconds ",time_overhead1); printf("\n\t\t Time in Seconds ( with schedule (static,chunksize) : %lf Seconds ",time_overhead2); printf("\n\t\t Time in Seconds ( with schedule (dynamic,chunksize) : %lf Seconds ",time_overhead3); printf("\n\t\t Time in Seconds ( using serial computation) : %lf Seconds \n",time_overhead4); printf("\n\t\t ( T represents the Time taken for computation )"); printf("\n\t\t..........................................................................\n"); /* Freeing Allocated Memory */ free(Matrix_A); free(Matrix_B); free(Result); free(Checkoutput); }
train_share_states.h
/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
#ifndef LIGHTGBM_TRAIN_SHARE_STATES_H_
#define LIGHTGBM_TRAIN_SHARE_STATES_H_

#include <LightGBM/bin.h>
#include <LightGBM/feature_group.h>
#include <LightGBM/meta.h>
#include <LightGBM/utils/threading.h>

#include <algorithm>
#include <memory>
#include <vector>

namespace LightGBM {

// Wraps a MultiValBin (multi-value, possibly row-wise bin representation)
// and drives the blocked, multi-threaded construction of gradient/hessian
// histograms from it, including the per-thread buffer layout and the final
// merge/move steps.
// NOTE(review): this header uses std::memset without including <cstring>;
// presumably pulled in transitively — confirm.
class MultiValBinWrapper {
 public:
  MultiValBinWrapper(MultiValBin* bin, data_size_t num_data,
    const std::vector<int>& feature_groups_contained);

  // True when the wrapped bin exists and reports a sparse layout.
  bool IsSparse() {
    if (multi_val_bin_ != nullptr) {
      return multi_val_bin_->IsSparse();
    }
    return false;
  }

  void InitTrain(const std::vector<int>& group_feature_start,
    const std::vector<std::unique_ptr<FeatureGroup>>& feature_groups,
    const std::vector<int8_t>& is_feature_used,
    const data_size_t* bagging_use_indices,
    data_size_t bagging_indices_cnt);

  // Copies the merged histogram from the buffer into its final location.
  void HistMove(const std::vector<hist_t,
    Common::AlignmentAllocator<hist_t, kAlignedSize>>& hist_buf);

  // Reduces the per-block partial histograms in hist_buf into one.
  void HistMerge(std::vector<hist_t,
    Common::AlignmentAllocator<hist_t, kAlignedSize>>* hist_buf);

  void ResizeHistBuf(std::vector<hist_t,
    Common::AlignmentAllocator<hist_t, kAlignedSize>>* hist_buf,
    MultiValBin* sub_multi_val_bin,
    hist_t* origin_hist_data);

  // Builds the full histogram: splits the rows into n_data_block_ blocks,
  // constructs one partial histogram per block in parallel, then merges and
  // moves the result. USE_INDICES selects bagged-subset access via
  // data_indices; ORDERED selects the gradient-ordered kernel.
  template <bool USE_INDICES, bool ORDERED>
  void ConstructHistograms(const data_size_t* data_indices,
      data_size_t num_data,
      const score_t* gradients,
      const score_t* hessians,
      std::vector<hist_t, Common::AlignmentAllocator<hist_t, kAlignedSize>>* hist_buf,
      hist_t* origin_hist_data) {
    // Use the subset bin when a row or column subset is active.
    const auto cur_multi_val_bin = (is_use_subcol_ || is_use_subrow_)
          ? multi_val_bin_subset_.get()
          : multi_val_bin_.get();
    if (cur_multi_val_bin != nullptr) {
      global_timer.Start("Dataset::sparse_bin_histogram");
      n_data_block_ = 1;
      data_block_size_ = num_data;
      // Choose block count/size subject to the min/max block-size bounds.
      Threading::BlockInfo<data_size_t>(num_threads_, num_data, min_block_size_,
        max_block_size_, &n_data_block_, &data_block_size_);
      ResizeHistBuf(hist_buf, cur_multi_val_bin, origin_hist_data);
      OMP_INIT_EX();
      #pragma omp parallel for schedule(static) num_threads(num_threads_)
      for (int block_id = 0; block_id < n_data_block_; ++block_id) {
        OMP_LOOP_EX_BEGIN();
        data_size_t start = block_id * data_block_size_;
        data_size_t end = std::min<data_size_t>(start + data_block_size_,
          num_data);
        ConstructHistogramsForBlock<USE_INDICES, ORDERED>(
          cur_multi_val_bin, start, end, data_indices, gradients, hessians,
          block_id, hist_buf);
        OMP_LOOP_EX_END();
      }
      OMP_THROW_EX();
      global_timer.Stop("Dataset::sparse_bin_histogram");
      global_timer.Start("Dataset::sparse_bin_histogram_merge");
      HistMerge(hist_buf);
      global_timer.Stop("Dataset::sparse_bin_histogram_merge");
      global_timer.Start("Dataset::sparse_bin_histogram_move");
      HistMove(*hist_buf);
      global_timer.Stop("Dataset::sparse_bin_histogram_move");
    }
  }

  // Builds the partial histogram for rows [start, end) of one block.
  // Block 0 writes directly into the output histogram (or, with a column
  // subset, into the tail of hist_buf); every other block writes into its
  // own 2*num_bin_aligned_ slot of hist_buf, to be merged later.
  template <bool USE_INDICES, bool ORDERED>
  void ConstructHistogramsForBlock(const MultiValBin* sub_multi_val_bin,
    data_size_t start, data_size_t end, const data_size_t* data_indices,
    const score_t* gradients, const score_t* hessians, int block_id,
    std::vector<hist_t, Common::AlignmentAllocator<hist_t, kAlignedSize>>* hist_buf) {
    hist_t* data_ptr = origin_hist_data_;
    if (block_id == 0) {
      if (is_use_subcol_) {
        data_ptr = hist_buf->data() + hist_buf->size() -
          2 * static_cast<size_t>(num_bin_aligned_);
      }
    } else {
      data_ptr = hist_buf->data() +
        static_cast<size_t>(num_bin_aligned_) * (block_id - 1) * 2;
    }
    // Zero the grad/hess pair for every bin before accumulation.
    std::memset(reinterpret_cast<void*>(data_ptr), 0,
      num_bin_ * kHistBufferEntrySize);
    if (USE_INDICES) {
      if (ORDERED) {
        sub_multi_val_bin->ConstructHistogramOrdered(data_indices, start, end,
          gradients, hessians, data_ptr);
      } else {
        sub_multi_val_bin->ConstructHistogram(data_indices, start, end,
          gradients, hessians, data_ptr);
      }
    } else {
      sub_multi_val_bin->ConstructHistogram(start, end, gradients, hessians,
        data_ptr);
    }
  }

  void CopyMultiValBinSubset(const std::vector<int>& group_feature_start,
    const std::vector<std::unique_ptr<FeatureGroup>>& feature_groups,
    const std::vector<int8_t>& is_feature_used,
    const data_size_t* bagging_use_indices,
    data_size_t bagging_indices_cnt);

  void SetUseSubrow(bool is_use_subrow) {
    is_use_subrow_ = is_use_subrow;
  }

  void SetSubrowCopied(bool is_subrow_copied) {
    is_subrow_copied_ = is_subrow_copied;
  }

 private:
  bool is_use_subcol_ = false;     // a column (feature) subset is active
  bool is_use_subrow_ = false;     // a row (bagging) subset is active
  bool is_subrow_copied_ = false;  // subset rows already copied into subset bin
  std::unique_ptr<MultiValBin> multi_val_bin_;
  std::unique_ptr<MultiValBin> multi_val_bin_subset_;
  // Source/destination offsets and sizes used by HistMove for the
  // column-subset case.
  std::vector<uint32_t> hist_move_src_;
  std::vector<uint32_t> hist_move_dest_;
  std::vector<uint32_t> hist_move_size_;
  const std::vector<int> feature_groups_contained_;

  int num_threads_;
  int max_block_size_;
  int num_bin_;
  int num_bin_aligned_;
  int n_data_block_;
  int data_block_size_;
  int min_block_size_;
  int num_data_;

  hist_t* origin_hist_data_;

  // Bytes per histogram bin: one gradient and one hessian entry.
  const size_t kHistBufferEntrySize = 2 * sizeof(hist_t);
};

// Training-wide shared state: owns the (optional) MultiValBinWrapper, the
// shared histogram buffer, and the per-feature histogram offsets, and
// forwards histogram construction to the wrapper when it exists.
struct TrainingShareStates {
  int num_threads = 0;
  bool is_col_wise = true;
  bool is_constant_hessian = true;
  const data_size_t* bagging_use_indices;
  data_size_t bagging_indices_cnt;

  TrainingShareStates() {
    multi_val_bin_wrapper_.reset(nullptr);
  }

  int num_hist_total_bin() { return num_hist_total_bin_; }

  const std::vector<uint32_t>& feature_hist_offsets() {
    return feature_hist_offsets_;
  }

  bool IsSparseRowwise() {
    return (multi_val_bin_wrapper_ != nullptr &&
      multi_val_bin_wrapper_->IsSparse());
  }

  void SetMultiValBin(MultiValBin* bin, data_size_t num_data,
    const std::vector<std::unique_ptr<FeatureGroup>>& feature_groups,
    bool dense_only, bool sparse_only);

  void CalcBinOffsets(
    const std::vector<std::unique_ptr<FeatureGroup>>& feature_groups,
    std::vector<uint32_t>* offsets, bool is_col_wise);

  void InitTrain(const std::vector<int>& group_feature_start,
        const std::vector<std::unique_ptr<FeatureGroup>>& feature_groups,
        const std::vector<int8_t>& is_feature_used) {
    if (multi_val_bin_wrapper_ != nullptr) {
      multi_val_bin_wrapper_->InitTrain(group_feature_start,
        feature_groups,
        is_feature_used,
        bagging_use_indices,
        bagging_indices_cnt);
    }
  }

  // No-op when no multi-value bin is configured.
  template <bool USE_INDICES, bool ORDERED>
  void ConstructHistograms(const data_size_t* data_indices,
                          data_size_t num_data,
                          const score_t* gradients,
                          const score_t* hessians,
                          hist_t* hist_data) {
    if (multi_val_bin_wrapper_ != nullptr) {
      multi_val_bin_wrapper_->ConstructHistograms<USE_INDICES, ORDERED>(
        data_indices, num_data, gradients, hessians, &hist_buf_, hist_data);
    }
  }

  void SetUseSubrow(bool is_use_subrow) {
    if (multi_val_bin_wrapper_ != nullptr) {
      multi_val_bin_wrapper_->SetUseSubrow(is_use_subrow);
    }
  }

  void SetSubrowCopied(bool is_subrow_copied) {
    if (multi_val_bin_wrapper_ != nullptr) {
      multi_val_bin_wrapper_->SetSubrowCopied(is_subrow_copied);
    }
  }

 private:
  std::vector<uint32_t> feature_hist_offsets_;
  int num_hist_total_bin_ = 0;
  std::unique_ptr<MultiValBinWrapper> multi_val_bin_wrapper_;
  std::vector<hist_t, Common::AlignmentAllocator<hist_t, kAlignedSize>> hist_buf_;
  int num_total_bin_ = 0;
  double num_elements_per_row_ = 0.0f;
};

}  // namespace LightGBM

#endif  // LIGHTGBM_TRAIN_SHARE_STATES_H_
ocp_nlp_sqp.c
/*
 * Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
 * Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
 * Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
 * Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
 *
 * This file is part of acados.
 *
 * The 2-Clause BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.;
 */

#include "acados/ocp_nlp/ocp_nlp_sqp.h"

// external
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#if defined(ACADOS_WITH_OPENMP)
#include <omp.h>
#endif

// blasfeo
#include "blasfeo/include/blasfeo_d_aux.h"
#include "blasfeo/include/blasfeo_d_aux_ext_dep.h"
#include "blasfeo/include/blasfeo_d_blas.h"

// acados
#include "acados/ocp_nlp/ocp_nlp_common.h"
#include "acados/ocp_nlp/ocp_nlp_dynamics_cont.h"
#include "acados/ocp_nlp/ocp_nlp_reg_common.h"
#include "acados/ocp_qp/ocp_qp_common.h"
#include "acados/utils/mem.h"
#include "acados/utils/print.h"
#include "acados/utils/timing.h"
#include "acados/utils/types.h"



/************************************************
 * options
 ************************************************/

// Bytes needed for an ocp_nlp_sqp_opts struct plus the nested generic
// NLP options it owns.
int ocp_nlp_sqp_opts_calculate_size(void *config_, void *dims_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;

    int size = 0;

    size += sizeof(ocp_nlp_sqp_opts);

    size += ocp_nlp_opts_calculate_size(config, dims);

    return size;
}

// Places the opts struct (and its nested NLP opts) into caller-provided
// raw_memory; raw_memory must be at least ocp_nlp_sqp_opts_calculate_size()
// bytes.
void *ocp_nlp_sqp_opts_assign(void *config_, void *dims_, void *raw_memory)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;

    char *c_ptr = (char *) raw_memory;

    ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) c_ptr;
    c_ptr += sizeof(ocp_nlp_sqp_opts);

    opts->nlp_opts = ocp_nlp_opts_assign(config, dims, c_ptr);
    c_ptr += ocp_nlp_opts_calculate_size(config, dims);

    assert((char *) raw_memory + ocp_nlp_sqp_opts_calculate_size(config, dims) >= c_ptr);

    return opts;
}

// Sets the default SQP options (iteration limit, convergence tolerances,
// warm-start behavior) and propagates the tolerances to the QP solver.
void ocp_nlp_sqp_opts_initialize_default(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;

    // int ii;

    // this first !!!
    ocp_nlp_opts_initialize_default(config, dims, nlp_opts);

    // SQP opts
    opts->max_iter = 20;
    opts->tol_stat = 1e-8;
    opts->tol_eq = 1e-8;
    opts->tol_ineq = 1e-8;
    opts->tol_comp = 1e-8;

    opts->ext_qp_res = 0;

    opts->qp_warm_start = 0;
    opts->warm_start_first_qp = false;
    opts->rti_phase = 0;
    opts->print_level = 0;

    // overwrite default submodules opts

    // qp tolerance
    qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_stat", &opts->tol_stat);
    qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_eq", &opts->tol_eq);
    qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_ineq", &opts->tol_ineq);
    qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_comp", &opts->tol_comp);

    return;
}

// Re-synchronizes the nested generic NLP options after external changes.
void ocp_nlp_sqp_opts_update(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    ocp_nlp_opts_update(config, dims, nlp_opts);

    return;
}

// Sets one option by name. Fields prefixed with a module name (e.g. "qp_...")
// are routed to that submodule; SQP-level fields are handled here and
// anything unrecognized is forwarded to the generic NLP options.
void ocp_nlp_sqp_opts_set(void *config_, void *opts_, const char *field, void* value)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    int ii;

    char module[MAX_STR_LEN];
    char *ptr_module = NULL;
    int module_length = 0;

    // extract module name (prefix up to the first '_')
    char *char_ = strchr(field, '_');
    if (char_!=NULL)
    {
        module_length = char_-field;
        for (ii=0; ii<module_length; ii++)
            module[ii] = field[ii];
        module[module_length] = '\0'; // add end of string
        ptr_module = module;
    }

    // pass options to QP module
    if ( ptr_module!=NULL && (!strcmp(ptr_module, "qp")) )
    {
        // config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, field+module_length+1, value);
        ocp_nlp_opts_set(config, nlp_opts, field, value);

        if (!strcmp(field, "qp_warm_start"))
        {
            // keep a local copy so the SQP loop can control warm starting
            int* i_ptr = (int *) value;
            opts->qp_warm_start = *i_ptr;
        }
    }
    else // nlp opts
    {
        if (!strcmp(field, "max_iter"))
        {
            int* max_iter = (int *) value;
            opts->max_iter = *max_iter;
        }
        else if (!strcmp(field, "tol_stat"))
        {
            double* tol_stat = (double *) value;
            opts->tol_stat = *tol_stat;
            // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_stat", value);
        }
        else if (!strcmp(field, "tol_eq"))
        {
            double* tol_eq = (double *) value;
            opts->tol_eq = *tol_eq;
            // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_eq", value);
        }
        else if (!strcmp(field, "tol_ineq"))
        {
            double* tol_ineq = (double *) value;
            opts->tol_ineq = *tol_ineq;
            // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_ineq", value);
        }
        else if (!strcmp(field, "tol_comp"))
        {
            double* tol_comp = (double *) value;
            opts->tol_comp = *tol_comp;
            // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_comp", value);
        }
        else if (!strcmp(field, "ext_qp_res"))
        {
            int* ext_qp_res = (int *) value;
            opts->ext_qp_res = *ext_qp_res;
        }
        else if (!strcmp(field, "warm_start_first_qp"))
        {
            bool* warm_start_first_qp = (bool *) value;
            opts->warm_start_first_qp = *warm_start_first_qp;
        }
        else if (!strcmp(field, "rti_phase"))
        {
            int* rti_phase = (int *) value;
            // only phase 0 is currently supported (condition rejects != 0)
            if (*rti_phase < 0 || *rti_phase > 0) {
                printf("\nerror: ocp_nlp_sqp_opts_set: invalid value for rti_phase field.");
                printf("possible values are: 0\n");
                exit(1);
            }
            else opts->rti_phase = *rti_phase;
        }
        else if (!strcmp(field, "print_level"))
        {
            int* print_level = (int *) value;
            if (*print_level < 0)
            {
                printf("\nerror: ocp_nlp_sqp_opts_set: invalid value for print_level field, need int >=0, got %d.", *print_level);
                exit(1);
            }
            opts->print_level = *print_level;
        }
        else
        {
            // fall through to the generic NLP option setter
            ocp_nlp_opts_set(config, nlp_opts, field, value);
            // printf("\nerror: ocp_nlp_sqp_opts_set: wrong field: %s\n", field);
            // exit(1);
        }
    }

    return;

}

// Forwards a stage-wise option to the generic NLP option setter.
void ocp_nlp_sqp_opts_set_at_stage(void *config_, void *opts_, int stage, const char *field, void* value)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    ocp_nlp_opts_set_at_stage(config, nlp_opts, stage, field, value);

    return;

}



/************************************************
 * memory
 ************************************************/

int ocp_nlp_sqp_memory_calculate_size(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    // int N = dims->N;
    // int *nx = dims->nx;
    // int *nu = dims->nu;
    // int *nz = dims->nz;

    int size = 0;

    size += sizeof(ocp_nlp_sqp_memory);

    // nlp res
    size += ocp_nlp_res_calculate_size(dims);

    // nlp mem
    size += ocp_nlp_memory_calculate_size(config, dims, nlp_opts);

    // stat
    int stat_m = opts->max_iter+1;
    int stat_n =
6;
    if (opts->ext_qp_res)
        stat_n += 4;
    size += stat_n*stat_m*sizeof(double);

    size += 8; // initial align

    make_int_multiple_of(8, &size);

    return size;
}



// Lays out the SQP memory in raw_memory: residuals, generic NLP memory,
// then the statistics table; initializes status to ACADOS_READY.
void *ocp_nlp_sqp_memory_assign(void *config_, void *dims_, void *opts_, void *raw_memory)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    // ocp_nlp_dynamics_config **dynamics = config->dynamics;
    // ocp_nlp_cost_config **cost = config->cost;
    // ocp_nlp_constraints_config **constraints = config->constraints;

    char *c_ptr = (char *) raw_memory;

    // int N = dims->N;
    // int *nx = dims->nx;
    // int *nu = dims->nu;
    // int *nz = dims->nz;

    // initial align
    align_char_to(8, &c_ptr);

    ocp_nlp_sqp_memory *mem = (ocp_nlp_sqp_memory *) c_ptr;
    c_ptr += sizeof(ocp_nlp_sqp_memory);

    // nlp res
    mem->nlp_res = ocp_nlp_res_assign(dims, c_ptr);
    c_ptr += mem->nlp_res->memsize;

    // nlp mem
    mem->nlp_mem = ocp_nlp_memory_assign(config, dims, nlp_opts, c_ptr);
    c_ptr += ocp_nlp_memory_calculate_size(config, dims, nlp_opts);

    // stat: columns mirror ocp_nlp_sqp_memory_calculate_size
    mem->stat = (double *) c_ptr;
    mem->stat_m = opts->max_iter+1;
    mem->stat_n = 6;
    if (opts->ext_qp_res)
        mem->stat_n += 4;
    c_ptr += mem->stat_m*mem->stat_n*sizeof(double);

    mem->status = ACADOS_READY;

    assert((char *) raw_memory + ocp_nlp_sqp_memory_calculate_size(config, dims, opts) >= c_ptr);

    return mem;
}



/************************************************
 * workspace
 ************************************************/

// Returns the number of bytes needed for the SQP workspace: generic NLP
// workspace, temporary QP in/out for sensitivity evaluation, and optional
// external QP residual buffers.
int ocp_nlp_sqp_workspace_calculate_size(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    int size = 0;

    // sqp
    size += sizeof(ocp_nlp_sqp_workspace);

    // nlp
    size += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts);

    // tmp qp in
    size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);

    // tmp qp out
    size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    if (opts->ext_qp_res)
    {
        // qp res
        size += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);

        // qp res ws
        size += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
    }

    return size;
}



// Re-derives the workspace sub-pointers from the raw workspace block.
// Must be called at the top of every entry point that uses the workspace,
// since the raw memory may have been (re)allocated by the caller.
static void ocp_nlp_sqp_cast_workspace(ocp_nlp_config *config, ocp_nlp_dims *dims,
                                       ocp_nlp_sqp_opts *opts, ocp_nlp_sqp_memory *mem,
                                       ocp_nlp_sqp_workspace *work)
{
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;

    // sqp
    char *c_ptr = (char *) work;
    c_ptr += sizeof(ocp_nlp_sqp_workspace);

    // nlp
    work->nlp_work = ocp_nlp_workspace_assign(config, dims, nlp_opts, nlp_mem, c_ptr);
    c_ptr += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts);

    // tmp qp in
    work->tmp_qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);

    // tmp qp out
    work->tmp_qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    if (opts->ext_qp_res)
    {
        // qp res
        work->qp_res = ocp_qp_res_assign(dims->qp_solver->orig_dims, c_ptr);
        c_ptr += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);

        // qp res ws
        work->qp_res_ws = ocp_qp_res_workspace_assign(dims->qp_solver->orig_dims, c_ptr);
        c_ptr += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
    }

    assert((char *) work + ocp_nlp_sqp_workspace_calculate_size(config, dims, opts) >= c_ptr);

    return;
}



/************************************************
 * functions
 ************************************************/

// Main SQP driver: repeatedly linearizes the NLP, solves the resulting QP,
// and updates the primal/dual iterates until the residual tolerances are
// met, max_iter is reached, or the QP solver fails.
int ocp_nlp_sqp(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
                void *opts_, void *mem_, void *work_)
{
    acados_timer timer0, timer1;

    acados_tic(&timer0);

    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    ocp_nlp_sqp_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    ocp_nlp_out *nlp_out = nlp_out_;
    ocp_nlp_memory *nlp_mem
= mem->nlp_mem;
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;

    ocp_nlp_sqp_workspace *work = work_;
    ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;

    // zero timers
    double total_time = 0.0;
    double tmp_time;
    mem->time_qp_sol = 0.0;
    mem->time_qp_solver_call = 0.0;
    mem->time_qp_xcond = 0.0;
    mem->time_lin = 0.0;
    mem->time_reg = 0.0;
    mem->time_tot = 0.0;

    int N = dims->N;

    int ii;

    int qp_iter = 0;
    int qp_status = 0;

#if defined(ACADOS_WITH_OPENMP)
    // backup number of threads
    int num_threads_bkp = omp_get_num_threads();
    // set number of threads
    omp_set_num_threads(opts->nlp_opts->num_threads);
    #pragma omp parallel
    { // beginning of parallel region
#endif

    // alias to dynamics_memory: wire per-stage module memories to the shared
    // NLP iterate and QP matrices so the modules write in place
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for
#endif
    for (ii = 0; ii < N; ii++)
    {
        config->dynamics[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_ux1_ptr(nlp_out->ux+ii+1, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_tmp_ux1_ptr(nlp_work->tmp_nlp_out->ux+ii+1, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_pi_ptr(nlp_out->pi+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_tmp_pi_ptr(nlp_work->tmp_nlp_out->pi+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_BAbt_ptr(nlp_mem->qp_in->BAbt+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_dzduxt_ptr(nlp_mem->dzduxt+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_sim_guess_ptr(nlp_mem->sim_guess+ii, nlp_mem->set_sim_guess+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->dynamics[ii]);
    }

    // alias to cost_memory
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for
#endif
    for (ii = 0; ii <= N; ii++)
    {
        config->cost[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_dzdux_tran_ptr(nlp_mem->dzduxt+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_Z_ptr(nlp_mem->qp_in->Z+ii, nlp_mem->cost[ii]);
    }

    // alias to constraints_memory
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for
#endif
    for (ii = 0; ii <= N; ii++)
    {
        config->constraints[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_lam_ptr(nlp_out->lam+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_tmp_lam_ptr(nlp_work->tmp_nlp_out->lam+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_dzdux_tran_ptr(nlp_mem->dzduxt+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_DCt_ptr(nlp_mem->qp_in->DCt+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxb_ptr(nlp_mem->qp_in->idxb[ii], nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxs_ptr(nlp_mem->qp_in->idxs[ii], nlp_mem->constraints[ii]);
    }

    // alias to regularize memory
    config->regularize->memory_set_RSQrq_ptr(dims->regularize, nlp_mem->qp_in->RSQrq, nlp_mem->regularize_mem);
    config->regularize->memory_set_rq_ptr(dims->regularize, nlp_mem->qp_in->rqz, nlp_mem->regularize_mem);
    config->regularize->memory_set_BAbt_ptr(dims->regularize, nlp_mem->qp_in->BAbt, nlp_mem->regularize_mem);
    config->regularize->memory_set_b_ptr(dims->regularize, nlp_mem->qp_in->b, nlp_mem->regularize_mem);
    config->regularize->memory_set_idxb_ptr(dims->regularize, nlp_mem->qp_in->idxb, nlp_mem->regularize_mem);
    config->regularize->memory_set_DCt_ptr(dims->regularize, nlp_mem->qp_in->DCt, nlp_mem->regularize_mem);
    config->regularize->memory_set_ux_ptr(dims->regularize, nlp_mem->qp_out->ux, nlp_mem->regularize_mem);
    config->regularize->memory_set_pi_ptr(dims->regularize, nlp_mem->qp_out->pi, nlp_mem->regularize_mem);
    config->regularize->memory_set_lam_ptr(dims->regularize, nlp_mem->qp_out->lam, nlp_mem->regularize_mem);

    // copy sampling times into dynamics model
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for
#endif
    // NOTE(oj): this will lead in an error for irk_gnsf, T must be set in precompute;
    // -> remove here and make sure precompute is called everywhere (e.g. Python interface).
    for (ii = 0; ii < N; ii++)
    {
        config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
    }

#if defined(ACADOS_WITH_OPENMP)
    } // end of parallel region
#endif

    // initialize QP
    ocp_nlp_initialize_qp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);

    // main sqp loop
    int sqp_iter = 0;
    nlp_mem->sqp_iter = &sqp_iter;

    for (; sqp_iter < opts->max_iter; sqp_iter++)
    {
        if (opts->print_level > 0)
        {
            printf("\n------- sqp iter %d (max_iter %d) --------\n", sqp_iter, opts->max_iter);
            if (opts->print_level > sqp_iter + 1)
                print_ocp_qp_in(nlp_mem->qp_in);
        }

        // linearizate NLP and update QP matrices
        acados_tic(&timer1);
        ocp_nlp_approximate_qp_matrices(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
        mem->time_lin += acados_toc(&timer1);

        // update QP rhs for SQP (step prim var, abs dual var)
        ocp_nlp_approximate_qp_vectors_sqp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);

        // compute nlp residuals
        ocp_nlp_res_compute(dims, nlp_in, nlp_out, mem->nlp_res, nlp_mem);

        // overall residual = max over the four residual components
        nlp_out->inf_norm_res = mem->nlp_res->inf_norm_res_g;
        nlp_out->inf_norm_res = (mem->nlp_res->inf_norm_res_b > nlp_out->inf_norm_res) ?
                                    mem->nlp_res->inf_norm_res_b : nlp_out->inf_norm_res;
        nlp_out->inf_norm_res = (mem->nlp_res->inf_norm_res_d > nlp_out->inf_norm_res) ?
                                    mem->nlp_res->inf_norm_res_d : nlp_out->inf_norm_res;
        nlp_out->inf_norm_res = (mem->nlp_res->inf_norm_res_m > nlp_out->inf_norm_res) ?
                                    mem->nlp_res->inf_norm_res_m : nlp_out->inf_norm_res;

        // save statistics
        if (sqp_iter < mem->stat_m)
        {
            mem->stat[mem->stat_n*sqp_iter+0] = mem->nlp_res->inf_norm_res_g;
            mem->stat[mem->stat_n*sqp_iter+1] = mem->nlp_res->inf_norm_res_b;
            mem->stat[mem->stat_n*sqp_iter+2] = mem->nlp_res->inf_norm_res_d;
            mem->stat[mem->stat_n*sqp_iter+3] = mem->nlp_res->inf_norm_res_m;
        }

        // exit conditions on residuals
        // (bitwise & on comparison results - original acados style, equivalent to && here)
        if ((mem->nlp_res->inf_norm_res_g < opts->tol_stat) &
            (mem->nlp_res->inf_norm_res_b < opts->tol_eq) &
            (mem->nlp_res->inf_norm_res_d < opts->tol_ineq) &
            (mem->nlp_res->inf_norm_res_m < opts->tol_comp))
        {
            // save sqp iterations number
            mem->sqp_iter = sqp_iter;
            nlp_out->sqp_iter = sqp_iter;

            // stop timer
            total_time += acados_toc(&timer0);

            // save time
            nlp_out->total_time = total_time;
            mem->time_tot = total_time;

#if defined(ACADOS_WITH_OPENMP)
            // restore number of threads
            omp_set_num_threads(num_threads_bkp);
#endif
            mem->status = ACADOS_SUCCESS;
            return mem->status;
        }

        // regularize Hessian
        acados_tic(&timer1);
        config->regularize->regularize_hessian(config->regularize, dims->regularize,
                                               opts->nlp_opts->regularize, nlp_mem->regularize_mem);
        mem->time_reg += acados_toc(&timer1);

        // (typically) no warm start at first iteration
        if (sqp_iter == 0 && !opts->warm_start_first_qp)
        {
            int tmp_int = 0;
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "warm_start", &tmp_int);
        }

        // solve qp
        acados_tic(&timer1);
        qp_status = qp_solver->evaluate(qp_solver, dims->qp_solver, nlp_mem->qp_in, nlp_mem->qp_out,
                                        opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem, nlp_work->qp_work);
        mem->time_qp_sol += acados_toc(&timer1);

        qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_solver_call", &tmp_time);
        mem->time_qp_solver_call += tmp_time;
        qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_xcond", &tmp_time);
        mem->time_qp_xcond += tmp_time;

        // compute correct dual solution in case of Hessian regularization
        acados_tic(&timer1);
        config->regularize->correct_dual_sol(config->regularize, dims->regularize,
                                             opts->nlp_opts->regularize, nlp_mem->regularize_mem);
        mem->time_reg += acados_toc(&timer1);

        // restore default warm start
        if (sqp_iter==0)
        {
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts,
                                        "warm_start", &opts->qp_warm_start);
        }

        // TODO move into QP solver memory ???
        qp_info *qp_info_;
        ocp_qp_out_get(nlp_mem->qp_out, "qp_info", &qp_info_);
        nlp_out->qp_iter = qp_info_->num_iter;
        // printf("\nqp_iter = %d, sqp_iter = %d, max_sqp_iter = %d\n", nlp_out->qp_iter, sqp_iter, opts->max_iter);
        qp_iter = qp_info_->num_iter;

        // save statistics of last qp solver call
        if (sqp_iter+1 < mem->stat_m)
        {
            mem->stat[mem->stat_n*(sqp_iter+1)+4] = qp_status;
            mem->stat[mem->stat_n*(sqp_iter+1)+5] = qp_iter;
        }

        // compute external QP residuals (for debugging)
        if (opts->ext_qp_res)
        {
            ocp_qp_res_compute(nlp_mem->qp_in, nlp_mem->qp_out, work->qp_res, work->qp_res_ws);
            if (sqp_iter+1 < mem->stat_m)
                ocp_qp_res_compute_nrm_inf(work->qp_res, mem->stat+(mem->stat_n*(sqp_iter+1)+6));
        }

        // QP solver hard failure (ACADOS_MAXITER from the QP is tolerated)
        if ((qp_status!=ACADOS_SUCCESS) & (qp_status!=ACADOS_MAXITER))
        {
            // print_ocp_qp_in(nlp_mem->qp_in);

            // save sqp iterations number
            mem->sqp_iter = sqp_iter;
            nlp_out->sqp_iter = sqp_iter;

            // stop timer
            total_time += acados_toc(&timer0);

            // save time
            mem->time_tot = total_time;
            nlp_out->total_time = total_time;

            printf("QP solver returned error status %d in iteration %d\n", qp_status, sqp_iter);
#if defined(ACADOS_WITH_OPENMP)
            // restore number of threads
            omp_set_num_threads(num_threads_bkp);
#endif
            if (opts->print_level > 1)
            {
                printf("\n Failed to solve the following QP:\n");
                if (opts->print_level > sqp_iter + 1)
ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;

    int N = dims->N;
    int status = ACADOS_SUCCESS;
    int ii;

    // TODO(all) add flag to enable/disable checks
    // dimension consistency check between NLP dims and constraint modules
    for (ii = 0; ii <= N; ii++)
    {
        int module_val;
        config->constraints[ii]->dims_get(config->constraints[ii], dims->constraints[ii], "ns", &module_val);
        if (dims->ns[ii] != module_val)
        {
            printf("ocp_nlp_sqp_precompute: inconsistent dimension ns for stage %d with constraint module, got %d, module: %d.",
                   ii, dims->ns[ii], module_val);
            exit(1);
        }
    }

    // precompute
    for (ii = 0; ii < N; ii++)
    {
        // set T
        config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii],
                                        nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
        // dynamics precompute
        status = config->dynamics[ii]->precompute(config->dynamics[ii], dims->dynamics[ii],
                                                  nlp_in->dynamics[ii], opts->nlp_opts->dynamics[ii],
                                                  nlp_mem->dynamics[ii], nlp_work->dynamics[ii]);
        if (status != ACADOS_SUCCESS)
            return status;
    }

    return status;
}



// Evaluates the sensitivity of the NLP solution w.r.t. the parameter given
// by field/index at the given stage, by solving the last QP with a unit rhs.
// Currently only field "ex" (initial state) at stage 0 is supported.
void ocp_nlp_sqp_eval_param_sens(void *config_, void *dims_, void *opts_, void *mem_, void *work_,
                                 char *field, int stage, int index, void *sens_nlp_out_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_sqp_memory *mem = mem_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;
    ocp_nlp_out *sens_nlp_out = sens_nlp_out_;

    ocp_nlp_sqp_workspace *work = work_;
    ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;

    // copy the last QP and zero its rhs, then place a unit seed
    d_ocp_qp_copy_all(nlp_mem->qp_in, work->tmp_qp_in);
    d_ocp_qp_set_rhs_zero(work->tmp_qp_in);

    double one = 1.0;

    if ((!strcmp("ex", field)) & (stage==0))
    {
        d_ocp_qp_set_el("lbx", stage, index, &one, work->tmp_qp_in);
        d_ocp_qp_set_el("ubx", stage, index, &one, work->tmp_qp_in);

        // d_ocp_qp_print(work->tmp_qp_in->dim, work->tmp_qp_in);

        config->qp_solver->eval_sens(config->qp_solver, dims->qp_solver, work->tmp_qp_in,
                                     work->tmp_qp_out, opts->nlp_opts->qp_solver_opts,
                                     nlp_mem->qp_solver_mem, nlp_work->qp_work);

        // d_ocp_qp_sol_print(work->tmp_qp_out->dim, work->tmp_qp_out);
        // exit(1);

        /* copy tmp_qp_out into sens_nlp_out */
        int i;

        int N = dims->N;
        int *nv = dims->nv;
        int *nx = dims->nx;
        // int *nu = dims->nu;
        int *ni = dims->ni;
        // int *nz = dims->nz;

        for (i = 0; i <= N; i++)
        {
            blasfeo_dveccp(nv[i], work->tmp_qp_out->ux + i, 0, sens_nlp_out->ux + i, 0);

            if (i < N)
                blasfeo_dveccp(nx[i + 1], work->tmp_qp_out->pi + i, 0, sens_nlp_out->pi + i, 0);

            blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->lam + i, 0, sens_nlp_out->lam + i, 0);

            blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->t + i, 0, sens_nlp_out->t + i, 0);
        }
    }
    else
    {
        printf("\nerror: field %s at stage %d not available in ocp_nlp_sqp_eval_param_sens\n", field, stage);
        exit(1);
    }

    return;
}



// TODO rename memory_get ???
// String-based getter for solver results, timings and statistics.
void ocp_nlp_sqp_get(void *config_, void *dims_, void *mem_, const char *field, void *return_value_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_memory *mem = mem_;

    if (!strcmp("sqp_iter", field))
    {
        int *value = return_value_;
        *value = mem->sqp_iter;
    }
    else if (!strcmp("status", field))
    {
        int *value = return_value_;
        *value = mem->status;
    }
    else if (!strcmp("time_tot", field) || !strcmp("tot_time", field))
    {
        double *value = return_value_;
        *value = mem->time_tot;
    }
    else if (!strcmp("time_qp_sol", field) || !strcmp("time_qp", field))
    {
        double *value = return_value_;
        *value = mem->time_qp_sol;
    }
    else if (!strcmp("time_qp_solver", field) || !strcmp("time_qp_solver_call", field))
    {
        double *value = return_value_;
        *value = mem->time_qp_solver_call;
    }
    else if (!strcmp("time_qp_xcond", field))
    {
        double *value = return_value_;
        *value = mem->time_qp_xcond;
    }
    else if (!strcmp("time_lin", field))
    {
        double *value = return_value_;
        *value = mem->time_lin;
    }
    else if (!strcmp("time_reg", field))
    {
        double *value = return_value_;
        *value = mem->time_reg;
    }
    else if (!strcmp("time_sim", field) || !strcmp("time_sim_ad", field) || !strcmp("time_sim_la", field))
    {
        // sum the requested simulation time over all dynamics stages
        // NOTE(review): *ptr is accumulated into without being zeroed first
        // here - presumably the caller passes a zero-initialized double;
        // verify against the callers.
        double tmp = 0.0;
        double *ptr = return_value_;
        int N = dims->N;
        int ii;
        for (ii=0; ii<N; ii++)
        {
            config->dynamics[ii]->memory_get(config->dynamics[ii], dims->dynamics[ii],
                                             mem->nlp_mem->dynamics[ii], field, &tmp);
            *ptr += tmp;
        }
    }
    else if (!strcmp("nlp_res", field))
    {
        ocp_nlp_res **value = return_value_;
        *value = mem->nlp_res;
    }
    else if (!strcmp("stat", field))
    {
        double **value = return_value_;
        *value = mem->stat;
    }
    else if (!strcmp("statistics", field))
    {
        // copy the stat table transposed, prefixed with the iteration index column
        int n_row = mem->stat_m<mem->sqp_iter+1 ? mem->stat_m : mem->sqp_iter+1;
        double *value = return_value_;
        for (int ii=0; ii<n_row; ii++)
        {
            value[ii+0] = ii;
            for (int jj=0; jj<mem->stat_n; jj++)
                value[ii+(jj+1)*n_row] = mem->stat[jj+ii*mem->stat_n];
        }
    }
    else if (!strcmp("stat_m", field))
    {
        int *value = return_value_;
        *value = mem->stat_m;
    }
    else if (!strcmp("stat_n", field))
    {
        int *value = return_value_;
        *value = mem->stat_n;
    }
    else if (!strcmp("nlp_mem", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem;
    }
    else if (!strcmp("qp_xcond_dims", field))
    {
        void **value = return_value_;
        *value = dims->qp_solver->xcond_dims;
    }
    else if (!strcmp("qp_xcond_in", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_solver_mem->xcond_qp_in;
    }
    else if (!strcmp("qp_xcond_out", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_solver_mem->xcond_qp_out;
    }
    else if (!strcmp("qp_in", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_in;
    }
    else if (!strcmp("qp_out", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_out;
    }
    else if (!strcmp("qp_iter", field))
    {
        config->qp_solver->memory_get(config->qp_solver, mem->nlp_mem->qp_solver_mem, "iter", return_value_);
    }
    else
    {
        printf("\nerror: field %s not available in ocp_nlp_sqp_get\n", field);
        exit(1);
    }
}



// Fills the generic NLP solver vtable with the SQP implementations.
void ocp_nlp_sqp_config_initialize_default(void *config_)
{
    ocp_nlp_config *config = (ocp_nlp_config *) config_;

    config->opts_calculate_size = &ocp_nlp_sqp_opts_calculate_size;
    config->opts_assign = &ocp_nlp_sqp_opts_assign;
    config->opts_initialize_default = &ocp_nlp_sqp_opts_initialize_default;
    config->opts_update = &ocp_nlp_sqp_opts_update;
    config->opts_set = &ocp_nlp_sqp_opts_set;
    config->opts_set_at_stage = &ocp_nlp_sqp_opts_set_at_stage;
    config->memory_calculate_size = &ocp_nlp_sqp_memory_calculate_size;
    config->memory_assign = &ocp_nlp_sqp_memory_assign;
    config->workspace_calculate_size = &ocp_nlp_sqp_workspace_calculate_size;
    config->evaluate = &ocp_nlp_sqp;
    config->eval_param_sens = &ocp_nlp_sqp_eval_param_sens;
    config->config_initialize_default = &ocp_nlp_sqp_config_initialize_default;
    config->precompute = &ocp_nlp_sqp_precompute;
    config->get = &ocp_nlp_sqp_get;

    return;
}
tinyexr.h
#ifndef TINYEXR_H_ #define TINYEXR_H_ /* Copyright (c) 2014 - 2020, Syoyo Fujita and many contributors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Syoyo Fujita nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // TinyEXR contains some OpenEXR code, which is licensed under ------------ /////////////////////////////////////////////////////////////////////////// // // Copyright (c) 2002, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC // // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Industrial Light & Magic nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // /////////////////////////////////////////////////////////////////////////// // End of OpenEXR license ------------------------------------------------- // // // Do this: // #define TINYEXR_IMPLEMENTATION // before you include this file in *one* C or C++ file to create the // implementation. // // // i.e. it should look like this: // #include ... // #include ... // #include ... 
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//

#include <stddef.h>  // for size_t
#include <stdint.h>  // guess stdint.h is available(C99)

#ifdef __cplusplus
extern "C" {
#endif

// Use embedded miniz or not to decode ZIP format pixel. Linking with zlib
// required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif

// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif

#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0)  // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif

#ifndef TINYEXR_USE_THREAD
#define TINYEXR_USE_THREAD (0)  // No threaded loading.
// http://computation.llnl.gov/projects/floating-point-compression
#endif

#ifndef TINYEXR_USE_OPENMP
#ifdef _OPENMP
#define TINYEXR_USE_OPENMP (1)
#else
#define TINYEXR_USE_OPENMP (0)
#endif
#endif

// Error codes returned by the TinyEXR API (0 = success, negative = failure).
#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-6)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-7)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-8)
#define TINYEXR_ERROR_INVALID_HEADER (-9)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-10)
#define TINYEXR_ERROR_CANT_WRITE_FILE (-11)
#define TINYEXR_ERROR_SERIALZATION_FAILED (-12)
#define TINYEXR_ERROR_LAYER_NOT_FOUND (-13)

// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }

// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)

#define TINYEXR_MAX_HEADER_ATTRIBUTES (1024)
#define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128)

// Compression types as stored in the EXR header.
#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128)  // TinyEXR extension

#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)

#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)

#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)

// Flags parsed from the EXR version field (bytes 4-7 of the file).
typedef struct _EXRVersion {
  int version;    // this must be 2
  // tile format image;
  // not zero for only a single-part "normal" tiled file (according to spec.)
  int tiled;
  int long_name;  // long name attribute
  // deep image(EXR 2.0);
  // for a multi-part file, indicates that at least one part is of type deep* (according to spec.)
  int non_image;
  int multipart;  // multi-part(EXR 2.0)
} EXRVersion;

// A single named attribute from an EXR header.
typedef struct _EXRAttribute {
  char name[256];        // name and type are up to 255 chars long.
  char type[256];
  unsigned char *value;  // uint8_t*
  int size;
  int pad0;
} EXRAttribute;

// Per-channel information from the `channels` attribute.
typedef struct _EXRChannelInfo {
  char name[256];  // less than 255 bytes long
  int pixel_type;
  int x_sampling;
  int y_sampling;
  unsigned char p_linear;
  unsigned char pad[3];
} EXRChannelInfo;

// One tile of a tiled EXR image.
typedef struct _EXRTile {
  int offset_x;
  int offset_y;
  int level_x;
  int level_y;

  int width;   // actual width in a tile.
  int height;  // actual height in a tile.

  unsigned char **images;  // image[channels][pixels]
} EXRTile;

// Integer 2D bounding box (inclusive), as used by data/display windows.
typedef struct _EXRBox2i {
  int min_x;
  int min_y;
  int max_x;
  int max_y;
} EXRBox2i;

// Parsed EXR (part) header.
typedef struct _EXRHeader {
  float pixel_aspect_ratio;
  int line_order;
  EXRBox2i data_window;
  EXRBox2i display_window;
  float screen_window_center[2];
  float screen_window_width;

  int chunk_count;

  // Properties for tiled format(`tiledesc`).
  int tiled;
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;

  int long_name;
  // for a single-part file, agree with the version field bit 11
  // for a multi-part file, it is consistent with the type of part
  int non_image;
  int multipart;
  unsigned int header_len;

  // Custom attributes(excludes required attributes(e.g. `channels`,
  // `compression`, etc)
  int num_custom_attributes;
  EXRAttribute *custom_attributes;  // array of EXRAttribute. size =
                                    // `num_custom_attributes`.

  EXRChannelInfo *channels;  // [num_channels]

  int *pixel_types;  // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
  // each channel. This is overwritten with `requested_pixel_types` when
  // loading.
  int num_channels;

  int compression_type;        // compression type(TINYEXR_COMPRESSIONTYPE_*)
  int *requested_pixel_types;  // Filled initially by
                               // ParseEXRHeaderFrom(Memory|File), then users
                               // can edit it(only valid for HALF pixel type
                               // channel)
  // name attribute required for multipart files;
  // must be unique and non empty (according to spec.);
  // use EXRSetNameAttr for setting value;
  // max 255 character allowed - excluding terminating zero
  char name[256];
} EXRHeader;

typedef struct _EXRMultiPartHeader {
  int num_headers;
  EXRHeader *headers;
} EXRMultiPartHeader;

// Decoded image data for one EXR part (scanline or tiled).
typedef struct _EXRImage {
  EXRTile *tiles;  // Tiled pixel data. The application must reconstruct image
                   // from tiles manually. NULL if scanline format.
  struct _EXRImage* next_level;  // NULL if scanline format or image is the last level.
  int level_x;  // x level index
  int level_y;  // y level index

  unsigned char **images;  // image[channels][pixels]. NULL if tiled format.

  int width;
  int height;
  int num_channels;

  // Properties for tile format.
  int num_tiles;
} EXRImage;

typedef struct _EXRMultiPartImage {
  int num_images;
  EXRImage *images;
} EXRMultiPartImage;

// Decoded deep-image data (variable sample count per pixel).
typedef struct _DeepImage {
  const char **channel_names;
  float ***image;      // image[channels][scanlines][samples]
  int **offset_table;  // offset_table[scanline][offsets]
  int num_channels;
  int width;
  int height;
  int pad0;
} DeepImage;

// @deprecated { For backward compatibility. Not recommended to use. }
// Loads single-frame OpenEXR image. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
                   const char *filename, const char **err);

// Loads single-frame OpenEXR image by specifying layer name. Assume EXR image
// contains A(single channel alpha) or RGB(A) channels. Application must free
// image data as returned by `out_rgba` Result image format is: float x RGBA x
// width x height Returns negative value and may set error string in `err` when
// there's an error When the specified layer name is not found in the EXR file,
// the function will return `TINYEXR_ERROR_LAYER_NOT_FOUND`.
extern int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
                            const char *filename, const char *layer_name,
                            const char **err);

//
// Get layer infos from EXR file.
//
// @param[out] layer_names List of layer names. Application must free memory
// after using this.
// @param[out] num_layers The number of layers
// @param[out] err Error string(will be filled when the function returns error
// code). Free it using FreeEXRErrorMessage after using this value.
//
// @return TINYEXR_SUCCESS upon success.
//
extern int EXRLayers(const char *filename, const char **layer_names[],
                     int *num_layers, const char **err);

// @deprecated { to be removed. }
// Simple wrapper API for ParseEXRHeaderFromFile.
// checking given file is a EXR file(by just look up header) // @return TINYEXR_SUCCEES for EXR image, TINYEXR_ERROR_INVALID_HEADER for // others extern int IsEXR(const char *filename); // @deprecated { to be removed. } // Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels. // components must be 1(Grayscale), 3(RGB) or 4(RGBA). // Input image format is: `float x width x height`, or `float x RGB(A) x width x // hight` // Save image as fp16(HALF) format when `save_as_fp16` is positive non-zero // value. // Save image as fp32(FLOAT) format when `save_as_fp16` is 0. // Use ZIP compression by default. // Returns negative value and may set error string in `err` when there's an // error extern int SaveEXR(const float *data, const int width, const int height, const int components, const int save_as_fp16, const char *filename, const char **err); // Returns the number of resolution levels of the image (including the base) extern int EXRNumLevels(const EXRImage* exr_image); // Initialize EXRHeader struct extern void InitEXRHeader(EXRHeader *exr_header); // Set name attribute of EXRHeader struct (it makes a copy) extern void EXRSetNameAttr(EXRHeader *exr_header, const char* name); // Initialize EXRImage struct extern void InitEXRImage(EXRImage *exr_image); // Frees internal data of EXRHeader struct extern int FreeEXRHeader(EXRHeader *exr_header); // Frees internal data of EXRImage struct extern int FreeEXRImage(EXRImage *exr_image); // Frees error message extern void FreeEXRErrorMessage(const char *msg); // Parse EXR version header of a file. extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename); // Parse EXR version header from memory-mapped EXR data. extern int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size); // Parse single-part OpenEXR header from a file and initialize `EXRHeader`. 
// When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version, const char *filename, const char **err); // Parse single-part OpenEXR header from a memory and initialize `EXRHeader`. // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRHeaderFromMemory(EXRHeader *header, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err); // Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*` // array. // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers, int *num_headers, const EXRVersion *version, const char *filename, const char **err); // Parse multi-part OpenEXR headers from a memory and initialize `EXRHeader*` // array // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers, int *num_headers, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err); // Loads single-part OpenEXR image from a file. // Application must setup `ParseEXRHeaderFromFile` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header, const char *filename, const char **err); // Loads single-part OpenEXR image from a memory. // Application must setup `EXRHeader` with // `ParseEXRHeaderFromMemory` before calling this function. 
// Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header, const unsigned char *memory, const size_t size, const char **err); // Loads multi-part OpenEXR image from a file. // Application must setup `ParseEXRMultipartHeaderFromFile` before calling this // function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRMultipartImageFromFile(EXRImage *images, const EXRHeader **headers, unsigned int num_parts, const char *filename, const char **err); // Loads multi-part OpenEXR image from a memory. // Application must setup `EXRHeader*` array with // `ParseEXRMultipartHeaderFromMemory` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRMultipartImageFromMemory(EXRImage *images, const EXRHeader **headers, unsigned int num_parts, const unsigned char *memory, const size_t size, const char **err); // Saves multi-channel, single-frame OpenEXR image to a file. // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int SaveEXRImageToFile(const EXRImage *image, const EXRHeader *exr_header, const char *filename, const char **err); // Saves multi-channel, single-frame OpenEXR image to a memory. // Image is compressed using EXRImage.compression value. 
// Return the number of bytes if success. // Return zero and will set error string in `err` when there's an // error. // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern size_t SaveEXRImageToMemory(const EXRImage *image, const EXRHeader *exr_header, unsigned char **memory, const char **err); // Saves multi-channel, multi-frame OpenEXR image to a memory. // Image is compressed using EXRImage.compression value. // File global attributes (eg. display_window) must be set in the first header. // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int SaveEXRMultipartImageToFile(const EXRImage *images, const EXRHeader **exr_headers, unsigned int num_parts, const char *filename, const char **err); // Saves multi-channel, multi-frame OpenEXR image to a memory. // Image is compressed using EXRImage.compression value. // File global attributes (eg. display_window) must be set in the first header. // Return the number of bytes if success. // Return zero and will set error string in `err` when there's an // error. // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern size_t SaveEXRMultipartImageToMemory(const EXRImage *images, const EXRHeader **exr_headers, unsigned int num_parts, unsigned char **memory, const char **err); // Loads single-frame OpenEXR deep image. // Application must free memory of variables in DeepImage(image, offset_table) // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadDeepEXR(DeepImage *out_image, const char *filename, const char **err); // NOT YET IMPLEMENTED: // Saves single-frame OpenEXR deep image. 
// Returns negative value and may set error string in `err` when there's an
// error
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
//                        const char **err);

// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
// char *filename,
//                                 const char **err);

// For emscripten.
// Loads single-frame OpenEXR image from memory. Assume EXR image contains
// RGB(A) channels.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
                             const unsigned char *memory, size_t size,
                             const char **err);

#ifdef __cplusplus
}
#endif

#endif  // TINYEXR_H_

// ---------------------------------------------------------------------------
// Implementation (compiled only when TINYEXR_IMPLEMENTATION is defined in
// exactly one translation unit).
// ---------------------------------------------------------------------------
#ifdef TINYEXR_IMPLEMENTATION
#ifndef TINYEXR_IMPLEMENTATION_DEFINED
#define TINYEXR_IMPLEMENTATION_DEFINED

#ifdef _WIN32
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>  // for UTF-8
#endif

#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sstream>

// #include <iostream> // debug

#include <limits>
#include <string>
#include <vector>
#include <set>

// https://stackoverflow.com/questions/5047971/how-do-i-check-for-c11-support
#if __cplusplus > 199711L || (defined(_MSC_VER) && _MSC_VER >= 1900)
#define TINYEXR_HAS_CXX11 (1)
// C++11
#include <cstdint>

#if TINYEXR_USE_THREAD
#include <atomic>
#include <thread>
#endif

#endif  // __cplusplus > 199711L

#if TINYEXR_USE_OPENMP
#include <omp.h>
#endif

#if TINYEXR_USE_MINIZ
#else
// Issue #46. Please include your own zlib-compatible API header before
// including `tinyexr.h`
//#include "zlib.h"
#endif

#if TINYEXR_USE_ZFP
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Weverything"
#endif

#include "zfp.h"

#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif

namespace tinyexr {

// Portable 64-bit integer aliases: real <cstdint> types under C++11,
// `long long` compiler extension otherwise.
#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type pre C++11, assume it is defined
// as a compiler's extension.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#endif
typedef unsigned long long tinyexr_uint64;
typedef long long tinyexr_int64;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif

#if TINYEXR_USE_MINIZ
namespace miniz {

// Silence warnings emitted by the embedded third-party miniz code.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wunused-function"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#pragma clang diagnostic ignored "-Wundef"

#if __has_warning("-Wcomma")
#pragma clang diagnostic ignored "-Wcomma"
#endif

#if __has_warning("-Wmacro-redefined")
#pragma clang diagnostic ignored "-Wmacro-redefined"
#endif

#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif

#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif

#if __has_warning("-Wtautological-constant-compare")
#pragma clang diagnostic ignored "-Wtautological-constant-compare"
#endif

#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif

#endif

/* miniz.c v1.15 - public domain
deflate/inflate, zlib-subset, ZIP reading/writing/appending, PNG writing See "unlicense" statement at the end of this file. Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013 Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951: http://www.ietf.org/rfc/rfc1951.txt Most API's defined in miniz.c are optional. For example, to disable the archive related functions just define MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO (see the list below for more macros). * Change History 10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major release with Zip64 support (almost there!): - Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug (thanks kahmyong.moon@hp.com) which could cause locate files to not find files. This bug would only have occurred in earlier versions if you explicitly used this flag, OR if you used mz_zip_extract_archive_file_to_heap() or mz_zip_add_mem_to_archive_file_in_place() (which used this flag). If you can't switch to v1.15 but want to fix this bug, just remove the uses of this flag from both helper funcs (and of course don't use the flag). - Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when pUser_read_buf is not NULL and compressed size is > uncompressed size - Fixing mz_zip_reader_extract_*() funcs so they don't try to extract compressed data from directory entries, to account for weird zipfiles which contain zero-size compressed data on dir entries. Hopefully this fix won't cause any issues on weird zip archives, because it assumes the low 16-bits of zip external attributes are DOS attributes (which I believe they always are in practice). 
- Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the internal attributes, just the filename and external attributes - mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed - Added cmake support for Linux builds which builds all the examples, tested with clang v3.3 and gcc v4.6. - Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti - Merged MZ_FORCEINLINE fix from hdeanclark - Fix <time.h> include before config #ifdef, thanks emil.brink - Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping (super useful for OpenGL apps), and explicit control over the compression level (so you can set it to 1 for real-time compression). - Merged in some compiler fixes from paulharris's github repro. - Retested this build under Windows (VS 2010, including static analysis), tcc 0.9.26, gcc v4.6 and clang v3.3. - Added example6.c, which dumps an image of the mandelbrot set to a PNG file. - Modified example2 to help test the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more. - In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix possible src file fclose() leak if alignment bytes+local header file write faiiled - In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader(): Was pushing the wrong central dir header offset, appears harmless in this release, but it became a problem in the zip64 branch 5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE, #include <time.h> (thanks fermtect). 5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit. - Temporarily/locally slammed in "typedef unsigned long mz_ulong" and re-ran a randomized regression test on ~500k files. - Eliminated a bunch of warnings when compiling with GCC 32-bit/64. 
- Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze (static analysis) option and fixed all warnings (except for the silly "Use of the comma-operator in a tested expression.." analysis warning, which I purposely use to work around a MSVC compiler warning). - Created 32-bit and 64-bit Codeblocks projects/workspace. Built and tested Linux executables. The codeblocks workspace is compatible with Linux+Win32/x64. - Added miniz_tester solution/project, which is a useful little app derived from LZHAM's tester app that I use as part of the regression test. - Ran miniz.c and tinfl.c through another series of regression testing on ~500,000 files and archives. - Modified example5.c so it purposely disables a bunch of high-level functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the MINIZ_NO_STDIO bug report.) - Fix ftell() usage in examples so they exit with an error on files which are too large (a limitation of the examples, not miniz itself). 4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple minor level_and_flags issues in the archive API's. level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce Dawson <bruced@valvesoftware.com> for the feedback/bug report. 5/28/11 v1.11 - Added statement from unlicense.org 5/27/11 v1.10 - Substantial compressor optimizations: - Level 1 is now ~4x faster than before. The L1 compressor's throughput now varies between 70-110MB/sec. on a - Core i7 (actual throughput varies depending on the type of data, and x64 vs. x86). - Improved baseline L2-L9 compression perf. Also, greatly improved compression perf. issues on some file types. - Refactored the compression code for better readability and maintainability. - Added level 10 compression level (L10 has slightly better ratio than level 9, but could have a potentially large drop in throughput on some files). 5/15/11 v1.09 - Initial stable release. 
* Low-level Deflate/Inflate implementation notes: Compression: Use the "tdefl" API's. The compressor supports raw, static, and dynamic blocks, lazy or greedy parsing, match length filtering, RLE-only, and Huffman-only streams. It performs and compresses approximately as well as zlib. Decompression: Use the "tinfl" API's. The entire decompressor is implemented as a single function coroutine: see tinfl_decompress(). It supports decompression into a 32KB (or larger power of 2) wrapping buffer, or into a memory block large enough to hold the entire file. The low-level tdefl/tinfl API's do not make any use of dynamic memory allocation. * zlib-style API notes: miniz.c implements a fairly large subset of zlib. There's enough functionality present for it to be a drop-in zlib replacement in many apps: The z_stream struct, optional memory allocation callbacks deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound inflateInit/inflateInit2/inflate/inflateEnd compress, compress2, compressBound, uncompress CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly routines. Supports raw deflate streams or standard zlib streams with adler-32 checking. Limitations: The callback API's are not implemented yet. No support for gzip headers or zlib static dictionaries. I've tried to closely emulate zlib's various flavors of stream flushing and return status codes, but there are no guarantees that miniz.c pulls this off perfectly. * PNG writing: See the tdefl_write_image_to_png_file_in_memory() function, originally written by Alex Evans. Supports 1-4 bytes/pixel images. * ZIP archive API notes: The ZIP archive API's where designed with simplicity and efficiency in mind, with just enough abstraction to get the job done with minimal fuss. There are simple API's to retrieve file information, read files from existing archives, create new archives, append new files to existing archives, or clone archive data from one archive to another. 
It supports archives located in memory or the heap, on disk (using stdio.h), or you can specify custom file read/write callbacks. - Archive reading: Just call this function to read a single file from a disk archive: void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags); For more complex cases, use the "mz_zip_reader" functions. Upon opening an archive, the entire central directory is located and read as-is into memory, and subsequent file access only occurs when reading individual files. - Archives file scanning: The simple way is to use this function to scan a loaded archive for a specific file: int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags); The locate operation can optionally check file comments too, which (as one example) can be used to identify multiple versions of the same file in an archive. This function uses a simple linear search through the central directory, so it's not very fast. Alternately, you can iterate through all the files in an archive (using mz_zip_reader_get_num_files()) and retrieve detailed info on each file by calling mz_zip_reader_file_stat(). - Archive creation: Use the "mz_zip_writer" functions. The ZIP writer immediately writes compressed file data to disk and builds an exact image of the central directory in memory. The central directory image is written all at once at the end of the archive file when the archive is finalized. The archive writer can optionally align each file's local header and file data to any power of 2 alignment, which can be useful when the archive will be read from optical media. Also, the writer supports placing arbitrary data blobs at the very beginning of ZIP archives. Archives written using either feature are still readable by any ZIP tool. 
- Archive appending: The simple way to add a single file to an archive is to call this function: mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); The archive will be created if it doesn't already exist, otherwise it'll be appended to. Note the appending is done in-place and is not an atomic operation, so if something goes wrong during the operation it's possible the archive could be left without a central directory (although the local file headers and file data will be fine, so the archive will be recoverable). For more complex archive modification scenarios: 1. The safest way is to use a mz_zip_reader to read the existing archive, cloning only those bits you want to preserve into a new archive using using the mz_zip_writer_add_from_zip_reader() function (which compiles the compressed file data as-is). When you're done, delete the old archive and rename the newly written archive, and you're done. This is safe but requires a bunch of temporary disk space or heap memory. 2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using mz_zip_writer_init_from_reader(), append new files as needed, then finalize the archive which will write an updated central directory to the original archive. (This is basically what mz_zip_add_mem_to_archive_file_in_place() does.) There's a possibility that the archive's central directory could be lost with this method if anything goes wrong, though. - ZIP archive support limitations: No zip64 or spanning support. Extraction functions can only handle unencrypted, stored or deflated files. Requires streams capable of seeking. * This is a header file library, like stb_image.c. To get only a header file, either cut and paste the below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then include miniz.c from it. * Important: For best perf. 
   be sure to customize the below macros for your target platform:
     #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
     #define MINIZ_LITTLE_ENDIAN 1
     #define MINIZ_HAS_64BIT_REGISTERS 1

   * On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1"
   before including miniz.c to ensure miniz uses the 64-bit variants:
   fopen64(), stat64(), etc. Otherwise you won't be able to process large files
   (i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes).
*/

#ifndef MINIZ_HEADER_INCLUDED
#define MINIZ_HEADER_INCLUDED

//#include <stdlib.h>

// Defines to completely disable specific portions of miniz.c:
// If all macros here are defined the only functionality remaining will be
// CRC-32, adler-32, tinfl, and tdefl.

// Define MINIZ_NO_STDIO to disable all usage and any functions which rely on
// stdio for file I/O.
//#define MINIZ_NO_STDIO

// If MINIZ_NO_TIME is specified then the ZIP archive functions will not be
// able to get the current time, or
// get/set file times, and the C run-time funcs that get/set times won't be
// called.
// The current downside is the times written to your archives will be from
// 1979.
#define MINIZ_NO_TIME

// Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's.
#define MINIZ_NO_ARCHIVE_APIS

// Define MINIZ_NO_ARCHIVE_WRITING_APIS to disable all writing related ZIP
// archive API's.
//#define MINIZ_NO_ARCHIVE_WRITING_APIS

// Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression
// API's.
//#define MINIZ_NO_ZLIB_APIS

// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib names, to prevent
// conflicts against stock zlib.
//#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES

// Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
// Note if MINIZ_NO_MALLOC is defined then the user must always provide custom
// user alloc/free/realloc
// callbacks to the zlib and archive API's, and a few stand-alone helper API's
// which don't provide custom user
// functions (such as tdefl_compress_mem_to_heap() and
// tinfl_decompress_mem_to_heap()) won't work.
//#define MINIZ_NO_MALLOC

#if defined(__TINYC__) && (defined(__linux) || defined(__linux__))
// TODO: Work around "error: include file 'sys\utime.h' when compiling with tcc
// on Linux
#define MINIZ_NO_TIME
#endif

#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS)
//#include <time.h>
#endif

#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) ||   \
    defined(__i386) || defined(__i486__) || defined(__i486) ||    \
    defined(i386) || defined(__ia64__) || defined(__x86_64__)
// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
#define MINIZ_X86_OR_X64_CPU 1
#endif

#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif

#if MINIZ_X86_OR_X64_CPU
// Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient
// integer loads and stores from unaligned addresses.
//#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \
  0  // disable to suppress compiler warnings
#endif

#if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \
    defined(_LP64) || defined(__LP64__) || defined(__ia64__) ||   \
    defined(__x86_64__)
// Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are
// reasonably fast (and don't involve compiler generated calls to helper
// functions).
#define MINIZ_HAS_64BIT_REGISTERS 1
#endif

#ifdef __cplusplus
extern "C" {
#endif

// ------------------- zlib-style API Definitions.

// For more compatibility with zlib, miniz.c uses unsigned long for some
// parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits!
typedef unsigned long mz_ulong;

// mz_free() internally uses the MZ_FREE() macro (which by default calls free()
// unless you've modified the MZ_MALLOC macro) to release a block allocated
// from the heap.
void mz_free(void *p);

#define MZ_ADLER32_INIT (1)
// mz_adler32() returns the initial adler-32 value to use when called with
// ptr==NULL.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);

#define MZ_CRC32_INIT (0)
// mz_crc32() returns the initial CRC-32 value to use when called with
// ptr==NULL.
mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);

// Compression strategies.
enum {
  MZ_DEFAULT_STRATEGY = 0,
  MZ_FILTERED = 1,
  MZ_HUFFMAN_ONLY = 2,
  MZ_RLE = 3,
  MZ_FIXED = 4
};

// Method
#define MZ_DEFLATED 8

#ifndef MINIZ_NO_ZLIB_APIS

// Heap allocation callbacks.
// Note that mz_alloc_func parameter types purposely differ from zlib's:
// items/size is size_t, not unsigned long.
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
typedef void (*mz_free_func)(void *opaque, void *address);
typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items,
                                 size_t size);

#define MZ_VERSION "9.1.15"
#define MZ_VERNUM 0x91F0
#define MZ_VER_MAJOR 9
#define MZ_VER_MINOR 1
#define MZ_VER_REVISION 15
#define MZ_VER_SUBREVISION 0

// Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The
// other values are for advanced use (refer to the zlib docs).
enum {
  MZ_NO_FLUSH = 0,
  MZ_PARTIAL_FLUSH = 1,
  MZ_SYNC_FLUSH = 2,
  MZ_FULL_FLUSH = 3,
  MZ_FINISH = 4,
  MZ_BLOCK = 5
};

// Return status codes. MZ_PARAM_ERROR is non-standard.
enum { MZ_OK = 0, MZ_STREAM_END = 1, MZ_NEED_DICT = 2, MZ_ERRNO = -1, MZ_STREAM_ERROR = -2, MZ_DATA_ERROR = -3, MZ_MEM_ERROR = -4, MZ_BUF_ERROR = -5, MZ_VERSION_ERROR = -6, MZ_PARAM_ERROR = -10000 }; // Compression levels: 0-9 are the standard zlib-style levels, 10 is best // possible compression (not zlib compatible, and may be very slow), // MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL. enum { MZ_NO_COMPRESSION = 0, MZ_BEST_SPEED = 1, MZ_BEST_COMPRESSION = 9, MZ_UBER_COMPRESSION = 10, MZ_DEFAULT_LEVEL = 6, MZ_DEFAULT_COMPRESSION = -1 }; // Window bits #define MZ_DEFAULT_WINDOW_BITS 15 struct mz_internal_state; // Compression/decompression stream struct. typedef struct mz_stream_s { const unsigned char *next_in; // pointer to next byte to read unsigned int avail_in; // number of bytes available at next_in mz_ulong total_in; // total number of bytes consumed so far unsigned char *next_out; // pointer to next byte to write unsigned int avail_out; // number of bytes that can be written to next_out mz_ulong total_out; // total number of bytes produced so far char *msg; // error msg (unused) struct mz_internal_state *state; // internal state, allocated by zalloc/zfree mz_alloc_func zalloc; // optional heap allocation function (defaults to malloc) mz_free_func zfree; // optional heap free function (defaults to free) void *opaque; // heap alloc function user pointer int data_type; // data_type (unused) mz_ulong adler; // adler32 of the source or uncompressed data mz_ulong reserved; // not used } mz_stream; typedef mz_stream *mz_streamp; // Returns the version string of miniz.c. const char *mz_version(void); // mz_deflateInit() initializes a compressor with default options: // Parameters: // pStream must point to an initialized mz_stream struct. // level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION]. // level 1 enables a specially optimized compression function that's been // optimized purely for performance, not ratio. // (This special func. 
is currently only enabled when
// MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.)
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if the input parameters are bogus.
// MZ_MEM_ERROR on out of memory.
int mz_deflateInit(mz_streamp pStream, int level);

// mz_deflateInit2() is like mz_deflateInit(), except with more control:
// Additional parameters:
// method must be MZ_DEFLATED
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with
// zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no
// header or footer)
// mem_level must be between [1, 9] (it's checked but ignored by miniz.c)
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
                    int mem_level, int strategy);

// Quickly resets a compressor without having to reallocate anything. Same as
// calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2().
int mz_deflateReset(mz_streamp pStream);

// mz_deflate() compresses the input to output, consuming as much of the input
// and producing as much output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or
// MZ_FINISH.
// Return values:
// MZ_OK on success (when flushing, or if more input is needed but not
// available, and/or there's more output to be written but the output buffer
// is full).
// MZ_STREAM_END if all input has been consumed and all output bytes have been
// written. Don't call mz_deflate() on the stream anymore.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input and/or
// output buffers are empty. (Fill up the input buffer or free up some output
// space and try again.)
int mz_deflate(mz_streamp pStream, int flush); // mz_deflateEnd() deinitializes a compressor: // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. int mz_deflateEnd(mz_streamp pStream); // mz_deflateBound() returns a (very) conservative upper bound on the amount of // data that could be generated by deflate(), assuming flush is set to only // MZ_NO_FLUSH or MZ_FINISH. mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len); // Single-call compression functions mz_compress() and mz_compress2(): // Returns MZ_OK on success, or one of the error codes from mz_deflate() on // failure. int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level); // mz_compressBound() returns a (very) conservative upper bound on the amount of // data that could be generated by calling mz_compress(). mz_ulong mz_compressBound(mz_ulong source_len); // Initializes a decompressor. int mz_inflateInit(mz_streamp pStream); // mz_inflateInit2() is like mz_inflateInit() with an additional option that // controls the window size and whether or not the stream has been wrapped with // a zlib header/footer: // window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or // -MZ_DEFAULT_WINDOW_BITS (raw deflate). int mz_inflateInit2(mz_streamp pStream, int window_bits); // Decompresses the input stream to the output, consuming only as much of the // input as needed, and writing as much to the output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update // the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH. 
// On the first call, if flush is MZ_FINISH it's assumed the input and output // buffers are both sized large enough to decompress the entire stream in a // single call (this is slightly faster). // MZ_FINISH implies that there are no more source bytes available beside // what's already in the input buffer, and that the output buffer is large // enough to hold the rest of the decompressed data. // Return values: // MZ_OK on success. Either more input is needed but not available, and/or // there's more output to be written but the output buffer is full. // MZ_STREAM_END if all needed input has been consumed and all output bytes // have been written. For zlib streams, the adler-32 of the decompressed data // has also been verified. // MZ_STREAM_ERROR if the stream is bogus. // MZ_DATA_ERROR if the deflate stream is invalid. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input buffer is // empty but the inflater needs more input to continue, or if the output // buffer is not large enough. Call mz_inflate() again // with more input data, or with more room in the output buffer (except when // using single call decompression, described above). int mz_inflate(mz_streamp pStream, int flush); // Deinitializes a decompressor. int mz_inflateEnd(mz_streamp pStream); // Single-call decompression. // Returns MZ_OK on success, or one of the error codes from mz_inflate() on // failure. int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); // Returns a string description of the specified error code, or NULL if the // error code is invalid. const char *mz_error(int err); // Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used // as a drop-in replacement for the subset of zlib that miniz.c supports. // Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you // use zlib in the same project. 
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES typedef unsigned char Byte; typedef unsigned int uInt; typedef mz_ulong uLong; typedef Byte Bytef; typedef uInt uIntf; typedef char charf; typedef int intf; typedef void *voidpf; typedef uLong uLongf; typedef void *voidp; typedef void *const voidpc; #define Z_NULL 0 #define Z_NO_FLUSH MZ_NO_FLUSH #define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH #define Z_SYNC_FLUSH MZ_SYNC_FLUSH #define Z_FULL_FLUSH MZ_FULL_FLUSH #define Z_FINISH MZ_FINISH #define Z_BLOCK MZ_BLOCK #define Z_OK MZ_OK #define Z_STREAM_END MZ_STREAM_END #define Z_NEED_DICT MZ_NEED_DICT #define Z_ERRNO MZ_ERRNO #define Z_STREAM_ERROR MZ_STREAM_ERROR #define Z_DATA_ERROR MZ_DATA_ERROR #define Z_MEM_ERROR MZ_MEM_ERROR #define Z_BUF_ERROR MZ_BUF_ERROR #define Z_VERSION_ERROR MZ_VERSION_ERROR #define Z_PARAM_ERROR MZ_PARAM_ERROR #define Z_NO_COMPRESSION MZ_NO_COMPRESSION #define Z_BEST_SPEED MZ_BEST_SPEED #define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION #define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION #define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY #define Z_FILTERED MZ_FILTERED #define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY #define Z_RLE MZ_RLE #define Z_FIXED MZ_FIXED #define Z_DEFLATED MZ_DEFLATED #define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS #define alloc_func mz_alloc_func #define free_func mz_free_func #define internal_state mz_internal_state #define z_stream mz_stream #define deflateInit mz_deflateInit #define deflateInit2 mz_deflateInit2 #define deflateReset mz_deflateReset #define deflate mz_deflate #define deflateEnd mz_deflateEnd #define deflateBound mz_deflateBound #define compress mz_compress #define compress2 mz_compress2 #define compressBound mz_compressBound #define inflateInit mz_inflateInit #define inflateInit2 mz_inflateInit2 #define inflate mz_inflate #define inflateEnd mz_inflateEnd #define uncompress mz_uncompress #define crc32 mz_crc32 #define adler32 mz_adler32 #define MAX_WBITS 15 #define MAX_MEM_LEVEL 9 #define zError mz_error #define ZLIB_VERSION 
MZ_VERSION #define ZLIB_VERNUM MZ_VERNUM #define ZLIB_VER_MAJOR MZ_VER_MAJOR #define ZLIB_VER_MINOR MZ_VER_MINOR #define ZLIB_VER_REVISION MZ_VER_REVISION #define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION #define zlibVersion mz_version #define zlib_version mz_version() #endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES #endif // MINIZ_NO_ZLIB_APIS // ------------------- Types and macros typedef unsigned char mz_uint8; typedef signed short mz_int16; typedef unsigned short mz_uint16; typedef unsigned int mz_uint32; typedef unsigned int mz_uint; typedef long long mz_int64; typedef unsigned long long mz_uint64; typedef int mz_bool; #define MZ_FALSE (0) #define MZ_TRUE (1) // An attempt to work around MSVC's spammy "warning C4127: conditional // expression is constant" message. #ifdef _MSC_VER #define MZ_MACRO_END while (0, 0) #else #define MZ_MACRO_END while (0) #endif // ------------------- ZIP archive reading/writing #ifndef MINIZ_NO_ARCHIVE_APIS enum { MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256 }; typedef struct { mz_uint32 m_file_index; mz_uint32 m_central_dir_ofs; mz_uint16 m_version_made_by; mz_uint16 m_version_needed; mz_uint16 m_bit_flag; mz_uint16 m_method; #ifndef MINIZ_NO_TIME time_t m_time; #endif mz_uint32 m_crc32; mz_uint64 m_comp_size; mz_uint64 m_uncomp_size; mz_uint16 m_internal_attr; mz_uint32 m_external_attr; mz_uint64 m_local_header_ofs; mz_uint32 m_comment_size; char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE]; char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE]; } mz_zip_archive_file_stat; typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n); typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n); struct mz_zip_internal_state_tag; typedef struct mz_zip_internal_state_tag mz_zip_internal_state; typedef enum { MZ_ZIP_MODE_INVALID = 0, MZ_ZIP_MODE_READING = 1, MZ_ZIP_MODE_WRITING = 2, 
MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3 } mz_zip_mode; typedef struct mz_zip_archive_tag { mz_uint64 m_archive_size; mz_uint64 m_central_directory_file_ofs; mz_uint m_total_files; mz_zip_mode m_zip_mode; mz_uint m_file_offset_alignment; mz_alloc_func m_pAlloc; mz_free_func m_pFree; mz_realloc_func m_pRealloc; void *m_pAlloc_opaque; mz_file_read_func m_pRead; mz_file_write_func m_pWrite; void *m_pIO_opaque; mz_zip_internal_state *m_pState; } mz_zip_archive; typedef enum { MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100, MZ_ZIP_FLAG_IGNORE_PATH = 0x0200, MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400, MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800 } mz_zip_flags; // ZIP archive reading // Inits a ZIP archive reader. // These functions read and validate the archive's central directory. mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint32 flags); mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint32 flags); #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags); #endif // Returns the total number of files in the archive. mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip); // Returns detailed information about an archive file entry. mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat); // Determines if an archive file entry is a directory entry. mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index); mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index); // Retrieves the filename of an archive file entry. // Returns the number of bytes written to pFilename, or if filename_buf_size is // 0 this function returns the number of bytes needed to fully store the // filename. 
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
                                   char *pFilename, mz_uint filename_buf_size);

// Attempts to locate a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
                              const char *pComment, mz_uint flags);

// Extracts an archive file to a memory buffer using no memory allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
                                              mz_uint file_index, void *pBuf,
                                              size_t buf_size, mz_uint flags,
                                              void *pUser_read_buf,
                                              size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
    mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
    mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);

// Extracts an archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
                                     void *pBuf, size_t buf_size,
                                     mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
                                          const char *pFilename, void *pBuf,
                                          size_t buf_size, mz_uint flags);

// Extracts an archive file to a dynamically allocated heap buffer.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
                                    size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
                                         const char *pFilename, size_t *pSize,
                                         mz_uint flags);

// Extracts an archive file using a callback function to output the file's
// data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
                                          mz_uint file_index,
                                          mz_file_write_func pCallback,
                                          void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
                                               const char *pFilename,
                                               mz_file_write_func pCallback,
                                               void *pOpaque, mz_uint flags);

#ifndef MINIZ_NO_STDIO
// Extracts an archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records. mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index, const char *pDst_filename, mz_uint flags); mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip, const char *pArchive_filename, const char *pDst_filename, mz_uint flags); #endif // Ends archive reading, freeing all allocations, and closing the input archive // file if mz_zip_reader_init_file() was used. mz_bool mz_zip_reader_end(mz_zip_archive *pZip); // ZIP archive writing #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS // Inits a ZIP archive writer. mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size); mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size); #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning); #endif // Converts a ZIP archive reader object into a writer object, to allow efficient // in-place file appends to occur on an existing archive. // For archives opened using mz_zip_reader_init_file, pFilename must be the // archive's filename so it can be reopened for writing. If the file can't be // reopened, mz_zip_reader_end() will be called. // For archives opened using mz_zip_reader_init_mem, the memory block must be // growable using the realloc callback (which defaults to realloc unless you've // overridden it). // Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's // user provided m_pWrite function cannot be NULL. // Note: In-place archive modification is not recommended unless you know what // you're doing, because if execution stops or something goes wrong before // the archive is finalized the file's central directory will be hosed. mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip, const char *pFilename); // Adds the contents of a memory buffer to an archive. 
These functions record
// the current local time into the archive.
// To add a directory entry, call this method with an archive name ending in a
// forward slash with an empty buffer.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
                              const void *pBuf, size_t buf_size,
                              mz_uint level_and_flags);
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
                                 const char *pArchive_name, const void *pBuf,
                                 size_t buf_size, const void *pComment,
                                 mz_uint16 comment_size,
                                 mz_uint level_and_flags, mz_uint64 uncomp_size,
                                 mz_uint32 uncomp_crc32);

#ifndef MINIZ_NO_STDIO
// Adds the contents of a disk file to an archive. This function also records
// the disk file's modified time into the archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
                               const char *pSrc_filename, const void *pComment,
                               mz_uint16 comment_size, mz_uint level_and_flags);
#endif

// Adds a file to an archive by fully cloning the data from another archive.
// This function fully clones the source file's compressed data (no
// recompression), along with its full filename, extra data, and comment fields.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
                                          mz_zip_archive *pSource_zip,
                                          mz_uint file_index);

// Finalizes the archive by writing the central directory records followed by
// the end of central directory record.
// After an archive is finalized, the only valid call on the mz_zip_archive
// struct is mz_zip_writer_end().
// An archive must be manually finalized by calling this function for it to be
// valid.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip); mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf, size_t *pSize); // Ends archive writing, freeing all allocations, and closing the output file if // mz_zip_writer_init_file() was used. // Note for the archive to be valid, it must have been finalized before ending. mz_bool mz_zip_writer_end(mz_zip_archive *pZip); // Misc. high-level helper functions: // mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically) // appends a memory blob to a ZIP archive. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. mz_bool mz_zip_add_mem_to_archive_file_in_place( const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); // Reads a single file from an archive into a heap block. // Returns NULL on failure. void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags); #endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS #endif // #ifndef MINIZ_NO_ARCHIVE_APIS // ------------------- Low-level Decompression API Definitions // Decompression flags used by tinfl_decompress(). // TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and // ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the // input is a raw deflate stream. // TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available // beyond the end of the supplied input buffer. If clear, the input buffer // contains all remaining input. // TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large // enough to hold the entire decompressed stream. If clear, the output buffer is // at least the size of the dictionary (typically 32KB). 
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the // decompressed bytes. enum { TINFL_FLAG_PARSE_ZLIB_HEADER = 1, TINFL_FLAG_HAS_MORE_INPUT = 2, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4, TINFL_FLAG_COMPUTE_ADLER32 = 8 }; // High level decompression functions: // tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block // allocated via malloc(). // On entry: // pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data // to decompress. // On return: // Function returns a pointer to the decompressed data, or NULL on failure. // *pOut_len will be set to the decompressed data's size, which could be larger // than src_buf_len on uncompressible data. // The caller must call mz_free() on the returned block when it's no longer // needed. void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags); // tinfl_decompress_mem_to_mem() decompresses a block in memory to another block // in memory. // Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes // written on success. #define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1)) size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags); // tinfl_decompress_mem_to_callback() decompresses a block in memory to an // internal 32KB buffer, and a user provided callback function will be called to // flush the buffer. // Returns 1 on success or 0 on failure. typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser); int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); struct tinfl_decompressor_tag; typedef struct tinfl_decompressor_tag tinfl_decompressor; // Max size of LZ dictionary. #define TINFL_LZ_DICT_SIZE 32768 // Return status. 
typedef enum { TINFL_STATUS_BAD_PARAM = -3, TINFL_STATUS_ADLER32_MISMATCH = -2, TINFL_STATUS_FAILED = -1, TINFL_STATUS_DONE = 0, TINFL_STATUS_NEEDS_MORE_INPUT = 1, TINFL_STATUS_HAS_MORE_OUTPUT = 2 } tinfl_status; // Initializes the decompressor to its initial state. #define tinfl_init(r) \ do { \ (r)->m_state = 0; \ } \ MZ_MACRO_END #define tinfl_get_adler32(r) (r)->m_check_adler32 // Main low-level decompressor coroutine function. This is the only function // actually needed for decompression. All the other functions are just // high-level helpers for improved usability. // This is a universal API, i.e. it can be used as a building block to build any // desired higher level decompression API. In the limit case, it can be called // once per every byte input or output. tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags); // Internal/private bits follow. 
enum { TINFL_MAX_HUFF_TABLES = 3, TINFL_MAX_HUFF_SYMBOLS_0 = 288, TINFL_MAX_HUFF_SYMBOLS_1 = 32, TINFL_MAX_HUFF_SYMBOLS_2 = 19, TINFL_FAST_LOOKUP_BITS = 10, TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS };
// One Huffman decoding table: m_look_up resolves any code of up to
// TINFL_FAST_LOOKUP_BITS bits in a single step; longer codes fall through to
// the m_tree binary tree.
typedef struct { mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0]; mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE], m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2]; } tinfl_huff_table;
#if MINIZ_HAS_64BIT_REGISTERS
#define TINFL_USE_64BIT_BITBUF 1
#endif
#if TINFL_USE_64BIT_BITBUF
typedef mz_uint64 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (64)
#else
typedef mz_uint32 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (32)
#endif
// Complete decompressor state. Everything is stored inline (scalars and
// fixed-size arrays only), so resetting the state requires no heap activity.
struct tinfl_decompressor_tag {
  mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final,
      m_type, m_check_adler32, m_dist, m_counter, m_num_extra,
      m_table_sizes[TINFL_MAX_HUFF_TABLES];
  tinfl_bit_buf_t m_bit_buf;
  size_t m_dist_from_out_buf_start;
  tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
  mz_uint8 m_raw_header[4],
      m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
};

// ------------------- Low-level Compression API Definitions

// Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly
// slower, and raw/dynamic blocks will be output more frequently).
#define TDEFL_LESS_MEMORY 0

// tdefl_init() compression flags logically OR'd together (low 12 bits contain
// the max. number of probes per dictionary search):
// TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes
// per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap
// compression), 4095=Huffman+LZ (slowest/best compression).
enum { TDEFL_HUFFMAN_ONLY = 0, TDEFL_DEFAULT_MAX_PROBES = 128, TDEFL_MAX_PROBES_MASK = 0xFFF };

// TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before
// the deflate data, and the Adler-32 of the source data at the end. Otherwise,
// you'll get raw deflate data.
// TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even
// when not writing zlib headers).
// TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more
// efficient lazy parsing.
// TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's
// initialization time to the minimum, but the output may vary from run to run
// given the same input (depending on the contents of memory).
// TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1)
// TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled.
// TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables.
// TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks.
// The low 12 bits are reserved to control the max # of hash probes per
// dictionary lookup (see TDEFL_MAX_PROBES_MASK).
enum {
  TDEFL_WRITE_ZLIB_HEADER = 0x01000,
  TDEFL_COMPUTE_ADLER32 = 0x02000,
  TDEFL_GREEDY_PARSING_FLAG = 0x04000,
  TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000,
  TDEFL_RLE_MATCHES = 0x10000,
  TDEFL_FILTER_MATCHES = 0x20000,
  TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000,
  TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000
};

// High level compression functions:
// tdefl_compress_mem_to_heap() compresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of source block to compress.
// flags: The max match finder probes (default is 128) logically OR'd against
// the above flags. Higher probes are slower but improve compression.
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pOut_len will be set to the compressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must free() the returned block when it's no longer needed.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                 size_t *pOut_len, int flags);

// tdefl_compress_mem_to_mem() compresses a block in memory to another block in
// memory.
// Returns 0 on failure.
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                 const void *pSrc_buf, size_t src_buf_len,
                                 int flags);

// Compresses an image to a compressed PNG file in memory.
// On entry:
// pImage, w, h, and num_chans describe the image to compress. num_chans may be
// 1, 2, 3, or 4.
// The image pitch in bytes per scanline will be w*num_chans. The leftmost
// pixel on the top scanline is stored first in memory.
// level may range from [0,10], use MZ_NO_COMPRESSION, MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc. or a decent default is MZ_DEFAULT_LEVEL
// If flip is true, the image will be flipped on the Y axis (useful for OpenGL
// apps).
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pLen_out will be set to the size of the PNG image file.
// The caller must mz_free() the returned heap block (which will typically be
// larger than *pLen_out) when it's no longer needed.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
                                                 int h, int num_chans,
                                                 size_t *pLen_out,
                                                 mz_uint level, mz_bool flip);
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
                                              int num_chans, size_t *pLen_out);

// Output stream interface. The compressor uses this interface to write
// compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time.
typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len,
                                          void *pUser);

// tdefl_compress_mem_to_output() compresses a block to an output stream. The
// above helpers use this function internally.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
                                     tdefl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags);

enum {
  TDEFL_MAX_HUFF_TABLES = 3,
  TDEFL_MAX_HUFF_SYMBOLS_0 = 288,
  TDEFL_MAX_HUFF_SYMBOLS_1 = 32,
  TDEFL_MAX_HUFF_SYMBOLS_2 = 19,
  TDEFL_LZ_DICT_SIZE = 32768,
  TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
  TDEFL_MIN_MATCH_LEN = 3,
  TDEFL_MAX_MATCH_LEN = 258
};

// TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed
// output block (using static/fixed Huffman codes).
#if TDEFL_LESS_MEMORY
enum {
  TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
  TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
  TDEFL_MAX_HUFF_SYMBOLS = 288,
  TDEFL_LZ_HASH_BITS = 12,
  TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
  TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
  TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#else
enum {
  TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
  TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
  TDEFL_MAX_HUFF_SYMBOLS = 288,
  TDEFL_LZ_HASH_BITS = 15,
  TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
  TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
  TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#endif

// The low-level tdefl functions below may be used directly if the above helper
// functions aren't flexible enough. The low-level functions don't make any heap
// allocations, unlike the above helper functions.
typedef enum {
  TDEFL_STATUS_BAD_PARAM = -2,
  TDEFL_STATUS_PUT_BUF_FAILED = -1,
  TDEFL_STATUS_OKAY = 0,
  TDEFL_STATUS_DONE = 1
} tdefl_status;

// Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
typedef enum {
  TDEFL_NO_FLUSH = 0,
  TDEFL_SYNC_FLUSH = 2,
  TDEFL_FULL_FLUSH = 3,
  TDEFL_FINISH = 4
} tdefl_flush;

// tdefl's compression state structure.
typedef struct {
  tdefl_put_buf_func_ptr m_pPut_buf_func;
  void *m_pPut_buf_user;
  mz_uint m_flags, m_max_probes[2];
  int m_greedy_parsing;
  mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
  mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
  mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in,
      m_bit_buffer;
  mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
      m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
      m_wants_to_finish;
  tdefl_status m_prev_return_status;
  const void *m_pIn_buf;
  void *m_pOut_buf;
  size_t *m_pIn_buf_size, *m_pOut_buf_size;
  tdefl_flush m_flush;
  const mz_uint8 *m_pSrc;
  size_t m_src_buf_left, m_out_buf_ofs;
  mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
  mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
  mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
  mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
  mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;

// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl API's do not
// dynamically allocate memory.
// pBut_buf_func: If NULL, output data will be supplied to the specified
// callback. In this case, the user should call the tdefl_compress_buffer() API
// for compression.
// If pBut_buf_func is NULL the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
                        tdefl_put_buf_func_ptr pPut_buf_func,
                        void *pPut_buf_user, int flags);

// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
                            size_t *pIn_buf_size, void *pOut_buf,
                            size_t *pOut_buf_size, tdefl_flush flush);

// tdefl_compress_buffer() is only usable when the tdefl_init() is called with a
// non-NULL tdefl_put_buf_func_ptr.
// tdefl_compress_buffer() always consumes the entire input buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
                                   size_t in_buf_size, tdefl_flush flush);

tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);

// Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS isn't
// defined, because it uses some of its macros.
#ifndef MINIZ_NO_ZLIB_APIS
// Create tdefl_compress() flags given zlib-style compression parameters.
// level may range from [0,10] (where 10 is absolute max compression, but may be
// much slower on some files)
// window_bits may be -15 (raw deflate) or 15 (zlib)
// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY,
// MZ_RLE, or MZ_FIXED
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
                                                int strategy);
#endif // #ifndef MINIZ_NO_ZLIB_APIS

#ifdef __cplusplus
}
#endif

#endif // MINIZ_HEADER_INCLUDED

// ------------------- End of Header: Implementation follows. (If you only want
// the header, define MINIZ_HEADER_FILE_ONLY.)

#ifndef MINIZ_HEADER_FILE_ONLY

// Compile-time size checks: each typedef's array size is -1 (invalid) if the
// corresponding mz_ integer type has the wrong width.
typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];

//#include <assert.h>
//#include <string.h>

#define MZ_ASSERT(x) assert(x)

#ifdef MINIZ_NO_MALLOC
#define MZ_MALLOC(x) NULL
#define MZ_FREE(x) (void)x, ((void)0)
#define MZ_REALLOC(p, x) NULL
#else
#define MZ_MALLOC(x) malloc(x)
#define MZ_FREE(x) free(x)
#define MZ_REALLOC(p, x) realloc(p, x)
#endif

#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
#define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
#else
#define MZ_READ_LE16(p)                                                        \
  ((mz_uint32)(((const mz_uint8 *)(p))[0]) |                                   \
   ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
#define MZ_READ_LE32(p)                                                        \
  ((mz_uint32)(((const mz_uint8 *)(p))[0]) |                                   \
   ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) |                           \
   ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) |                          \
   ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
#endif

#ifdef _MSC_VER
#define MZ_FORCEINLINE __forceinline
#elif defined(__GNUC__)
#define MZ_FORCEINLINE inline __attribute__((__always_inline__))
#else
#define MZ_FORCEINLINE inline
#endif

#ifdef __cplusplus
extern "C" {
#endif

// ------------------- zlib-style API's

// Adler-32 checksum. Input is folded in blocks of 5552 bytes — the largest
// block count for which the 32-bit accumulators cannot overflow before the
// mod-65521 reduction. A NULL ptr returns the initial checksum value.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) {
  mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16);
  size_t block_len = buf_len % 5552;
  if (!ptr) return MZ_ADLER32_INIT;
  while (buf_len) {
    // Unrolled by 8 for speed; the scalar loop below mops up the remainder.
    for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
      s1 += ptr[0], s2 += s1;
      s1 += ptr[1], s2 += s1;
      s1 += ptr[2], s2 += s1;
      s1 += ptr[3], s2 += s1;
      s1 += ptr[4], s2 += s1;
      s1 += ptr[5], s2 += s1;
      s1 += ptr[6], s2 += s1;
      s1 += ptr[7], s2 += s1;
    }
    for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
    s1 %= 65521U, s2 %= 65521U;
    buf_len -= block_len;
    block_len = 5552;
  }
  return (s2 << 16) + s1;
}

// Karl Malbrain's compact CRC-32.
See "A compact CCITT crc16 and crc32 C
// implementation that balances processor cache usage against speed":
// http://www.geocities.com/malbrain/
// Processes each byte as two 4-bit nibbles so only a 16-entry table is
// needed (slower than a 256-entry table, but tiny). A NULL ptr returns the
// initial CRC value.
mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) {
  static const mz_uint32 s_crc32[16] = {
      0,          0x1db71064, 0x3b6e20c8, 0x26d930ac,
      0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
      0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
      0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c};
  mz_uint32 crcu32 = (mz_uint32)crc;
  if (!ptr) return MZ_CRC32_INIT;
  crcu32 = ~crcu32;
  while (buf_len--) {
    mz_uint8 b = *ptr++;
    crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)];
    crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)];
  }
  return ~crcu32;
}

void mz_free(void *p) { MZ_FREE(p); }

#ifndef MINIZ_NO_ZLIB_APIS

// Default zalloc/zfree used when the caller leaves the stream's allocator
// hooks NULL.
static void *def_alloc_func(void *opaque, size_t items, size_t size) {
  (void)opaque, (void)items, (void)size;
  return MZ_MALLOC(items * size);
}
static void def_free_func(void *opaque, void *address) {
  (void)opaque, (void)address;
  MZ_FREE(address);
}
// static void *def_realloc_func(void *opaque, void *address, size_t items,
// size_t size) {
//  (void)opaque, (void)address, (void)items, (void)size;
//  return MZ_REALLOC(address, items * size);
//}

const char *mz_version(void) { return MZ_VERSION; }

int mz_deflateInit(mz_streamp pStream, int level) {
  return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9,
                         MZ_DEFAULT_STRATEGY);
}

// zlib-compatible init. Only MZ_DEFAULT_WINDOW_BITS (positive = zlib framing,
// negative = raw deflate) is accepted; mem_level is validated for zlib
// compatibility but not used beyond that check.
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
                    int mem_level, int strategy) {
  tdefl_compressor *pComp;
  mz_uint comp_flags =
      TDEFL_COMPUTE_ADLER32 |
      tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);

  if (!pStream) return MZ_STREAM_ERROR;
  if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) ||
      ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
       (-window_bits != MZ_DEFAULT_WINDOW_BITS)))
    return MZ_PARAM_ERROR;

  pStream->data_type = 0;
  pStream->adler = MZ_ADLER32_INIT;
  pStream->msg = NULL;
  pStream->reserved = 0;
  pStream->total_in = 0;
  pStream->total_out = 0;
  if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
  if (!pStream->zfree) pStream->zfree = def_free_func;

  pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1,
                                              sizeof(tdefl_compressor));
  if (!pComp) return MZ_MEM_ERROR;

  pStream->state = (struct mz_internal_state *)pComp;

  if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) {
    mz_deflateEnd(pStream);
    return MZ_PARAM_ERROR;
  }

  return MZ_OK;
}

int mz_deflateReset(mz_streamp pStream) {
  if ((!pStream) || (!pStream->state) || (!pStream->zalloc) ||
      (!pStream->zfree))
    return MZ_STREAM_ERROR;
  pStream->total_in = pStream->total_out = 0;
  // Re-init the existing compressor in place, preserving its original flags.
  tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL,
             ((tdefl_compressor *)pStream->state)->m_flags);
  return MZ_OK;
}

int mz_deflate(mz_streamp pStream, int flush) {
  size_t in_bytes, out_bytes;
  mz_ulong orig_total_in, orig_total_out;
  int mz_status = MZ_OK;

  if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) ||
      (!pStream->next_out))
    return MZ_STREAM_ERROR;
  if (!pStream->avail_out) return MZ_BUF_ERROR;

  if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;

  if (((tdefl_compressor *)pStream->state)->m_prev_return_status ==
      TDEFL_STATUS_DONE)
    return (flush == MZ_FINISH) ?
MZ_STREAM_END : MZ_BUF_ERROR;

  orig_total_in = pStream->total_in;
  orig_total_out = pStream->total_out;
  for (;;) {
    tdefl_status defl_status;
    in_bytes = pStream->avail_in;
    out_bytes = pStream->avail_out;

    defl_status = tdefl_compress((tdefl_compressor *)pStream->state,
                                 pStream->next_in, &in_bytes, pStream->next_out,
                                 &out_bytes, (tdefl_flush)flush);
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);

    pStream->next_out += (mz_uint)out_bytes;
    pStream->avail_out -= (mz_uint)out_bytes;
    pStream->total_out += (mz_uint)out_bytes;

    if (defl_status < 0) {
      mz_status = MZ_STREAM_ERROR;
      break;
    } else if (defl_status == TDEFL_STATUS_DONE) {
      mz_status = MZ_STREAM_END;
      break;
    } else if (!pStream->avail_out)
      break;
    else if ((!pStream->avail_in) && (flush != MZ_FINISH)) {
      if ((flush) || (pStream->total_in != orig_total_in) ||
          (pStream->total_out != orig_total_out))
        break;
      return MZ_BUF_ERROR; // Can't make forward progress without some input.
    }
  }
  return mz_status;
}

int mz_deflateEnd(mz_streamp pStream) {
  if (!pStream) return MZ_STREAM_ERROR;
  if (pStream->state) {
    pStream->zfree(pStream->opaque, pStream->state);
    pStream->state = NULL;
  }
  return MZ_OK;
}

mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) {
  (void)pStream;
  // This is really over conservative. (And lame, but it's actually pretty
  // tricky to compute a true upper bound given the way tdefl's blocking works.)
  return MZ_MAX(128 + (source_len * 110) / 100,
                128 + source_len + ((source_len / (31 * 1024)) + 1) * 5);
}

// One-shot compression: init + deflate(MZ_FINISH) + end over a single buffer
// pair. *pDest_len is updated to the compressed size on success.
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
                 const unsigned char *pSource, mz_ulong source_len, int level) {
  int status;
  mz_stream stream;
  memset(&stream, 0, sizeof(stream));

  // In case mz_ulong is 64-bits (argh I hate longs).
  if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;

  stream.next_in = pSource;
  stream.avail_in = (mz_uint32)source_len;
  stream.next_out = pDest;
  stream.avail_out = (mz_uint32)*pDest_len;

  status = mz_deflateInit(&stream, level);
  if (status != MZ_OK) return status;

  status = mz_deflate(&stream, MZ_FINISH);
  if (status != MZ_STREAM_END) {
    mz_deflateEnd(&stream);
    return (status == MZ_OK) ? MZ_BUF_ERROR : status;
  }

  *pDest_len = stream.total_out;
  return mz_deflateEnd(&stream);
}

int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
                const unsigned char *pSource, mz_ulong source_len) {
  return mz_compress2(pDest, pDest_len, pSource, source_len,
                      MZ_DEFAULT_COMPRESSION);
}

mz_ulong mz_compressBound(mz_ulong source_len) {
  return mz_deflateBound(NULL, source_len);
}

// Streaming inflate state: a tinfl decompressor plus an internal LZ dictionary
// used to buffer decompressed output when the caller's buffer can't take it
// all (m_dict_ofs wraps modulo TINFL_LZ_DICT_SIZE — see mz_inflate()).
typedef struct {
  tinfl_decompressor m_decomp;
  mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
  int m_window_bits;
  mz_uint8 m_dict[TINFL_LZ_DICT_SIZE];
  tinfl_status m_last_status;
} inflate_state;

int mz_inflateInit2(mz_streamp pStream, int window_bits) {
  inflate_state *pDecomp;
  if (!pStream) return MZ_STREAM_ERROR;
  if ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
      (-window_bits != MZ_DEFAULT_WINDOW_BITS))
    return MZ_PARAM_ERROR;

  pStream->data_type = 0;
  pStream->adler = 0;
  pStream->msg = NULL;
  pStream->total_in = 0;
  pStream->total_out = 0;
  pStream->reserved = 0;
  if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
  if (!pStream->zfree) pStream->zfree = def_free_func;

  pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1,
                                             sizeof(inflate_state));
  if (!pDecomp) return MZ_MEM_ERROR;

  pStream->state = (struct mz_internal_state *)pDecomp;

  tinfl_init(&pDecomp->m_decomp);
  pDecomp->m_dict_ofs = 0;
  pDecomp->m_dict_avail = 0;
  pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
  pDecomp->m_first_call = 1;
  pDecomp->m_has_flushed = 0;
  pDecomp->m_window_bits = window_bits;

  return MZ_OK;
}

int mz_inflateInit(mz_streamp pStream) {
  return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
}

int
mz_inflate(mz_streamp pStream, int flush) {
  inflate_state *pState;
  mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32;
  size_t in_bytes, out_bytes, orig_avail_in;
  tinfl_status status;

  if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR;
  if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
  if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH))
    return MZ_STREAM_ERROR;

  pState = (inflate_state *)pStream->state;
  // Positive window_bits means the stream carries zlib framing (header +
  // trailing adler-32); negative means raw deflate.
  if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
  orig_avail_in = pStream->avail_in;

  first_call = pState->m_first_call;
  pState->m_first_call = 0;
  if (pState->m_last_status < 0) return MZ_DATA_ERROR;

  if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR;
  pState->m_has_flushed |= (flush == MZ_FINISH);

  if ((flush == MZ_FINISH) && (first_call)) {
    // MZ_FINISH on the first call implies that the input and output buffers are
    // large enough to hold the entire compressed/decompressed file.
    // Decompress straight into the caller's buffer, bypassing the dictionary.
    decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
    in_bytes = pStream->avail_in;
    out_bytes = pStream->avail_out;

    status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes,
                              pStream->next_out, pStream->next_out, &out_bytes,
                              decomp_flags);
    pState->m_last_status = status;
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);

    pStream->next_out += (mz_uint)out_bytes;
    pStream->avail_out -= (mz_uint)out_bytes;
    pStream->total_out += (mz_uint)out_bytes;

    if (status < 0)
      return MZ_DATA_ERROR;
    else if (status != TINFL_STATUS_DONE) {
      pState->m_last_status = TINFL_STATUS_FAILED;
      return MZ_BUF_ERROR;
    }
    return MZ_STREAM_END;
  }
  // flush != MZ_FINISH then we must assume there's more input.
  if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;

  // Drain any output still buffered in the dictionary from a previous call
  // before decompressing more.
  if (pState->m_dict_avail) {
    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
    return ((pState->m_last_status == TINFL_STATUS_DONE) &&
            (!pState->m_dict_avail))
               ? MZ_STREAM_END
               : MZ_OK;
  }

  // Streaming path: decompress into the internal dictionary, then copy as
  // much as fits into the caller's output buffer.
  for (;;) {
    in_bytes = pStream->avail_in;
    out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;

    status = tinfl_decompress(
        &pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict,
        pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
    pState->m_last_status = status;

    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);

    pState->m_dict_avail = (mz_uint)out_bytes;

    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);

    if (status < 0)
      return MZ_DATA_ERROR; // Stream is corrupted (there could be some
                            // uncompressed data left in the output dictionary -
                            // oh well).
    else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
      return MZ_BUF_ERROR; // Signal caller that we can't make forward progress
                           // without supplying more input or by setting flush
                           // to MZ_FINISH.
    else if (flush == MZ_FINISH) {
      // The output buffer MUST be large to hold the remaining uncompressed data
      // when flush==MZ_FINISH.
      if (status == TINFL_STATUS_DONE)
        return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
      // status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's
      // at least 1 more byte on the way.
If there's no more room left in the
      // output buffer then something is wrong.
      else if (!pStream->avail_out)
        return MZ_BUF_ERROR;
    } else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) ||
               (!pStream->avail_out) || (pState->m_dict_avail))
      break;
  }

  return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail))
             ? MZ_STREAM_END
             : MZ_OK;
}

int mz_inflateEnd(mz_streamp pStream) {
  if (!pStream) return MZ_STREAM_ERROR;
  if (pStream->state) {
    pStream->zfree(pStream->opaque, pStream->state);
    pStream->state = NULL;
  }
  return MZ_OK;
}

// One-shot decompression: init + inflate(MZ_FINISH) + end over a single
// buffer pair. *pDest_len is updated to the decompressed size on success.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
                  const unsigned char *pSource, mz_ulong source_len) {
  mz_stream stream;
  int status;
  memset(&stream, 0, sizeof(stream));

  // In case mz_ulong is 64-bits (argh I hate longs).
  if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;

  stream.next_in = pSource;
  stream.avail_in = (mz_uint32)source_len;
  stream.next_out = pDest;
  stream.avail_out = (mz_uint32)*pDest_len;

  status = mz_inflateInit(&stream);
  if (status != MZ_OK) return status;

  status = mz_inflate(&stream, MZ_FINISH);
  if (status != MZ_STREAM_END) {
    mz_inflateEnd(&stream);
    // A MZ_BUF_ERROR with all input consumed means the stream was truncated,
    // which callers expect reported as a data error.
    return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? MZ_DATA_ERROR
                                                            : status;
  }
  *pDest_len = stream.total_out;

  return mz_inflateEnd(&stream);
}

// Maps an MZ_* status code to a short description; returns NULL for unknown
// codes.
const char *mz_error(int err) {
  static struct {
    int m_err;
    const char *m_pDesc;
  } s_error_descs[] = {{MZ_OK, ""},
                       {MZ_STREAM_END, "stream end"},
                       {MZ_NEED_DICT, "need dictionary"},
                       {MZ_ERRNO, "file error"},
                       {MZ_STREAM_ERROR, "stream error"},
                       {MZ_DATA_ERROR, "data error"},
                       {MZ_MEM_ERROR, "out of memory"},
                       {MZ_BUF_ERROR, "buf error"},
                       {MZ_VERSION_ERROR, "version error"},
                       {MZ_PARAM_ERROR, "parameter error"}};
  mz_uint i;
  for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i)
    if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc;
  return NULL;
}

#endif // MINIZ_NO_ZLIB_APIS

// ------------------- Low-level Decompression (completely independent from all
// compression API's)

#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
#define TINFL_MEMSET(p, c, l) memset(p, c, l)

// The TINFL_CR_* macros implement a switch-based coroutine inside
// tinfl_decompress(): TINFL_CR_RETURN saves the resume point in r->m_state and
// jumps to common_exit; the next call re-enters at the matching case label.
#define TINFL_CR_BEGIN                                                         \
  switch (r->m_state) {                                                        \
    case 0:
#define TINFL_CR_RETURN(state_index, result)                                   \
  do {                                                                         \
    status = result;                                                           \
    r->m_state = state_index;                                                  \
    goto common_exit;                                                          \
    case state_index:;                                                         \
  }                                                                            \
  MZ_MACRO_END
#define TINFL_CR_RETURN_FOREVER(state_index, result)                           \
  do {                                                                         \
    for (;;) {                                                                 \
      TINFL_CR_RETURN(state_index, result);                                    \
    }                                                                          \
  }                                                                            \
  MZ_MACRO_END
#define TINFL_CR_FINISH }

// TODO: If the caller has indicated that there's no more input, and we attempt
// to read beyond the input buf, then something is wrong with the input because
// the inflator never
// reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of
// the stream with 0's in this scenario.
#define TINFL_GET_BYTE(state_index, c)                                         \
  do {                                                                         \
    if (pIn_buf_cur >= pIn_buf_end) {                                          \
      for (;;) {                                                               \
        if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {                        \
          TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT);         \
          if (pIn_buf_cur < pIn_buf_end) {                                     \
            c = *pIn_buf_cur++;                                                \
            break;                                                             \
          }                                                                    \
        } else {                                                               \
          c = 0;                                                               \
          break;                                                               \
        }                                                                      \
      }                                                                        \
    } else                                                                     \
      c = *pIn_buf_cur++;                                                      \
  }                                                                            \
  MZ_MACRO_END

// TINFL_NEED_BITS/SKIP_BITS/GET_BITS maintain the bit buffer: bytes are
// appended at bit position num_bits, and codes are consumed from the low end.
// All of them may suspend via TINFL_GET_BYTE when input runs dry.
#define TINFL_NEED_BITS(state_index, n)                                        \
  do {                                                                         \
    mz_uint c;                                                                 \
    TINFL_GET_BYTE(state_index, c);                                            \
    bit_buf |= (((tinfl_bit_buf_t)c) << num_bits);                             \
    num_bits += 8;                                                             \
  } while (num_bits < (mz_uint)(n))
#define TINFL_SKIP_BITS(state_index, n)                                        \
  do {                                                                         \
    if (num_bits < (mz_uint)(n)) {                                             \
      TINFL_NEED_BITS(state_index, n);                                         \
    }                                                                          \
    bit_buf >>= (n);                                                           \
    num_bits -= (n);                                                           \
  }                                                                            \
  MZ_MACRO_END
#define TINFL_GET_BITS(state_index, b, n)                                      \
  do {                                                                         \
    if (num_bits < (mz_uint)(n)) {                                             \
      TINFL_NEED_BITS(state_index, n);                                         \
    }                                                                          \
    b = bit_buf & ((1 << (n)) - 1);                                            \
    bit_buf >>= (n);                                                           \
    num_bits -= (n);                                                           \
  }                                                                            \
  MZ_MACRO_END

// TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes
// remaining in the input buffer falls below 2.
// It reads just enough bytes from the input stream that are needed to decode
// the next Huffman code (and absolutely no more). It works by trying to fully
// decode a
// Huffman code by using whatever bits are currently present in the bit buffer.
// If this fails, it reads another byte, and tries again until it succeeds or
// until the
// bit buffer contains >=15 bits (deflate's max. Huffman code size).
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff)                             \
  do {                                                                         \
    temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)];         \
    if (temp >= 0) {                                                           \
      code_len = temp >> 9;                                                    \
      if ((code_len) && (num_bits >= code_len)) break;                         \
    } else if (num_bits > TINFL_FAST_LOOKUP_BITS) {                            \
      code_len = TINFL_FAST_LOOKUP_BITS;                                       \
      do {                                                                     \
        temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)];         \
      } while ((temp < 0) && (num_bits >= (code_len + 1)));                    \
      if (temp >= 0) break;                                                    \
    }                                                                          \
    TINFL_GET_BYTE(state_index, c);                                            \
    bit_buf |= (((tinfl_bit_buf_t)c) << num_bits);                             \
    num_bits += 8;                                                             \
  } while (num_bits < 15);

// TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex
// than you would initially expect because the zlib API expects the decompressor
// to never read
// beyond the final byte of the deflate stream. (In other words, when this macro
// wants to read another byte from the input, it REALLY needs another byte in
// order to fully
// decode the next Huffman code.) Handling this properly is particularly
// important on raw deflate (non-zlib) streams, which aren't followed by a byte
// aligned adler-32.
// The slow path is only executed at the very end of the input buffer.
#define TINFL_HUFF_DECODE(state_index, sym, pHuff) \ do { \ int temp; \ mz_uint code_len, c; \ if (num_bits < 15) { \ if ((pIn_buf_end - pIn_buf_cur) < 2) { \ TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \ } else { \ bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \ (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \ pIn_buf_cur += 2; \ num_bits += 16; \ } \ } \ if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \ 0) \ code_len = temp >> 9, temp &= 511; \ else { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while (temp < 0); \ } \ sym = temp; \ bit_buf >>= code_len; \ num_bits -= code_len; \ } \ MZ_MACRO_END tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags) { static const int s_length_base[31] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0}; static const int s_dist_base[32] = { 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0}; static const int s_dist_extra[32] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13}; static const mz_uint8 s_length_dezigzag[19] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; static const int s_min_table_sizes[3] = {257, 1, 4}; tinfl_status status = TINFL_STATUS_FAILED; mz_uint32 num_bits, dist, counter, num_extra; tinfl_bit_buf_t bit_buf; const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end = pIn_buf_next + *pIn_buf_size; mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end = 
pOut_buf_next + *pOut_buf_size; size_t out_buf_size_mask = (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) ? (size_t)-1 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1, dist_from_out_buf_start; // Ensure the output buffer's size is a power of 2, unless the output buffer // is large enough to hold the entire output file (in which case it doesn't // matter). if (((out_buf_size_mask + 1) & out_buf_size_mask) || (pOut_buf_next < pOut_buf_start)) { *pIn_buf_size = *pOut_buf_size = 0; return TINFL_STATUS_BAD_PARAM; } num_bits = r->m_num_bits; bit_buf = r->m_bit_buf; dist = r->m_dist; counter = r->m_counter; num_extra = r->m_num_extra; dist_from_out_buf_start = r->m_dist_from_out_buf_start; TINFL_CR_BEGIN bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0; r->m_z_adler32 = r->m_check_adler32 = 1; if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { TINFL_GET_BYTE(1, r->m_zhdr0); TINFL_GET_BYTE(2, r->m_zhdr1); counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) || (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8)); if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) || ((out_buf_size_mask + 1) < (size_t)(1ULL << (8U + (r->m_zhdr0 >> 4))))); if (counter) { TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED); } } do { TINFL_GET_BITS(3, r->m_final, 3); r->m_type = r->m_final >> 1; if (r->m_type == 0) { TINFL_SKIP_BITS(5, num_bits & 7); for (counter = 0; counter < 4; ++counter) { if (num_bits) TINFL_GET_BITS(6, r->m_raw_header[counter], 8); else TINFL_GET_BYTE(7, r->m_raw_header[counter]); } if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) != (mz_uint)(0xFFFF ^ (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) { TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED); } while ((counter) && (num_bits)) { TINFL_GET_BITS(51, dist, 8); while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)dist; 
counter--; } while (counter) { size_t n; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT); } while (pIn_buf_cur >= pIn_buf_end) { if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT); } else { TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED); } } n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur), (size_t)(pIn_buf_end - pIn_buf_cur)), counter); TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n); pIn_buf_cur += n; pOut_buf_cur += n; counter -= (mz_uint)n; } } else if (r->m_type == 3) { TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED); } else { if (r->m_type == 1) { mz_uint8 *p = r->m_tables[0].m_code_size; mz_uint i; r->m_table_sizes[0] = 288; r->m_table_sizes[1] = 32; TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32); for (i = 0; i <= 143; ++i) *p++ = 8; for (; i <= 255; ++i) *p++ = 9; for (; i <= 279; ++i) *p++ = 7; for (; i <= 287; ++i) *p++ = 8; } else { for (counter = 0; counter < 3; counter++) { TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]); r->m_table_sizes[counter] += s_min_table_sizes[counter]; } MZ_CLEAR_OBJ(r->m_tables[2].m_code_size); for (counter = 0; counter < r->m_table_sizes[2]; counter++) { mz_uint s; TINFL_GET_BITS(14, s, 3); r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s; } r->m_table_sizes[2] = 19; } for (; (int)r->m_type >= 0; r->m_type--) { int tree_next, tree_cur; tinfl_huff_table *pTable; mz_uint i, j, used_syms, total, sym_index, next_code[17], total_syms[16]; pTable = &r->m_tables[r->m_type]; MZ_CLEAR_OBJ(total_syms); MZ_CLEAR_OBJ(pTable->m_look_up); MZ_CLEAR_OBJ(pTable->m_tree); for (i = 0; i < r->m_table_sizes[r->m_type]; ++i) total_syms[pTable->m_code_size[i]]++; used_syms = 0, total = 0; next_code[0] = next_code[1] = 0; for (i = 1; i <= 15; ++i) { used_syms += total_syms[i]; next_code[i + 1] = (total = ((total + total_syms[i]) << 1)); } if ((65536 != total) && (used_syms > 1)) { TINFL_CR_RETURN_FOREVER(35, 
TINFL_STATUS_FAILED); } for (tree_next = -1, sym_index = 0; sym_index < r->m_table_sizes[r->m_type]; ++sym_index) { mz_uint rev_code = 0, l, cur_code, code_size = pTable->m_code_size[sym_index]; if (!code_size) continue; cur_code = next_code[code_size]++; for (l = code_size; l > 0; l--, cur_code >>= 1) rev_code = (rev_code << 1) | (cur_code & 1); if (code_size <= TINFL_FAST_LOOKUP_BITS) { mz_int16 k = (mz_int16)((code_size << 9) | sym_index); while (rev_code < TINFL_FAST_LOOKUP_SIZE) { pTable->m_look_up[rev_code] = k; rev_code += (1 << code_size); } continue; } if (0 == (tree_cur = pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)])) { pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1); for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) { tree_cur -= ((rev_code >>= 1) & 1); if (!pTable->m_tree[-tree_cur - 1]) { pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } else tree_cur = pTable->m_tree[-tree_cur - 1]; } tree_cur -= ((rev_code >>= 1) & 1); pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index; } if (r->m_type == 2) { for (counter = 0; counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) { mz_uint s; TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]); if (dist < 16) { r->m_len_codes[counter++] = (mz_uint8)dist; continue; } if ((dist == 16) && (!counter)) { TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED); } num_extra = "\02\03\07"[dist - 16]; TINFL_GET_BITS(18, s, num_extra); s += "\03\03\013"[dist - 16]; TINFL_MEMSET(r->m_len_codes + counter, (dist == 16) ? 
r->m_len_codes[counter - 1] : 0, s); counter += s; } if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) { TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED); } TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes, r->m_table_sizes[0]); TINFL_MEMCPY(r->m_tables[1].m_code_size, r->m_len_codes + r->m_table_sizes[0], r->m_table_sizes[1]); } } for (;;) { mz_uint8 *pSrc; for (;;) { if (((pIn_buf_end - pIn_buf_cur) < 4) || ((pOut_buf_end - pOut_buf_cur) < 2)) { TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]); if (counter >= 256) break; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)counter; } else { int sym2; mz_uint code_len; #if TINFL_USE_64BIT_BITBUF if (num_bits < 30) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits); pIn_buf_cur += 4; num_bits += 32; } #else if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0] .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0] .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } counter = sym2; bit_buf >>= code_len; num_bits -= code_len; if (counter & 256) break; #if !TINFL_USE_64BIT_BITBUF if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0] .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0] .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } bit_buf >>= code_len; num_bits -= code_len; pOut_buf_cur[0] = (mz_uint8)counter; if (sym2 & 256) { pOut_buf_cur++; counter = sym2; break; } pOut_buf_cur[1] = (mz_uint8)sym2; pOut_buf_cur += 2; } } if ((counter &= 511) == 256) break; num_extra = 
s_length_extra[counter - 257]; counter = s_length_base[counter - 257]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(25, extra_bits, num_extra); counter += extra_bits; } TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]); num_extra = s_dist_extra[dist]; dist = s_dist_base[dist]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(27, extra_bits, num_extra); dist += extra_bits; } dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start; if ((dist > dist_from_out_buf_start) && (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) { TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED); } pSrc = pOut_buf_start + ((dist_from_out_buf_start - dist) & out_buf_size_mask); if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) { while (counter--) { while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = pOut_buf_start[(dist_from_out_buf_start++ - dist) & out_buf_size_mask]; } continue; } #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES else if ((counter >= 9) && (counter <= dist)) { const mz_uint8 *pSrc_end = pSrc + (counter & ~7); do { ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0]; ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1]; pOut_buf_cur += 8; } while ((pSrc += 8) < pSrc_end); if ((counter &= 7) < 3) { if (counter) { pOut_buf_cur[0] = pSrc[0]; if (counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } continue; } } #endif do { pOut_buf_cur[0] = pSrc[0]; pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur[2] = pSrc[2]; pOut_buf_cur += 3; pSrc += 3; } while ((int)(counter -= 3) > 2); if ((int)counter > 0) { pOut_buf_cur[0] = pSrc[0]; if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } } } } while (!(r->m_final & 1)); if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { TINFL_SKIP_BITS(32, num_bits & 7); for (counter = 0; counter < 4; ++counter) { mz_uint s; if (num_bits) TINFL_GET_BITS(41, s, 8); else TINFL_GET_BYTE(42, s); r->m_z_adler32 = (r->m_z_adler32 << 8) | s; } } 
/* Tail of tinfl_decompress(): stream complete -- suspend forever with DONE,
   then fall through to common_exit where coroutine state is persisted. */
TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
TINFL_CR_FINISH

common_exit:
  /* Save the bit reader / LZ state back into the decompressor so a later
     call can resume exactly where this one stopped. */
  r->m_num_bits = num_bits;
  r->m_bit_buf = bit_buf;
  r->m_dist = dist;
  r->m_counter = counter;
  r->m_num_extra = num_extra;
  r->m_dist_from_out_buf_start = dist_from_out_buf_start;
  /* Report bytes consumed from the input and produced into the output. */
  *pIn_buf_size = pIn_buf_cur - pIn_buf_next;
  *pOut_buf_size = pOut_buf_cur - pOut_buf_next;
  if ((decomp_flags &
       (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) &&
      (status >= 0)) {
    /* Fold the bytes just written into the running adler32.  5552 is the
       standard adler32 block size: the largest run for which s1/s2 cannot
       overflow 32 bits before the modulo. */
    const mz_uint8 *ptr = pOut_buf_next;
    size_t buf_len = *pOut_buf_size;
    mz_uint32 i, s1 = r->m_check_adler32 & 0xffff,
                 s2 = r->m_check_adler32 >> 16;
    size_t block_len = buf_len % 5552;
    while (buf_len) {
      for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
        s1 += ptr[0], s2 += s1;
        s1 += ptr[1], s2 += s1;
        s1 += ptr[2], s2 += s1;
        s1 += ptr[3], s2 += s1;
        s1 += ptr[4], s2 += s1;
        s1 += ptr[5], s2 += s1;
        s1 += ptr[6], s2 += s1;
        s1 += ptr[7], s2 += s1;
      }
      for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
      s1 %= 65521U, s2 %= 65521U;
      buf_len -= block_len;
      block_len = 5552;
    }
    r->m_check_adler32 = (s2 << 16) + s1;
    /* A finished zlib stream must match the adler32 stored in its trailer. */
    if ((status == TINFL_STATUS_DONE) &&
        (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) &&
        (r->m_check_adler32 != r->m_z_adler32))
      status = TINFL_STATUS_ADLER32_MISMATCH;
  }
  return status;
}

// Higher level helper functions.

/* Decompresses an entire in-memory stream into a heap buffer that is grown
   (see the realloc logic below) until everything fits.  Returns the malloc'd
   buffer -- caller frees with MZ_FREE -- with its size in *pOut_len, or NULL
   on failure. */
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                   size_t *pOut_len, int flags) {
  tinfl_decompressor decomp;
  void *pBuf = NULL, *pNew_buf;
  size_t src_buf_ofs = 0, out_buf_capacity = 0;
  *pOut_len = 0;
  tinfl_init(&decomp);
  for (;;) {
    size_t src_buf_size = src_buf_len - src_buf_ofs,
           dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
    tinfl_status status = tinfl_decompress(
        &decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size,
        (mz_uint8 *)pBuf, pBuf ?
        /* (continuation of the tinfl_decompress() call begun above: write
           position is pBuf + *pOut_len, whole input is already available). */
        (mz_uint8 *)pBuf + *pOut_len : NULL, &dst_buf_size,
        (flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
            TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
    if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) {
      /* Hard failure, or the stream was truncated: release and bail out. */
      MZ_FREE(pBuf);
      *pOut_len = 0;
      return NULL;
    }
    src_buf_ofs += src_buf_size;
    *pOut_len += dst_buf_size;
    if (status == TINFL_STATUS_DONE) break;
    /* Output buffer exhausted: double its capacity (minimum 128 bytes). */
    new_out_buf_capacity = out_buf_capacity * 2;
    if (new_out_buf_capacity < 128) new_out_buf_capacity = 128;
    pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
    if (!pNew_buf) {
      MZ_FREE(pBuf);
      *pOut_len = 0;
      return NULL;
    }
    pBuf = pNew_buf;
    out_buf_capacity = new_out_buf_capacity;
  }
  return pBuf;
}

/* Single-call decompression into a caller-supplied fixed-size buffer.
   Returns the number of bytes written, or TINFL_DECOMPRESS_MEM_TO_MEM_FAILED
   if the stream did not finish with TINFL_STATUS_DONE. */
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                   const void *pSrc_buf, size_t src_buf_len,
                                   int flags) {
  tinfl_decompressor decomp;
  tinfl_status status;
  tinfl_init(&decomp);
  status = tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len,
                            (mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf,
                            &out_buf_len,
                            (flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
                                TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
  return (status != TINFL_STATUS_DONE) ?
                                         TINFL_DECOMPRESS_MEM_TO_MEM_FAILED
                                       : out_buf_len;
}

/* Decompresses the input, handing each produced chunk to pPut_buf_func via a
   TINFL_LZ_DICT_SIZE-byte circular dictionary buffer (so back-references can
   wrap).  Returns 1 on success, 0 on failure or callback abort; the number of
   input bytes consumed is stored back into *pIn_buf_size. */
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
                                     tinfl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  int result = 0;
  tinfl_decompressor decomp;
  mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
  size_t in_buf_ofs = 0, dict_ofs = 0;
  if (!pDict) return TINFL_STATUS_FAILED;
  tinfl_init(&decomp);
  for (;;) {
    size_t in_buf_size = *pIn_buf_size - in_buf_ofs,
           dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
    tinfl_status status = tinfl_decompress(
        &decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs, &in_buf_size, pDict,
        pDict + dict_ofs, &dst_buf_size,
        (flags & ~(TINFL_FLAG_HAS_MORE_INPUT |
                   TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
    in_buf_ofs += in_buf_size;
    /* Deliver whatever was produced; a zero return from the callback aborts
       the whole operation (result stays 0). */
    if ((dst_buf_size) && (!(*pPut_buf_func)(pDict + dict_ofs,
                                             (int)dst_buf_size, pPut_buf_user)))
      break;
    if (status != TINFL_STATUS_HAS_MORE_OUTPUT) {
      result = (status == TINFL_STATUS_DONE);
      break;
    }
    dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
  }
  MZ_FREE(pDict);
  *pIn_buf_size = in_buf_ofs;
  return result;
}

// ------------------- Low-level Compression (independent from all decompression
// API's)

// Purposely making these tables static for faster init and thread safety.
/* Maps (match length - TDEFL_MIN_MATCH_LEN) to its DEFLATE length symbol. */
static const mz_uint16 s_tdefl_len_sym[256] = {
    257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268, 268,
    269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272, 272, 272,
    273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, 274, 274, 274,
    275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, 276, 276, 276, 276,
    277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
    278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
    279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279,
    280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285};

/* Number of extra bits emitted after each length symbol, indexed the same
   way as s_tdefl_len_sym. */
static const mz_uint8 s_tdefl_len_extra[256] = {
    0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0};

/* Distance symbol for match distances < 512 (indexed by distance - 1). */
static const mz_uint8 s_tdefl_small_dist_sym[512] = {
    0,  1,  2,  3,  4,  4,  5,  5,  6,  6,  6,  6,  7,  7,  7,  7,
    8,  8,  8,  8,  8,  8,  8,  8,  9,  9,  9,  9,  9,  9,  9,  9,
    10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
    11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17};

/* Extra bits for the small-distance symbols above. */
static const mz_uint8 s_tdefl_small_dist_extra[512] = {
    0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7};

/* Distance symbol for distances >= 512 (indexed by distance >> 8). */
static const mz_uint8 s_tdefl_large_dist_sym[128] = {
    0,  0,  18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23,
    24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25,
    26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
    27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
    28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
    29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29};

/* Extra bits for the large-distance symbols above. */
static const mz_uint8 s_tdefl_large_dist_extra[128] = {
    0,  0,  8,  8,  9,  9,  9,  9,  10, 10, 10, 10, 10, 10, 10, 10,
    11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13};

// Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted
// values.
typedef struct {
  mz_uint16 m_key, m_sym_index;
} tdefl_sym_freq;

/* Stable two-pass (LSB byte, then MSB byte) counting radix sort of the
   tdefl_sym_freq records on m_key.  pSyms0/pSyms1 are ping-pong buffers of at
   least num_syms entries; the return value points at whichever buffer ended up
   holding the sorted data. */
static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms,
                                             tdefl_sym_freq *pSyms0,
                                             tdefl_sym_freq *pSyms1) {
  mz_uint32 hist[256 * 2];
  mz_uint32 pass_count = 2, pass, shift, idx;
  tdefl_sym_freq *pSrc = pSyms0, *pDst = pSyms1;

  /* Build both byte histograms (low byte in hist[0..255], high byte in
     hist[256..511]) in a single sweep over the keys. */
  MZ_CLEAR_OBJ(hist);
  for (idx = 0; idx < num_syms; idx++) {
    mz_uint key = pSyms0[idx].m_key;
    hist[key & 0xFF]++;
    hist[256 + ((key >> 8) & 0xFF)]++;
  }
  /* If every key has a zero high byte, the second pass would be a no-op --
     skip it so the sorted result lands after one pass. */
  if (num_syms == hist[256]) pass_count = 1;
  for (pass = 0, shift = 0; pass < pass_count; pass++, shift += 8) {
    const mz_uint32 *pHist = &hist[pass << 8];
    mz_uint offsets[256], running = 0;
    tdefl_sym_freq *pTmp;
    /* Exclusive prefix sum of the histogram gives each bucket's start. */
    for (idx = 0; idx < 256; idx++) {
      offsets[idx] = running;
      running += pHist[idx];
    }
    /* Stable scatter of the records into their buckets. */
    for (idx = 0; idx < num_syms; idx++)
      pDst[offsets[(pSrc[idx].m_key >> shift) & 0xFF]++] = pSrc[idx];
    /* Swap the ping-pong buffers for the next pass. */
    pTmp = pSrc;
    pSrc = pDst;
    pDst = pTmp;
  }
  return pSrc;
}
// tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat,
// alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996.
/* In-place Moffat-Katajainen minimum-redundancy (Huffman) code-length
   computation.  On entry A[0..n-1].m_key holds symbol frequencies sorted
   ascending (as produced by tdefl_radix_sort_syms); on exit each m_key holds
   that symbol's code length.  The array is reused for the implicit tree, so
   the intermediate values are parent indices / internal node weights. */
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) {
  int root, leaf, next, avbl, used, dpth;
  /* Trivial cases: no symbols, or a single symbol that gets a 1-bit code. */
  if (n == 0)
    return;
  else if (n == 1) {
    A[0].m_key = 1;
    return;
  }
  /* Phase 1: build the Huffman tree in place; m_key becomes either an
     internal-node weight or (once merged) the index of its parent. */
  A[0].m_key += A[1].m_key;
  root = 0;
  leaf = 2;
  for (next = 1; next < n - 1; next++) {
    if (leaf >= n || A[root].m_key < A[leaf].m_key) {
      A[next].m_key = A[root].m_key;
      A[root++].m_key = (mz_uint16)next;
    } else
      A[next].m_key = A[leaf++].m_key;
    if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) {
      A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key);
      A[root++].m_key = (mz_uint16)next;
    } else
      A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
  }
  /* Phase 2: convert parent indices into node depths, walking top-down. */
  A[n - 2].m_key = 0;
  for (next = n - 3; next >= 0; next--)
    A[next].m_key = A[A[next].m_key].m_key + 1;
  /* Phase 3: assign code lengths level by level (avbl = codes available at
     the current depth, used = internal nodes consumed at that depth). */
  avbl = 1;
  used = dpth = 0;
  root = n - 2;
  next = n - 1;
  while (avbl > 0) {
    while (root >= 0 && (int)A[root].m_key == dpth) {
      used++;
      root--;
    }
    while (avbl > used) {
      A[next--].m_key = (mz_uint16)(dpth);
      avbl--;
    }
    avbl = 2 * used;
    dpth++;
    used = 0;
  }
}
// Limits canonical Huffman code table's max code size.
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 };

/* Clamps an optimal code-length distribution so no code exceeds
   max_code_size, then repairs the Kraft sum (sum of 2^(max-len) must equal
   2^max) by shortening/stealing codes until the lengths are feasible again.
   pNum_codes[i] = number of symbols with code length i. */
static void tdefl_huffman_enforce_max_code_size(int *pNum_codes,
                                                int code_list_len,
                                                int max_code_size) {
  int i;
  mz_uint32 total = 0;
  if (code_list_len <= 1) return;
  /* Fold every over-long code down to max_code_size... */
  for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++)
    pNum_codes[max_code_size] += pNum_codes[i];
  /* ...then compute the Kraft sum scaled by 2^max_code_size. */
  for (i = max_code_size; i > 0; i--)
    total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i));
  /* While over-subscribed: drop one max-length code and lengthen a shorter
     one by one bit (splitting it into two longer codes). */
  while (total != (1UL << max_code_size)) {
    pNum_codes[max_code_size]--;
    for (i = max_code_size - 1; i > 0; i--)
      if (pNum_codes[i]) {
        pNum_codes[i]--;
        pNum_codes[i + 1] += 2;
        break;
      }
    total--;
  }
}

/* Builds canonical Huffman code sizes + codes for compressor table table_num.
   If static_table is true the code sizes are taken as-is; otherwise they are
   derived from the symbol frequency counts via radix sort + minimum
   redundancy + length limiting.  (Function body continues on the next
   source line.) */
static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num,
                                         int table_len, int code_size_limit,
                                         int static_table) {
  int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
  mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
  MZ_CLEAR_OBJ(num_codes);
  if (static_table) {
    for (i = 0; i < table_len; i++)
      num_codes[d->m_huff_code_sizes[table_num][i]]++;
  } else {
    tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS],
        syms1[TDEFL_MAX_HUFF_SYMBOLS], *pSyms;
    int num_used_syms = 0;
    const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
    /* Collect only the symbols that actually occur. */
    for (i = 0; i < table_len; i++)
      if (pSym_count[i]) {
        syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
        syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
      }
    pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
    tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
    for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++;
    tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms,
                                        code_size_limit);
    MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
    MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
    /* Hand out the (possibly clamped) lengths: longest codes go to the
       least-frequent symbols (pSyms is sorted ascending by frequency). */
    for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
      for (l = num_codes[i]; l > 0; l--)
        d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] =
            (mz_uint8)(i);
  }
  /* Canonical code assignment: first code of each length follows from the
     previous length's last code, shifted left one bit. */
  next_code[1] = 0;
  for (j = 0, i = 2; i <= code_size_limit; i++)
    next_code[i] = j = ((j + num_codes[i - 1]) << 1);
  for (i = 0; i <
              table_len; i++) {
    mz_uint rev_code = 0, code, code_size;
    if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue;
    code = next_code[code_size]++;
    /* DEFLATE transmits Huffman codes LSB-first, so bit-reverse each code. */
    for (l = code_size; l > 0; l--, code >>= 1)
      rev_code = (rev_code << 1) | (code & 1);
    d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
  }
}

/* Appends b (l bits wide) to the output bit buffer, flushing whole bytes to
   the output buffer as they complete. */
#define TDEFL_PUT_BITS(b, l)                                    \
  do {                                                          \
    mz_uint bits = b;                                           \
    mz_uint len = l;                                            \
    MZ_ASSERT(bits <= ((1U << len) - 1U));                      \
    d->m_bit_buffer |= (bits << d->m_bits_in);                  \
    d->m_bits_in += len;                                        \
    while (d->m_bits_in >= 8) {                                 \
      if (d->m_pOutput_buf < d->m_pOutput_buf_end)              \
        *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer);      \
      d->m_bit_buffer >>= 8;                                    \
      d->m_bits_in -= 8;                                        \
    }                                                           \
  }                                                             \
  MZ_MACRO_END

/* Flushes a pending run of repeated code sizes: short runs are emitted
   literally, runs of 3+ use the DEFLATE repeat symbol 16 (copy previous). */
#define TDEFL_RLE_PREV_CODE_SIZE()                                            \
  {                                                                           \
    if (rle_repeat_count) {                                                   \
      if (rle_repeat_count < 3) {                                             \
        d->m_huff_count[2][prev_code_size] = (mz_uint16)(                     \
            d->m_huff_count[2][prev_code_size] + rle_repeat_count);           \
        while (rle_repeat_count--)                                            \
          packed_code_sizes[num_packed_code_sizes++] = prev_code_size;        \
      } else {                                                                \
        d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1);     \
        packed_code_sizes[num_packed_code_sizes++] = 16;                      \
        packed_code_sizes[num_packed_code_sizes++] =                          \
            (mz_uint8)(rle_repeat_count - 3);                                 \
      }                                                                       \
      rle_repeat_count = 0;                                                   \
    }                                                                         \
  }

/* Flushes a pending run of zero code sizes using DEFLATE symbols 17 (3-10
   zeros) or 18 (11-138 zeros), or literal zeros for runs shorter than 3. */
#define TDEFL_RLE_ZERO_CODE_SIZE()                                            \
  {                                                                           \
    if (rle_z_count) {                                                        \
      if (rle_z_count < 3) {                                                  \
        d->m_huff_count[2][0] =                                               \
            (mz_uint16)(d->m_huff_count[2][0] + rle_z_count);                 \
        while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \
      } else if (rle_z_count <= 10) {                                         \
        d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1);     \
        packed_code_sizes[num_packed_code_sizes++] = 17;                      \
        packed_code_sizes[num_packed_code_sizes++] =                          \
            (mz_uint8)(rle_z_count - 3);                                      \
      } else {                                                                \
        d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1);     \
        packed_code_sizes[num_packed_code_sizes++] = 18;                      \
        packed_code_sizes[num_packed_code_sizes++] =                          \
            (mz_uint8)(rle_z_count - 11);                                     \
      }                                                                       \
      rle_z_count = 0;                                                        \
    }                                                                         \
  }

/* DEFLATE's fixed transmission order for the code-length code lengths. */
static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = { 16,
    17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};

/* Emits the header of a dynamic-Huffman DEFLATE block: optimizes the
   literal/length and distance tables, run-length-encodes their code sizes
   with the code-length alphabet (symbols 16/17/18), and writes the block
   header bits.  (Continues on the next source line.) */
static void tdefl_start_dynamic_block(tdefl_compressor *d) {
  int num_lit_codes, num_dist_codes, num_bit_lengths;
  mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count,
      rle_repeat_count, packed_code_sizes_index;
  mz_uint8
      code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
      packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
      prev_code_size = 0xFF;

  /* The end-of-block symbol (256) always occurs exactly once. */
  d->m_huff_count[0][256] = 1;

  tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
  tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);

  /* Trim trailing unused codes (at least 257 lit/len and 1 dist are sent). */
  for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
    if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break;
  for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
    if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break;

  /* Concatenate both tables' code sizes and RLE-pack them. */
  memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
  memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0],
         num_dist_codes);
  total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
  num_packed_code_sizes = 0;
  rle_z_count = 0;
  rle_repeat_count = 0;

  memset(&d->m_huff_count[2][0], 0,
         sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
  for (i = 0; i < total_code_sizes_to_pack; i++) {
    mz_uint8 code_size = code_sizes_to_pack[i];
    if (!code_size) {
      TDEFL_RLE_PREV_CODE_SIZE();
      /* 138 is the longest zero run symbol 18 can express. */
      if (++rle_z_count == 138) {
        TDEFL_RLE_ZERO_CODE_SIZE();
      }
    } else {
      TDEFL_RLE_ZERO_CODE_SIZE();
      if (code_size != prev_code_size) {
        TDEFL_RLE_PREV_CODE_SIZE();
        d->m_huff_count[2][code_size] =
            (mz_uint16)(d->m_huff_count[2][code_size] + 1);
        packed_code_sizes[num_packed_code_sizes++] = code_size;
      } else if (++rle_repeat_count == 6) {
        /* 6 is the longest repeat symbol 16 can express. */
        TDEFL_RLE_PREV_CODE_SIZE();
      }
    }
    prev_code_size = code_size;
  }
  /* Flush whichever run is still pending. */
  if (rle_repeat_count) {
    TDEFL_RLE_PREV_CODE_SIZE();
  } else {
    TDEFL_RLE_ZERO_CODE_SIZE();
  }

  /* Table 2 is the code-length code itself (max code size 7 per DEFLATE). */
  tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);

  /* BTYPE = 2: dynamic Huffman block. */
  TDEFL_PUT_BITS(2, 2);
  /* HLIT / HDIST / HCLEN fields of the dynamic block header. */
  TDEFL_PUT_BITS(num_lit_codes - 257, 5);
  TDEFL_PUT_BITS(num_dist_codes - 1, 5);

  /* Find how many code-length code lengths must be transmitted (in swizzled
     order, minimum 4 per the DEFLATE format). */
  for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
    if (d->m_huff_code_sizes
            [2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]])
      break;
  num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
  TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
  for (i = 0; (int)i < num_bit_lengths; i++)
    TDEFL_PUT_BITS(
        d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);

  /* Emit the RLE-packed code sizes; symbols >= 16 carry 2/3/7 extra bits. */
  for (packed_code_sizes_index = 0;
       packed_code_sizes_index < num_packed_code_sizes;) {
    mz_uint code = packed_code_sizes[packed_code_sizes_index++];
    MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
    TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
    if (code >= 16)
      TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++],
                     "\02\03\07"[code - 16]);
  }
}

/* Emits the header of a static-Huffman DEFLATE block: installs the fixed
   code lengths mandated by the format (8/9/7/8 for literals/lengths, 5 for
   distances) and writes BTYPE = 1. */
static void tdefl_start_static_block(tdefl_compressor *d) {
  mz_uint i;
  mz_uint8 *p = &d->m_huff_code_sizes[0][0];

  for (i = 0; i <= 143; ++i) *p++ = 8;
  for (; i <= 255; ++i) *p++ = 9;
  for (; i <= 279; ++i) *p++ = 7;
  for (; i <= 287; ++i) *p++ = 8;

  memset(d->m_huff_code_sizes[1], 5, 32);

  tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
  tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);

  TDEFL_PUT_BITS(1, 2);
}

/* mz_bitmasks[n] has the low n bits set. */
static const mz_uint mz_bitmasks[17] = {
    0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF,
    0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF};

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \
    MINIZ_HAS_64BIT_REGISTERS
/* Fast path: encodes the buffered LZ codes using a 64-bit bit buffer and
   unaligned 64-bit stores.  Returns MZ_FALSE if the output buffer filled. */
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;
  mz_uint8 *pOutput_buf = d->m_pOutput_buf;
  mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
  mz_uint64 bit_buffer = d->m_bit_buffer;
  mz_uint bits_in = d->m_bits_in;

#define TDEFL_PUT_BITS_FAST(b, l)                    \
  {                                                  \
    bit_buffer |= (((mz_uint64)(b)) << bits_in);     \
    bits_in += (l);                                  \
  }

  /* flags is a shift register: one bit per LZ code, 1 = match, 0 = literal;
     the 0x100 sentinel marks when a fresh flag byte must be loaded. */
  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end;
       flags >>= 1) {
    if (flags == 1)
      flags = *pLZ_codes++ | 0x100;
    if (flags & 1) {
      /* Match: 1-byte length + 2-byte little-endian distance follow. */
      mz_uint s0, s1, n0, n1, sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0],
              match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
      pLZ_codes += 3;

      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
                          d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
                          s_tdefl_len_extra[match_len]);

      // This sequence coaxes MSVC into using cmov's vs. jmp's.
      s0 = s_tdefl_small_dist_sym[match_dist & 511];
      n0 = s_tdefl_small_dist_extra[match_dist & 511];
      s1 = s_tdefl_large_dist_sym[match_dist >> 8];
      n1 = s_tdefl_large_dist_extra[match_dist >> 8];
      sym = (match_dist < 512) ? s0 : s1;
      num_extra_bits = (match_dist < 512) ? n0 : n1;

      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym],
                          d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits],
                          num_extra_bits);
    } else {
      /* Literal; opportunistically emit up to two more literals while the
         flag bits say the next codes are also literals. */
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                          d->m_huff_code_sizes[0][lit]);

      if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
        flags >>= 1;
        lit = *pLZ_codes++;
        MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
        TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                            d->m_huff_code_sizes[0][lit]);

        if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
          flags >>= 1;
          lit = *pLZ_codes++;
          MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
          TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                              d->m_huff_code_sizes[0][lit]);
        }
      }
    }

    if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE;

    /* Spill the whole 64-bit bit buffer with one unaligned store, then keep
       only the ragged (partial byte) tail. */
    *(mz_uint64 *)pOutput_buf = bit_buffer;
    pOutput_buf += (bits_in >> 3);
    bit_buffer >>= (bits_in & ~7);
    bits_in &= 7;
  }

#undef TDEFL_PUT_BITS_FAST

  /* Hand the remaining bits back to the byte-at-a-time TDEFL_PUT_BITS path. */
  d->m_pOutput_buf = pOutput_buf;
  d->m_bits_in = 0;
  d->m_bit_buffer = 0;

  while (bits_in) {
    mz_uint32 n = MZ_MIN(bits_in, 16);
    TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
    bit_buffer >>= n;
    bits_in -= n;
  }

  /* End-of-block symbol. */
  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);

  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#else
/* Portable fallback: same LZ-code encoding as above, but byte-at-a-time via
   TDEFL_PUT_BITS (no unaligned/64-bit assumptions). */
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;

  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf;
       flags >>= 1) {
    if (flags == 1) flags = *pLZ_codes++ | 0x100;
    if (flags & 1) {
      /* Match: 1-byte length + 2-byte little-endian distance follow. */
      mz_uint sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0],
              match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
      pLZ_codes += 3;

      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
                     d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
                     s_tdefl_len_extra[match_len]);

      if (match_dist < 512) {
        sym = s_tdefl_small_dist_sym[match_dist];
        num_extra_bits = s_tdefl_small_dist_extra[match_dist];
      } else {
        sym = s_tdefl_large_dist_sym[match_dist >> 8];
        num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
      }
      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
    } else {
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
    }
  }

  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);

  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#endif  // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN &&
        // MINIZ_HAS_64BIT_REGISTERS

/* Writes one complete DEFLATE block: static or dynamic header followed by the
   buffered LZ codes. */
static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) {
  if (static_block)
    tdefl_start_static_block(d);
  else
    tdefl_start_dynamic_block(d);
  return tdefl_compress_lz_codes(d);
}

/* Flushes the current LZ code buffer as one DEFLATE block (compressed or,
   when that would expand the data, stored/raw), handles zlib header/trailer
   emission and final padding, and pushes the produced bytes to the output
   sink.  (Body continues across the following source lines.) */
static int tdefl_flush_block(tdefl_compressor *d, int flush) {
  mz_uint saved_bit_buf, saved_bits_in;
  mz_uint8 *pSaved_output_buf;
  mz_bool comp_block_succeeded = MZ_FALSE;
  int n,
      /* Raw (stored) block is forced only when requested AND the block's
         source bytes are still fully present in the dictionary. */
      use_raw_block =
          ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) &&
          (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
  /* Write straight into the user's buffer when it is large enough, otherwise
     into the internal staging buffer. */
  mz_uint8 *pOutput_buf_start =
      ((d->m_pPut_buf_func == NULL) &&
       ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE))
          ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs)
          : d->m_output_buf;

  d->m_pOutput_buf = pOutput_buf_start;
  d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;

  MZ_ASSERT(!d->m_output_flush_remaining);
  d->m_output_flush_ofs = 0;
  d->m_output_flush_remaining = 0;

  /* Finalize the partially-filled LZ flag byte (drop it if it's empty). */
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
  d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);

  /* zlib header (0x78 0x01) goes in front of the very first block. */
  if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) {
    TDEFL_PUT_BITS(0x78, 8);
    TDEFL_PUT_BITS(0x01, 8);
  }

  /* BFINAL bit. */
  TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);

  /* Remember the output position so the block can be re-emitted raw if the
     compressed form turns out larger. */
  pSaved_output_buf = d->m_pOutput_buf;
  saved_bit_buf = d->m_bit_buffer;
  saved_bits_in = d->m_bits_in;

  if (!use_raw_block)
    comp_block_succeeded =
        tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) ||
                                    (d->m_total_lz_bytes < 48));

  // If the block gets expanded, forget the current contents of the output
  // buffer and send a raw block instead.
  if (((use_raw_block) ||
       ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >=
                                  d->m_total_lz_bytes))) &&
      ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) {
    mz_uint i;
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    /* BTYPE = 0 (stored), align to a byte boundary, then LEN and NLEN. */
    TDEFL_PUT_BITS(0, 2);
    if (d->m_bits_in) {
      TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
    }
    for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) {
      TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
    }
    /* Copy the uncompressed bytes straight out of the dictionary. */
    for (i = 0; i < d->m_total_lz_bytes; ++i) {
      TDEFL_PUT_BITS(
          d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK],
          8);
    }
  }
  // Check for the extremely unlikely (if not impossible) case of the compressed
  // block not fitting into the output buffer when using dynamic codes.
  else if (!comp_block_succeeded) {
    /* Retry with static codes, which are guaranteed to fit. */
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    tdefl_compress_block(d, MZ_TRUE);
  }

  if (flush) {
    if (flush == TDEFL_FINISH) {
      /* Pad to a byte boundary, then append the zlib adler32 trailer
         (big-endian) when writing a zlib stream. */
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) {
        mz_uint i, a = d->m_adler32;
        for (i = 0; i < 4; i++) {
          TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
          a <<= 8;
        }
      }
    } else {
      /* Non-final flush: emit an empty stored block so the decompressor can
         consume everything produced so far. */
      mz_uint i, z = 0;
      TDEFL_PUT_BITS(0, 3);
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      for (i = 2; i; --i, z ^= 0xFFFF) {
        TDEFL_PUT_BITS(z & 0xFFFF, 16);
      }
    }
  }

  MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);

  /* Reset per-block state: frequency counts and the LZ code buffer. */
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);

  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
  d->m_total_lz_bytes = 0;
  d->m_block_index++;

  /* Deliver the produced bytes: callback sink, direct user buffer, or copy
     from the staging buffer (remembering any overflow for later flushes). */
  if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) {
    if (d->m_pPut_buf_func) {
      *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
      if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
        return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
    } else if (pOutput_buf_start == d->m_output_buf) {
      int bytes_to_copy = (int)MZ_MIN(
          (size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
      memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf,
             bytes_to_copy);
      d->m_out_buf_ofs += bytes_to_copy;
      if ((n -= bytes_to_copy) != 0) {
        d->m_output_flush_ofs = bytes_to_copy;
        d->m_output_flush_remaining = n;
      }
    } else {
      d->m_out_buf_ofs += n;
    }
  }

  return d->m_output_flush_remaining;
}

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
/* Dictionary match finder (unaligned 16-bit-load fast path): scans the hash
   chain for the longest match at lookahead_pos, updating *pMatch_dist /
   *pMatch_len.  (Signature continues on the next source line.) */
static MZ_FORCEINLINE void tdefl_find_match(
    tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint
max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) { mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len; mz_uint num_probes_left = d->m_max_probes[match_len >= 32]; const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q; mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]), s01 = TDEFL_READ_UNALIGNED_WORD(s); MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return; for (;;) { for (;;) { if (--num_probes_left == 0) return; #define TDEFL_PROBE \ next_probe_pos = d->m_next[probe_pos]; \ if ((!next_probe_pos) || \ ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \ return; \ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \ if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \ break; TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE; } if (!dist) break; q = (const mz_uint16 *)(d->m_dict + probe_pos); if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue; p = s; probe_len = 32; do { } while ( (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0)); if (!probe_len) { *pMatch_dist = dist; *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN); break; } else if ((probe_len = ((mz_uint)(p - s) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q)) > match_len) { *pMatch_dist = dist; if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) == max_match_len) break; c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]); } } } #else static MZ_FORCEINLINE void tdefl_find_match( tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) { mz_uint dist, pos = 
lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len; mz_uint num_probes_left = d->m_max_probes[match_len >= 32]; const mz_uint8 *s = d->m_dict + pos, *p, *q; mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1]; MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return; for (;;) { for (;;) { if (--num_probes_left == 0) return; #define TDEFL_PROBE \ next_probe_pos = d->m_next[probe_pos]; \ if ((!next_probe_pos) || \ ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \ return; \ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \ if ((d->m_dict[probe_pos + match_len] == c0) && \ (d->m_dict[probe_pos + match_len - 1] == c1)) \ break; TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE; } if (!dist) break; p = s; q = d->m_dict + probe_pos; for (probe_len = 0; probe_len < max_match_len; probe_len++) if (*p++ != *q++) break; if (probe_len > match_len) { *pMatch_dist = dist; if ((*pMatch_len = match_len = probe_len) == max_match_len) return; c0 = d->m_dict[pos + match_len]; c1 = d->m_dict[pos + match_len - 1]; } } } #endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN static mz_bool tdefl_compress_fast(tdefl_compressor *d) { // Faster, minimally featured LZRW1-style match+parse loop with better // register utilization. Intended for applications where raw throughput is // valued more highly than ratio. 
mz_uint lookahead_pos = d->m_lookahead_pos, lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size, total_lz_bytes = d->m_total_lz_bytes, num_flags_left = d->m_num_flags_left; mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags; mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) { const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096; mz_uint dst_pos = (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; mz_uint num_bytes_to_process = (mz_uint)MZ_MIN( d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size); d->m_src_buf_left -= num_bytes_to_process; lookahead_size += num_bytes_to_process; while (num_bytes_to_process) { mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process); memcpy(d->m_dict + dst_pos, d->m_pSrc, n); if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc, MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos)); d->m_pSrc += n; dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK; num_bytes_to_process -= n; } dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size); if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE)) break; while (lookahead_size >= 4) { mz_uint cur_match_dist, cur_match_len = 1; mz_uint8 *pCur_dict = d->m_dict + cur_pos; mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF; mz_uint hash = (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) & TDEFL_LEVEL1_HASH_SIZE_MASK; mz_uint probe_pos = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)lookahead_pos; if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <= dict_size) && ((*(const mz_uint32 *)(d->m_dict + (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) & 0xFFFFFF) == first_trigram)) { const mz_uint16 *p = (const mz_uint16 *)pCur_dict; const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos); mz_uint32 probe_len = 32; do { } while 
((TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0)); cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q); if (!probe_len) cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0; if ((cur_match_len < TDEFL_MIN_MATCH_LEN) || ((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U))) { cur_match_len = 1; *pLZ_code_buf++ = (mz_uint8)first_trigram; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); d->m_huff_count[0][(mz_uint8)first_trigram]++; } else { mz_uint32 s0, s1; cur_match_len = MZ_MIN(cur_match_len, lookahead_size); MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 1) && (cur_match_dist <= TDEFL_LZ_DICT_SIZE)); cur_match_dist--; pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN); *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist; pLZ_code_buf += 3; *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80); s0 = s_tdefl_small_dist_sym[cur_match_dist & 511]; s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8]; d->m_huff_count[1][(cur_match_dist < 512) ? 
s0 : s1]++; d->m_huff_count[0][s_tdefl_len_sym[cur_match_len - TDEFL_MIN_MATCH_LEN]]++; } } else { *pLZ_code_buf++ = (mz_uint8)first_trigram; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); d->m_huff_count[0][(mz_uint8)first_trigram]++; } if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; } total_lz_bytes += cur_match_len; lookahead_pos += cur_match_len; dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE); cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK; MZ_ASSERT(lookahead_size >= cur_match_len); lookahead_size -= cur_match_len; if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { int n; d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left; } } while (lookahead_size) { mz_uint8 lit = d->m_dict[cur_pos]; total_lz_bytes++; *pLZ_code_buf++ = lit; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; } d->m_huff_count[0][lit]++; lookahead_pos++; dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE); cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; lookahead_size--; if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { int n; d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? 
MZ_FALSE : MZ_TRUE;
        // The flush reset the LZ code buffer; re-load the cached cursors.
        total_lz_bytes = d->m_total_lz_bytes;
        pLZ_code_buf = d->m_pLZ_code_buf;
        pLZ_flags = d->m_pLZ_flags;
        num_flags_left = d->m_num_flags_left;
      }
    }
  }
  // Write the locally cached parser state back into the compressor.
  d->m_lookahead_pos = lookahead_pos;
  d->m_lookahead_size = lookahead_size;
  d->m_dict_size = dict_size;
  d->m_total_lz_bytes = total_lz_bytes;
  d->m_pLZ_code_buf = pLZ_code_buf;
  d->m_pLZ_flags = pLZ_flags;
  d->m_num_flags_left = num_flags_left;
  return MZ_TRUE;
}
#endif  // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN

// Appends one literal byte to the LZ code buffer and bumps its histogram
// entry. A cleared bit in the shared flags byte marks a literal.
static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d,
                                                mz_uint8 lit) {
  d->m_total_lz_bytes++;
  *d->m_pLZ_code_buf++ = lit;
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
  if (--d->m_num_flags_left == 0) {
    // Current flags byte is full; reserve the next byte of the code buffer
    // as a fresh flags byte.
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = d->m_pLZ_code_buf++;
  }
  d->m_huff_count[0][lit]++;
}

// Appends a (length, distance) match as 3 bytes to the LZ code buffer:
// len - TDEFL_MIN_MATCH_LEN, then (dist - 1) little-endian. A set bit in the
// shared flags byte marks a match. Updates both Huffman histograms.
static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d,
                                              mz_uint match_len,
                                              mz_uint match_dist) {
  mz_uint32 s0, s1;
  MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) &&
            (match_dist <= TDEFL_LZ_DICT_SIZE));
  d->m_total_lz_bytes += match_len;
  d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
  match_dist -= 1;
  d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
  d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
  d->m_pLZ_code_buf += 3;
  *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
  if (--d->m_num_flags_left == 0) {
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = d->m_pLZ_code_buf++;
  }
  // Distance symbol comes from the small table for dists < 512, else from
  // the large table indexed by the high distance byte.
  s0 = s_tdefl_small_dist_sym[match_dist & 511];
  s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
  d->m_huff_count[1][(match_dist < 512) ?
s0 : s1]++; if (match_len >= TDEFL_MIN_MATCH_LEN) d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++; } static mz_bool tdefl_compress_normal(tdefl_compressor *d) { const mz_uint8 *pSrc = d->m_pSrc; size_t src_buf_left = d->m_src_buf_left; tdefl_flush flush = d->m_flush; while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) { mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos; // Update dictionary and hash chains. Keeps the lookahead size equal to // TDEFL_MAX_MATCH_LEN. if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) { mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK, ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2; mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]; mz_uint num_bytes_to_process = (mz_uint)MZ_MIN( src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size); const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process; src_buf_left -= num_bytes_to_process; d->m_lookahead_size += num_bytes_to_process; while (pSrc != pSrc_end) { mz_uint8 c = *pSrc++; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos); dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; ins_pos++; } } else { while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) { mz_uint8 c = *pSrc++; mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; src_buf_left--; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) { mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2; mz_uint hash = 
((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << (TDEFL_LZ_HASH_SHIFT * 2)) ^ (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos); } } } d->m_dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size); if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break; // Simple lazy/greedy parsing state machine. len_to_move = 1; cur_match_dist = 0; cur_match_len = d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1); cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) { if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) { mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK]; cur_match_len = 0; while (cur_match_len < d->m_lookahead_size) { if (d->m_dict[cur_pos + cur_match_len] != c) break; cur_match_len++; } if (cur_match_len < TDEFL_MIN_MATCH_LEN) cur_match_len = 0; else cur_match_dist = 1; } } else { tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size, d->m_lookahead_size, &cur_match_dist, &cur_match_len); } if (((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U)) || (cur_pos == cur_match_dist) || ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) { cur_match_dist = cur_match_len = 0; } if (d->m_saved_match_len) { if (cur_match_len > d->m_saved_match_len) { tdefl_record_literal(d, (mz_uint8)d->m_saved_lit); if (cur_match_len >= 128) { tdefl_record_match(d, cur_match_len, cur_match_dist); d->m_saved_match_len = 0; len_to_move = cur_match_len; } else { d->m_saved_lit = d->m_dict[cur_pos]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len; } } else { tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist); len_to_move = d->m_saved_match_len - 1; d->m_saved_match_len = 0; } } else if 
(!cur_match_dist) tdefl_record_literal(d, d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]); else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) || (cur_match_len >= 128)) { tdefl_record_match(d, cur_match_len, cur_match_dist); len_to_move = cur_match_len; } else { d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len; } // Move the lookahead forward by len_to_move bytes. d->m_lookahead_pos += len_to_move; MZ_ASSERT(d->m_lookahead_size >= len_to_move); d->m_lookahead_size -= len_to_move; d->m_dict_size = MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE); // Check if it's time to flush the current LZ codes to the internal output // buffer. if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) || ((d->m_total_lz_bytes > 31 * 1024) && (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >= d->m_total_lz_bytes) || (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) { int n; d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; } } d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left; return MZ_TRUE; } static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) { if (d->m_pIn_buf_size) { *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf; } if (d->m_pOut_buf_size) { size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs, d->m_output_flush_remaining); memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf + d->m_output_flush_ofs, n); d->m_output_flush_ofs += (mz_uint)n; d->m_output_flush_remaining -= (mz_uint)n; d->m_out_buf_ofs += n; *d->m_pOut_buf_size = d->m_out_buf_ofs; } return (d->m_finished && !d->m_output_flush_remaining) ? 
TDEFL_STATUS_DONE : TDEFL_STATUS_OKAY; } tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush) { if (!d) { if (pIn_buf_size) *pIn_buf_size = 0; if (pOut_buf_size) *pOut_buf_size = 0; return TDEFL_STATUS_BAD_PARAM; } d->m_pIn_buf = pIn_buf; d->m_pIn_buf_size = pIn_buf_size; d->m_pOut_buf = pOut_buf; d->m_pOut_buf_size = pOut_buf_size; d->m_pSrc = (const mz_uint8 *)(pIn_buf); d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0; d->m_out_buf_ofs = 0; d->m_flush = flush; if (((d->m_pPut_buf_func != NULL) == ((pOut_buf != NULL) || (pOut_buf_size != NULL))) || (d->m_prev_return_status != TDEFL_STATUS_OKAY) || (d->m_wants_to_finish && (flush != TDEFL_FINISH)) || (pIn_buf_size && *pIn_buf_size && !pIn_buf) || (pOut_buf_size && *pOut_buf_size && !pOut_buf)) { if (pIn_buf_size) *pIn_buf_size = 0; if (pOut_buf_size) *pOut_buf_size = 0; return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM); } d->m_wants_to_finish |= (flush == TDEFL_FINISH); if ((d->m_output_flush_remaining) || (d->m_finished)) return (d->m_prev_return_status = tdefl_flush_output_buffer(d)); #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) && ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) && ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS | TDEFL_RLE_MATCHES)) == 0)) { if (!tdefl_compress_fast(d)) return d->m_prev_return_status; } else #endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN { if (!tdefl_compress_normal(d)) return d->m_prev_return_status; } if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) && (pIn_buf)) d->m_adler32 = (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf, d->m_pSrc - (const mz_uint8 *)pIn_buf); if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) && (!d->m_output_flush_remaining)) { if (tdefl_flush_block(d, flush) < 0) return 
d->m_prev_return_status;
    d->m_finished = (flush == TDEFL_FINISH);
    if (flush == TDEFL_FULL_FLUSH) {
      // A full flush drops the dictionary so the next block is independent
      // of everything compressed so far.
      MZ_CLEAR_OBJ(d->m_hash);
      MZ_CLEAR_OBJ(d->m_next);
      d->m_dict_size = 0;
    }
  }
  return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
}

// Convenience wrapper around tdefl_compress() for compressors that were
// initialized with a put-buffer callback (no caller-owned output buffer).
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
                                   size_t in_buf_size, tdefl_flush flush) {
  MZ_ASSERT(d->m_pPut_buf_func);
  return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush);
}

// Initializes (or re-initializes) a compressor. flags combines the TDEFL_*
// option bits with the max probe count in the low 12 bits; the probe budget
// is split between the two m_max_probes tiers below.
tdefl_status tdefl_init(tdefl_compressor *d,
                        tdefl_put_buf_func_ptr pPut_buf_func,
                        void *pPut_buf_user, int flags) {
  d->m_pPut_buf_func = pPut_buf_func;
  d->m_pPut_buf_user = pPut_buf_user;
  d->m_flags = (mz_uint)(flags);
  d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
  d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
  d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
  // Deterministic mode clears the hash table so output never depends on
  // stale state from a previous use of this struct.
  if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash);
  d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size =
      d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
  d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished =
      d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
  // Byte 0 of the LZ code buffer is the first flags byte; codes start at 1.
  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_pOutput_buf = d->m_output_buf;
  d->m_pOutput_buf_end = d->m_output_buf;
  d->m_prev_return_status = TDEFL_STATUS_OKAY;
  d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
  d->m_adler32 = 1;  // adler-32 of the empty string
  d->m_pIn_buf = NULL;
  d->m_pOut_buf = NULL;
  d->m_pIn_buf_size = NULL;
  d->m_pOut_buf_size = NULL;
  d->m_flush = TDEFL_NO_FLUSH;
  d->m_pSrc = NULL;
  d->m_src_buf_left = 0;
  d->m_out_buf_ofs = 0;
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  return TDEFL_STATUS_OKAY;
}

// Returns the status of the most recent tdefl_compress() call on d.
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
  return d->m_prev_return_status;
}

// Returns the adler-32 of all source data consumed so far.
mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }

// One-shot compression of an in-memory buffer through a caller-supplied
// put-buffer callback. Heap-allocates a temporary compressor for the call.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
                                     tdefl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  tdefl_compressor *pComp;
  mz_bool succeeded;
  if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE;
  pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  if (!pComp) return MZ_FALSE;
  succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
               TDEFL_STATUS_OKAY);
  succeeded = succeeded &&
              (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) ==
               TDEFL_STATUS_DONE);
  MZ_FREE(pComp);
  return succeeded;
}

// Destination buffer used by the mem-to-heap / mem-to-mem helpers below:
// either growable (m_expandable) or a fixed caller-owned region.
typedef struct {
  size_t m_size, m_capacity;
  mz_uint8 *m_pBuf;
  mz_bool m_expandable;
} tdefl_output_buffer;

// tdefl put-buffer callback that appends to a tdefl_output_buffer, doubling
// its capacity (min 128 bytes) when expandable; fails if the buffer is fixed
// and full, or if reallocation fails.
static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
                                          void *pUser) {
  tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
  size_t new_size = p->m_size + len;
  if (new_size > p->m_capacity) {
    size_t new_capacity = p->m_capacity;
    mz_uint8 *pNew_buf;
    if (!p->m_expandable) return MZ_FALSE;
    do {
      new_capacity = MZ_MAX(128U, new_capacity << 1U);
    } while (new_size > new_capacity);
    pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity);
    if (!pNew_buf) return MZ_FALSE;
    p->m_pBuf = pNew_buf;
    p->m_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
  p->m_size = new_size;
  return MZ_TRUE;
}

// Compresses pSrc_buf into a heap-allocated buffer which the caller must
// free; *pOut_len receives the compressed size. Returns NULL on failure.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                 size_t *pOut_len, int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_len)
    return MZ_FALSE;  // MZ_FALSE is 0, i.e. NULL in this pointer context
  else
    *pOut_len = 0;
  out_buf.m_expandable = MZ_TRUE;
  if (!tdefl_compress_mem_to_output(
          pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
    return NULL;
  *pOut_len = out_buf.m_size;
  return out_buf.m_pBuf;
}

// Compresses pSrc_buf into a fixed-size caller buffer; returns the compressed
// size, or 0 on failure (including output overflow).
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                 const void *pSrc_buf, size_t src_buf_len,
                                 int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_buf)
    return 0;
  // Fixed (non-expandable) output buffer wrapping the caller's memory.
  out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
  out_buf.m_capacity = out_buf_len;
  if (!tdefl_compress_mem_to_output(
          pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
    return 0;
  return out_buf.m_size;
}

#ifndef MINIZ_NO_ZLIB_APIS
// Dictionary probe counts indexed by compression level 0..10.
static const mz_uint s_tdefl_num_probes[11] = {0,   1,   6,   32,  16, 32,
                                               128, 256, 512, 768, 1500};

// Translates zlib-style (level, window_bits, strategy) parameters into a
// TDEFL_* flag word for tdefl_init().
// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput to fall off a cliff
// on some files).
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
                                                int strategy) {
  mz_uint comp_flags =
      s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] |
      ((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0);
  // Positive window_bits requests a zlib wrapper; negative means raw deflate.
  if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
  if (!level)
    comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
  else if (strategy == MZ_FILTERED)
    comp_flags |= TDEFL_FILTER_MATCHES;
  else if (strategy == MZ_HUFFMAN_ONLY)
    comp_flags &= ~TDEFL_MAX_PROBES_MASK;  // zero probes: no match search
  else if (strategy == MZ_FIXED)
    comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
  else if (strategy == MZ_RLE)
    comp_flags |= TDEFL_RLE_MATCHES;
  return comp_flags;
}
#endif  // MINIZ_NO_ZLIB_APIS

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204)  // nonstandard extension used : non-constant
                                 // aggregate initializer (also supported by GNU
                                 // C and C99, so no big deal)
#pragma warning(disable : 4244)  // 'initializing': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning(disable : 4267)  // 'argument': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning(disable : 4996)  // 'strdup': The POSIX name for this item is
                                 // deprecated. Instead, use the ISO C and C++
                                 // conformant name: _strdup.
#endif

// Simple PNG writer function by Alex Evans, 2011. Released into the public
// domain: https://gist.github.com/908299, more context at
// http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/.
// This is actually a modification of Alex's original code so PNG files // generated by this function pass pngcheck. void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip) { // Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was // defined. static const mz_uint s_tdefl_png_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500}; tdefl_compressor *pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); tdefl_output_buffer out_buf; int i, bpl = w * num_chans, y, z; mz_uint32 c; *pLen_out = 0; if (!pComp) return NULL; MZ_CLEAR_OBJ(out_buf); out_buf.m_expandable = MZ_TRUE; out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h); if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) { MZ_FREE(pComp); return NULL; } // write dummy header for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf); // compress image data tdefl_init( pComp, tdefl_output_buffer_putter, &out_buf, s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER); for (y = 0; y < h; ++y) { tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH); tdefl_compress_buffer(pComp, (mz_uint8 *)pImage + (flip ? 
(h - 1 - y) : y) * bpl, bpl, TDEFL_NO_FLUSH); } if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) != TDEFL_STATUS_DONE) { MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; } // write real header *pLen_out = out_buf.m_size - 41; { static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06}; mz_uint8 pnghdr[41] = {0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0, 0, (mz_uint8)(w >> 8), (mz_uint8)w, 0, 0, (mz_uint8)(h >> 8), (mz_uint8)h, 8, chans[num_chans], 0, 0, 0, 0, 0, 0, 0, (mz_uint8)(*pLen_out >> 24), (mz_uint8)(*pLen_out >> 16), (mz_uint8)(*pLen_out >> 8), (mz_uint8)*pLen_out, 0x49, 0x44, 0x41, 0x54}; c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17); for (i = 0; i < 4; ++i, c <<= 8) ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24); memcpy(out_buf.m_pBuf, pnghdr, 41); } // write footer (IDAT CRC-32, followed by IEND chunk) if (!tdefl_output_buffer_putter( "\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) { *pLen_out = 0; MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; } c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4, *pLen_out + 4); for (i = 0; i < 4; ++i, c <<= 8) (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24); // compute final size of file, grab compressed data buffer and return *pLen_out += 57; MZ_FREE(pComp); return out_buf.m_pBuf; } void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out) { // Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we // can't depend on MZ_DEFAULT_LEVEL being available in case the zlib API's // where #defined out) return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans, pLen_out, 6, MZ_FALSE); } // ------------------- .ZIP archive reading #ifndef MINIZ_NO_ARCHIVE_APIS #error "No arvhive APIs" #ifdef MINIZ_NO_STDIO #define MZ_FILE void * #else #include <stdio.h> #include <sys/stat.h> #if defined(_MSC_VER) || 
defined(__MINGW64__) static FILE *mz_fopen(const char *pFilename, const char *pMode) { FILE *pFile = NULL; fopen_s(&pFile, pFilename, pMode); return pFile; } static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) { FILE *pFile = NULL; if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL; return pFile; } #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN mz_fopen #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 _ftelli64 #define MZ_FSEEK64 _fseeki64 #define MZ_FILE_STAT_STRUCT _stat #define MZ_FILE_STAT _stat #define MZ_FFLUSH fflush #define MZ_FREOPEN mz_freopen #define MZ_DELETE_FILE remove #elif defined(__MINGW32__) #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello64 #define MZ_FSEEK64 fseeko64 #define MZ_FILE_STAT_STRUCT _stat #define MZ_FILE_STAT _stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #elif defined(__TINYC__) #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftell #define MZ_FSEEK64 fseek #define MZ_FILE_STAT_STRUCT stat #define MZ_FILE_STAT stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen64(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello64 #define MZ_FSEEK64 fseeko64 #define MZ_FILE_STAT_STRUCT stat64 #define MZ_FILE_STAT stat64 #define MZ_FFLUSH fflush #define MZ_FREOPEN(p, m, s) freopen64(p, m, s) 
#define MZ_DELETE_FILE remove
#else
// Generic POSIX fallback: plain fopen/ftello/fseeko/stat.
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello
#define MZ_FSEEK64 fseeko
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#endif // #ifdef _MSC_VER
#endif // #ifdef MINIZ_NO_STDIO

// ASCII-only lowercase; avoids locale-dependent tolower().
#define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c))

// Various ZIP archive enums. To completely avoid cross platform compiler
// alignment and platform endian issues, miniz.c doesn't use structs for any of
// this stuff.
enum {
  // ZIP archive identifiers and record sizes
  MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50,
  MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50,
  MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50,
  MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30,
  MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46,
  MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22,
  // Central directory header record offsets
  MZ_ZIP_CDH_SIG_OFS = 0,
  MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4,
  MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6,
  MZ_ZIP_CDH_BIT_FLAG_OFS = 8,
  MZ_ZIP_CDH_METHOD_OFS = 10,
  MZ_ZIP_CDH_FILE_TIME_OFS = 12,
  MZ_ZIP_CDH_FILE_DATE_OFS = 14,
  MZ_ZIP_CDH_CRC32_OFS = 16,
  MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20,
  MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24,
  MZ_ZIP_CDH_FILENAME_LEN_OFS = 28,
  MZ_ZIP_CDH_EXTRA_LEN_OFS = 30,
  MZ_ZIP_CDH_COMMENT_LEN_OFS = 32,
  MZ_ZIP_CDH_DISK_START_OFS = 34,
  MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36,
  MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38,
  MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42,
  // Local directory header offsets
  MZ_ZIP_LDH_SIG_OFS = 0,
  MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4,
  MZ_ZIP_LDH_BIT_FLAG_OFS = 6,
  MZ_ZIP_LDH_METHOD_OFS = 8,
  MZ_ZIP_LDH_FILE_TIME_OFS = 10,
  MZ_ZIP_LDH_FILE_DATE_OFS = 12,
  MZ_ZIP_LDH_CRC32_OFS = 14,
  MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18,
  MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22,
  MZ_ZIP_LDH_FILENAME_LEN_OFS = 26,
  MZ_ZIP_LDH_EXTRA_LEN_OFS = 28,
  // End of central directory offsets
  MZ_ZIP_ECDH_SIG_OFS = 0,
  MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4,
  MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6,
  MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8,
  MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10,
  MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12,
  MZ_ZIP_ECDH_CDIR_OFS_OFS = 16,
  MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20,
};

// Minimal growable array (untyped element storage) used to hold the in-memory
// central directory and its index tables.
typedef struct {
  void *m_p;                 // element storage (allocated via pZip callbacks)
  size_t m_size, m_capacity; // size/capacity in ELEMENTS, not bytes
  mz_uint m_element_size;    // bytes per element
} mz_zip_array;

// Per-archive reader/writer state hung off mz_zip_archive::m_pState.
struct mz_zip_internal_state_tag {
  mz_zip_array m_central_dir;                // raw central directory bytes
  mz_zip_array m_central_dir_offsets;        // offset of each record in m_central_dir
  mz_zip_array m_sorted_central_dir_offsets; // indices sorted by filename
  MZ_FILE *m_pFile;                          // backing file (file-based archives)
  void *m_pMem;                              // backing memory (memory archives)
  size_t m_mem_size;
  size_t m_mem_capacity;
};

#define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \
  (array_ptr)->m_element_size = element_size
#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \
  ((element_type *)((array_ptr)->m_p))[index]

// Frees the array's storage and resets it to the empty state.
static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip,
                                              mz_zip_array *pArray) {
  pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p);
  memset(pArray, 0, sizeof(mz_zip_array));
}

// Grows storage to at least min_new_capacity elements. When `growing` is set,
// the capacity is doubled until sufficient (amortized growth); otherwise it is
// sized exactly.
static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip,
                                            mz_zip_array *pArray,
                                            size_t min_new_capacity,
                                            mz_uint growing) {
  void *pNew_p;
  size_t new_capacity = min_new_capacity;
  MZ_ASSERT(pArray->m_element_size);
  if (pArray->m_capacity >= min_new_capacity)
    return MZ_TRUE;
  if (growing) {
    new_capacity = MZ_MAX(1, pArray->m_capacity);
    while (new_capacity < min_new_capacity)
      new_capacity *= 2;
  }
  if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p,
                                         pArray->m_element_size,
                                         new_capacity)))
    return MZ_FALSE;
  pArray->m_p = pNew_p;
  pArray->m_capacity = new_capacity;
  return MZ_TRUE;
}

// Reserves capacity without changing the logical size.
static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip,
                                                   mz_zip_array *pArray,
                                                   size_t new_capacity,
                                                   mz_uint growing) {
  if (new_capacity > pArray->m_capacity) {
    if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing))
      return MZ_FALSE;
  }
  return MZ_TRUE;
}

static MZ_FORCEINLINE mz_bool
mz_zip_array_resize(mz_zip_archive *pZip, mz_zip_array *pArray,
                    size_t new_size, mz_uint growing) {
  // Sets the logical size, growing storage first if needed. New elements are
  // NOT zero-initialized.
  if (new_size > pArray->m_capacity) {
    if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing))
      return MZ_FALSE;
  }
  pArray->m_size = new_size;
  return MZ_TRUE;
}

// Ensures room for n more elements past the current size.
static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip,
                                                       mz_zip_array *pArray,
                                                       size_t n) {
  return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE);
}

// Appends n elements copied from pElements.
static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip,
                                                     mz_zip_array *pArray,
                                                     const void *pElements,
                                                     size_t n) {
  size_t orig_size = pArray->m_size;
  if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE))
    return MZ_FALSE;
  memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size,
         pElements, n * pArray->m_element_size);
  return MZ_TRUE;
}

#ifndef MINIZ_NO_TIME
// Converts MS-DOS packed time/date fields (as stored in ZIP headers) to a
// time_t via mktime(). DOS time has 2-second resolution (hence the << 1).
static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) {
  struct tm tm;
  memset(&tm, 0, sizeof(tm));
  tm.tm_isdst = -1; // let mktime() determine DST
  tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900;
  tm.tm_mon = ((dos_date >> 5) & 15) - 1;
  tm.tm_mday = dos_date & 31;
  tm.tm_hour = (dos_time >> 11) & 31;
  tm.tm_min = (dos_time >> 5) & 63;
  tm.tm_sec = (dos_time << 1) & 62;
  return mktime(&tm);
}

// Inverse conversion: time_t -> DOS packed time/date (local time). On
// localtime_s failure both outputs are zeroed.
static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time,
                                    mz_uint16 *pDOS_date) {
#ifdef _MSC_VER
  struct tm tm_struct;
  struct tm *tm = &tm_struct;
  errno_t err = localtime_s(tm, &time);
  if (err) {
    *pDOS_date = 0;
    *pDOS_time = 0;
    return;
  }
#else
  struct tm *tm = localtime(&time);
#endif
  *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) +
                           ((tm->tm_sec) >> 1));
  *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) +
                           ((tm->tm_mon + 1) << 5) + tm->tm_mday);
}
#endif

#ifndef MINIZ_NO_STDIO
// Reads pFilename's modification time as DOS time/date. With MINIZ_NO_TIME
// both outputs are zero and the call still succeeds.
static mz_bool mz_zip_get_file_modified_time(const char *pFilename,
                                             mz_uint16 *pDOS_time,
                                             mz_uint16 *pDOS_date) {
#ifdef MINIZ_NO_TIME
  (void)pFilename;
  *pDOS_date = *pDOS_time = 0;
#else
  struct MZ_FILE_STAT_STRUCT file_stat;
  // On Linux with x86 glibc, this call will fail on large files
  // (>= 0x80000000 bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh.
  if (MZ_FILE_STAT(pFilename, &file_stat) != 0)
    return MZ_FALSE;
  mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date);
#endif // #ifdef MINIZ_NO_TIME
  return MZ_TRUE;
}

#ifndef MINIZ_NO_TIME
// Sets a file's access/modification times; returns MZ_TRUE on success.
static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time,
                                     time_t modified_time) {
  struct utimbuf t;
  t.actime = access_time;
  t.modtime = modified_time;
  return !utime(pFilename, &t);
}
#endif // #ifndef MINIZ_NO_TIME
#endif // #ifndef MINIZ_NO_STDIO

// Common reader setup: installs default allocators if the caller supplied
// none, switches the archive into READING mode and allocates/zeroes the
// internal state with correctly-typed element sizes for the three arrays.
static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip,
                                           mz_uint32 flags) {
  (void)flags;
  if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
    return MZ_FALSE;

  if (!pZip->m_pAlloc)
    pZip->m_pAlloc = def_alloc_func;
  if (!pZip->m_pFree)
    pZip->m_pFree = def_free_func;
  if (!pZip->m_pRealloc)
    pZip->m_pRealloc = def_realloc_func;

  pZip->m_zip_mode = MZ_ZIP_MODE_READING;
  pZip->m_archive_size = 0;
  pZip->m_central_directory_file_ofs = 0;
  pZip->m_total_files = 0;

  if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
                   pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
    return MZ_FALSE;
  memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
                                sizeof(mz_uint8));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
                                sizeof(mz_uint32));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
                                sizeof(mz_uint32));
  return MZ_TRUE;
}

// Case-insensitive "less than" comparison of the filenames of two central
// directory records, used as the ordering predicate for the filename sort.
static MZ_FORCEINLINE mz_bool
mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array,
                            const mz_zip_array *pCentral_dir_offsets,
                            mz_uint l_index, mz_uint r_index) {
  const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
                     pCentral_dir_array, mz_uint8,
                     MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
                                          l_index)),
                 *pE;
  const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT(
      pCentral_dir_array, mz_uint8,
      MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets,
                           mz_uint32, r_index));
  mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS),
          r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  mz_uint8 l = 0, r = 0;
  // Filenames immediately follow the fixed-size central dir header.
  pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pE = pL + MZ_MIN(l_len, r_len);
  while (pL < pE) {
    if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR)))
      break;
    pL++;
    pR++;
  }
  // Common prefix exhausted: the shorter name sorts first; otherwise compare
  // the first differing (lowercased) bytes.
  return (pL == pE) ? (l_len < r_len) : (l < r);
}

#define MZ_SWAP_UINT32(a, b) \
  do {                       \
    mz_uint32 t = a;         \
    a = b;                   \
    b = t;                   \
  }                          \
  MZ_MACRO_END

// Heap sort of lowercased filenames, used to help accelerate plain central
// directory searches by mz_zip_reader_locate_file(). (Could also use qsort(),
// but it could allocate memory.)
static void mz_zip_reader_sort_central_dir_offsets_by_filename(
    mz_zip_archive *pZip) {
  mz_zip_internal_state *pState = pZip->m_pState;
  const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
  const mz_zip_array *pCentral_dir = &pState->m_central_dir;
  mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
      &pState->m_sorted_central_dir_offsets, mz_uint32, 0);
  const int size = pZip->m_total_files;
  // Phase 1: build a max-heap over the index array (sift-down from the last
  // internal node).
  int start = (size - 2) >> 1, end;
  while (start >= 0) {
    int child, root = start;
    for (;;) {
      if ((child = (root << 1) + 1) >= size)
        break;
      child += (((child + 1) < size) &&
                (mz_zip_reader_filename_less(pCentral_dir,
                                             pCentral_dir_offsets,
                                             pIndices[child],
                                             pIndices[child + 1])));
      if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[root], pIndices[child]))
        break;
      MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
      root = child;
    }
    start--;
  }

  // Phase 2: repeatedly move the heap max to the end and re-sift.
  end = size - 1;
  while (end > 0) {
    int child, root = 0;
    MZ_SWAP_UINT32(pIndices[end], pIndices[0]);
    for (;;) {
      if ((child = (root << 1) + 1) >= end)
        break;
      child +=
          (((child + 1) < end) &&
           mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[child], pIndices[child + 1]));
      if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[root], pIndices[child]))
        break;
      MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
      root = child;
    }
    end--;
  }
}

// Locates and validates the end-of-central-directory record, then loads and
// sanity-checks the entire central directory into memory.
static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip,
                                              mz_uint32 flags) {
  mz_uint cdir_size, num_this_disk, cdir_disk_index;
  mz_uint64 cdir_ofs;
  mz_int64 cur_file_ofs;
  const mz_uint8 *p;
  mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];
  mz_uint8 *pBuf = (mz_uint8 *)buf_u32;
  mz_bool sort_central_dir =
      ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0);
  // Basic sanity checks - reject files which are too small, and check the first
  // 4 bytes of the file to make sure a local header is there.
  if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  // Find the end of central directory record by scanning the file from the end
  // towards the beginning.
  cur_file_ofs =
      MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0);
  for (;;) {
    int i,
        n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs);
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n)
      return MZ_FALSE;
    for (i = n - 4; i >= 0; --i)
      if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG)
        break;
    if (i >= 0) {
      cur_file_ofs += i;
      break;
    }
    // Give up once we've scanned past the max possible comment size (0xFFFF).
    if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >=
                            (0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)))
      return MZ_FALSE;
    // Overlap reads by 3 bytes so a signature spanning two reads isn't missed.
    cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0);
  }
  // Read and verify the end of central directory record.
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
                    MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  // Signature must match, and the entry counts (total vs. this-disk) must
  // agree — multi-disk (spanned) archives are not supported.
  if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) !=
       MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) ||
      ((pZip->m_total_files =
            MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) !=
       MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS)))
    return MZ_FALSE;

  num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS);
  cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS);
  if (((num_this_disk | cdir_disk_index) != 0) &&
      ((num_this_disk != 1) || (cdir_disk_index != 1)))
    return MZ_FALSE;

  // The central dir must be at least big enough for one fixed header/entry.
  if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) <
      pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;

  cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS);
  if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size)
    return MZ_FALSE;

  pZip->m_central_directory_file_ofs = cdir_ofs;

  if (pZip->m_total_files) {
    mz_uint i, n;
    // Read the entire central directory into a heap block, and allocate another
    // heap block to hold the unsorted central dir file record offsets, and
    // another to hold the sorted indices.
    if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size,
                              MZ_FALSE)) ||
        (!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets,
                              pZip->m_total_files, MZ_FALSE)))
      return MZ_FALSE;

    if (sort_central_dir) {
      if (!mz_zip_array_resize(pZip,
                               &pZip->m_pState->m_sorted_central_dir_offsets,
                               pZip->m_total_files, MZ_FALSE))
        return MZ_FALSE;
    }

    if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs,
                      pZip->m_pState->m_central_dir.m_p,
                      cdir_size) != cdir_size)
      return MZ_FALSE;

    // Now create an index into the central directory file records, do some
    // basic sanity checking on each record, and check for zip64 entries (which
    // are not yet supported).
    p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p;
    for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) {
      mz_uint total_header_size, comp_size, decomp_size, disk_index;
      if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) ||
          (MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG))
        return MZ_FALSE;
      MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                           i) =
          (mz_uint32)(p -
                      (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p);
      if (sort_central_dir)
        MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets,
                             mz_uint32, i) = i;
      comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
      decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
      // Stored entries must have equal sizes; 0xFFFFFFFF is the zip64 marker.
      if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) &&
           (decomp_size != comp_size)) ||
          (decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) ||
          (comp_size == 0xFFFFFFFF))
        return MZ_FALSE;
      disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS);
      if ((disk_index != num_this_disk) && (disk_index != 1))
        return MZ_FALSE;
      // The entry's local header + data must fit inside the archive.
      if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) +
           MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size)
        return MZ_FALSE;
      if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
                               MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
                               MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
                               MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) >
          n)
        return MZ_FALSE;
      n -= total_header_size;
      p += total_header_size;
    }
  }

  if (sort_central_dir)
    mz_zip_reader_sort_central_dir_offsets_by_filename(pZip);

  return MZ_TRUE;
}

// Initializes a reader over an archive accessed through the user-supplied
// m_pRead callback. `size` is the total archive size in bytes.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
                           mz_uint32 flags) {
  if ((!pZip) || (!pZip->m_pRead))
    return MZ_FALSE;
  if (!mz_zip_reader_init_internal(pZip, flags))
    return MZ_FALSE;
  pZip->m_archive_size = size;
  if (!mz_zip_reader_read_central_dir(pZip, flags)) {
    mz_zip_reader_end(pZip);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}

// Read callback for in-memory archives: memcpy from m_pMem, clamped to the
// archive bounds.
static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs,
                                   void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
size_t s = (file_ofs >= pZip->m_archive_size) ? 0 : (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n); memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s); return s; } mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint32 flags) { if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE; pZip->m_archive_size = size; pZip->m_pRead = mz_zip_mem_read_func; pZip->m_pIO_opaque = pZip; #ifdef __cplusplus pZip->m_pState->m_pMem = const_cast<void *>(pMem); #else pZip->m_pState->m_pMem = (void *)pMem; #endif pZip->m_pState->m_mem_size = size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile); if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))) return 0; return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile); } mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags) { mz_uint64 file_size; MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb"); if (!pFile) return MZ_FALSE; if (MZ_FSEEK64(pFile, 0, SEEK_END)) { MZ_FCLOSE(pFile); return MZ_FALSE; } file_size = MZ_FTELL64(pFile); if (!mz_zip_reader_init_internal(pZip, flags)) { MZ_FCLOSE(pFile); return MZ_FALSE; } pZip->m_pRead = mz_zip_file_read_func; pZip->m_pIO_opaque = pZip; pZip->m_pState->m_pFile = pFile; pZip->m_archive_size = file_size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) { return pZip ? 
pZip->m_total_files : 0; } static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh( mz_zip_archive *pZip, mz_uint file_index) { if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return NULL; return &MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index)); } mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index) { mz_uint m_bit_flag; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) return MZ_FALSE; m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); return (m_bit_flag & 1); } mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index) { mz_uint filename_len, external_attr; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) return MZ_FALSE; // First see if the filename ends with a '/' character. filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); if (filename_len) { if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/') return MZ_TRUE; } // Bugfix: This code was also checking if the internal attribute was non-zero, // which wasn't correct. // Most/all zip writers (hopefully) set DOS file/directory attributes in the // low 16-bits, so check for the DOS directory flag and ignore the source OS // ID in the created by field. // FIXME: Remove this check? Is it necessary - we already check the filename. external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS); if ((external_attr & 0x10) != 0) return MZ_TRUE; return MZ_FALSE; } mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat) { mz_uint n; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if ((!p) || (!pStat)) return MZ_FALSE; // Unpack the central directory record. 
pStat->m_file_index = file_index; pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index); pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS); pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS); pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS); #ifndef MINIZ_NO_TIME pStat->m_time = mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS), MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS)); #endif pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS); pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS); pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS); pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS); // Copy as much of the filename and comment as possible. 
n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1); memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n); pStat->m_filename[n] = '\0'; n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS); n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1); pStat->m_comment_size = n; memcpy(pStat->m_comment, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS), n); pStat->m_comment[n] = '\0'; return MZ_TRUE; } mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index, char *pFilename, mz_uint filename_buf_size) { mz_uint n; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) { if (filename_buf_size) pFilename[0] = '\0'; return 0; } n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); if (filename_buf_size) { n = MZ_MIN(n, filename_buf_size - 1); memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n); pFilename[n] = '\0'; } return n + 1; } static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA, const char *pB, mz_uint len, mz_uint flags) { mz_uint i; if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return 0 == memcmp(pA, pB, len); for (i = 0; i < len; ++i) if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i])) return MZ_FALSE; return MZ_TRUE; } static MZ_FORCEINLINE int mz_zip_reader_filename_compare( const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR, mz_uint r_len) { const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE; mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS); mz_uint8 l = 0, r = 0; pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pE = pL + MZ_MIN(l_len, r_len); while (pL < pE) { if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break; pL++; pR++; } return (pL == pE) ? 
(int)(l_len - r_len) : (l - r); } static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip, const char *pFilename) { mz_zip_internal_state *pState = pZip->m_pState; const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets; const mz_zip_array *pCentral_dir = &pState->m_central_dir; mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT( &pState->m_sorted_central_dir_offsets, mz_uint32, 0); const int size = pZip->m_total_files; const mz_uint filename_len = (mz_uint)strlen(pFilename); int l = 0, h = size - 1; while (l <= h) { int m = (l + h) >> 1, file_index = pIndices[m], comp = mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets, file_index, pFilename, filename_len); if (!comp) return file_index; else if (comp < 0) l = m + 1; else h = m - 1; } return -1; } int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags) { mz_uint file_index; size_t name_len, comment_len; if ((!pZip) || (!pZip->m_pState) || (!pName) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return -1; if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) && (!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size)) return mz_zip_reader_locate_file_binary_search(pZip, pName); name_len = strlen(pName); if (name_len > 0xFFFF) return -1; comment_len = pComment ? 
      strlen(pComment) : 0;
  if (comment_len > 0xFFFF)
    return -1;
  // Slow path: linear scan over every central directory record.
  for (file_index = 0; file_index < pZip->m_total_files; file_index++) {
    const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(
        &pZip->m_pState->m_central_dir, mz_uint8,
        MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets,
                             mz_uint32, file_index));
    mz_uint filename_len =
        MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
    const char *pFilename =
        (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
    if (filename_len < name_len)
      continue;
    if (comment_len) {
      // The per-file comment follows the filename and extra data; it must
      // match exactly (modulo case flags) when a comment filter is given.
      mz_uint file_extra_len =
                  MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS),
              file_comment_len =
                  MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
      const char *pFile_comment = pFilename + filename_len + file_extra_len;
      if ((file_comment_len != comment_len) ||
          (!mz_zip_reader_string_equal(pComment, pFile_comment,
                                       file_comment_len, flags)))
        continue;
    }
    if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) {
      // Strip any leading path: compare only the component after the last
      // '/', '\\' or ':'.
      int ofs = filename_len - 1;
      do {
        if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') ||
            (pFilename[ofs] == ':'))
          break;
      } while (--ofs >= 0);
      ofs++;
      pFilename += ofs;
      filename_len -= ofs;
    }
    if ((filename_len == name_len) &&
        (mz_zip_reader_string_equal(pName, pFilename, filename_len, flags)))
      return file_index;
  }
  return -1;
}

// Extracts file_index into a caller-supplied buffer without allocating an
// output buffer; an optional user read buffer avoids the temp read alloc too.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
                                              mz_uint file_index, void *pBuf,
                                              size_t buf_size, mz_uint flags,
                                              void *pUser_read_buf,
                                              size_t user_read_buf_size) {
  int status = TINFL_STATUS_DONE;
  mz_uint64 needed_size, cur_file_ofs, comp_remaining, out_buf_ofs = 0,
                                       read_buf_size, read_buf_ofs = 0,
                                       read_buf_avail;
  mz_zip_archive_file_stat file_stat;
  void *pRead_buf;
  // Local header staging buffer, mz_uint32-aligned.
  mz_uint32
      local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
                       sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  tinfl_decompressor inflator;

  if ((buf_size) && (!pBuf))
    return MZ_FALSE;

  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
    return MZ_FALSE;

  // Empty file, or a directory (but not always a directory - I've seen odd
  // zips with directories that have compressed data which inflates to 0 bytes)
  if (!file_stat.m_comp_size)
    return MZ_TRUE;

  // Entry is a subdirectory (I've seen old zips with dir entries which have
  // compressed deflate data which inflates to 0 bytes, but these entries claim
  // to uncompress to 512 bytes in the headers).
  // I'm torn how to handle this case - should it fail instead?
  if (mz_zip_reader_is_file_a_directory(pZip, file_index))
    return MZ_TRUE;

  // Encryption and patch files are not supported.
  if (file_stat.m_bit_flag & (1 | 32))
    return MZ_FALSE;

  // This function only supports stored and deflate.
  if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
      (file_stat.m_method != MZ_DEFLATED))
    return MZ_FALSE;

  // Ensure supplied output buffer is large enough.
  needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA)
                    ? file_stat.m_comp_size
                    : file_stat.m_uncomp_size;
  if (buf_size < needed_size)
    return MZ_FALSE;

  // Read and parse the local directory entry.
  cur_file_ofs = file_stat.m_local_header_ofs;
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
                    MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;

  cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
    return MZ_FALSE;

  if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
    // The file is stored or the caller has requested the compressed data.
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, (size_t)needed_size) != needed_size) return MZ_FALSE; return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) || (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) == file_stat.m_crc32); } // Decompress the file either directly from memory or from a file input // buffer. tinfl_init(&inflator); if (pZip->m_pState->m_pMem) { // Read directly from the archive in memory. pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs; read_buf_size = read_buf_avail = file_stat.m_comp_size; comp_remaining = 0; } else if (pUser_read_buf) { // Use a user provided read buffer. if (!user_read_buf_size) return MZ_FALSE; pRead_buf = (mz_uint8 *)pUser_read_buf; read_buf_size = user_read_buf_size; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } else { // Temporarily allocate a read buffer. read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF)) #endif return MZ_FALSE; if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size))) return MZ_FALSE; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } do { size_t in_buf_size, out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs); if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; comp_remaining -= read_buf_avail; read_buf_ofs = 0; } in_buf_size = (size_t)read_buf_avail; status = tinfl_decompress( &inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size, 
            // (continuation of mz_zip_reader_extract_to_mem_no_alloc, whose
            // opening lines are above this chunk)
            TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF |
                (comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0));
        read_buf_avail -= in_buf_size;
        read_buf_ofs += in_buf_size;
        out_buf_ofs += out_buf_size;
      } while (status == TINFL_STATUS_NEEDS_MORE_INPUT);
      if (status == TINFL_STATUS_DONE) {
        // Make sure the entire file was decompressed, and check its CRC.
        if ((out_buf_ofs != file_stat.m_uncomp_size) ||
            (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
                      (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32))
          status = TINFL_STATUS_FAILED;
      }
  // Only free the read buffer if we allocated it ourselves (i.e. the archive
  // is not memory-mapped and the caller did not supply a buffer).
  if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf))
    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  return status == TINFL_STATUS_DONE;
}

// Looks up pFilename in the archive, then extracts it into the caller-supplied
// buffer pBuf (no heap allocation for the output; pUser_read_buf is an
// optional caller-supplied compressed-data staging buffer).
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
    mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
    mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) {
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0)
    return MZ_FALSE;
  return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf,
                                               buf_size, flags, pUser_read_buf,
                                               user_read_buf_size);
}

// Convenience wrapper: extract by index into pBuf with no user read buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
                                     void *pBuf, size_t buf_size,
                                     mz_uint flags) {
  return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf,
                                               buf_size, flags, NULL, 0);
}

// Convenience wrapper: extract by filename into pBuf with no user read buffer.
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
                                          const char *pFilename, void *pBuf,
                                          size_t buf_size, mz_uint flags) {
  return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf,
                                                    buf_size, flags, NULL, 0);
}

// Extracts a file by index into a newly allocated heap buffer (allocated with
// the archive's m_pAlloc callback; caller frees with the matching m_pFree).
// On success returns the buffer and stores its size in *pSize; returns NULL
// on any failure. If MZ_ZIP_FLAG_COMPRESSED_DATA is set, the raw compressed
// bytes are returned instead of the decompressed data.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
                                    size_t *pSize, mz_uint flags) {
  mz_uint64 comp_size, uncomp_size, alloc_size;
  // Central directory header for this entry (source of the size fields).
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  void *pBuf;
  if (pSize)
    *pSize = 0;
  if (!p)
    return NULL;
  comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
  alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? comp_size : uncomp_size;
  // Refuse sizes that can't be represented in a 32-bit size_t. The "(0, ...)"
  // comma expression silences MSVC's constant-conditional warning.
#ifdef _MSC_VER
  if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#else
  if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#endif
    return NULL;
  if (NULL == (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                     (size_t)alloc_size)))
    return NULL;
  if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size,
                                    flags)) {
    // Extraction failed: release the buffer so nothing leaks.
    pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
    return NULL;
  }
  if (pSize)
    *pSize = (size_t)alloc_size;
  return pBuf;
}

// Filename variant of mz_zip_reader_extract_to_heap.
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
                                         const char *pFilename, size_t *pSize,
                                         mz_uint flags) {
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0) {
    if (pSize)
      *pSize = 0;
    // NOTE(review): MZ_FALSE (0) is returned from a void* function here —
    // effectively NULL, but the macro looks out of place; confirm upstream.
    return MZ_FALSE;
  }
  return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags);
}

// Streams a file's (optionally decompressed) contents out through pCallback in
// chunks, without needing the whole output in memory at once. Returns MZ_TRUE
// only if the entire entry was delivered and (when decompressing) its CRC-32
// matched the central directory record.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
                                          mz_uint file_index,
                                          mz_file_write_func pCallback,
                                          void *pOpaque, mz_uint flags) {
  int status = TINFL_STATUS_DONE;
  mz_uint file_crc32 = MZ_CRC32_INIT;
  mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining,
      out_buf_ofs = 0, cur_file_ofs;
  mz_zip_archive_file_stat file_stat;
  void *pRead_buf = NULL;
  void *pWrite_buf = NULL;
  // Stack storage for the local directory header, mz_uint32-aligned.
  mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                              sizeof(mz_uint32) - 1) /
                             sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
    return MZ_FALSE;
  // Empty file, or a directory (but not always a directory - I've seen odd zips
  // with directories that have compressed data which inflates to 0 bytes)
  if (!file_stat.m_comp_size)
    return MZ_TRUE;
  // Entry is a subdirectory (I've seen old zips with dir entries which have
  // compressed deflate data which inflates to 0 bytes, but these entries claim
  // to uncompress to 512 bytes in the headers).
  // I'm torn how to handle this case - should it fail instead?
  if (mz_zip_reader_is_file_a_directory(pZip, file_index))
    return MZ_TRUE;
  // Encryption and patch files are not supported.
  if (file_stat.m_bit_flag & (1 | 32))
    return MZ_FALSE;
  // This function only supports stored and deflate.
  if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
      (file_stat.m_method != MZ_DEFLATED))
    return MZ_FALSE;
  // Read and parse the local directory entry.
  cur_file_ofs = file_stat.m_local_header_ofs;
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
                    MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;
  // Skip past the variable-length filename and extra fields to the data.
  cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
    return MZ_FALSE;
  // Decompress the file either directly from memory or from a file input
  // buffer.
  if (pZip->m_pState->m_pMem) {
    // Memory-backed archive: read the compressed data in place.
    pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
    read_buf_size = read_buf_avail = file_stat.m_comp_size;
    comp_remaining = 0;
  } else {
    // File/stream-backed archive: allocate a bounded staging buffer.
    read_buf_size =
        MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
    if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                            (size_t)read_buf_size)))
      return MZ_FALSE;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  }
  if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
    // The file is stored or the caller has requested the compressed data.
    if (pZip->m_pState->m_pMem) {
      // Whole entry is addressable in memory: one callback delivers it all.
#ifdef _MSC_VER
      if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
          (file_stat.m_comp_size > 0xFFFFFFFF))
#else
      if (((sizeof(size_t) == sizeof(mz_uint32))) &&
          (file_stat.m_comp_size > 0xFFFFFFFF))
#endif
        return MZ_FALSE;
      if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
                    (size_t)file_stat.m_comp_size) != file_stat.m_comp_size)
        status = TINFL_STATUS_FAILED;
      else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
        file_crc32 =
            (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf,
                                (size_t)file_stat.m_comp_size);
      cur_file_ofs += file_stat.m_comp_size;
      out_buf_ofs += file_stat.m_comp_size;
      comp_remaining = 0;
    } else {
      // Stream the stored data through the staging buffer chunk by chunk.
      while (comp_remaining) {
        read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
        if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                          (size_t)read_buf_avail) != read_buf_avail) {
          status = TINFL_STATUS_FAILED;
          break;
        }
        if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
          file_crc32 = (mz_uint32)mz_crc32(
              file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail);
        if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
                      (size_t)read_buf_avail) != read_buf_avail) {
          status = TINFL_STATUS_FAILED;
          break;
        }
        cur_file_ofs += read_buf_avail;
        out_buf_ofs += read_buf_avail;
        comp_remaining -= read_buf_avail;
      }
    }
  } else {
    // Deflated entry: inflate through a TINFL_LZ_DICT_SIZE circular window,
    // flushing each full (or final partial) window slice to the callback.
    tinfl_decompressor inflator;
    tinfl_init(&inflator);
    if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                             TINFL_LZ_DICT_SIZE)))
      status = TINFL_STATUS_FAILED;
    else {
      do {
        // Current write position inside the circular dictionary window.
        mz_uint8 *pWrite_buf_cur =
            (mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
        size_t in_buf_size,
            out_buf_size =
                TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
        if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
          // Refill the compressed-input staging buffer.
          read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
          if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                            (size_t)read_buf_avail) != read_buf_avail) {
            status = TINFL_STATUS_FAILED;
            break;
          }
          cur_file_ofs += read_buf_avail;
          comp_remaining -= read_buf_avail;
          read_buf_ofs = 0;
        }
        in_buf_size = (size_t)read_buf_avail;
        status = tinfl_decompress(
            &inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
            (mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size,
            comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
        read_buf_avail -= in_buf_size;
        read_buf_ofs += in_buf_size;
        if (out_buf_size) {
          if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) !=
              out_buf_size) {
            status = TINFL_STATUS_FAILED;
            break;
          }
          file_crc32 =
              (mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size);
          // Guard against output exceeding the recorded uncompressed size.
          if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) {
            status = TINFL_STATUS_FAILED;
            break;
          }
        }
      } while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) ||
               (status == TINFL_STATUS_HAS_MORE_OUTPUT));
    }
  }
  if ((status == TINFL_STATUS_DONE) &&
      (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) {
    // Make sure the entire file was decompressed, and check its CRC.
    if ((out_buf_ofs != file_stat.m_uncomp_size) ||
        (file_crc32 != file_stat.m_crc32))
      status = TINFL_STATUS_FAILED;
  }
  // Free the staging/window buffers we allocated (pRead_buf may point into
  // the memory-mapped archive, in which case it must not be freed).
  if (!pZip->m_pState->m_pMem)
    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  if (pWrite_buf)
    pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf);
  return status == TINFL_STATUS_DONE;
}

// Filename variant of mz_zip_reader_extract_to_callback.
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
                                               const char *pFilename,
                                               mz_file_write_func pCallback,
                                               void *pOpaque, mz_uint flags) {
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0)
    return MZ_FALSE;
  return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback,
                                           pOpaque, flags);
}

#ifndef MINIZ_NO_STDIO
// mz_file_write_func adapter that appends to a stdio FILE* (ofs is ignored —
// writes are assumed sequential).
static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs,
                                         const void *pBuf, size_t n) {
  (void)ofs;
  return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque);
}

// Extracts a file by index directly to a disk file, preserving the archive's
// recorded modification time when MINIZ_NO_TIME is not defined.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
                                      const char *pDst_filename,
                                      mz_uint flags) {
  mz_bool status;
  mz_zip_archive_file_stat file_stat;
  MZ_FILE *pFile;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
    return MZ_FALSE;
  pFile = MZ_FOPEN(pDst_filename, "wb");
  if (!pFile)
    return MZ_FALSE;
  status = mz_zip_reader_extract_to_callback(
      pZip, file_index, mz_zip_file_write_callback, pFile, flags);
  if (MZ_FCLOSE(pFile) == EOF)
    return MZ_FALSE;
#ifndef MINIZ_NO_TIME
  if (status)
    mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time);
#endif
  return status;
}
#endif // #ifndef MINIZ_NO_STDIO

// Tears down a reader: frees the central directory arrays, closes any stdio
// handle the reader owns, frees the internal state, and marks the archive
// invalid. Safe to report failure on a zip that isn't in reading mode.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip) {
  if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return MZ_FALSE;
  if (pZip->m_pState) {
    mz_zip_internal_state *pState = pZip->m_pState;
    // Detach the state first so the archive is never left half-torn-down.
    pZip->m_pState = NULL;
    mz_zip_array_clear(pZip, &pState->m_central_dir);
    mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
    mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
    if (pState->m_pFile) {
      MZ_FCLOSE(pState->m_pFile);
      pState->m_pFile = NULL;
    }
#endif // #ifndef MINIZ_NO_STDIO
    pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
  }
  pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
  return MZ_TRUE;
}

#ifndef MINIZ_NO_STDIO
// Filename variant of mz_zip_reader_extract_to_file.
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
                                           const char *pArchive_filename,
                                           const char *pDst_filename,
                                           mz_uint flags) {
  int file_index =
      mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags);
  if (file_index < 0)
    return MZ_FALSE;
  return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags);
}
#endif

// ------------------- .ZIP archive writing

#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

// Little-endian 16-bit store, byte by byte (endian- and alignment-safe).
static void mz_write_le16(mz_uint8 *p, mz_uint16 v) {
  p[0] = (mz_uint8)v;
  p[1] = (mz_uint8)(v >> 8);
}
// Little-endian 32-bit store, byte by byte (endian- and alignment-safe).
static void mz_write_le32(mz_uint8 *p, mz_uint32 v) {
  p[0] = (mz_uint8)v;
  p[1] = (mz_uint8)(v >> 8);
  p[2] = (mz_uint8)(v >> 16);
  p[3] = (mz_uint8)(v >> 24);
}
#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v))
#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v))

// Initializes a zip writer that emits through the user-supplied m_pWrite
// callback. existing_size is the offset at which new data will begin (lets a
// caller prepend the archive to other data). Fills in default allocator
// callbacks if the caller left them NULL.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) {
  if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
    return MZ_FALSE;
  if (pZip->m_file_offset_alignment) {
    // Ensure user specified file offset alignment is a power of 2.
    if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1))
      return MZ_FALSE;
  }
  if (!pZip->m_pAlloc)
    pZip->m_pAlloc = def_alloc_func;
  if (!pZip->m_pFree)
    pZip->m_pFree = def_free_func;
  if (!pZip->m_pRealloc)
    pZip->m_pRealloc = def_realloc_func;
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_archive_size = existing_size;
  pZip->m_central_directory_file_ofs = 0;
  pZip->m_total_files = 0;
  if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
                   pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
    return MZ_FALSE;
  memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
                                sizeof(mz_uint8));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
                                sizeof(mz_uint32));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
                                sizeof(mz_uint32));
  return MZ_TRUE;
}

// m_pWrite implementation backing a growable heap archive: doubles the memory
// block's capacity as needed and copies n bytes in at file_ofs.
static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_zip_internal_state *pState = pZip->m_pState;
  mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);
  // Reject zero-length writes and sizes unrepresentable in a 32-bit size_t.
#ifdef _MSC_VER
  if ((!n) ||
      ((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#else
  if ((!n) ||
      ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#endif
    return 0;
  if (new_size > pState->m_mem_capacity) {
    void *pNew_block;
    // Grow geometrically to amortize realloc cost.
    size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);
    while (new_capacity < new_size)
      new_capacity *= 2;
    if (NULL == (pNew_block = pZip->m_pRealloc(
                     pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity)))
      return 0;
    pState->m_pMem = pNew_block;
    pState->m_mem_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
  pState->m_mem_size = (size_t)new_size;
  return n;
}

// Initializes a writer that builds the archive in a heap block
// (retrieved later with mz_zip_writer_finalize_heap_archive).
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
                                size_t size_to_reserve_at_beginning,
                                size_t initial_allocation_size) {
  pZip->m_pWrite = mz_zip_heap_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning))
    return MZ_FALSE;
  if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size,
                                             size_to_reserve_at_beginning))) {
    if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc(
                     pZip->m_pAlloc_opaque, 1, initial_allocation_size))) {
      mz_zip_writer_end(pZip);
      return MZ_FALSE;
    }
    pZip->m_pState->m_mem_capacity = initial_allocation_size;
  }
  return MZ_TRUE;
}

#ifndef MINIZ_NO_STDIO
// m_pWrite implementation backed by a stdio FILE*: seeks only when the target
// offset differs from the current position, then writes n bytes.
static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
  if (((mz_int64)file_ofs < 0) ||
      (((cur_ofs != (mz_int64)file_ofs)) &&
       (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
    return 0;
  return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile);
}

// Initializes a writer that writes directly to a disk file, optionally
// pre-filling size_to_reserve_at_beginning bytes of zeros at the front.
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint64 size_to_reserve_at_beginning) {
  MZ_FILE *pFile;
  pZip->m_pWrite = mz_zip_file_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning))
    return MZ_FALSE;
  if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) {
    mz_zip_writer_end(pZip);
    return MZ_FALSE;
  }
  pZip->m_pState->m_pFile = pFile;
  if (size_to_reserve_at_beginning) {
    // Zero-fill the reserved region 4 KiB at a time.
    mz_uint64 cur_ofs = 0;
    char buf[4096];
    MZ_CLEAR_OBJ(buf);
    do {
      size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning);
      if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) {
        mz_zip_writer_end(pZip);
        return MZ_FALSE;
      }
      cur_ofs += n;
      size_to_reserve_at_beginning -= n;
    } while (size_to_reserve_at_beginning);
  }
  return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO

// Converts an archive opened for reading into writing mode so files can be
// appended in place. For stdio-backed archives the file is reopened "r+b"
// (pFilename must be the original path); memory-backed archives must have
// been read from a heap block this writer can realloc.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
                                       const char *pFilename) {
  mz_zip_internal_state *pState;
  if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return MZ_FALSE;
  // No sense in trying to write to an archive that's already at the support max
  // size
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
    return MZ_FALSE;
  pState = pZip->m_pState;
  if (pState->m_pFile) {
#ifdef MINIZ_NO_STDIO
    pFilename;
    return MZ_FALSE;
#else
    // Archive is being read from stdio - try to reopen as writable.
    if (pZip->m_pIO_opaque != pZip)
      return MZ_FALSE;
    if (!pFilename)
      return MZ_FALSE;
    pZip->m_pWrite = mz_zip_file_write_func;
    if (NULL ==
        (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) {
      // The mz_zip_archive is now in a bogus state because pState->m_pFile is
      // NULL, so just close it.
      mz_zip_reader_end(pZip);
      return MZ_FALSE;
    }
#endif // #ifdef MINIZ_NO_STDIO
  } else if (pState->m_pMem) {
    // Archive lives in a memory block. Assume it's from the heap that we can
    // resize using the realloc callback.
    if (pZip->m_pIO_opaque != pZip)
      return MZ_FALSE;
    pState->m_mem_capacity = pState->m_mem_size;
    pZip->m_pWrite = mz_zip_heap_write_func;
  }
  // Archive is being read via a user provided read function - make sure the
  // user has specified a write function too.
  else if (!pZip->m_pWrite)
    return MZ_FALSE;
  // Start writing new files at the archive's current central directory
  // location.
  pZip->m_archive_size = pZip->m_central_directory_file_ofs;
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_central_directory_file_ofs = 0;
  return MZ_TRUE;
}

// Adds a memory buffer to the archive with no comment and default CRC/size
// handling (thin wrapper over mz_zip_writer_add_mem_ex).
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
                              const void *pBuf, size_t buf_size,
                              mz_uint level_and_flags) {
  return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0,
                                  level_and_flags, 0, 0);
}

// Bookkeeping passed to the deflate output callback while adding a file.
typedef struct {
  mz_zip_archive *m_pZip;
  mz_uint64 m_cur_archive_file_ofs; // next write offset in the archive
  mz_uint64 m_comp_size;            // compressed bytes emitted so far
} mz_zip_writer_add_state;

// tdefl put-buf callback: writes each compressed chunk to the archive and
// advances the offset/size counters.
static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len,
                                                  void *pUser) {
  mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
  if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque,
                                    pState->m_cur_archive_file_ofs, pBuf,
                                    len) != len)
    return MZ_FALSE;
  pState->m_cur_archive_file_ofs += len;
  pState->m_comp_size += len;
  return MZ_TRUE;
}

// Fills pDst with a 30-byte ZIP local directory header (sizes truncated to
// 32 bits; version-needed is 2.0 when compressed, 0 when stored).
static mz_bool mz_zip_writer_create_local_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 20 : 0);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size);
  return MZ_TRUE;
}

// Fills pDst with a 46-byte ZIP central directory header mirroring the local
// header fields plus comment length, external attributes, and the local
// header's archive offset.
static mz_bool mz_zip_writer_create_central_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size,
    mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method,
    mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date,
    mz_uint64 local_header_ofs, mz_uint32 ext_attributes) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs);
  return MZ_TRUE;
}

// Appends one entry (header + name + extra + comment + offset record) to the
// in-memory central directory arrays; rolls the directory back to its prior
// size if any push fails so the state stays consistent.
static mz_bool mz_zip_writer_add_to_central_dir(
    mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size,
    const void *pExtra, mz_uint16 extra_size, const void *pComment,
    mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs,
    mz_uint32 ext_attributes) {
  mz_zip_internal_state *pState = pZip->m_pState;
  mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size;
  size_t orig_central_dir_size = pState->m_central_dir.m_size;
  mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
  // No zip64 support yet
  if ((local_header_ofs > 0xFFFFFFFF) ||
      (((mz_uint64)pState->m_central_dir.m_size +
        MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size +
        comment_size) > 0xFFFFFFFF))
    return MZ_FALSE;
  if (!mz_zip_writer_create_central_dir_header(
          pZip, central_dir_header, filename_size, extra_size, comment_size,
          uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time,
          dos_date, local_header_ofs, ext_attributes))
    return MZ_FALSE;
  if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir,
                               central_dir_header,
                               MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename,
                               filename_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra,
                               extra_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment,
                               comment_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets,
                               &central_dir_ofs, 1))) {
    // Try to push the central directory array back into its original state.
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}

static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) {
  // Basic ZIP archive filename validity checks: Valid filenames cannot start
  // with a forward slash, cannot contain a drive letter, and cannot use
  // DOS-style backward slashes.
  if (*pArchive_name == '/')
    return MZ_FALSE;
  while (*pArchive_name) {
    if ((*pArchive_name == '\\') || (*pArchive_name == ':'))
      return MZ_FALSE;
    pArchive_name++;
  }
  return MZ_TRUE;
}

// Number of padding bytes needed so the next entry starts on the configured
// m_file_offset_alignment boundary (0 when alignment is disabled).
static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment(
    mz_zip_archive *pZip) {
  mz_uint32 n;
  if (!pZip->m_file_offset_alignment)
    return 0;
  n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1));
  return (pZip->m_file_offset_alignment - n) &
         (pZip->m_file_offset_alignment - 1);
}

// Writes n zero bytes at cur_file_ofs using a small stack buffer.
static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip,
                                         mz_uint64 cur_file_ofs, mz_uint32 n) {
  char buf[4096];
  memset(buf, 0, MZ_MIN(sizeof(buf), n));
  while (n) {
    mz_uint32 s = MZ_MIN(sizeof(buf), n);
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s)
      return MZ_FALSE;
    cur_file_ofs += s;
    n -= s;
  }
  return MZ_TRUE;
}

// Adds a memory buffer to the archive as pArchive_name. level_and_flags is a
// compression level (0-10) OR'd with MZ_ZIP_FLAG_* bits; when
// MZ_ZIP_FLAG_COMPRESSED_DATA is set, pBuf already holds deflate data and the
// caller supplies uncomp_size/uncomp_crc32. Names ending in '/' become
// directory entries (which must carry no data). No zip64: everything must fit
// in 32 bits.
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
                                 const char *pArchive_name, const void *pBuf,
                                 size_t buf_size, const void *pComment,
                                 mz_uint16 comment_size,
                                 mz_uint level_and_flags, mz_uint64 uncomp_size,
                                 mz_uint32 uncomp_crc32) {
  mz_uint16 method = 0, dos_time = 0, dos_date = 0;
  mz_uint level, ext_attributes = 0, num_alignment_padding_bytes;
  mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
            cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0;
  size_t archive_name_size;
  mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
  tdefl_compressor *pComp = NULL;
  mz_bool store_data_uncompressed;
  mz_zip_internal_state *pState;
  if ((int)level_and_flags < 0)
    level_and_flags = MZ_DEFAULT_LEVEL;
  level = level_and_flags & 0xF;
  store_data_uncompressed =
      ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA));
  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) ||
      (!pArchive_name) || ((comment_size) && (!pComment)) ||
      (pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION))
    return MZ_FALSE;
  pState = pZip->m_pState;
  // uncomp_size only makes sense for pre-compressed input.
  if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size))
    return MZ_FALSE;
  // No zip64 support yet
  if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF))
    return MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name))
    return MZ_FALSE;
#ifndef MINIZ_NO_TIME
  {
    time_t cur_time;
    time(&cur_time);
    mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date);
  }
#endif // #ifndef MINIZ_NO_TIME
  archive_name_size = strlen(pArchive_name);
  if (archive_name_size > 0xFFFF)
    return MZ_FALSE;
  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        comment_size + archive_name_size) > 0xFFFFFFFF))
    return MZ_FALSE;
  if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) {
    // Set DOS Subdirectory attribute bit.
    ext_attributes |= 0x10;
    // Subdirectories cannot contain data.
    if ((buf_size) || (uncomp_size))
      return MZ_FALSE;
  }
  // Try to do any allocations before writing to the archive, so if an
  // allocation fails the file remains unmodified. (A good idea if we're doing
  // an in-place modification.)
  if ((!mz_zip_array_ensure_room(
          pZip, &pState->m_central_dir,
          MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) ||
      (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1)))
    return MZ_FALSE;
  if ((!store_data_uncompressed) && (buf_size)) {
    if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc(
                     pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor))))
      return MZ_FALSE;
  }
  // Reserve space for padding + local header (header is rewritten at the end,
  // once the compressed size and CRC are known).
  if (!mz_zip_writer_write_zeros(
          pZip, cur_archive_file_ofs,
          num_alignment_padding_bytes + sizeof(local_dir_header))) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
    return MZ_FALSE;
  }
  local_dir_header_ofs += num_alignment_padding_bytes;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  cur_archive_file_ofs +=
      num_alignment_padding_bytes + sizeof(local_dir_header);
  MZ_CLEAR_OBJ(local_dir_header);
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
                     archive_name_size) != archive_name_size) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
    return MZ_FALSE;
  }
  cur_archive_file_ofs += archive_name_size;
  if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) {
    // Compute CRC/size ourselves; tiny payloads aren't worth compressing.
    uncomp_crc32 =
        (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size);
    uncomp_size = buf_size;
    if (uncomp_size <= 3) {
      level = 0;
      store_data_uncompressed = MZ_TRUE;
    }
  }
  if (store_data_uncompressed) {
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf,
                       buf_size) != buf_size) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      return MZ_FALSE;
    }
    cur_archive_file_ofs += buf_size;
    comp_size = buf_size;
    // Caller-supplied data is already deflate, so record that method.
    if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)
      method = MZ_DEFLATED;
  } else if (buf_size) {
    // Deflate the buffer straight into the archive via the put-buf callback.
    mz_zip_writer_add_state state;
    state.m_pZip = pZip;
    state.m_cur_archive_file_ofs = cur_archive_file_ofs;
    state.m_comp_size = 0;
    if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
                    tdefl_create_comp_flags_from_zip_params(
                        level, -15, MZ_DEFAULT_STRATEGY)) !=
         TDEFL_STATUS_OKAY) ||
        (tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) !=
         TDEFL_STATUS_DONE)) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      return MZ_FALSE;
    }
    comp_size = state.m_comp_size;
    cur_archive_file_ofs = state.m_cur_archive_file_ofs;
    method = MZ_DEFLATED;
  }
  pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
  pComp = NULL;
  // no zip64 support yet
  if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
    return MZ_FALSE;
  // Now that sizes/CRC are final, write the real local header over the
  // zero-filled placeholder.
  if (!mz_zip_writer_create_local_dir_header(
          pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
          comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
    return MZ_FALSE;
  if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs,
                     local_dir_header,
                     sizeof(local_dir_header)) != sizeof(local_dir_header))
    return MZ_FALSE;
  if (!mz_zip_writer_add_to_central_dir(
          pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
          comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
          dos_time, dos_date, local_dir_header_ofs, ext_attributes))
    return MZ_FALSE;
  pZip->m_total_files++;
  pZip->m_archive_size = cur_archive_file_ofs;
  return MZ_TRUE;
}

#ifndef MINIZ_NO_STDIO
// Adds a disk file to the archive, streaming it through a bounded I/O buffer
// (stored when level is 0 or the file is tiny, deflated otherwise). Preserves
// the source file's modification time in the entry. No zip64.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
                               const char *pSrc_filename, const void *pComment,
                               mz_uint16 comment_size,
                               mz_uint level_and_flags) {
  mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes;
  mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0;
  mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
            cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = 0,
            comp_size = 0;
  size_t archive_name_size;
  mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
  MZ_FILE *pSrc_file = NULL;
  if ((int)level_and_flags < 0)
    level_and_flags = MZ_DEFAULT_LEVEL;
  level = level_and_flags & 0xF;
  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) ||
      ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
    return MZ_FALSE;
  // Pre-compressed input doesn't make sense when reading from a source file.
  if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)
    return MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name))
    return MZ_FALSE;
  archive_name_size = strlen(pArchive_name);
  if (archive_name_size > 0xFFFF)
    return MZ_FALSE;
  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        comment_size + archive_name_size) > 0xFFFFFFFF))
    return MZ_FALSE;
  if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date))
    return MZ_FALSE;
  pSrc_file = MZ_FOPEN(pSrc_filename, "rb");
  if (!pSrc_file)
    return MZ_FALSE;
  // Determine the source size by seeking to the end.
  MZ_FSEEK64(pSrc_file, 0, SEEK_END);
  uncomp_size = MZ_FTELL64(pSrc_file);
  MZ_FSEEK64(pSrc_file, 0, SEEK_SET);
  if (uncomp_size > 0xFFFFFFFF) {
    // No zip64 support yet
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  if (uncomp_size <= 3)
    level = 0;
  // Reserve space for padding + local header (rewritten at the end once the
  // compressed size and CRC are known).
  if (!mz_zip_writer_write_zeros(
          pZip, cur_archive_file_ofs,
          num_alignment_padding_bytes + sizeof(local_dir_header))) {
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  local_dir_header_ofs += num_alignment_padding_bytes;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  cur_archive_file_ofs +=
      num_alignment_padding_bytes + sizeof(local_dir_header);
  MZ_CLEAR_OBJ(local_dir_header);
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
                     archive_name_size) != archive_name_size) {
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  cur_archive_file_ofs += archive_name_size;
  if (uncomp_size) {
    mz_uint64 uncomp_remaining = uncomp_size;
    void *pRead_buf =
        pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE);
    if (!pRead_buf) {
      MZ_FCLOSE(pSrc_file);
      return MZ_FALSE;
    }
    if (!level) {
      // Store: copy the file through, CRC-ing each chunk.
      while (uncomp_remaining) {
        mz_uint n =
            (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining);
        if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) ||
            (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs,
                            pRead_buf, n) != n)) {
          pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
          MZ_FCLOSE(pSrc_file);
          return MZ_FALSE;
        }
        uncomp_crc32 =
            (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n);
        uncomp_remaining -= n;
        cur_archive_file_ofs += n;
      }
      comp_size = uncomp_size;
    } else {
      // Deflate: feed chunks through tdefl, emitting compressed output via
      // the put-buf callback.
      mz_bool result = MZ_FALSE;
      mz_zip_writer_add_state state;
      tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc(
          pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor));
      if (!pComp) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }
      state.m_pZip = pZip;
      state.m_cur_archive_file_ofs = cur_archive_file_ofs;
      state.m_comp_size = 0;
      if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
                     tdefl_create_comp_flags_from_zip_params(
                         level, -15, MZ_DEFAULT_STRATEGY)) !=
          TDEFL_STATUS_OKAY) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }
      for (;;) {
        size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining,
                                               (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
        tdefl_status status;
        if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size)
          break;
        uncomp_crc32 = (mz_uint32)mz_crc32(
            uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size);
        uncomp_remaining -= in_buf_size;
        // Only TDEFL_FINISH on the last chunk.
        status = tdefl_compress_buffer(
            pComp, pRead_buf, in_buf_size,
            uncomp_remaining ? TDEFL_NO_FLUSH : TDEFL_FINISH);
        if (status == TDEFL_STATUS_DONE) {
          result = MZ_TRUE;
          break;
        } else if (status != TDEFL_STATUS_OKAY)
          break;
      }
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      if (!result) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }
      comp_size = state.m_comp_size;
      cur_archive_file_ofs = state.m_cur_archive_file_ofs;
      method = MZ_DEFLATED;
    }
    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  }
  MZ_FCLOSE(pSrc_file);
  pSrc_file = NULL;
  // no zip64 support yet
  if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
    return MZ_FALSE;
  // Write the finalized local header over the zero-filled placeholder.
  if (!mz_zip_writer_create_local_dir_header(
          pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
          comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
    return MZ_FALSE;
  if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs,
                     local_dir_header,
                     sizeof(local_dir_header)) != sizeof(local_dir_header))
    return MZ_FALSE;
  if (!mz_zip_writer_add_to_central_dir(
          pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
          comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
          dos_time, dos_date, local_dir_header_ofs, ext_attributes))
    return MZ_FALSE;
  pZip->m_total_files++;
  pZip->m_archive_size = cur_archive_file_ofs;
  return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO

// Copies an entry verbatim (still compressed) from pSource_zip into pZip:
// local header, raw data, optional trailing data descriptor, and a central
// directory record patched to point at the new local header offset. No zip64.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
                                          mz_zip_archive *pSource_zip,
                                          mz_uint file_index) {
  mz_uint n, bit_flags, num_alignment_padding_bytes;
  mz_uint64 comp_bytes_remaining, local_dir_header_ofs;
  mz_uint64 cur_src_file_ofs, cur_dst_file_ofs;
  mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                              sizeof(mz_uint32) - 1) /
                             sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
  size_t orig_central_dir_size;
  mz_zip_internal_state *pState;
  void *pBuf;
  const mz_uint8 *pSrc_central_header;
  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
    return MZ_FALSE;
  if (NULL ==
      (pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index)))
    return MZ_FALSE;
  pState = pZip->m_pState;
  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) >
       0xFFFFFFFF))
    return MZ_FALSE;
  cur_src_file_ofs =
      MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
  cur_dst_file_ofs = pZip->m_archive_size;
  if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs,
                           pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;
  cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
  if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs,
                                 num_alignment_padding_bytes))
    return MZ_FALSE;
  cur_dst_file_ofs += num_alignment_padding_bytes;
  local_dir_header_ofs = cur_dst_file_ofs;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header,
                     MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
  // Copy name + extra fields along with the raw compressed data.
  n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
      MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  comp_bytes_remaining =
      n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  // Buffer must hold at least a 16-byte data descriptor (see below).
  if (NULL ==
      (pBuf = pZip->m_pAlloc(
           pZip->m_pAlloc_opaque, 1,
           (size_t)MZ_MAX(sizeof(mz_uint32) * 4,
                          MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE,
                                 comp_bytes_remaining)))))
    return MZ_FALSE;
  while (comp_bytes_remaining) {
    n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining);
    if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
                             n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    cur_src_file_ofs += n;
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    cur_dst_file_ofs += n;
    comp_bytes_remaining -= n;
  }
  bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS);
  if (bit_flags & 8) {
    // Copy data descriptor
    if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
                             sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    // Descriptor is 3 or 4 dwords depending on the optional PK\x07\x08 sig.
    n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3);
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    cur_src_file_ofs += n;
    cur_dst_file_ofs += n;
  }
  pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
  // no zip64 support yet
  if (cur_dst_file_ofs > 0xFFFFFFFF)
    return MZ_FALSE;
  orig_central_dir_size = pState->m_central_dir.m_size;
  // Clone the source central header, fixing up the local header offset.
  memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS,
                local_dir_header_ofs);
  if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header,
                              MZ_ZIP_CENTRAL_DIR_HEADER_SIZE))
    return MZ_FALSE;
  n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
      MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
      MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS);
  if (!mz_zip_array_push_back(
          pZip, &pState->m_central_dir,
          pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) {
    // Roll the central dir back so it's not left half-updated.
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }
  if (pState->m_central_dir.m_size > 0xFFFFFFFF)
    return MZ_FALSE;
  n = (mz_uint32)orig_central_dir_size;
  if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) {
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }
  pZip->m_total_files++;
  pZip->m_archive_size = cur_dst_file_ofs;
  return MZ_TRUE;
}

// Writes the central directory followed by the end-of-central-directory
// record, flushing stdio output if applicable. After this the archive is
// complete; the writer transitions to the "finalized" mode and no more files
// may be added. No zip64.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) {
  mz_zip_internal_state *pState;
  mz_uint64 central_dir_ofs, central_dir_size;
  mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE];
  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
    return MZ_FALSE;
  pState = pZip->m_pState;
  // no zip64 support yet
  if ((pZip->m_total_files > 0xFFFF) ||
      ((pZip->m_archive_size + pState->m_central_dir.m_size +
        MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
    return MZ_FALSE;
  central_dir_ofs = 0;
  central_dir_size = 0;
  if (pZip->m_total_files) {
    // Write central directory
    central_dir_ofs = pZip->m_archive_size;
    central_dir_size = pState->m_central_dir.m_size;
    pZip->m_central_directory_file_ofs = central_dir_ofs;
    if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs,
                       pState->m_central_dir.m_p,
                       (size_t)central_dir_size) != central_dir_size)
      return MZ_FALSE;
    pZip->m_archive_size += central_dir_size;
  }
  // Write end of central directory record
  MZ_CLEAR_OBJ(hdr);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS,
                MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG);
  MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS,
                pZip->m_total_files);
  MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs);
  if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr,
                     sizeof(hdr)) != sizeof(hdr))
    return MZ_FALSE;
#ifndef MINIZ_NO_STDIO
  if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF))
    return MZ_FALSE;
#endif // #ifndef MINIZ_NO_STDIO
  pZip->m_archive_size += sizeof(hdr);
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED;
  return MZ_TRUE;
}

// Finalizes a heap-backed archive and hands the memory block (and its size)
// to the caller. (Function continues beyond this chunk.)
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
                                            size_t *pSize) {
  if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize))
    return MZ_FALSE;
  if (pZip->m_pWrite != mz_zip_heap_write_func)
    return MZ_FALSE;
  if
(!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE; *pBuf = pZip->m_pState->m_pMem; *pSize = pZip->m_pState->m_mem_size; pZip->m_pState->m_pMem = NULL; pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0; return MZ_TRUE; } mz_bool mz_zip_writer_end(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_bool status = MZ_TRUE; if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) && (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED))) return MZ_FALSE; pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { MZ_FCLOSE(pState->m_pFile); pState->m_pFile = NULL; } #endif // #ifndef MINIZ_NO_STDIO if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem); pState->m_pMem = NULL; } pZip->m_pFree(pZip->m_pAlloc_opaque, pState); pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return status; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_add_mem_to_archive_file_in_place( const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_bool status, created_new_archive = MZ_FALSE; mz_zip_archive zip_archive; struct MZ_FILE_STAT_STRUCT file_stat; MZ_CLEAR_OBJ(zip_archive); if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) || ((comment_size) && (!pComment)) || ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) { // Create a new archive. 
if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0)) return MZ_FALSE; created_new_archive = MZ_TRUE; } else { // Append to an existing archive. if (!mz_zip_reader_init_file( &zip_archive, pZip_filename, level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return MZ_FALSE; if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) { mz_zip_reader_end(&zip_archive); return MZ_FALSE; } } status = mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size, pComment, comment_size, level_and_flags, 0, 0); // Always finalize, even if adding failed for some reason, so we have a valid // central directory. (This may not always succeed, but we can try.) if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE; if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE; if ((!status) && (created_new_archive)) { // It's a new archive and something went wrong, so just delete it. int ignoredStatus = MZ_DELETE_FILE(pZip_filename); (void)ignoredStatus; } return status; } void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint flags) { int file_index; mz_zip_archive zip_archive; void *p = NULL; if (pSize) *pSize = 0; if ((!pZip_filename) || (!pArchive_name)) return NULL; MZ_CLEAR_OBJ(zip_archive); if (!mz_zip_reader_init_file( &zip_archive, pZip_filename, flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return NULL; if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL, flags)) >= 0) p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags); mz_zip_reader_end(&zip_archive); return p; } #endif // #ifndef MINIZ_NO_STDIO #endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS #endif // #ifndef MINIZ_NO_ARCHIVE_APIS #ifdef __cplusplus } #endif #ifdef _MSC_VER #pragma warning(pop) #endif #endif // MINIZ_HEADER_FILE_ONLY /* This is free and unencumbered software released into the public domain. 
Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. For more information, please refer to <http://unlicense.org/> */ // ---------------------- end of miniz ---------------------------------------- #ifdef __clang__ #pragma clang diagnostic pop #endif } // namespace miniz #else // Reuse MINIZ_LITTE_ENDIAN macro #if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \ defined(__i386) || defined(__i486__) || defined(__i486) || \ defined(i386) || defined(__ia64__) || defined(__x86_64__) // MINIZ_X86_OR_X64_CPU is only used to help set the below macros. #define MINIZ_X86_OR_X64_CPU 1 #endif #if defined(__sparcv9) // Big endian #else #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU // Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. 
#define MINIZ_LITTLE_ENDIAN 1 #endif #endif #endif // TINYEXR_USE_MINIZ // static bool IsBigEndian(void) { // union { // unsigned int i; // char c[4]; // } bint = {0x01020304}; // // return bint.c[0] == 1; //} static void SetErrorMessage(const std::string &msg, const char **err) { if (err) { #ifdef _WIN32 (*err) = _strdup(msg.c_str()); #else (*err) = strdup(msg.c_str()); #endif } } static const int kEXRVersionSize = 8; static void cpy2(unsigned short *dst_val, const unsigned short *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; } static void swap2(unsigned short *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else unsigned short tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[1]; dst[1] = src[0]; #endif } #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wunused-function" #endif #ifdef __GNUC__ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-function" #endif static void cpy4(int *dst_val, const int *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void cpy4(unsigned int *dst_val, const unsigned int *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void cpy4(float *dst_val, const float *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } #ifdef __clang__ 
#pragma clang diagnostic pop #endif #ifdef __GNUC__ #pragma GCC diagnostic pop #endif static void swap4(unsigned int *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else unsigned int tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; #endif } static void swap4(int *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else int tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; #endif } static void swap4(float *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else float tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; #endif } #if 0 static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; dst[4] = src[4]; dst[5] = src[5]; dst[6] = src[6]; dst[7] = src[7]; } #endif static void swap8(tinyexr::tinyexr_uint64 *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else tinyexr::tinyexr_uint64 tmp = (*val); unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[7]; dst[1] = src[6]; dst[2] = src[5]; dst[3] = src[4]; dst[4] = src[3]; dst[5] = src[2]; dst[6] = src[1]; dst[7] = src[0]; #endif } // https://gist.github.com/rygorous/2156668 // Reuse MINIZ_LITTLE_ENDIAN flag from miniz. 
// IEEE-754 single-precision value viewed either as raw bits (u), as a float
// (f), or as sign/exponent/mantissa bit-fields (s). Field order depends on
// host endianness.
union FP32 {
  unsigned int u;
  float f;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 23;
    unsigned int Exponent : 8;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 8;
    unsigned int Mantissa : 23;
#endif
  } s;
};

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif

// IEEE-754 half-precision value viewed as raw bits (u) or as
// sign/exponent/mantissa bit-fields (s).
union FP16 {
  unsigned short u;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 10;
    unsigned int Exponent : 5;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 5;
    unsigned int Mantissa : 10;
#endif
  } s;
};

#ifdef __clang__
#pragma clang diagnostic pop
#endif

// Converts a half-precision value to single precision using the branch-light
// bit-manipulation technique from https://gist.github.com/rygorous/2156668.
// Handles denormals (via the `magic` renormalization trick) and Inf/NaN.
static FP32 half_to_float(FP16 h) {
  static const FP32 magic = {113 << 23};
  static const unsigned int shifted_exp = 0x7c00
                                          << 13;  // exponent mask after shift
  FP32 o;

  o.u = (h.u & 0x7fffU) << 13U;           // exponent/mantissa bits
  unsigned int exp_ = shifted_exp & o.u;  // just the exponent
  o.u += (127 - 15) << 23;                // exponent adjust

  // handle exponent special cases
  if (exp_ == shifted_exp)    // Inf/NaN?
    o.u += (128 - 16) << 23;  // extra exp adjust
  else if (exp_ == 0)         // Zero/Denormal?
  {
    o.u += 1 << 23;  // extra exp adjust
    o.f -= magic.f;  // renormalize
  }

  o.u |= (h.u & 0x8000U) << 16U;  // sign bit
  return o;
}

// Converts a single-precision value to half precision with round-to-nearest,
// mapping overflow to signed infinity and flushing values below the half
// denormal range to signed zero.
static FP16 float_to_half_full(FP32 f) {
  FP16 o = {0};

  // Based on ISPC reference code (with minor modifications)
  if (f.s.Exponent == 0)  // Signed zero/denormal (which will underflow)
    o.s.Exponent = 0;
  else if (f.s.Exponent == 255)  // Inf or NaN (all exponent bits set)
  {
    o.s.Exponent = 31;
    o.s.Mantissa = f.s.Mantissa ? 0x200 : 0;  // NaN->qNaN and Inf->Inf
  } else                                      // Normalized number
  {
    // Exponent unbias the single, then bias the halfp
    int newexp = f.s.Exponent - 127 + 15;
    if (newexp >= 31)  // Overflow, return signed infinity
      o.s.Exponent = 31;
    else if (newexp <= 0)  // Underflow
    {
      if ((14 - newexp) <= 24)  // Mantissa might be non-zero
      {
        unsigned int mant = f.s.Mantissa | 0x800000;  // Hidden 1 bit
        o.s.Mantissa = mant >> (14 - newexp);
        if ((mant >> (13 - newexp)) & 1)  // Check for rounding
          o.u++;  // Round, might overflow into exp bit, but this is OK
      }
    } else {
      o.s.Exponent = static_cast<unsigned int>(newexp);
      o.s.Mantissa = f.s.Mantissa >> 13;
      if (f.s.Mantissa & 0x1000)  // Check for rounding
        o.u++;  // Round, might overflow to inf, this is OK
    }
  }

  o.s.Sign = f.s.Sign;
  return o;
}

// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RAMDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7

#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif

// Reads a NUL-terminated string of at most `len` bytes starting at `ptr`
// into *s. Returns the pointer just past the terminating '\0', or NULL
// (with *s cleared) when no terminator is found within `len` bytes.
static const char *ReadString(std::string *s, const char *ptr, size_t len) {
  // Read until NULL(\0).
  const char *p = ptr;
  const char *q = ptr;
  while ((size_t(q - ptr) < len) && (*q) != 0) {
    q++;
  }

  if (size_t(q - ptr) >= len) {
    (*s) = std::string();
    return NULL;
  }

  (*s) = std::string(p, q);

  return q + 1;  // skip '\0'
}

// Parses one EXR attribute record at `marker` (layout: NUL-terminated name,
// NUL-terminated type, 4-byte little-endian data length, raw data bytes).
// On success fills name/type/data, sets *marker_size to the number of bytes
// consumed, and returns true. Every read is bounds-checked against `size`.
static bool ReadAttribute(std::string *name, std::string *type,
                          std::vector<unsigned char> *data, size_t *marker_size,
                          const char *marker, size_t size) {
  size_t name_len = strnlen(marker, size);
  if (name_len == size) {
    // String does not have a terminating character.
    return false;
  }
  *name = std::string(marker, name_len);

  marker += name_len + 1;
  size -= name_len + 1;

  size_t type_len = strnlen(marker, size);
  if (type_len == size) {
    return false;
  }
  *type = std::string(marker, type_len);

  marker += type_len + 1;
  size -= type_len + 1;

  if (size < sizeof(uint32_t)) {
    return false;
  }

  uint32_t data_len;
  memcpy(&data_len, marker, sizeof(uint32_t));
  // File stores little-endian; swap only affects big-endian hosts.
  tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

  if (data_len == 0) {
    if ((*type).compare("string") == 0) {
      // Accept empty string attribute.
      marker += sizeof(uint32_t);
      size -= sizeof(uint32_t);
      *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t);
      data->resize(1);
      (*data)[0] = '\0';
      return true;
    } else {
      return false;
    }
  }

  marker += sizeof(uint32_t);
  size -= sizeof(uint32_t);

  if (size < data_len) {
    return false;
  }

  data->resize(static_cast<size_t>(data_len));
  memcpy(&data->at(0), marker, static_cast<size_t>(data_len));

  *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
  return true;
}

// Appends one serialized attribute record (name '\0' type '\0' length data)
// to *out. The length field is byte-swapped on big-endian hosts so the
// output is always little-endian, matching the EXR file layout.
static void WriteAttributeToMemory(std::vector<unsigned char> *out,
                                   const char *name, const char *type,
                                   const unsigned char *data, int len) {
  out->insert(out->end(), name, name + strlen(name) + 1);
  out->insert(out->end(), type, type + strlen(type) + 1);

  int outLen = len;
  tinyexr::swap4(&outLen);
  out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen),
              reinterpret_cast<unsigned char *>(&outLen) + sizeof(int));
  out->insert(out->end(), data, data + len);
}

// Decoded form of one entry of the EXR `channels` attribute.
typedef struct {
  std::string name;  // less than 255 bytes long
  int pixel_type;    // pixel type id as stored in the file
  int x_sampling;    // horizontal subsampling factor
  int y_sampling;    // vertical subsampling factor
  unsigned char p_linear;
  unsigned char pad[3];  // padding to keep the struct 4-byte aligned
} ChannelInfo;

// Integer 2D bounding box (matches EXR's box2i attribute layout).
typedef struct {
  int min_x;
  int min_y;
  int max_x;
  int max_y;
} Box2iInfo;

// Aggregated, decoded EXR part header: required attributes plus any extra
// attributes encountered while parsing.
struct HeaderInfo {
  std::vector<tinyexr::ChannelInfo> channels;
  std::vector<EXRAttribute> attributes;

  Box2iInfo data_window;
  int line_order;
  Box2iInfo display_window;
  float screen_window_center[2];
  float screen_window_width;
  float pixel_aspect_ratio;

  int chunk_count;

  // Tiled format
  int tiled;  // Non-zero if the part is tiled.
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;

  unsigned int header_len;

  int compression_type;

  // required for multi-part or non-image files
  std::string name;
  // required for multi-part or non-image files
  std::string type;

  // Resets every field to its zero/empty state so the struct can be reused
  // when parsing another header.
  void clear() {
    channels.clear();
    attributes.clear();

    data_window.min_x = 0;
    data_window.min_y = 0;
    data_window.max_x = 0;
    data_window.max_y = 0;
    line_order = 0;
    display_window.min_x = 0;
    display_window.min_y = 0;
    display_window.max_x = 0;
    display_window.max_y = 0;
    screen_window_center[0] = 0.0f;
    screen_window_center[1] = 0.0f;
    screen_window_width = 0.0f;
    pixel_aspect_ratio = 0.0f;
    chunk_count = 0;

    // Tiled format
    tiled = 0;
    tile_size_x = 0;
    tile_size_y = 0;
    tile_level_mode = 0;
    tile_rounding_mode = 0;

    header_len = 0;
    compression_type = 0;

    name.clear();
    type.clear();
  }
};

// Parses the serialized `channels` attribute payload in `data` into
// `channels`. The list is terminated by a single 0 byte; each entry is a
// NUL-terminated name followed by 16 bytes (pixel_type, p_linear + 3
// reserved, x_sampling, y_sampling). Returns false on any truncated or
// malformed record.
static bool ReadChannelInfo(std::vector<ChannelInfo> &channels,
                            const std::vector<unsigned char> &data) {
  const char *p = reinterpret_cast<const char *>(&data.at(0));

  for (;;) {
    if ((*p) == 0) {
      break;
    }
    ChannelInfo info;

    tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) -
                             (p - reinterpret_cast<const char *>(data.data()));
    if (data_len < 0) {
      return false;
    }

    p = ReadString(&info.name, p, size_t(data_len));
    if ((p == NULL) && (info.name.empty())) {
      // Buffer overrun. Issue #51.
      return false;
    }

    // The fixed 16-byte tail of the entry (plus the list terminator) must
    // still fit in the buffer.
    const unsigned char *data_end =
        reinterpret_cast<const unsigned char *>(p) + 16;
    if (data_end >= (data.data() + data.size())) {
      return false;
    }

    memcpy(&info.pixel_type, p, sizeof(int));
    p += 4;
    info.p_linear = static_cast<unsigned char>(p[0]);  // uchar
    p += 1 + 3;                                        // reserved: uchar[3]
    memcpy(&info.x_sampling, p, sizeof(int));          // int
    p += 4;
    memcpy(&info.y_sampling, p, sizeof(int));          // int
    p += 4;

    tinyexr::swap4(&info.pixel_type);
    tinyexr::swap4(&info.x_sampling);
    tinyexr::swap4(&info.y_sampling);

    channels.push_back(info);
  }

  return true;
}

// Serializes `channels` into the on-disk `channels` attribute layout (see
// ReadChannelInfo). The vector is resized up front; the reserved bytes after
// p_linear stay zero because resize() value-initializes them.
static void WriteChannelInfo(std::vector<unsigned char> &data,
                             const std::vector<ChannelInfo> &channels) {
  size_t sz = 0;

  // Calculate total size.
  for (size_t c = 0; c < channels.size(); c++) {
    sz += strlen(channels[c].name.c_str()) + 1;  // +1 for \0
    sz += 16;                                    // 4 * int
  }
  data.resize(sz + 1);

  unsigned char *p = &data.at(0);

  for (size_t c = 0; c < channels.size(); c++) {
    memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str()));
    p += strlen(channels[c].name.c_str());
    (*p) = '\0';
    p++;

    // Swapped copies so the output is little-endian regardless of host.
    int pixel_type = channels[c].pixel_type;
    int x_sampling = channels[c].x_sampling;
    int y_sampling = channels[c].y_sampling;
    tinyexr::swap4(&pixel_type);
    tinyexr::swap4(&x_sampling);
    tinyexr::swap4(&y_sampling);

    memcpy(p, &pixel_type, sizeof(int));
    p += sizeof(int);
    (*p) = channels[c].p_linear;
    p += 4;
    memcpy(p, &x_sampling, sizeof(int));
    p += sizeof(int);
    memcpy(p, &y_sampling, sizeof(int));
    p += sizeof(int);
  }

  (*p) = '\0';  // list terminator
}

// ZIP(S)-compresses one block of scanline data into `dst`, writing the
// resulting size into compressedSize. `dst` must be large enough for the
// worst case (compressBound of src_size, or src_size for the stored
// fallback). Applies OpenEXR's split/interleave + delta predictor before
// deflate, which makes the data much more compressible.
static void CompressZip(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //

  //
  // Reorder the pixel data: even-indexed bytes go to the first half of
  // tmpBuf, odd-indexed bytes to the second half.
  //

  const char *srcPtr = reinterpret_cast<const char *>(src);

  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;

    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;

      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }

  //
  // Predictor: replace each byte with the (biased) delta to its predecessor.
  //

  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];

    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

#if TINYEXR_USE_MINIZ
  //
  // Compress the data using miniz
  //

  miniz::mz_ulong outSize = miniz::mz_compressBound(src_size);
  int ret = miniz::mz_compress(
      dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
      src_size);
  assert(ret == miniz::MZ_OK);
  (void)ret;

  compressedSize = outSize;
#else
  uLong outSize = compressBound(static_cast<uLong>(src_size));
  int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
                     src_size);
  assert(ret == Z_OK);
  compressedSize = outSize;
#endif

  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}

// Inflates one ZIP(S)-compressed block into `dst` and undoes OpenEXR's
// predictor + byte interleaving. A block whose stored size equals the
// expected uncompressed size is treated as stored/uncompressed (Issue 40).
// Returns false when inflate fails.
static bool DecompressZip(unsigned char *dst,
                          unsigned long *uncompressed_size /* inout */,
                          const unsigned char *src, unsigned long src_size) {
  if ((*uncompressed_size) == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }
  std::vector<unsigned char> tmpBuf(*uncompressed_size);

#if TINYEXR_USE_MINIZ
  int ret =
      miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (miniz::MZ_OK != ret) {
    return false;
  }
#else
  int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (Z_OK != ret) {
    return false;
  }
#endif

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //

  // Predictor: integrate the deltas back into absolute byte values.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);

    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // Reorder the pixel data: re-interleave the two deinterleaved halves.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (*uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + (*uncompressed_size);

    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;

      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }

  return true;
}

// RLE code from OpenEXR --------------------------------------

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204)  // nonstandard extension used : non-constant
                                 // aggregate initializer (also supported by GNU
                                 // C and C99, so no big deal)
#pragma warning(disable : 4244)  // 'initializing': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning(disable : 4267)  // 'argument': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning(disable : 4996)  // 'strdup': The POSIX name for this item is
                                 // deprecated. Instead, use the ISO C and C++
                                 // conformant name: _strdup.
#endif

// Runs shorter than MIN_RUN_LENGTH are emitted literally; runs are capped at
// MAX_RUN_LENGTH so the count fits in a signed byte.
const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;

//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//
// Run-length encodes `inLength` bytes from `in` into `out` and returns the
// encoded length. Encoding: a positive count byte c followed by one value
// means "repeat value c+1 times"; a negative count byte -n followed by n
// bytes means "copy n literal bytes". `out` must be able to hold the worst
// case of roughly (inLength * 3) / 2 bytes.
static int rleCompress(int inLength, const char in[], signed char out[]) {
  const char *inEnd = in + inLength;
  const char *runStart = in;
  const char *runEnd = in + 1;
  signed char *outWrite = out;

  while (runStart < inEnd) {
    // Extend the run while the bytes repeat (capped at MAX_RUN_LENGTH).
    while (runEnd < inEnd && *runStart == *runEnd &&
           runEnd - runStart - 1 < MAX_RUN_LENGTH) {
      ++runEnd;
    }

    if (runEnd - runStart >= MIN_RUN_LENGTH) {
      //
      // Compressible run
      //

      *outWrite++ = static_cast<char>(runEnd - runStart) - 1;
      *outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
      runStart = runEnd;
    } else {
      //
      // Uncompressable run
      //

      // Grow the literal run until a 3-byte repeat begins (which would be
      // worth encoding as a run) or the length cap is reached.
      while (runEnd < inEnd &&
             ((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
              (runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
             runEnd - runStart < MAX_RUN_LENGTH) {
        ++runEnd;
      }

      *outWrite++ = static_cast<char>(runStart - runEnd);

      while (runStart < runEnd) {
        *outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
      }
    }

    ++runEnd;
  }

  return static_cast<int>(outWrite - out);
}

//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//
static int rleUncompress(int inLength, int maxLength, const signed char in[],
                         char out[]) {
  char *outStart = out;

  while (inLength > 0) {
    if (*in < 0) {
      // Literal run: copy -(*in) bytes verbatim.
      int count = -(static_cast<int>(*in++));
      inLength -= count + 1;

      // Fixes #116: Add bounds check to in buffer.
      if ((0 > (maxLength -= count)) || (inLength < 0)) return 0;

      memcpy(out, in, count);
      out += count;
      in += count;
    } else {
      // Repeat run: write the next byte (count + 1) times.
      // NOTE(review): when inLength == 1 here, the value byte read below
      // sits one past the declared input; only the output side is
      // bounds-checked -- TODO confirm callers always pass >= 2 remaining.
      int count = *in++;
      inLength -= 2;

      if (0 > (maxLength -= count + 1)) return 0;

      memset(out, *reinterpret_cast<const char *>(in), count + 1);
      out += count + 1;

      in++;
    }
  }

  return static_cast<int>(out - outStart);
}

#ifdef __clang__
#pragma clang diagnostic pop
#endif

// End of RLE code from OpenEXR -----------------------------------

// RLE-compresses one block of scanline data into `dst` (which must hold the
// worst case of ~(src_size * 3) / 2 bytes). Same preprocessing as
// CompressZip: byte deinterleave + delta predictor, then rleCompress. Falls
// back to a raw copy when RLE does not shrink the data (Issue 40).
static void CompressRle(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //

  //
  // Reorder the pixel data: even bytes to the first half, odd bytes to the
  // second half of tmpBuf.
  //

  const char *srcPtr = reinterpret_cast<const char *>(src);

  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;

    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;

      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }

  //
  // Predictor: replace each byte with the (biased) delta to its predecessor.
  //

  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];

    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // outSize will be (srcSiz * 3) / 2 at max.
  int outSize = rleCompress(static_cast<int>(src_size),
                            reinterpret_cast<const char *>(&tmpBuf.at(0)),
                            reinterpret_cast<signed char *>(dst));
  assert(outSize > 0);

  compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);

  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}

// Decodes one RLE-compressed block into `dst` and undoes the predictor +
// interleave preprocessing. Returns false when the decoded length does not
// match `uncompressed_size` (or for the degenerate src_size <= 2 inputs that
// rleUncompress cannot safely handle -- issue #112 workaround).
static bool DecompressRle(unsigned char *dst,
                          const unsigned long uncompressed_size,
                          const unsigned char *src, unsigned long src_size) {
  if (uncompressed_size == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }

  // Workaround for issue #112.
  // TODO(syoyo): Add more robust out-of-bounds check in `rleUncompress`.
  if (src_size <= 2) {
    return false;
  }

  std::vector<unsigned char> tmpBuf(uncompressed_size);

  int ret = rleUncompress(static_cast<int>(src_size),
                          static_cast<int>(uncompressed_size),
                          reinterpret_cast<const signed char *>(src),
                          reinterpret_cast<char *>(&tmpBuf.at(0)));
  if (ret != static_cast<int>(uncompressed_size)) {
    return false;
  }

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //

  // Predictor: integrate the deltas back into absolute byte values.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;

    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // Reorder the pixel data: re-interleave the two halves.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + uncompressed_size;

    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;

      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }

  return true;
}

#if TINYEXR_USE_PIZ

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"

#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif

#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif

#endif

//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC)
// (3 clause BSD license)
//

// Describes one channel's slice of the PIZ working buffer.
struct PIZChannelData {
  unsigned short *start;
  unsigned short *end;
  int nx;
  int ny;
  int ys;
  int size;
};

//-----------------------------------------------------------------------------
//
// 16-bit Haar Wavelet encoding and decoding
//
// The source code in this file is derived from the encoding
// and decoding routines written by Christian Rouet for his
// PIZ image file format.
//
//-----------------------------------------------------------------------------

//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//

// Forward 14-bit Haar step: (a, b) -> (l, h) where l is the rounded-down
// average (low band) and h is the difference (high band).
inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  const short sa = static_cast<short>(a);
  const short sb = static_cast<short>(b);

  const short avg = (sa + sb) >> 1;  // low band
  const short diff = sa - sb;        // high band

  l = static_cast<unsigned short>(avg);
  h = static_cast<unsigned short>(diff);
}

// Inverse of wenc14: reconstructs (a, b) from the average/difference pair.
// The (d & 1) term restores the bit lost to the floor division in wenc14.
inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  const short sl = static_cast<short>(l);
  const short sh = static_cast<short>(h);

  const int d = sh;
  const int ra = sl + (d & 1) + (d >> 1);

  const short sa = static_cast<short>(ra);
  const short sb = static_cast<short>(ra - d);

  a = static_cast<unsigned short>(sa);
  b = static_cast<unsigned short>(sb);
}

//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//

// Parameters of the modulo-arithmetic wavelet pair below: full 16-bit
// range, with a half-range offset so differences wrap symmetrically.
const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;

// Forward wavelet step with modulo arithmetic; handles the full 16-bit
// range (used when the channel maximum is >= 1 << 14).
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  int ao = (a + A_OFFSET) & MOD_MASK;
  int m = ((ao + b) >> 1);
  int d = ao - b;

  if (d < 0) m = (m + M_OFFSET) & MOD_MASK;

  d &= MOD_MASK;

  l = static_cast<unsigned short>(m);
  h = static_cast<unsigned short>(d);
}

// Inverse of wenc16(): reconstructs (a, b) modulo 2^16 from average `l`
// and wrapped difference `h`.
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  int m = l;
  int d = h;
  int bb = (m - (d >> 1)) & MOD_MASK;
  int aa = (d + bb - A_OFFSET) & MOD_MASK;
  b = static_cast<unsigned short>(bb);
  a = static_cast<unsigned short>(aa);
}

//
// 2D Wavelet encoding:
//
// In-place hierarchical 2D Haar-style transform of an nx * ny array stored
// with strides ox (x) and oy (y). Picks the 14-bit (exact, wenc14) or
// 16-bit (modulo, wenc16) basis depending on `mx`, so the decoder must be
// given the same maximum value to invert it.
//

static void wav2Encode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;   // == 1 << level
  int p2 = 2;  // == 1 << (level+1)

  //
  // Hierarchical loop on smaller dimension n
  //

  while (p2 <= n) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //

    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet encoding
        //

        if (w14) {
          wenc14(*px, *p01, i00, i01);
          wenc14(*p10, *p11, i10, i11);
          wenc14(i00, i10, *px, *p10);
          wenc14(i01, i11, *p01, *p11);
        } else {
          wenc16(*px, *p01, i00, i01);
          wenc16(*p10, *p11, i10, i11);
          wenc16(i00, i10, *px, *p10);
          wenc16(i01, i11, *p01, *p11);
        }
      }

      //
      // Encode (1D) odd column (still in Y loop)
      //

      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wenc14(*px, *p10, i00, *p10);
        else
          wenc16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Encode (1D) odd line (must loop in X)
    //

    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wenc14(*px, *p01, i00, *p01);
        else
          wenc16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //

    p = p2;
    p2 <<= 1;
  }
}

//
// 2D Wavelet decoding:
//
// Exact inverse of wav2Encode() for the same (nx, ox, ny, oy, mx).
// First walks p up to the coarsest transform level, then undoes the levels
// from coarse to fine.
//

static void wav2Decode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;
  int p2;

  //
  // Search max level
  //

  while (p <= n) p <<= 1;

  p >>= 1;
  p2 = p;
  p >>= 1;

  //
  // Hierarchical loop on smaller dimension n
  //

  while (p >= 1) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //

    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet decoding
        //

        if (w14) {
          wdec14(*px, *p10, i00, i10);
          wdec14(*p01, *p11, i01, i11);
          wdec14(i00, i01, *px, *p01);
          wdec14(i10, i11, *p10, *p11);
        } else {
          wdec16(*px, *p10, i00, i10);
          wdec16(*p01, *p11, i01, i11);
          wdec16(i00, i01, *px, *p01);
          wdec16(i10, i11, *p10, *p11);
        }
      }

      //
      // Decode (1D) odd column (still in Y loop)
      //

      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wdec14(*px, *p10, i00, *p10);
        else
          wdec16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Decode (1D) odd line (must loop in X)
    //

    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wdec14(*px, *p01, i00, *p01);
        else
          wdec16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //

    p2 = p;
    p >>= 1;
  }
}

//-----------------------------------------------------------------------------
//
// 16-bit Huffman compression and decompression.
//
// The source code in this file is derived from the 8-bit
// Huffman compression and decompression routines written
// by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------

// Adds some modification for tinyexr.
const int HUF_ENCBITS = 16;  // literal (value) bit length
const int HUF_DECBITS = 14;  // decoding bit size (>= 8)

const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1;  // encoding table size
const int HUF_DECSIZE = 1 << HUF_DECBITS;        // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;

// One entry of the primary decoding hash table. For a short code
// (length <= HUF_DECBITS) `len`/`lit` give the code length and literal
// directly; for long codes `lit` is the number of candidate symbols
// stored in the secondary array `p`.
struct HufDec {  // short code long code
                 //-------------------------------
  unsigned int len : 8;   // code length 0
  unsigned int lit : 24;  // lit p size
  unsigned int *p;        // 0 lits
};

// An encoding-table entry packs the code value and its bit length into one
// long long: [63:6] = code, [5:0] = length.
inline long long hufLength(long long code) { return code & 63; }

inline long long hufCode(long long code) { return code >> 6; }

// Append `nBits` of `bits` to the output stream; (c, lc) is the pending
// bit accumulator (lc = number of valid low bits in c).
inline void outputBits(int nBits, long long bits, long long &c, int &lc,
                       char *&out) {
  c <<= nBits;
  lc += nBits;

  c |= bits;

  while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8)));
}

// Read `nBits` from the input stream through the same (c, lc) accumulator.
inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
  while (lc < nBits) {
    c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++));
    lc += 8;
  }

  lc -= nBits;

  return (c >> lc) & ((1 << nBits) - 1);
}

//
// ENCODING TABLE BUILDING & (UN)PACKING
//

//
// Build a "canonical" Huffman code table:
//	- for each (uncompressed) symbol, hcode contains the length
//	  of the corresponding code (in the compressed data)
//	- canonical codes are computed and stored in hcode
//	- the rules for constructing canonical codes are as follows:
//	  * shorter codes (if filled with zeroes to the right)
//	    have a numerically higher value than longer codes
//	  * for codes with the same length, numerical values
//	    increase with numerical symbol values
//	- because the canonical code table can be constructed from
//	  symbol lengths alone, the code table can be transmitted
//	  without sending the actual code values
//	- see http://www.compressconsult.com/huffman/
//

static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
  long long n[59];

  //
  // For each i from 0 through 58, count the
  // number of different codes of length i, and
  // store the count in n[i].
  //

  for (int i = 0; i <= 58; ++i) n[i] = 0;

  for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1;

  //
  // For each i from 58 through 1, compute the
  // numerically lowest code with length i, and
  // store that code in n[i].
  //

  long long c = 0;

  for (int i = 58; i > 0; --i) {
    long long nc = ((c + n[i]) >> 1);
    n[i] = c;
    c = nc;
  }

  //
  // hcode[i] contains the length, l, of the
  // code for symbol i.  Assign the next available
  // code of length l to the symbol and store both
  // l and the code in hcode[i].
  //

  for (int i = 0; i < HUF_ENCSIZE; ++i) {
    int l = static_cast<int>(hcode[i]);

    if (l > 0) hcode[i] = l | (n[l]++ << 6);
  }
}

//
// Compute Huffman codes (based on frq input) and store them in frq:
//	- code structure is : [63:lsb - 6:msb] | [5-0: bit length];
//	- max code length is 58 bits;
//	- codes outside the range [im-iM] have a null length (unused values);
//	- original frequencies are destroyed;
//	- encoding tables are used by hufEncode() and hufBuildDecTable();
//

// Min-heap ordering on pointed-to frequencies (least frequent on top).
struct FHeapCompare {
  bool operator()(long long *a, long long *b) { return *a > *b; }
};

static void hufBuildEncTable(
    long long *frq,  // io: input frequencies [HUF_ENCSIZE], output table
    int *im,         //  o: min frq index
    int *iM)         //  o: max frq index
{
  //
  // This function assumes that when it is called, array frq
  // indicates the frequency of all possible symbols in the data
  // that are to be Huffman-encoded.  (frq[i] contains the number
  // of occurrences of symbol i in the data.)
  //
  // The loop below does three things:
  //
  // 1) Finds the minimum and maximum indices that point
  //    to non-zero entries in frq:
  //
  //    frq[im] != 0, and frq[i] == 0 for all i < im
  //    frq[iM] != 0, and frq[i] == 0 for all i > iM
  //
  // 2) Fills array fHeap with pointers to all non-zero
  //    entries in frq.
  //
  // 3) Initializes array hlink such that hlink[i] == i
  //    for all array entries.
  //

  std::vector<int> hlink(HUF_ENCSIZE);
  std::vector<long long *> fHeap(HUF_ENCSIZE);

  *im = 0;

  while (!frq[*im]) (*im)++;

  int nf = 0;

  for (int i = *im; i < HUF_ENCSIZE; i++) {
    hlink[i] = i;

    if (frq[i]) {
      fHeap[nf] = &frq[i];
      nf++;
      *iM = i;
    }
  }

  //
  // Add a pseudo-symbol, with a frequency count of 1, to frq;
  // adjust the fHeap and hlink array accordingly.  Function
  // hufEncode() uses the pseudo-symbol for run-length encoding.
  //

  (*iM)++;
  frq[*iM] = 1;
  fHeap[nf] = &frq[*iM];
  nf++;

  //
  // Build an array, scode, such that scode[i] contains the number
  // of bits assigned to symbol i.  Conceptually this is done by
  // constructing a tree whose leaves are the symbols with non-zero
  // frequency:
  //
  //     Make a heap that contains all symbols with a non-zero frequency,
  //     with the least frequent symbol on top.
  //
  //     Repeat until only one symbol is left on the heap:
  //
  //         Take the two least frequent symbols off the top of the heap.
  //         Create a new node that has first two nodes as children, and
  //         whose frequency is the sum of the frequencies of the first
  //         two nodes.  Put the new node back into the heap.
  //
  // The last node left on the heap is the root of the tree.  For each
  // leaf node, the distance between the root and the leaf is the length
  // of the code for the corresponding symbol.
  //
  // The loop below doesn't actually build the tree; instead we compute
  // the distances of the leaves from the root on the fly.  When a new
  // node is added to the heap, then that node's descendants are linked
  // into a single linear list that starts at the new node, and the code
  // lengths of the descendants (that is, their distance from the root
  // of the tree) are incremented by one.
  //

  std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

  std::vector<long long> scode(HUF_ENCSIZE);
  memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE);

  while (nf > 1) {
    //
    // Find the indices, mm and m, of the two smallest non-zero frq
    // values in fHeap, add the smallest frq to the second-smallest
    // frq, and remove the smallest frq value from fHeap.
    //

    int mm = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
    --nf;

    int m = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

    frq[m] += frq[mm];
    std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

    //
    // The entries in scode are linked into lists with the
    // entries in hlink serving as "next" pointers and with
    // the end of a list marked by hlink[j] == j.
    //
    // Traverse the lists that start at scode[m] and scode[mm].
    // For each element visited, increment the length of the
    // corresponding code by one bit. (If we visit scode[j]
    // during the traversal, then the code for symbol j becomes
    // one bit longer.)
    //
    // Merge the lists that start at scode[m] and scode[mm]
    // into a single list that starts at scode[m].
    //

    //
    // Add a bit to all codes in the first list.
    //

    for (int j = m;; j = hlink[j]) {
      scode[j]++;

      assert(scode[j] <= 58);

      if (hlink[j] == j) {
        //
        // Merge the two lists.
        //

        hlink[j] = mm;
        break;
      }
    }

    //
    // Add a bit to all codes in the second list
    //

    for (int j = mm;; j = hlink[j]) {
      scode[j]++;

      assert(scode[j] <= 58);

      if (hlink[j] == j) break;
    }
  }

  //
  // Build a canonical Huffman code table, replacing the code
  // lengths in scode with (code, code length) pairs.  Copy the
  // code table from scode into frq.
  //

  hufCanonicalCodeTable(scode.data());
  memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE);
}

//
// Pack an encoding table:
//	- only code lengths, not actual codes, are stored
//	- runs of zeroes are compressed as follows:
//
//	  unpacked		packed
//	  --------------------------------
//	  1 zero		0	(6 bits)
//	  2 zeroes		59
//	  3 zeroes		60
//	  4 zeroes		61
//	  5 zeroes		62
//	  n zeroes (6 or more)	63 n-6	(6 + 8 bits)
//

const int SHORT_ZEROCODE_RUN = 59;
const int LONG_ZEROCODE_RUN = 63;
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;

static void hufPackEncTable(
    const long long *hcode,  // i : encoding table [HUF_ENCSIZE]
    int im,                  // i : min hcode index
    int iM,                  // i : max hcode index
    char **pcode)            //  o: ptr to packed table (updated)
{
  char *p = *pcode;
  long long c = 0;
  int lc = 0;

  for (; im <= iM; im++) {
    int l = hufLength(hcode[im]);

    if (l == 0) {
      // Collapse a run of zero-length (unused) codes into one token.
      int zerun = 1;

      while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
        if (hufLength(hcode[im + 1]) > 0) break;
        im++;
        zerun++;
      }

      if (zerun >= 2) {
        if (zerun >= SHORTEST_LONG_RUN) {
          outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
          outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
        } else {
          outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
        }
        continue;
      }
    }

    outputBits(6, l, c, lc, p);
  }

  // Flush the partially-filled last byte (left-aligned).
  if (lc > 0) *p++ = (unsigned char)(c << (8 - lc));

  *pcode = p;
}

//
// Unpack an encoding table packed by hufPackEncTable():
// Returns false (instead of throwing, as OpenEXR does) when the packed
// table overruns its `ni` bytes or a zero run would overflow [im, iM].
//

static bool hufUnpackEncTable(
    const char **pcode,  // io: ptr to packed table (updated)
    int ni,              // i : input size (in bytes)
    int im,              // i : min hcode index
    int iM,              // i : max hcode index
    long long *hcode)    //  o: encoding table [HUF_ENCSIZE]
{
  memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);

  const char *p = *pcode;
  long long c = 0;
  int lc = 0;

  for (; im <= iM; im++) {
    if (p - *pcode >= ni) {
      return false;
    }

    long long l = hcode[im] = getBits(6, c, lc, p);  // code length

    if (l == (long long)LONG_ZEROCODE_RUN) {
      if (p - *pcode > ni) {
        return false;
      }

      int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;

      if (im + zerun > iM + 1) {
        return false;
      }

      while (zerun--) hcode[im++] = 0;

      im--;
    } else if (l >= (long long)SHORT_ZEROCODE_RUN) {
      int zerun = l - SHORT_ZEROCODE_RUN + 2;

      if (im + zerun > iM + 1) {
        return false;
      }

      while (zerun--) hcode[im++] = 0;

      im--;
    }
  }

  *pcode = const_cast<char *>(p);

  // Reconstruct the actual canonical codes from the unpacked lengths.
  hufCanonicalCodeTable(hcode);

  return true;
}

//
// DECODING TABLE BUILDING
//

//
// Clear a newly allocated decoding table so that it contains only zeroes.
//

static void hufClearDecTable(HufDec *hdecod)  // io: (allocated by caller)
//     decoding table [HUF_DECSIZE]
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    hdecod[i].len = 0;
    hdecod[i].lit = 0;
    hdecod[i].p = NULL;
  }
  // memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
}

//
// Build a decoding hash table based on the encoding table hcode:
//	- short codes (<= HUF_DECBITS) are resolved with a single table access;
//	- long code entry allocations are not optimized, because long codes are
//	  unfrequent;
//	- decoding tables are used by hufDecode();
// Returns false on an invalid table entry (tinyexr modification: OpenEXR
// throws here).
//

static bool hufBuildDecTable(const long long *hcode,  // i : encoding table
                             int im,                  // i : min index in hcode
                             int iM,                  // i : max index in hcode
                             HufDec *hdecod)  //  o: (allocated by caller)
//     decoding table [HUF_DECSIZE]
{
  //
  // Init hashtable & loop on all codes.
  // Assumes that hufClearDecTable(hdecod) has already been called.
  //

  for (; im <= iM; im++) {
    long long c = hufCode(hcode[im]);
    int l = hufLength(hcode[im]);

    if (c >> l) {
      //
      // Error: c is supposed to be an l-bit code,
      // but c contains a value that is greater
      // than the largest l-bit number.
      //

      // invalidTableEntry();
      return false;
    }

    if (l > HUF_DECBITS) {
      //
      // Long code: add a secondary entry
      //

      HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));

      if (pl->len) {
        //
        // Error: a short code has already
        // been stored in table entry *pl.
        //

        // invalidTableEntry();
        return false;
      }

      pl->lit++;

      if (pl->p) {
        // Grow the secondary symbol list by one (long codes are rare, so
        // the O(n^2) reallocation pattern is acceptable).
        unsigned int *p = pl->p;
        pl->p = new unsigned int[pl->lit];

        for (unsigned int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];

        delete[] p;
      } else {
        pl->p = new unsigned int[1];
      }

      pl->p[pl->lit - 1] = im;
    } else if (l) {
      //
      // Short code: init all primary entries
      //

      HufDec *pl = hdecod + (c << (HUF_DECBITS - l));

      for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
        if (pl->len || pl->p) {
          //
          // Error: a short code or a long code has
          // already been stored in table entry *pl.
          //

          // invalidTableEntry();
          return false;
        }

        pl->len = l;
        pl->lit = im;
      }
    }
  }

  return true;
}

//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//

static void hufFreeDecTable(HufDec *hdecod)  // io: Decoding table
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    if (hdecod[i].p) {
      delete[] hdecod[i].p;
      hdecod[i].p = 0;
    }
  }
}

//
// ENCODING
//

// Emit one Huffman code (value + length packed as in hufLength/hufCode).
inline void outputCode(long long code, long long &c, int &lc, char *&out) {
  outputBits(hufLength(code), hufCode(code), c, lc, out);
}

inline void sendCode(long long sCode, int runCount, long long runCode,
                     long long &c, int &lc, char *&out) {
  //
  // Output a run of runCount instances of the symbol sCount.
  // Output the symbols explicitly, or if that is shorter, output
  // the sCode symbol once followed by a runCode symbol and runCount
  // expressed as an 8-bit number.
  //
  // Note: runCount is the number of repeats AFTER the first symbol, so the
  // explicit branch emits runCount + 1 symbols (hence `-- >= 0`).

  if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
    outputCode(sCode, c, lc, out);
    outputCode(runCode, c, lc, out);
    outputBits(8, runCount, c, lc, out);
  } else {
    while (runCount-- >= 0) outputCode(sCode, c, lc, out);
  }
}

//
// Encode (compress) ni values based on the Huffman encoding table hcode:
// `rlc` is the run-length pseudo-symbol added by hufBuildEncTable().
//

static int hufEncode            // return: output size (in bits)
    (const long long *hcode,    // i : encoding table
     const unsigned short *in,  // i : uncompressed input buffer
     const int ni,              // i : input buffer size (in bytes)
     int rlc,                   // i : rl code
     char *out)                 //  o: compressed output buffer
{
  char *outStart = out;
  long long c = 0;  // bits not yet written to out
  int lc = 0;       // number of valid bits in c (LSB)

  int s = in[0];
  int cs = 0;

  //
  // Loop on input values
  //

  for (int i = 1; i < ni; i++) {
    //
    // Count same values or send code
    //

    if (s == in[i] && cs < 255) {
      cs++;
    } else {
      sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
      cs = 0;
    }

    s = in[i];
  }

  //
  // Send remaining code
  //

  sendCode(hcode[s], cs, hcode[rlc], c, lc, out);

  // Flush the final partial byte (left-aligned).
  if (lc) *out = (c << (8 - lc)) & 0xff;

  return (out - outStart) * 8 + lc;
}

//
// DECODING
//

//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
//
// #define getChar(c, lc, in) \ { \ c = (c << 8) | *(unsigned char *)(in++); \ lc += 8; \ } #if 0 #define getCode(po, rlc, c, lc, in, out, ob, oe) \ { \ if (po == rlc) { \ if (lc < 8) getChar(c, lc, in); \ \ lc -= 8; \ \ unsigned char cs = (c >> lc); \ \ if (out + cs > oe) return false; \ \ /* TinyEXR issue 78 */ \ unsigned short s = out[-1]; \ \ while (cs-- > 0) *out++ = s; \ } else if (out < oe) { \ *out++ = po; \ } else { \ return false; \ } \ } #else static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in, const char *in_end, unsigned short *&out, const unsigned short *ob, const unsigned short *oe) { (void)ob; if (po == rlc) { if (lc < 8) { /* TinyEXR issue 78 */ if ((in + 1) >= in_end) { return false; } getChar(c, lc, in); } lc -= 8; unsigned char cs = (c >> lc); if (out + cs > oe) return false; // Bounds check for safety // Issue 100. if ((out - 1) < ob) return false; unsigned short s = out[-1]; while (cs-- > 0) *out++ = s; } else if (out < oe) { *out++ = po; } else { return false; } return true; } #endif // // Decode (uncompress) ni bits based on encoding & decoding tables: // static bool hufDecode(const long long *hcode, // i : encoding table const HufDec *hdecod, // i : decoding table const char *in, // i : compressed input buffer int ni, // i : input size (in bits) int rlc, // i : run-length code int no, // i : expected output size (in bytes) unsigned short *out) // o: uncompressed output buffer { long long c = 0; int lc = 0; unsigned short *outb = out; // begin unsigned short *oe = out + no; // end const char *ie = in + (ni + 7) / 8; // input byte size // // Loop on input bytes // while (in < ie) { getChar(c, lc, in); // // Access decoding table // while (lc >= HUF_DECBITS) { const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK]; if (pl.len) { // // Get short code // lc -= pl.len; // std::cout << "lit = " << pl.lit << std::endl; // std::cout << "rlc = " << rlc << std::endl; // std::cout << "c = " << c << std::endl; // 
std::cout << "lc = " << lc << std::endl; // std::cout << "in = " << in << std::endl; // std::cout << "out = " << out << std::endl; // std::cout << "oe = " << oe << std::endl; if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) { return false; } } else { if (!pl.p) { return false; } // invalidCode(); // wrong code // // Search long code // unsigned int j; for (j = 0; j < pl.lit; j++) { int l = hufLength(hcode[pl.p[j]]); while (lc < l && in < ie) // get more bits getChar(c, lc, in); if (lc >= l) { if (hufCode(hcode[pl.p[j]]) == ((c >> (lc - l)) & (((long long)(1) << l) - 1))) { // // Found : get long code // lc -= l; if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) { return false; } break; } } } if (j == pl.lit) { return false; // invalidCode(); // Not found } } } } // // Get remaining (short) codes // int i = (8 - ni) & 7; c >>= i; lc -= i; while (lc > 0) { const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK]; if (pl.len) { lc -= pl.len; if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) { return false; } } else { return false; // invalidCode(); // wrong (long) code } } if (out - outb != no) { return false; } // notEnoughData (); return true; } static void countFrequencies(std::vector<long long> &freq, const unsigned short data[/*n*/], int n) { for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0; for (int i = 0; i < n; ++i) ++freq[data[i]]; } static void writeUInt(char buf[4], unsigned int i) { unsigned char *b = (unsigned char *)buf; b[0] = i; b[1] = i >> 8; b[2] = i >> 16; b[3] = i >> 24; } static unsigned int readUInt(const char buf[4]) { const unsigned char *b = (const unsigned char *)buf; return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) | ((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000); } // // EXTERNAL INTERFACE // static int hufCompress(const unsigned short raw[], int nRaw, char compressed[]) { if (nRaw == 0) return 0; std::vector<long long> freq(HUF_ENCSIZE); countFrequencies(freq, raw, nRaw); int im = 0; int 
iM = 0; hufBuildEncTable(freq.data(), &im, &iM); char *tableStart = compressed + 20; char *tableEnd = tableStart; hufPackEncTable(freq.data(), im, iM, &tableEnd); int tableLength = tableEnd - tableStart; char *dataStart = tableEnd; int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart); int data_length = (nBits + 7) / 8; writeUInt(compressed, im); writeUInt(compressed + 4, iM); writeUInt(compressed + 8, tableLength); writeUInt(compressed + 12, nBits); writeUInt(compressed + 16, 0); // room for future extensions return dataStart + data_length - compressed; } static bool hufUncompress(const char compressed[], int nCompressed, std::vector<unsigned short> *raw) { if (nCompressed == 0) { if (raw->size() != 0) return false; return false; } int im = readUInt(compressed); int iM = readUInt(compressed + 4); // int tableLength = readUInt (compressed + 8); int nBits = readUInt(compressed + 12); if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false; const char *ptr = compressed + 20; // // Fast decoder needs at least 2x64-bits of compressed data, and // needs to be run-able on this platform. Otherwise, fall back // to the original decoder // // if (FastHufDecoder::enabled() && nBits > 128) //{ // FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM); // fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw); //} // else { std::vector<long long> freq(HUF_ENCSIZE); std::vector<HufDec> hdec(HUF_DECSIZE); hufClearDecTable(&hdec.at(0)); hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM, &freq.at(0)); { if (nBits > 8 * (nCompressed - (ptr - compressed))) { return false; } hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0)); hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, raw->size(), raw->data()); } // catch (...) 
//{ // hufFreeDecTable (hdec); // throw; //} hufFreeDecTable(&hdec.at(0)); } return true; } // // Functions to compress the range of values in the pixel data // const int USHORT_RANGE = (1 << 16); const int BITMAP_SIZE = (USHORT_RANGE >> 3); static void bitmapFromData(const unsigned short data[/*nData*/], int nData, unsigned char bitmap[BITMAP_SIZE], unsigned short &minNonZero, unsigned short &maxNonZero) { for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0; for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7)); bitmap[0] &= ~1; // zero is not explicitly stored in // the bitmap; we assume that the // data always contain zeroes minNonZero = BITMAP_SIZE - 1; maxNonZero = 0; for (int i = 0; i < BITMAP_SIZE; ++i) { if (bitmap[i]) { if (minNonZero > i) minNonZero = i; if (maxNonZero < i) maxNonZero = i; } } } static unsigned short forwardLutFromBitmap( const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[i] = k++; else lut[i] = 0; } return k - 1; // maximum value stored in lut[], } // i.e. number of ones in bitmap minus 1 static unsigned short reverseLutFromBitmap( const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i; } int n = k - 1; while (k < USHORT_RANGE) lut[k++] = 0; return n; // maximum k where lut[k] is non-zero, } // i.e. 
number of ones in bitmap minus 1 static void applyLut(const unsigned short lut[USHORT_RANGE], unsigned short data[/*nData*/], int nData) { for (int i = 0; i < nData; ++i) data[i] = lut[data[i]]; } #ifdef __clang__ #pragma clang diagnostic pop #endif // __clang__ #ifdef _MSC_VER #pragma warning(pop) #endif static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize, const unsigned char *inPtr, size_t inSize, const std::vector<ChannelInfo> &channelInfo, int data_width, int num_lines) { std::vector<unsigned char> bitmap(BITMAP_SIZE); unsigned short minNonZero; unsigned short maxNonZero; #if !MINIZ_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. } assert(0); return false; #endif // Assume `inSize` is multiple of 2 or 4. std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short)); std::vector<PIZChannelData> channelData(channelInfo.size()); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t c = 0; c < channelData.size(); c++) { PIZChannelData &cd = channelData[c]; cd.start = tmpBufferEnd; cd.end = cd.start; cd.nx = data_width; cd.ny = num_lines; // cd.ys = c.channel().ySampling; size_t pixelSize = sizeof(int); // UINT and FLOAT if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } cd.size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += cd.nx * cd.ny * cd.size; } const unsigned char *ptr = inPtr; for (int y = 0; y < num_lines; ++y) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(cd.end, ptr, n * sizeof(unsigned short)); ptr += n * sizeof(unsigned short); cd.end += n; } } bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), bitmap.data(), minNonZero, maxNonZero); std::vector<unsigned short> lut(USHORT_RANGE); unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data()); applyLut(lut.data(), &tmpBuffer.at(0), 
static_cast<int>(tmpBuffer.size())); // // Store range compression info in _outBuffer // char *buf = reinterpret_cast<char *>(outPtr); memcpy(buf, &minNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); memcpy(buf, &maxNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); if (minNonZero <= maxNonZero) { memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero), maxNonZero - minNonZero + 1); buf += maxNonZero - minNonZero + 1; } // // Apply wavelet encoding // for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Apply Huffman encoding; append the result to _outBuffer // // length header(4byte), then huff data. Initialize length header with zero, // then later fill it by `length`. char *lengthPtr = buf; int zero = 0; memcpy(buf, &zero, sizeof(int)); buf += sizeof(int); int length = hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf); memcpy(lengthPtr, &length, sizeof(int)); (*outSize) = static_cast<unsigned int>( (reinterpret_cast<unsigned char *>(buf) - outPtr) + static_cast<unsigned int>(length)); // Use uncompressed data when compressed data is larger than uncompressed. // (Issue 40) if ((*outSize) >= inSize) { (*outSize) = static_cast<unsigned int>(inSize); memcpy(outPtr, inPtr, inSize); } return true; } static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr, size_t tmpBufSize, size_t inLen, int num_channels, const EXRChannelInfo *channels, int data_width, int num_lines) { if (inLen == tmpBufSize) { // Data is not compressed(Issue 40). memcpy(outPtr, inPtr, inLen); return true; } std::vector<unsigned char> bitmap(BITMAP_SIZE); unsigned short minNonZero; unsigned short maxNonZero; #if !MINIZ_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. 
} assert(0); return false; #endif memset(bitmap.data(), 0, BITMAP_SIZE); const unsigned char *ptr = inPtr; // minNonZero = *(reinterpret_cast<const unsigned short *>(ptr)); tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr)); // maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2)); tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2)); ptr += 4; if (maxNonZero >= BITMAP_SIZE) { return false; } if (minNonZero <= maxNonZero) { memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr, maxNonZero - minNonZero + 1); ptr += maxNonZero - minNonZero + 1; } std::vector<unsigned short> lut(USHORT_RANGE); memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE); unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data()); // // Huffman decoding // int length; // length = *(reinterpret_cast<const int *>(ptr)); tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr)); ptr += sizeof(int); if (size_t((ptr - inPtr) + length) > inLen) { return false; } std::vector<unsigned short> tmpBuffer(tmpBufSize); hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer); // // Wavelet decoding // std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels)); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) { const EXRChannelInfo &chan = channels[i]; size_t pixelSize = sizeof(int); // UINT and FLOAT if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } channelData[i].start = tmpBufferEnd; channelData[i].end = channelData[i].start; channelData[i].nx = data_width; channelData[i].ny = num_lines; // channelData[i].ys = 1; channelData[i].size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size; } for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { 
wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Expand the pixel data to their original range // applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSize)); for (int y = 0; y < num_lines; y++) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short))); outPtr += n * sizeof(unsigned short); cd.end += n; } } return true; } #endif // TINYEXR_USE_PIZ #if TINYEXR_USE_ZFP struct ZFPCompressionParam { double rate; unsigned int precision; unsigned int __pad0; double tolerance; int type; // TINYEXR_ZFP_COMPRESSIONTYPE_* unsigned int __pad1; ZFPCompressionParam() { type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE; rate = 2.0; precision = 0; tolerance = 0.0; } }; static bool FindZFPCompressionParam(ZFPCompressionParam *param, const EXRAttribute *attributes, int num_attributes, std::string *err) { bool foundType = false; for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionType") == 0)) { if (attributes[i].size == 1) { param->type = static_cast<int>(attributes[i].value[0]); foundType = true; break; } else { if (err) { (*err) += "zfpCompressionType attribute must be uchar(1 byte) type.\n"; } return false; } } } if (!foundType) { if (err) { (*err) += "`zfpCompressionType` attribute not found.\n"; } return false; } if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) && (attributes[i].size == 8)) { param->rate = *(reinterpret_cast<double *>(attributes[i].value)); return true; } } if (err) { (*err) += "`zfpCompressionRate` attribute not found.\n"; } } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, 
"zfpCompressionPrecision") == 0) && (attributes[i].size == 4)) { param->rate = *(reinterpret_cast<int *>(attributes[i].value)); return true; } } if (err) { (*err) += "`zfpCompressionPrecision` attribute not found.\n"; } } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) && (attributes[i].size == 8)) { param->tolerance = *(reinterpret_cast<double *>(attributes[i].value)); return true; } } if (err) { (*err) += "`zfpCompressionTolerance` attribute not found.\n"; } } else { if (err) { (*err) += "Unknown value specified for `zfpCompressionType`.\n"; } } return false; } // Assume pixel format is FLOAT for all channels. static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines, size_t num_channels, const unsigned char *src, unsigned long src_size, const ZFPCompressionParam &param) { size_t uncompressed_size = size_t(dst_width) * size_t(dst_num_lines) * num_channels; if (uncompressed_size == src_size) { // Data is not compressed(Issue 40). 
memcpy(dst, src, src_size); } zfp_stream *zfp = NULL; zfp_field *field = NULL; assert((dst_width % 4) == 0); assert((dst_num_lines % 4) == 0); if ((size_t(dst_width) & 3U) || (size_t(dst_num_lines) & 3U)) { return false; } field = zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)), zfp_type_float, static_cast<unsigned int>(dst_width), static_cast<unsigned int>(dst_num_lines) * static_cast<unsigned int>(num_channels)); zfp = zfp_stream_open(NULL); if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2, /* write random access */ 0); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { zfp_stream_set_precision(zfp, param.precision); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { zfp_stream_set_accuracy(zfp, param.tolerance); } else { assert(0); } size_t buf_size = zfp_stream_maximum_size(zfp, field); std::vector<unsigned char> buf(buf_size); memcpy(&buf.at(0), src, src_size); bitstream *stream = stream_open(&buf.at(0), buf_size); zfp_stream_set_bit_stream(zfp, stream); zfp_stream_rewind(zfp); size_t image_size = size_t(dst_width) * size_t(dst_num_lines); for (size_t c = 0; c < size_t(num_channels); c++) { // decompress 4x4 pixel block. for (size_t y = 0; y < size_t(dst_num_lines); y += 4) { for (size_t x = 0; x < size_t(dst_width); x += 4) { float fblock[16]; zfp_decode_block_float_2(zfp, fblock); for (size_t j = 0; j < 4; j++) { for (size_t i = 0; i < 4; i++) { dst[c * image_size + ((y + j) * size_t(dst_width) + (x + i))] = fblock[j * 4 + i]; } } } } } zfp_field_free(field); zfp_stream_close(zfp); stream_close(stream); return true; } // Assume pixel format is FLOAT for all channels. 
static bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize, const float *inPtr, int width, int num_lines, int num_channels, const ZFPCompressionParam &param) { zfp_stream *zfp = NULL; zfp_field *field = NULL; assert((width % 4) == 0); assert((num_lines % 4) == 0); if ((size_t(width) & 3U) || (size_t(num_lines) & 3U)) { return false; } // create input array. field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)), zfp_type_float, static_cast<unsigned int>(width), static_cast<unsigned int>(num_lines * num_channels)); zfp = zfp_stream_open(NULL); if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { zfp_stream_set_precision(zfp, param.precision); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { zfp_stream_set_accuracy(zfp, param.tolerance); } else { assert(0); } size_t buf_size = zfp_stream_maximum_size(zfp, field); outBuf->resize(buf_size); bitstream *stream = stream_open(&outBuf->at(0), buf_size); zfp_stream_set_bit_stream(zfp, stream); zfp_field_free(field); size_t image_size = size_t(width) * size_t(num_lines); for (size_t c = 0; c < size_t(num_channels); c++) { // compress 4x4 pixel block. for (size_t y = 0; y < size_t(num_lines); y += 4) { for (size_t x = 0; x < size_t(width); x += 4) { float fblock[16]; for (size_t j = 0; j < 4; j++) { for (size_t i = 0; i < 4; i++) { fblock[j * 4 + i] = inPtr[c * image_size + ((y + j) * size_t(width) + (x + i))]; } } zfp_encode_block_float_2(zfp, fblock); } } } zfp_stream_flush(zfp); (*outSize) = static_cast<unsigned int>(zfp_stream_compressed_size(zfp)); zfp_stream_close(zfp); return true; } #endif // // ----------------------------------------------------------------- // // heuristics #define TINYEXR_DIMENSION_THRESHOLD (1024 * 8192) // TODO(syoyo): Refactor function arguments. 
// Decodes one chunk (scanline block or tile) of pixel data into the
// per-channel planar images `out_images`, dispatching on compression_type
// (PIZ / ZIP / ZIPS / RLE / ZFP / NONE). HALF source channels are widened to
// FLOAT when requested_pixel_types[c] asks for it; `line_order != 0` writes
// scanlines bottom-up. Returns false on malformed/truncated input.
static bool DecodePixelData(/* out */ unsigned char **out_images,
                            const int *requested_pixel_types,
                            const unsigned char *data_ptr, size_t data_len,
                            int compression_type, int line_order, int width,
                            int height, int x_stride, int y, int line_no,
                            int num_lines, size_t pixel_data_size,
                            size_t num_attributes,
                            const EXRAttribute *attributes, size_t num_channels,
                            const EXRChannelInfo *channels,
                            const std::vector<size_t> &channel_offset_list) {
  if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {  // PIZ
#if TINYEXR_USE_PIZ
    if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) {
      // Invalid input #90
      return false;
    }

    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(
        static_cast<size_t>(width * num_lines) * pixel_data_size));
    size_t tmpBufLen = outBuf.size();

    bool ret = tinyexr::DecompressPiz(
        reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen,
        data_len, static_cast<int>(num_channels), channels, width, num_lines);

    if (!ret) {
      return false;
    }

    // For PIZ_COMPRESSION:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   pixel sample data for channel 1 for scanline 1
    //   pixel sample data for channel ... for scanline 1
    //   pixel sample data for channel n for scanline 1
    //   ...
    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            FP16 hf;

            // hf.u = line_ptr[u];
            // use `cpy` to avoid unaligned memory access when compiler's
            // optimization is on.
            tinyexr::cpy2(&(hf.u), line_ptr + u);

            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

            if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
              unsigned short *image =
                  reinterpret_cast<unsigned short **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                // Bottom-up scanline order.
                image += static_cast<size_t>(
                             (height - 1 - (line_no + static_cast<int>(v)))) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = hf.u;
            } else {  // HALF -> FLOAT
              FP32 f32 = half_to_float(hf);
              float *image = reinterpret_cast<float **>(out_images)[c];
              size_t offset = 0;
              if (line_order == 0) {
                offset = (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                offset = static_cast<size_t>(
                             (height - 1 - (line_no + static_cast<int>(v)))) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              image += offset;
              *image = f32.f;
            }
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);

        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            unsigned int val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(&val);

            unsigned int *image =
                reinterpret_cast<unsigned int **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += static_cast<size_t>(
                           (height - 1 - (line_no + static_cast<int>(v)))) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          // NOTE(review): this branch indexes the decode buffer with
          // `x_stride` where the HALF/UINT branches use `width` — confirm
          // this is intentional for the PIZ FLOAT path.
          const float *line_ptr = reinterpret_cast<float *>(&outBuf.at(
              v * pixel_data_size * static_cast<size_t>(x_stride) +
              channel_offset_list[c] * static_cast<size_t>(x_stride)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += static_cast<size_t>(
                           (height - 1 - (line_no + static_cast<int>(v)))) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
      }
    }
#else
    assert(0 && "PIZ is enabled in this build");
    return false;
#endif

  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS ||
             compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);

    unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
    assert(dstLen > 0);
    if (!tinyexr::DecompressZip(
            reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr,
            static_cast<unsigned long>(data_len))) {
      return false;
    }

    // For ZIP_COMPRESSION:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   pixel sample data for channel 1 for scanline 1
    //   pixel sample data for channel ... for scanline 1
    //   pixel sample data for channel n for scanline 1
    //   ...
    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &outBuf.at(v * static_cast<size_t>(pixel_data_size) *
                             static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            tinyexr::FP16 hf;

            // hf.u = line_ptr[u];
            tinyexr::cpy2(&(hf.u), line_ptr + u);

            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

            if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
              unsigned short *image =
                  reinterpret_cast<unsigned short **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = hf.u;
            } else {  // HALF -> FLOAT
              tinyexr::FP32 f32 = half_to_float(hf);
              float *image = reinterpret_cast<float **>(out_images)[c];
              size_t offset = 0;
              if (line_order == 0) {
                offset = (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                offset = (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              image += offset;
              *image = f32.f;
            }
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);

        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            unsigned int val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(&val);

            unsigned int *image =
                reinterpret_cast<unsigned int **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const float *line_ptr = reinterpret_cast<float *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
        return false;
      }
    }
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);

    unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
    if (dstLen == 0) {
      return false;
    }

    if (!tinyexr::DecompressRle(
            reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr,
            static_cast<unsigned long>(data_len))) {
      return false;
    }

    // For RLE_COMPRESSION:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   pixel sample data for channel 1 for scanline 1
    //   pixel sample data for channel ... for scanline 1
    //   pixel sample data for channel n for scanline 1
    //   ...
    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &outBuf.at(v * static_cast<size_t>(pixel_data_size) *
                             static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            tinyexr::FP16 hf;

            // hf.u = line_ptr[u];
            tinyexr::cpy2(&(hf.u), line_ptr + u);

            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

            if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
              unsigned short *image =
                  reinterpret_cast<unsigned short **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = hf.u;
            } else {  // HALF -> FLOAT
              tinyexr::FP32 f32 = half_to_float(hf);
              float *image = reinterpret_cast<float **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = f32.f;
            }
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);

        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            unsigned int val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(&val);

            unsigned int *image =
                reinterpret_cast<unsigned int **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const float *line_ptr = reinterpret_cast<float *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
        return false;
      }
    }
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
    tinyexr::ZFPCompressionParam zfp_compression_param;
    std::string e;
    if (!tinyexr::FindZFPCompressionParam(&zfp_compression_param, attributes,
                                          int(num_attributes), &e)) {
      // This code path should not be reachable.
      assert(0);
      return false;
    }

    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);

    unsigned long dstLen = outBuf.size();
    assert(dstLen > 0);
    tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width,
                           num_lines, num_channels, data_ptr,
                           static_cast<unsigned long>(data_len),
                           zfp_compression_param);

    // For ZFP_COMPRESSION:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   pixel sample data for channel 1 for scanline 1
    //   pixel sample data for channel ... for scanline 1
    //   pixel sample data for channel n for scanline 1
    //   ...
    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      // ZFP chunks are FLOAT-only in this implementation.
      assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT);
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const float *line_ptr = reinterpret_cast<float *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
        return false;
      }
    }
#else
    (void)attributes;
    (void)num_attributes;
    (void)num_channels;
    assert(0);
    return false;
#endif
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
    // Uncompressed: read channel-interleaved scanlines straight from
    // data_ptr, with bounds checks against data_len.
    for (size_t c = 0; c < num_channels; c++) {
      for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
        if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
          const unsigned short *line_ptr =
              reinterpret_cast<const unsigned short *>(
                  data_ptr + v * pixel_data_size * size_t(width) +
                  channel_offset_list[c] * static_cast<size_t>(width));

          if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
            unsigned short *outLine =
                reinterpret_cast<unsigned short *>(out_images[c]);
            if (line_order == 0) {
              outLine += (size_t(y) + v) * size_t(x_stride);
            } else {
              outLine +=
                  (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
            }

            for (int u = 0; u < width; u++) {
              tinyexr::FP16 hf;

              // hf.u = line_ptr[u];
              tinyexr::cpy2(&(hf.u), line_ptr + u);

              tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

              outLine[u] = hf.u;
            }
          } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
            float *outLine = reinterpret_cast<float *>(out_images[c]);
            if (line_order == 0) {
              outLine += (size_t(y) + v) * size_t(x_stride);
            } else {
              outLine +=
                  (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
            }

            if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
                (data_ptr + data_len)) {
              // Insufficient data size
              return false;
            }

            for (int u = 0; u < width; u++) {
              tinyexr::FP16 hf;

              // address may not be aligned. use byte-wise copy for safety. #76
              // hf.u = line_ptr[u];
              tinyexr::cpy2(&(hf.u), line_ptr + u);

              tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

              tinyexr::FP32 f32 = half_to_float(hf);

              outLine[u] = f32.f;
            }
          } else {
            assert(0);
            return false;
          }
        } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
          const float *line_ptr = reinterpret_cast<const float *>(
              data_ptr + v * pixel_data_size * size_t(width) +
              channel_offset_list[c] * static_cast<size_t>(width));

          float *outLine = reinterpret_cast<float *>(out_images[c]);
          if (line_order == 0) {
            outLine += (size_t(y) + v) * size_t(x_stride);
          } else {
            outLine +=
                (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
          }

          if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
              (data_ptr + data_len)) {
            // Insufficient data size
            return false;
          }

          for (int u = 0; u < width; u++) {
            float val;
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            outLine[u] = val;
          }
        } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
          const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
              data_ptr + v * pixel_data_size * size_t(width) +
              channel_offset_list[c] * static_cast<size_t>(width));

          unsigned int *outLine =
              reinterpret_cast<unsigned int *>(out_images[c]);
          if (line_order == 0) {
            outLine += (size_t(y) + v) * size_t(x_stride);
          } else {
            outLine +=
                (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
          }

          for (int u = 0; u < width; u++) {
            // Per-sample bounds check (truncated input protection).
            if (reinterpret_cast<const unsigned char *>(line_ptr + u) >=
                (data_ptr + data_len)) {
              // Corrupted data?
              return false;
            }

            unsigned int val;
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            outLine[u] = val;
          }
        }
      }
    }
  }

  return true;
}

// Decodes one tile: clamps the tile extent against the (sub)level
// dimensions, writes the effective tile width/height to *width / *height,
// then forwards to DecodePixelData with tile-local stride/offsets.
static bool DecodeTiledPixelData(
    unsigned char **out_images, int *width, int *height,
    const int *requested_pixel_types, const unsigned char *data_ptr,
    size_t data_len, int compression_type, int line_order, int data_width,
    int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
    int tile_size_y, size_t pixel_data_size, size_t num_attributes,
    const EXRAttribute *attributes, size_t num_channels,
    const EXRChannelInfo *channels,
    const std::vector<size_t> &channel_offset_list) {
  // Here, data_width and data_height are the dimensions of the current
  // (sub)level.
  if (tile_size_x * tile_offset_x > data_width ||
      tile_size_y * tile_offset_y > data_height) {
    return false;
  }

  // Compute actual image size in a tile.
  if ((tile_offset_x + 1) * tile_size_x >= data_width) {
    (*width) = data_width - (tile_offset_x * tile_size_x);
  } else {
    (*width) = tile_size_x;
  }

  if ((tile_offset_y + 1) * tile_size_y >= data_height) {
    (*height) = data_height - (tile_offset_y * tile_size_y);
  } else {
    (*height) = tile_size_y;
  }

  // Image size = tile size.
// (tail of DecodeTiledPixelData)
  return DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len,
                         compression_type, line_order, (*width), tile_size_y,
                         /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0,
                         (*height), pixel_data_size, num_attributes, attributes,
                         num_channels, channels, channel_offset_list);
}

// Computes the byte offset of each channel within an interleaved pixel and
// the total per-pixel byte size. Returns false on an unknown pixel type.
static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list,
                                 int *pixel_data_size, size_t *channel_offset,
                                 int num_channels,
                                 const EXRChannelInfo *channels) {
  channel_offset_list->resize(static_cast<size_t>(num_channels));

  (*pixel_data_size) = 0;
  (*channel_offset) = 0;

  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
    (*channel_offset_list)[c] = (*channel_offset);
    if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      (*pixel_data_size) += sizeof(unsigned short);
      (*channel_offset) += sizeof(unsigned short);
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
      (*pixel_data_size) += sizeof(float);
      (*channel_offset) += sizeof(float);
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
      (*pixel_data_size) += sizeof(unsigned int);
      (*channel_offset) += sizeof(unsigned int);
    } else {
      // ??? (unknown pixel type)
      return false;
    }
  }
  return true;
}

// Allocates one planar image buffer per channel (malloc'd; caller frees).
// HALF channels are allocated as float when the caller requested FLOAT
// output. NOTE(review): malloc results are not NULL-checked here.
static unsigned char **AllocateImage(int num_channels,
                                     const EXRChannelInfo *channels,
                                     const int *requested_pixel_types,
                                     int data_width, int data_height) {
  unsigned char **images =
      reinterpret_cast<unsigned char **>(static_cast<float **>(
          malloc(sizeof(float *) * static_cast<size_t>(num_channels))));

  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
    size_t data_len =
        static_cast<size_t>(data_width) * static_cast<size_t>(data_height);
    if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      // pixel_data_size += sizeof(unsigned short);
      // channel_offset += sizeof(unsigned short);
      // Alloc internal image for half type.
      if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
        images[c] =
            reinterpret_cast<unsigned char *>(static_cast<unsigned short *>(
                malloc(sizeof(unsigned short) * data_len)));
      } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
        images[c] = reinterpret_cast<unsigned char *>(
            static_cast<float *>(malloc(sizeof(float) * data_len)));
      } else {
        assert(0);
      }
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
      // pixel_data_size += sizeof(float);
      // channel_offset += sizeof(float);
      images[c] = reinterpret_cast<unsigned char *>(
          static_cast<float *>(malloc(sizeof(float) * data_len)));
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
      // pixel_data_size += sizeof(unsigned int);
      // channel_offset += sizeof(unsigned int);
      images[c] = reinterpret_cast<unsigned char *>(
          static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len)));
    } else {
      assert(0);
    }
  }

  return images;
}

#ifdef _WIN32
// Converts a UTF-8 std::string to a UTF-16 std::wstring (for Win32 W APIs).
static inline std::wstring UTF8ToWchar(const std::string &str) {
  int wstr_size =
      MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), NULL, 0);
  std::wstring wstr(wstr_size, 0);
  MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), &wstr[0],
                      (int)wstr.size());
  return wstr;
}
#endif

// Parses one EXR part header from `buf` into *info. For multipart files an
// immediate '\0' marks the end of the header list and sets *empty_header.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code (appending to *err).
static int ParseEXRHeader(HeaderInfo *info, bool *empty_header,
                          const EXRVersion *version, std::string *err,
                          const unsigned char *buf, size_t size) {
  const char *marker = reinterpret_cast<const char *>(&buf[0]);

  if (empty_header) {
    (*empty_header) = false;
  }

  if (version->multipart) {
    if (size > 0 && marker[0] == '\0') {
      // End of header list.
// (continuation of ParseEXRHeader: inside the multipart end-of-header check.)
      if (empty_header) {
        (*empty_header) = true;
      }
      return TINYEXR_SUCCESS;
    }
  }

  // According to the spec, the header of every OpenEXR file must contain at
  // least the following attributes:
  //
  // channels chlist
  // compression compression
  // dataWindow box2i
  // displayWindow box2i
  // lineOrder lineOrder
  // pixelAspectRatio float
  // screenWindowCenter v2f
  // screenWindowWidth float
  bool has_channels = false;
  bool has_compression = false;
  bool has_data_window = false;
  bool has_display_window = false;
  bool has_line_order = false;
  bool has_pixel_aspect_ratio = false;
  bool has_screen_window_center = false;
  bool has_screen_window_width = false;
  bool has_name = false;
  bool has_type = false;

  // Reset all output fields before parsing.
  info->name.clear();
  info->type.clear();

  info->data_window.min_x = 0;
  info->data_window.min_y = 0;
  info->data_window.max_x = 0;
  info->data_window.max_y = 0;
  info->line_order = 0;  // @fixme
  info->display_window.min_x = 0;
  info->display_window.min_y = 0;
  info->display_window.max_x = 0;
  info->display_window.max_y = 0;
  info->screen_window_center[0] = 0.0f;
  info->screen_window_center[1] = 0.0f;
  info->screen_window_width = -1.0f;
  info->pixel_aspect_ratio = -1.0f;

  info->tiled = 0;
  info->tile_size_x = -1;
  info->tile_size_y = -1;
  info->tile_level_mode = -1;
  info->tile_rounding_mode = -1;

  info->attributes.clear();

  // Read attributes
  size_t orig_size = size;
  // Bounded loop guards against malformed headers that never terminate.
  for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) {
    if (0 == size) {
      if (err) {
        (*err) += "Insufficient data size for attributes.\n";
      }
      return TINYEXR_ERROR_INVALID_DATA;
    } else if (marker[0] == '\0') {
      // '\0' terminates the attribute list.
      size--;
      break;
    }

    std::string attr_name;
    std::string attr_type;
    std::vector<unsigned char> data;
    size_t marker_size;
    if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
                                marker, size)) {
      if (err) {
        (*err) += "Failed to read attribute.\n";
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }
    marker += marker_size;
    size -= marker_size;

    // For a multipart file, the version field 9th bit is 0.
    if ((version->tiled || version->multipart || version->non_image) &&
        attr_name.compare("tiles") == 0) {
      unsigned int x_size, y_size;
      unsigned char tile_mode;
      assert(data.size() == 9);
      memcpy(&x_size, &data.at(0), sizeof(int));
      memcpy(&y_size, &data.at(4), sizeof(int));
      tile_mode = data[8];
      tinyexr::swap4(&x_size);
      tinyexr::swap4(&y_size);

      if (x_size > static_cast<unsigned int>(std::numeric_limits<int>::max()) ||
          y_size > static_cast<unsigned int>(std::numeric_limits<int>::max())) {
        if (err) {
          (*err) = "Tile sizes were invalid.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
      }

      info->tile_size_x = static_cast<int>(x_size);
      info->tile_size_y = static_cast<int>(y_size);

      // mode = levelMode + roundingMode * 16
      info->tile_level_mode = tile_mode & 0x3;
      info->tile_rounding_mode = (tile_mode >> 4) & 0x1;
      info->tiled = 1;
    } else if (attr_name.compare("compression") == 0) {
      bool ok = false;
      if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) {
        ok = true;
      }

      if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
        ok = true;
#else
        if (err) {
          (*err) = "PIZ compression is not supported.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
      }

      if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
        ok = true;
#else
        if (err) {
          (*err) = "ZFP compression is not supported.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
      }

      if (!ok) {
        if (err) {
          (*err) = "Unknown compression type.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
      }

      info->compression_type = static_cast<int>(data[0]);
      has_compression = true;

    } else if (attr_name.compare("channels") == 0) {
      // name: zero-terminated string, from 1 to 255 bytes long
      // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
      // pLinear: unsigned char, possible values are 0 and 1
      // reserved: three chars, should be zero
      // xSampling: int
      // ySampling: int

      if (!ReadChannelInfo(info->channels, data)) {
        if (err) {
          (*err) += "Failed to parse channel info.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      if (info->channels.size() < 1) {
        if (err) {
          (*err) += "# of channels is zero.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      has_channels = true;

    } else if (attr_name.compare("dataWindow") == 0) {
      if (data.size() >= 16) {
        memcpy(&info->data_window.min_x, &data.at(0), sizeof(int));
        memcpy(&info->data_window.min_y, &data.at(4), sizeof(int));
        memcpy(&info->data_window.max_x, &data.at(8), sizeof(int));
        memcpy(&info->data_window.max_y, &data.at(12), sizeof(int));
        tinyexr::swap4(&info->data_window.min_x);
        tinyexr::swap4(&info->data_window.min_y);
        tinyexr::swap4(&info->data_window.max_x);
        tinyexr::swap4(&info->data_window.max_y);
        has_data_window = true;
      }
    } else if (attr_name.compare("displayWindow") == 0) {
      if (data.size() >= 16) {
        memcpy(&info->display_window.min_x, &data.at(0), sizeof(int));
        memcpy(&info->display_window.min_y, &data.at(4), sizeof(int));
        memcpy(&info->display_window.max_x, &data.at(8), sizeof(int));
        memcpy(&info->display_window.max_y, &data.at(12), sizeof(int));
        tinyexr::swap4(&info->display_window.min_x);
        tinyexr::swap4(&info->display_window.min_y);
        tinyexr::swap4(&info->display_window.max_x);
        tinyexr::swap4(&info->display_window.max_y);

        has_display_window = true;
      }
    } else if (attr_name.compare("lineOrder") == 0) {
      if (data.size() >= 1) {
        info->line_order = static_cast<int>(data[0]);
        has_line_order = true;
      }
    } else if (attr_name.compare("pixelAspectRatio") == 0) {
      if (data.size() >= sizeof(float)) {
        memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float));
        tinyexr::swap4(&info->pixel_aspect_ratio);
        has_pixel_aspect_ratio = true;
      }
    } else if (attr_name.compare("screenWindowCenter") == 0) {
      if (data.size() >= 8) {
        memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float));
        memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float));
        tinyexr::swap4(&info->screen_window_center[0]);
        tinyexr::swap4(&info->screen_window_center[1]);
        has_screen_window_center = true;
      }
    } else if (attr_name.compare("screenWindowWidth") == 0) {
      if (data.size() >= sizeof(float)) {
        memcpy(&info->screen_window_width, &data.at(0), sizeof(float));
        tinyexr::swap4(&info->screen_window_width);

        has_screen_window_width = true;
      }
    } else if (attr_name.compare("chunkCount") == 0) {
      if (data.size() >= sizeof(int)) {
        memcpy(&info->chunk_count, &data.at(0), sizeof(int));
        tinyexr::swap4(&info->chunk_count);
      }
    } else if (attr_name.compare("name") == 0) {
      if (!data.empty() && data[0]) {
        // Ensure NUL termination before treating the payload as a C string.
        data.push_back(0);
        size_t len = strlen(reinterpret_cast<const char *>(&data[0]));
        info->name.resize(len);
        info->name.assign(reinterpret_cast<const char *>(&data[0]), len);
        has_name = true;
      }
    } else if (attr_name.compare("type") == 0) {
      if (!data.empty() && data[0]) {
        data.push_back(0);
        size_t len = strlen(reinterpret_cast<const char *>(&data[0]));
        info->type.resize(len);
        info->type.assign(reinterpret_cast<const char *>(&data[0]), len);
        has_type = true;
      }
    } else {
      // Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES)
      if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
        EXRAttribute attrib;
#ifdef _MSC_VER
        strncpy_s(attrib.name, attr_name.c_str(), 255);
        strncpy_s(attrib.type, attr_type.c_str(), 255);
#else
        strncpy(attrib.name, attr_name.c_str(), 255);
        strncpy(attrib.type, attr_type.c_str(), 255);
#endif
        attrib.name[255] = '\0';
        attrib.type[255] = '\0';
        attrib.size = static_cast<int>(data.size());
        // Attribute value is heap-copied; ownership passes to info.
        attrib.value = static_cast<unsigned char *>(malloc(data.size()));
        memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0),
               data.size());
        info->attributes.push_back(attrib);
      }
    }
  }

  // Check if required attributes exist
  {
    std::stringstream ss_err;

    if (!has_compression) {
      ss_err << "\"compression\" attribute not found in the header."
             << std::endl;
    }

    if (!has_channels) {
      ss_err << "\"channels\" attribute not found in the header." << std::endl;
    }

    if (!has_line_order) {
      ss_err << "\"lineOrder\" attribute not found in the header." << std::endl;
    }

    if (!has_display_window) {
      ss_err << "\"displayWindow\" attribute not found in the header."
             << std::endl;
    }

    if (!has_data_window) {
      ss_err << "\"dataWindow\" attribute not found in the header or invalid."
             << std::endl;
    }

    if (!has_pixel_aspect_ratio) {
      ss_err << "\"pixelAspectRatio\" attribute not found in the header."
             << std::endl;
    }

    if (!has_screen_window_width) {
      ss_err << "\"screenWindowWidth\" attribute not found in the header."
             << std::endl;
    }

    if (!has_screen_window_center) {
      ss_err << "\"screenWindowCenter\" attribute not found in the header."
             << std::endl;
    }

    // Multipart / deep parts additionally require `name` and `type`.
    if (version->multipart || version->non_image) {
      if (!has_name) {
        ss_err << "\"name\" attribute not found in the header." << std::endl;
      }
      if (!has_type) {
        ss_err << "\"type\" attribute not found in the header." << std::endl;
      }
    }

    if (!(ss_err.str().empty())) {
      if (err) {
        (*err) += ss_err.str();
      }
      return TINYEXR_ERROR_INVALID_HEADER;
    }
  }

  // Number of header bytes consumed (attribute list + terminator).
  info->header_len = static_cast<unsigned int>(orig_size - size);

  return TINYEXR_SUCCESS;
}

// C++ HeaderInfo to C EXRHeader conversion.
static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) {
  exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio;
  exr_header->screen_window_center[0] = info.screen_window_center[0];
  exr_header->screen_window_center[1] = info.screen_window_center[1];
  exr_header->screen_window_width = info.screen_window_width;
  exr_header->chunk_count = info.chunk_count;
  exr_header->display_window.min_x = info.display_window.min_x;
  exr_header->display_window.min_y = info.display_window.min_y;
  exr_header->display_window.max_x = info.display_window.max_x;
  exr_header->display_window.max_y = info.display_window.max_y;
  exr_header->data_window.min_x = info.data_window.min_x;
  exr_header->data_window.min_y = info.data_window.min_y;
  exr_header->data_window.max_x = info.data_window.max_x;
  exr_header->data_window.max_y = info.data_window.max_y;
  exr_header->line_order = info.line_order;
  exr_header->compression_type = info.compression_type;
  exr_header->tiled = info.tiled;
  exr_header->tile_size_x = info.tile_size_x;
  exr_header->tile_size_y =
info.tile_size_y; exr_header->tile_level_mode = info.tile_level_mode; exr_header->tile_rounding_mode = info.tile_rounding_mode; EXRSetNameAttr(exr_header, info.name.c_str()); if (!info.type.empty()) { if (info.type == "scanlineimage") { assert(!exr_header->tiled); } else if (info.type == "tiledimage") { assert(exr_header->tiled); } else if (info.type == "deeptile") { exr_header->non_image = 1; assert(exr_header->tiled); } else if (info.type == "deepscanline") { exr_header->non_image = 1; assert(!exr_header->tiled); } else { assert(false); } } exr_header->num_channels = static_cast<int>(info.channels.size()); exr_header->channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { #ifdef _MSC_VER strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #else strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #endif // manually add '\0' for safety. 
exr_header->channels[c].name[255] = '\0'; exr_header->channels[c].pixel_type = info.channels[c].pixel_type; exr_header->channels[c].p_linear = info.channels[c].p_linear; exr_header->channels[c].x_sampling = info.channels[c].x_sampling; exr_header->channels[c].y_sampling = info.channels[c].y_sampling; } exr_header->pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { exr_header->pixel_types[c] = info.channels[c].pixel_type; } // Initially fill with values of `pixel_types` exr_header->requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { exr_header->requested_pixel_types[c] = info.channels[c].pixel_type; } exr_header->num_custom_attributes = static_cast<int>(info.attributes.size()); if (exr_header->num_custom_attributes > 0) { // TODO(syoyo): Report warning when # of attributes exceeds // `TINYEXR_MAX_CUSTOM_ATTRIBUTES` if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) { exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES; } exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc( sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes))); for (size_t i = 0; i < info.attributes.size(); i++) { memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name, 256); memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type, 256); exr_header->custom_attributes[i].size = info.attributes[i].size; // Just copy pointer exr_header->custom_attributes[i].value = info.attributes[i].value; } } else { exr_header->custom_attributes = NULL; } exr_header->header_len = info.header_len; } struct OffsetData { OffsetData() : num_x_levels(0), num_y_levels(0) {} std::vector<std::vector<std::vector <tinyexr::tinyexr_uint64> > > offsets; int num_x_levels; int 
num_y_levels; }; int LevelIndex(int lx, int ly, int tile_level_mode, int num_x_levels) { switch (tile_level_mode) { case TINYEXR_TILE_ONE_LEVEL: return 0; case TINYEXR_TILE_MIPMAP_LEVELS: return lx; case TINYEXR_TILE_RIPMAP_LEVELS: return lx + ly * num_x_levels; default: assert(false); } return 0; } static int LevelSize(int toplevel_size, int level, int tile_rounding_mode) { assert(level >= 0); int b = (int)(1u << (unsigned)level); int level_size = toplevel_size / b; if (tile_rounding_mode == TINYEXR_TILE_ROUND_UP && level_size * b < toplevel_size) level_size += 1; return std::max(level_size, 1); } static int DecodeTiledLevel(EXRImage* exr_image, const EXRHeader* exr_header, const OffsetData& offset_data, const std::vector<size_t>& channel_offset_list, int pixel_data_size, const unsigned char* head, const size_t size, std::string* err) { int num_channels = exr_header->num_channels; int level_index = LevelIndex(exr_image->level_x, exr_image->level_y, exr_header->tile_level_mode, offset_data.num_x_levels); int num_y_tiles = (int)offset_data.offsets[level_index].size(); assert(num_y_tiles); int num_x_tiles = (int)offset_data.offsets[level_index][0].size(); assert(num_x_tiles); int num_tiles = num_x_tiles * num_y_tiles; int err_code = TINYEXR_SUCCESS; enum { EF_SUCCESS = 0, EF_INVALID_DATA = 1, EF_INSUFFICIENT_DATA = 2, EF_FAILED_TO_DECODE = 4 }; #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) std::atomic<unsigned> error_flag(EF_SUCCESS); #else unsigned error_flag(EF_SUCCESS); #endif // Although the spec says : "...the data window is subdivided into an array of smaller rectangles...", // the IlmImf library allows the dimensions of the tile to be larger (or equal) than the dimensions of the data window. 
// Decode every tile of a single mip/rip level into exr_image->tiles.
// Tiles are decoded independently (threaded or OpenMP when available);
// per-tile failures are OR-ed into a shared error flag and reported once.
// Returns TINYEXR_SUCCESS or TINYEXR_ERROR_INVALID_DATA.
static int DecodeTiledLevel(EXRImage* exr_image, const EXRHeader* exr_header,
  const OffsetData& offset_data,
  const std::vector<size_t>& channel_offset_list,
  int pixel_data_size,
  const unsigned char* head, const size_t size,
  std::string* err) {
  int num_channels = exr_header->num_channels;

  int level_index = LevelIndex(exr_image->level_x, exr_image->level_y, exr_header->tile_level_mode, offset_data.num_x_levels);
  int num_y_tiles = (int)offset_data.offsets[level_index].size();
  assert(num_y_tiles);
  int num_x_tiles = (int)offset_data.offsets[level_index][0].size();
  assert(num_x_tiles);
  int num_tiles = num_x_tiles * num_y_tiles;

  int err_code = TINYEXR_SUCCESS;

  // Bit flags so concurrent workers can record distinct failure causes
  // without locking.
  enum {
    EF_SUCCESS = 0,
    EF_INVALID_DATA = 1,
    EF_INSUFFICIENT_DATA = 2,
    EF_FAILED_TO_DECODE = 4
  };
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  std::atomic<unsigned> error_flag(EF_SUCCESS);
#else
  unsigned error_flag(EF_SUCCESS);
#endif

  // Although the spec says : "...the data window is subdivided into an array of smaller rectangles...",
  // the IlmImf library allows the dimensions of the tile to be larger (or equal) than the dimensions of the data window.
#if 0
  if ((exr_header->tile_size_x > exr_image->width || exr_header->tile_size_y > exr_image->height) &&
    exr_image->level_x == 0 && exr_image->level_y == 0) {
    if (err) {
      (*err) += "Failed to decode tile data.\n";
    }
    err_code = TINYEXR_ERROR_INVALID_DATA;
  }
#endif
  // NOTE(review): calloc's (size, count) argument order is swapped from the
  // conventional (count, size); harmless, the product is the same.
  exr_image->tiles = static_cast<EXRTile*>(
    calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles)));

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  std::vector<std::thread> workers;
  std::atomic<int> tile_count(0);

  int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
  if (num_threads > int(num_tiles)) {
    num_threads = int(num_tiles);
  }

  // Work-stealing over a shared atomic tile counter.
  for (int t = 0; t < num_threads; t++) {
    workers.emplace_back(std::thread([&]() {
      int tile_idx = 0;
      while ((tile_idx = tile_count++) < num_tiles) {

#else
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
  for (int tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
#endif
    // Allocate memory for each tile.
    exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
      num_channels, exr_header->channels,
      exr_header->requested_pixel_types, exr_header->tile_size_x,
      exr_header->tile_size_y);

    int x_tile = tile_idx % num_x_tiles;
    int y_tile = tile_idx / num_x_tiles;
    // Tile chunk layout in the file:
    // 16 byte: tile coordinates
    // 4 byte : data size
    // ~      : data(uncompressed or compressed)
    tinyexr::tinyexr_uint64 offset = offset_data.offsets[level_index][y_tile][x_tile];
    if (offset + sizeof(int) * 5 > size) {
      // Insufficient data size.
      error_flag |= EF_INSUFFICIENT_DATA;
      continue;
    }

    size_t data_size = size_t(size - (offset + sizeof(int) * 5));
    const unsigned char* data_ptr = reinterpret_cast<const unsigned char*>(head + offset);

    int tile_coordinates[4];
    memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
    tinyexr::swap4(&tile_coordinates[0]);
    tinyexr::swap4(&tile_coordinates[1]);
    tinyexr::swap4(&tile_coordinates[2]);
    tinyexr::swap4(&tile_coordinates[3]);

    // The stored level must match the level being decoded.
    if (tile_coordinates[2] != exr_image->level_x) {
      // Invalid data.
      error_flag |= EF_INVALID_DATA;
      continue;
    }
    if (tile_coordinates[3] != exr_image->level_y) {
      // Invalid data.
      error_flag |= EF_INVALID_DATA;
      continue;
    }

    int data_len;
    memcpy(&data_len, data_ptr + 16, sizeof(int));  // 16 = sizeof(tile_coordinates)
    tinyexr::swap4(&data_len);
    if (data_len < 2 || size_t(data_len) > data_size) {
      // Insufficient data size.
      error_flag |= EF_INSUFFICIENT_DATA;
      continue;
    }

    // Move to data addr: 20 = 16 + 4;
    data_ptr += 20;
    bool ret = tinyexr::DecodeTiledPixelData(
      exr_image->tiles[tile_idx].images,
      &(exr_image->tiles[tile_idx].width),
      &(exr_image->tiles[tile_idx].height),
      exr_header->requested_pixel_types, data_ptr,
      static_cast<size_t>(data_len), exr_header->compression_type,
      exr_header->line_order,
      exr_image->width, exr_image->height,
      tile_coordinates[0], tile_coordinates[1],
      exr_header->tile_size_x, exr_header->tile_size_y,
      static_cast<size_t>(pixel_data_size),
      static_cast<size_t>(exr_header->num_custom_attributes),
      exr_header->custom_attributes,
      static_cast<size_t>(exr_header->num_channels),
      exr_header->channels,
      channel_offset_list);
    if (!ret) {
      // Failed to decode tile data.
      error_flag |= EF_FAILED_TO_DECODE;
    }
    exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
    exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
    exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
    exr_image->tiles[tile_idx].level_y = tile_coordinates[3];

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
      }
    }));
  }  // num_thread loop

  for (auto& t : workers) {
    t.join();
  }

#else
  }  // parallel for
#endif

  // Even in the event of an error, the reserved memory may be freed.
  exr_image->num_channels = num_channels;
  exr_image->num_tiles = static_cast<int>(num_tiles);

  if (error_flag) err_code = TINYEXR_ERROR_INVALID_DATA;
  if (err) {
    // NOTE(review): EF_INVALID_DATA has no dedicated message; only the
    // insufficient-data and decode-failure causes are reported in `err`.
    if (error_flag & EF_INSUFFICIENT_DATA) {
      (*err) += "Insufficient data length.\n";
    }
    if (error_flag & EF_FAILED_TO_DECODE) {
      (*err) += "Failed to decode tile data.\n";
    }
  }
  return err_code;
}
// Decode all pixel chunks of one EXR part into `exr_image`.
// Tiled parts are decoded level-by-level via DecodeTiledLevel(); scanline
// parts decode `num_blocks` compressed scanline chunks (threaded/OpenMP
// when available). Validates window/tile dimensions against
// TINYEXR_DIMENSION_THRESHOLD before allocating.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code, appending to `err`.
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
                       const OffsetData& offset_data,
                       const unsigned char *head, const size_t size,
                       std::string *err) {
  int num_channels = exr_header->num_channels;

  // Scanlines per chunk depends on the compression scheme.
  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;

#if TINYEXR_USE_ZFP
    tinyexr::ZFPCompressionParam zfp_compression_param;
    if (!FindZFPCompressionParam(&zfp_compression_param,
                                 exr_header->custom_attributes,
                                 int(exr_header->num_custom_attributes), err)) {
      return TINYEXR_ERROR_INVALID_HEADER;
    }
#endif
  }

  if (exr_header->data_window.max_x < exr_header->data_window.min_x ||
      exr_header->data_window.max_y < exr_header->data_window.min_y) {
    if (err) {
      (*err) += "Invalid data window.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  int data_width =
      exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
  int data_height =
      exr_header->data_window.max_y - exr_header->data_window.min_y + 1;

  // Do not allow too large data_width and data_height. header invalid?
  {
    if ((data_width > TINYEXR_DIMENSION_THRESHOLD) ||
        (data_height > TINYEXR_DIMENSION_THRESHOLD)) {
      if (err) {
        std::stringstream ss;
        ss << "data_with or data_height too large. data_width: " << data_width
           << ", "
           << "data_height = " << data_height << std::endl;
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (exr_header->tiled) {
      if ((exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) ||
          (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD)) {
        if (err) {
          std::stringstream ss;
          ss << "tile with or tile height too large. tile width: "
             << exr_header->tile_size_x << ", "
             << "tile height = " << exr_header->tile_size_y << std::endl;
          (*err) += ss.str();
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
    }
  }

  // For the scanline path only level (0,0) of the offset table is used.
  const std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
  size_t num_blocks = offsets.size();

  std::vector<size_t> channel_offset_list;
  int pixel_data_size = 0;
  size_t channel_offset = 0;
  if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
                                     &channel_offset, num_channels,
                                     exr_header->channels)) {
    if (err) {
      (*err) += "Failed to compute channel layout.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  std::atomic<bool> invalid_data(false);
#else
  bool invalid_data(false);
#endif

  if (exr_header->tiled) {
    // value check
    if (exr_header->tile_size_x < 0) {
      if (err) {
        std::stringstream ss;
        ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n";
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_HEADER;
    }

    if (exr_header->tile_size_y < 0) {
      if (err) {
        std::stringstream ss;
        ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n";
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_HEADER;
    }
    if (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) {
      // One-level and mipmap parts: square level ladder, linked as a
      // next_level chain starting at exr_image itself.
      EXRImage* level_image = NULL;
      for (int level = 0; level < offset_data.num_x_levels; ++level) {
        if (!level_image) {
          level_image = exr_image;
        } else {
          level_image->next_level = new EXRImage;
          InitEXRImage(level_image->next_level);
          level_image = level_image->next_level;
        }
        level_image->width =
          LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level, exr_header->tile_rounding_mode);
        level_image->height =
          LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level, exr_header->tile_rounding_mode);
        level_image->level_x = level;
        level_image->level_y = level;

        int ret = DecodeTiledLevel(level_image, exr_header,
          offset_data,
          channel_offset_list,
          pixel_data_size,
          head, size,
          err);
        if (ret != TINYEXR_SUCCESS) return ret;
      }
    } else {
      // Ripmap parts: full (level_x, level_y) grid of levels.
      EXRImage* level_image = NULL;
      for (int level_y = 0; level_y < offset_data.num_y_levels; ++level_y)
        for (int level_x = 0; level_x < offset_data.num_x_levels; ++level_x) {
          if (!level_image) {
            level_image = exr_image;
          } else {
            level_image->next_level = new EXRImage;
            InitEXRImage(level_image->next_level);
            level_image = level_image->next_level;
          }
          level_image->width =
            LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level_x, exr_header->tile_rounding_mode);
          level_image->height =
            LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level_y, exr_header->tile_rounding_mode);
          level_image->level_x = level_x;
          level_image->level_y = level_y;

          int ret = DecodeTiledLevel(level_image, exr_header,
            offset_data,
            channel_offset_list,
            pixel_data_size,
            head, size,
            err);
          if (ret != TINYEXR_SUCCESS) return ret;
        }
    }
  } else {  // scanline format
    // Don't allow too large image(256GB * pixel_data_size or more). Workaround
    // for #104.
    size_t total_data_len =
        size_t(data_width) * size_t(data_height) * size_t(num_channels);
    const bool total_data_len_overflown =
        sizeof(void *) == 8 ? (total_data_len >= 0x4000000000) : false;
    if ((total_data_len == 0) || total_data_len_overflown) {
      if (err) {
        std::stringstream ss;
        ss << "Image data size is zero or too large: width = " << data_width
           << ", height = " << data_height << ", channels = " << num_channels
           << std::endl;
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }

    exr_image->images = tinyexr::AllocateImage(
        num_channels, exr_header->channels, exr_header->requested_pixel_types,
        data_width, data_height);

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
    std::vector<std::thread> workers;
    std::atomic<int> y_count(0);

    int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
    if (num_threads > int(num_blocks)) {
      num_threads = int(num_blocks);
    }

    // Work-stealing over a shared atomic block counter.
    for (int t = 0; t < num_threads; t++) {
      workers.emplace_back(std::thread([&]() {
        int y = 0;
        while ((y = y_count++) < int(num_blocks)) {

#else

#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
    for (int y = 0; y < static_cast<int>(num_blocks); y++) {

#endif
          size_t y_idx = static_cast<size_t>(y);

          if (offsets[y_idx] + sizeof(int) * 2 > size) {
            invalid_data = true;
          } else {
            // Scanline chunk layout in the file:
            // 4 byte: scan line
            // 4 byte: data size
            // ~     : pixel data(uncompressed or compressed)
            size_t data_size =
                size_t(size - (offsets[y_idx] + sizeof(int) * 2));
            const unsigned char *data_ptr =
                reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);

            int line_no;
            memcpy(&line_no, data_ptr, sizeof(int));
            int data_len;
            memcpy(&data_len, data_ptr + 4, sizeof(int));
            tinyexr::swap4(&line_no);
            tinyexr::swap4(&data_len);

            if (size_t(data_len) > data_size) {
              invalid_data = true;
            } else if ((line_no > (2 << 20)) || (line_no < -(2 << 20))) {
              // Too large value. Assume this is invalid
              // 2**20 = 1048576 = heuristic value.
              invalid_data = true;
            } else if (data_len == 0) {
              // TODO(syoyo): May be ok to raise the threshold for example
              // `data_len < 4`
              invalid_data = true;
            } else {
              // line_no may be negative.
              int end_line_no = (std::min)(line_no + num_scanline_blocks,
                                           (exr_header->data_window.max_y + 1));

              int num_lines = end_line_no - line_no;

              if (num_lines <= 0) {
                invalid_data = true;
              } else {
                // Move to data addr: 8 = 4 + 4;
                data_ptr += 8;

                // Adjust line_no with data_window.bmin.y

                // overflow check
                tinyexr_int64 lno =
                    static_cast<tinyexr_int64>(line_no) -
                    static_cast<tinyexr_int64>(exr_header->data_window.min_y);
                if (lno > std::numeric_limits<int>::max()) {
                  line_no = -1;  // invalid
                } else if (lno < -std::numeric_limits<int>::max()) {
                  line_no = -1;  // invalid
                } else {
                  line_no -= exr_header->data_window.min_y;
                }

                if (line_no < 0) {
                  invalid_data = true;
                } else {
                  if (!tinyexr::DecodePixelData(
                          exr_image->images, exr_header->requested_pixel_types,
                          data_ptr, static_cast<size_t>(data_len),
                          exr_header->compression_type, exr_header->line_order,
                          data_width, data_height, data_width, y, line_no,
                          num_lines, static_cast<size_t>(pixel_data_size),
                          static_cast<size_t>(
                              exr_header->num_custom_attributes),
                          exr_header->custom_attributes,
                          static_cast<size_t>(exr_header->num_channels),
                          exr_header->channels, channel_offset_list)) {
                    invalid_data = true;
                  }
                }
              }
            }
          }

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
        }
      }));
    }

    for (auto &t : workers) {
      t.join();
    }
#else
    }  // omp parallel
#endif
  }

  if (invalid_data) {
    if (err) {
      // NOTE(review): `ss` below is declared but never used.
      std::stringstream ss;
      (*err) += "Invalid data found when decoding pixels.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Overwrite `pixel_type` with `requested_pixel_type`.
  {
    for (int c = 0; c < exr_header->num_channels; c++) {
      exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
    }
  }

  {
    exr_image->num_channels = num_channels;

    exr_image->width = data_width;
    exr_image->height = data_height;
  }

  return TINYEXR_SUCCESS;
}
//
// For x > 0, FloorLog2(x) returns floor(log2(x)).
//
static int FloorLog2(unsigned x) {
  int result = 0;
  for (; x > 1u; x >>= 1u) {
    ++result;
  }
  return result;
}

//
// For x > 0, CeilLog2(x) returns ceil(log2(x)).
//
static int CeilLog2(unsigned x) {
  int result = 0;
  int round_up = 0;
  for (; x > 1u; x >>= 1u) {
    if (x & 1u) {
      round_up = 1;  // a set low bit means x is not a power of two
    }
    ++result;
  }
  return result + round_up;
}
FloorLog2(static_cast<unsigned>(x)) : CeilLog2(static_cast<unsigned>(x)); } static int CalculateNumXLevels(const EXRHeader* exr_header) { int min_x = exr_header->data_window.min_x; int max_x = exr_header->data_window.max_x; int min_y = exr_header->data_window.min_y; int max_y = exr_header->data_window.max_y; int num = 0; switch (exr_header->tile_level_mode) { case TINYEXR_TILE_ONE_LEVEL: num = 1; break; case TINYEXR_TILE_MIPMAP_LEVELS: { int w = max_x - min_x + 1; int h = max_y - min_y + 1; num = RoundLog2(std::max(w, h), exr_header->tile_rounding_mode) + 1; } break; case TINYEXR_TILE_RIPMAP_LEVELS: { int w = max_x - min_x + 1; num = RoundLog2(w, exr_header->tile_rounding_mode) + 1; } break; default: assert(false); } return num; } static int CalculateNumYLevels(const EXRHeader* exr_header) { int min_x = exr_header->data_window.min_x; int max_x = exr_header->data_window.max_x; int min_y = exr_header->data_window.min_y; int max_y = exr_header->data_window.max_y; int num = 0; switch (exr_header->tile_level_mode) { case TINYEXR_TILE_ONE_LEVEL: num = 1; break; case TINYEXR_TILE_MIPMAP_LEVELS: { int w = max_x - min_x + 1; int h = max_y - min_y + 1; num = RoundLog2(std::max(w, h), exr_header->tile_rounding_mode) + 1; } break; case TINYEXR_TILE_RIPMAP_LEVELS: { int h = max_y - min_y + 1; num = RoundLog2(h, exr_header->tile_rounding_mode) + 1; } break; default: assert(false); } return num; } static void CalculateNumTiles(std::vector<int>& numTiles, int toplevel_size, int size, int tile_rounding_mode) { for (unsigned i = 0; i < numTiles.size(); i++) { int l = LevelSize(toplevel_size, i, tile_rounding_mode); assert(l <= std::numeric_limits<int>::max() - size + 1); numTiles[i] = (l + size - 1) / size; } } static void PrecalculateTileInfo(std::vector<int>& num_x_tiles, std::vector<int>& num_y_tiles, const EXRHeader* exr_header) { int min_x = exr_header->data_window.min_x; int max_x = exr_header->data_window.max_x; int min_y = exr_header->data_window.min_y; int max_y = 
exr_header->data_window.max_y; int num_x_levels = CalculateNumXLevels(exr_header); int num_y_levels = CalculateNumYLevels(exr_header); num_x_tiles.resize(num_x_levels); num_y_tiles.resize(num_y_levels); CalculateNumTiles(num_x_tiles, max_x - min_x + 1, exr_header->tile_size_x, exr_header->tile_rounding_mode); CalculateNumTiles(num_y_tiles, max_y - min_y + 1, exr_header->tile_size_y, exr_header->tile_rounding_mode); } static void InitSingleResolutionOffsets(OffsetData& offset_data, size_t num_blocks) { offset_data.offsets.resize(1); offset_data.offsets[0].resize(1); offset_data.offsets[0][0].resize(num_blocks); offset_data.num_x_levels = 1; offset_data.num_y_levels = 1; } // Return sum of tile blocks. static int InitTileOffsets(OffsetData& offset_data, const EXRHeader* exr_header, const std::vector<int>& num_x_tiles, const std::vector<int>& num_y_tiles) { int num_tile_blocks = 0; offset_data.num_x_levels = static_cast<int>(num_x_tiles.size()); offset_data.num_y_levels = static_cast<int>(num_y_tiles.size()); switch (exr_header->tile_level_mode) { case TINYEXR_TILE_ONE_LEVEL: case TINYEXR_TILE_MIPMAP_LEVELS: assert(offset_data.num_x_levels == offset_data.num_y_levels); offset_data.offsets.resize(offset_data.num_x_levels); for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) { offset_data.offsets[l].resize(num_y_tiles[l]); for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) { offset_data.offsets[l][dy].resize(num_x_tiles[l]); num_tile_blocks += num_x_tiles[l]; } } break; case TINYEXR_TILE_RIPMAP_LEVELS: offset_data.offsets.resize(static_cast<size_t>(offset_data.num_x_levels) * static_cast<size_t>(offset_data.num_y_levels)); for (int ly = 0; ly < offset_data.num_y_levels; ++ly) { for (int lx = 0; lx < offset_data.num_x_levels; ++lx) { int l = ly * offset_data.num_x_levels + lx; offset_data.offsets[l].resize(num_y_tiles[ly]); for (size_t dy = 0; dy < offset_data.offsets[l].size(); ++dy) { offset_data.offsets[l][dy].resize(num_x_tiles[lx]); 
num_tile_blocks += num_x_tiles[lx]; } } } break; default: assert(false); } return num_tile_blocks; } static bool IsAnyOffsetsAreInvalid(const OffsetData& offset_data) { for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) if (reinterpret_cast<const tinyexr::tinyexr_int64&>(offset_data.offsets[l][dy][dx]) <= 0) return true; return false; } static bool isValidTile(const EXRHeader* exr_header, const OffsetData& offset_data, int dx, int dy, int lx, int ly) { if (lx < 0 || ly < 0 || dx < 0 || dy < 0) return false; int num_x_levels = offset_data.num_x_levels; int num_y_levels = offset_data.num_y_levels; switch (exr_header->tile_level_mode) { case TINYEXR_TILE_ONE_LEVEL: if (lx == 0 && ly == 0 && offset_data.offsets.size() > 0 && offset_data.offsets[0].size() > static_cast<size_t>(dy) && offset_data.offsets[0][dy].size() > static_cast<size_t>(dx)) { return true; } break; case TINYEXR_TILE_MIPMAP_LEVELS: if (lx < num_x_levels && ly < num_y_levels && offset_data.offsets.size() > static_cast<size_t>(lx) && offset_data.offsets[lx].size() > static_cast<size_t>(dy) && offset_data.offsets[lx][dy].size() > static_cast<size_t>(dx)) { return true; } break; case TINYEXR_TILE_RIPMAP_LEVELS: { size_t idx = static_cast<size_t>(lx) + static_cast<size_t>(ly)* static_cast<size_t>(num_x_levels); if (lx < num_x_levels && ly < num_y_levels && (offset_data.offsets.size() > idx) && offset_data.offsets[idx].size() > static_cast<size_t>(dy) && offset_data.offsets[idx][dy].size() > static_cast<size_t>(dx)) { return true; } } break; default: return false; } return false; } static void ReconstructTileOffsets(OffsetData& offset_data, const EXRHeader* exr_header, const unsigned char* head, const unsigned char* marker, const size_t size, bool isMultiPartFile, bool isDeep) { int numXLevels = offset_data.num_x_levels; for (unsigned int l = 0; l < 
// Rebuild a broken tile offset table by walking every chunk in file order,
// reading each chunk's (tileX, tileY, levelX, levelY) header and recording
// the chunk's start offset under those coordinates. Stops silently at the
// first chunk whose coordinates are out of range.
// NOTE(review): the `size` parameter is not used — the walk performs no
// bounds checking against the end of the buffer; confirm callers only
// invoke this on data already validated elsewhere.
static void ReconstructTileOffsets(OffsetData& offset_data,
  const EXRHeader* exr_header,
  const unsigned char* head, const unsigned char* marker, const size_t size,
  bool isMultiPartFile,
  bool isDeep) {
  int numXLevels = offset_data.num_x_levels;
  for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
    for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
      for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
        // Offset of the chunk we are about to parse.
        tinyexr::tinyexr_uint64 tileOffset = marker - head;
        if (isMultiPartFile) {
          // Multi-part chunks are prefixed with a part number; skip it.
          //int partNumber;
          marker += sizeof(int);
        }

        int tileX;
        memcpy(&tileX, marker, sizeof(int));
        tinyexr::swap4(&tileX);
        marker += sizeof(int);

        int tileY;
        memcpy(&tileY, marker, sizeof(int));
        tinyexr::swap4(&tileY);
        marker += sizeof(int);

        int levelX;
        memcpy(&levelX, marker, sizeof(int));
        tinyexr::swap4(&levelX);
        marker += sizeof(int);

        int levelY;
        memcpy(&levelY, marker, sizeof(int));
        tinyexr::swap4(&levelY);
        marker += sizeof(int);

        if (isDeep) {
          // Deep tiles carry two int64 sizes, then the packed offset table
          // and packed samples; skip the whole payload.
          tinyexr::tinyexr_int64 packed_offset_table_size;
          memcpy(&packed_offset_table_size, marker, sizeof(tinyexr::tinyexr_int64));
          tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(&packed_offset_table_size));
          marker += sizeof(tinyexr::tinyexr_int64);

          tinyexr::tinyexr_int64 packed_sample_size;
          memcpy(&packed_sample_size, marker, sizeof(tinyexr::tinyexr_int64));
          tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(&packed_sample_size));
          marker += sizeof(tinyexr::tinyexr_int64);

          // next Int64 is unpacked sample size - skip that too
          marker += packed_offset_table_size + packed_sample_size + 8;
        } else {
          // Flat tiles: 4-byte payload size, then the payload itself.
          int dataSize;
          memcpy(&dataSize, marker, sizeof(int));
          tinyexr::swap4(&dataSize);
          marker += sizeof(int);
          marker += dataSize;
        }

        // Bail out (leaving the remaining entries untouched) on the first
        // chunk whose coordinates do not fit the offset tables.
        if (!isValidTile(exr_header, offset_data, tileX, tileY, levelX, levelY))
          return;

        int level_idx = LevelIndex(levelX, levelY, exr_header->tile_level_mode, numXLevels);
        offset_data.offsets[level_idx][tileY][tileX] = tileOffset;
      }
    }
  }
}
< offset_data.offsets[l][dy].size(); ++dx) { tinyexr::tinyexr_uint64 offset; if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) { tinyexr::SetErrorMessage("Insufficient data size in offset table.", err); return TINYEXR_ERROR_INVALID_DATA; } memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64)); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err); return TINYEXR_ERROR_INVALID_DATA; } marker += sizeof(tinyexr::tinyexr_uint64); // = 8 offset_data.offsets[l][dy][dx] = offset; } } } return TINYEXR_SUCCESS; } static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header, const unsigned char *head, const unsigned char *marker, const size_t size, const char **err) { if (exr_image == NULL || exr_header == NULL || head == NULL || marker == NULL || (size <= tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } int num_scanline_blocks = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanline_blocks = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanline_blocks = 16; } if (exr_header->data_window.max_x < exr_header->data_window.min_x || exr_header->data_window.max_x - exr_header->data_window.min_x == std::numeric_limits<int>::max()) { // Issue 63 tinyexr::SetErrorMessage("Invalid data width value", err); return TINYEXR_ERROR_INVALID_DATA; } int data_width = exr_header->data_window.max_x - exr_header->data_window.min_x + 1; if (exr_header->data_window.max_y < exr_header->data_window.min_y || exr_header->data_window.max_y - exr_header->data_window.min_y == std::numeric_limits<int>::max()) { tinyexr::SetErrorMessage("Invalid data height value", err); return TINYEXR_ERROR_INVALID_DATA; } int data_height = exr_header->data_window.max_y - 
                    exr_header->data_window.min_y + 1;

  // Do not allow too large data_width and data_height. header invalid?
  {
    if (data_width > TINYEXR_DIMENSION_THRESHOLD) {
      tinyexr::SetErrorMessage("data width too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (data_height > TINYEXR_DIMENSION_THRESHOLD) {
      tinyexr::SetErrorMessage("data height too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
  }

  if (exr_header->tiled) {
    if (exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) {
      tinyexr::SetErrorMessage("tile width too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD) {
      tinyexr::SetErrorMessage("tile height too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
  }

  // Read offset tables.
  OffsetData offset_data;
  size_t num_blocks = 0;
  // For a multi-resolution image, the size of the offset table will be
  // calculated from the other attributes of the header.
  // If chunk_count > 0 then chunk_count must be equal to the calculated
  // tile count.
  if (exr_header->tiled) {
    {
      std::vector<int> num_x_tiles, num_y_tiles;
      PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_header);
      num_blocks = InitTileOffsets(offset_data, exr_header, num_x_tiles,
                                   num_y_tiles);
      if (exr_header->chunk_count > 0) {
        if (exr_header->chunk_count != num_blocks) {
          tinyexr::SetErrorMessage("Invalid offset table size.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
      }
    }

    int ret = ReadOffsets(offset_data, head, marker, size, err);
    if (ret != TINYEXR_SUCCESS) return ret;
    // A zero/garbage offset table can still be recovered by scanning the
    // chunks themselves.
    if (IsAnyOffsetsAreInvalid(offset_data)) {
      ReconstructTileOffsets(offset_data, exr_header,
        head, marker, size,
        exr_header->multipart, exr_header->non_image);
    }
  } else if (exr_header->chunk_count > 0) {
    // Use `chunkCount` attribute.
    num_blocks = static_cast<size_t>(exr_header->chunk_count);
    InitSingleResolutionOffsets(offset_data, num_blocks);
  } else {
    // Derive block count from image height, rounding up to cover a partial
    // final block.
    num_blocks = static_cast<size_t>(data_height) /
                 static_cast<size_t>(num_scanline_blocks);
    if (num_blocks * static_cast<size_t>(num_scanline_blocks) <
        static_cast<size_t>(data_height)) {
      num_blocks++;
    }

    InitSingleResolutionOffsets(offset_data, num_blocks);
  }

  if (!exr_header->tiled) {
    std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
    for (size_t y = 0; y < num_blocks; y++) {
      tinyexr::tinyexr_uint64 offset;
      // Issue #81
      if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
        tinyexr::SetErrorMessage("Insufficient data size in offset table.",
                                 err);
        return TINYEXR_ERROR_INVALID_DATA;
      }

      memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
      tinyexr::swap8(&offset);
      if (offset >= size) {
        tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.",
                                 err);
        return TINYEXR_ERROR_INVALID_DATA;
      }
      marker += sizeof(tinyexr::tinyexr_uint64);  // = 8
      offsets[y] = offset;
    }

    // If line offsets are invalid, we try to reconstruct it.
    // See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details.
    for (size_t y = 0; y < num_blocks; y++) {
      if (offsets[y] <= 0) {
        // TODO(syoyo) Report as warning?
        // if (err) {
        //   stringstream ss;
        //   ss << "Incomplete lineOffsets." << std::endl;
        //   (*err) += ss.str();
        //}
        bool ret =
            ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
        if (ret) {
          // OK
          break;
        } else {
          tinyexr::SetErrorMessage(
              "Cannot reconstruct lineOffset table in DecodeEXRImage.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
      }
    }
  }

  {
    std::string e;
    int ret = DecodeChunk(exr_image, exr_header, offset_data, head, size, &e);

    if (ret != TINYEXR_SUCCESS) {
      if (!e.empty()) {
        tinyexr::SetErrorMessage(e, err);
      }

#if 1
      // On failure release any partially decoded channel images.
      FreeEXRImage(exr_image);
#else
      // release memory(if exists)
      if ((exr_header->num_channels > 0) && exr_image && exr_image->images) {
        for (size_t c = 0; c < size_t(exr_header->num_channels); c++) {
          if (exr_image->images[c]) {
            free(exr_image->images[c]);
            exr_image->images[c] = NULL;
          }
        }
        free(exr_image->images);
        exr_image->images = NULL;
      }
#endif
    }

    return ret;
  }
}

// Collects the unique layer prefixes (text before the final '.') found in
// the header's channel names into `layer_names`.
static void GetLayers(const EXRHeader &exr_header,
                      std::vector<std::string> &layer_names) {
  // Naive implementation
  // Group channels by layers
  // go over all channel names, split by periods
  // collect unique names
  layer_names.clear();
  for (int c = 0; c < exr_header.num_channels; c++) {
    std::string full_name(exr_header.channels[c].name);
    const size_t pos = full_name.find_last_of('.');
    if (pos != std::string::npos && pos != 0 && pos + 1 < full_name.size()) {
      full_name.erase(pos);
      if (std::find(layer_names.begin(), layer_names.end(), full_name) ==
          layer_names.end())
        layer_names.push_back(full_name);
    }
  }
}

// A channel's index in the header plus its (layer-stripped) display name.
struct LayerChannel {
  explicit LayerChannel(size_t i, std::string n) : index(i), name(n) {}
  size_t index;
  std::string name;
};

// Collects the channels belonging to `layer_name` (or all channels with their
// layer prefix stripped when `layer_name` is empty) into `channels`.
static void ChannelsInLayer(const EXRHeader &exr_header,
                            const std::string layer_name,
                            std::vector<LayerChannel> &channels) {
  channels.clear();
  for (int c = 0; c < exr_header.num_channels; c++) {
    std::string ch_name(exr_header.channels[c].name);
    if (layer_name.empty()) {
      const size_t pos = ch_name.find_last_of('.');
      if (pos != std::string::npos && pos < ch_name.size()) {
        ch_name = ch_name.substr(pos + 1);
      }
    } else {
      const size_t
          pos = ch_name.find(layer_name + '.');
      // Only channels whose name starts with "<layer_name>." belong here.
      if (pos == std::string::npos) continue;
      if (pos == 0) {
        ch_name = ch_name.substr(layer_name.size() + 1);
      }
    }
    LayerChannel ch(size_t(c), ch_name);
    channels.push_back(ch);
  }
}

}  // namespace tinyexr

// Public API: lists layer names of an EXR file. `*layer_names` is a
// malloc'ed array of strdup'ed strings the caller must free.
int EXRLayers(const char *filename, const char **layer_names[], int *num_layers,
              const char **err) {
  EXRVersion exr_version;
  EXRHeader exr_header;
  InitEXRHeader(&exr_header);

  {
    int ret = ParseEXRVersionFromFile(&exr_version, filename);
    if (ret != TINYEXR_SUCCESS) {
      tinyexr::SetErrorMessage("Invalid EXR header.", err);
      return ret;
    }

    if (exr_version.multipart || exr_version.non_image) {
      tinyexr::SetErrorMessage(
          "Loading multipart or DeepImage is not supported in LoadEXR() API",
          err);
      return TINYEXR_ERROR_INVALID_DATA;  // @fixme.
    }
  }

  int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
  if (ret != TINYEXR_SUCCESS) {
    FreeEXRHeader(&exr_header);
    return ret;
  }

  std::vector<std::string> layer_vec;
  tinyexr::GetLayers(exr_header, layer_vec);

  (*num_layers) = int(layer_vec.size());
  (*layer_names) = static_cast<const char **>(
      malloc(sizeof(const char *) * static_cast<size_t>(layer_vec.size())));
  for (size_t c = 0; c < static_cast<size_t>(layer_vec.size()); c++) {
#ifdef _MSC_VER
    (*layer_names)[c] = _strdup(layer_vec[c].c_str());
#else
    (*layer_names)[c] = strdup(layer_vec[c].c_str());
#endif
  }

  FreeEXRHeader(&exr_header);
  return TINYEXR_SUCCESS;
}

// Public API: loads the default (unnamed) layer as RGBA floats.
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
            const char **err) {
  return LoadEXRWithLayer(out_rgba, width, height, filename,
                          /* layername */ NULL, err);
}

// Public API: loads one layer of an EXR file into a malloc'ed RGBA float
// buffer (`*out_rgba`, caller frees). HALF channels are promoted to FLOAT.
int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
                     const char *filename, const char *layername,
                     const char **err) {
  if (out_rgba == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRVersion exr_version;
  EXRImage exr_image;
  EXRHeader exr_header;
  InitEXRHeader(&exr_header);
  InitEXRImage(&exr_image);

  {
    int ret =
        ParseEXRVersionFromFile(&exr_version, filename);
    if (ret != TINYEXR_SUCCESS) {
      std::stringstream ss;
      ss << "Failed to open EXR file or read version info from EXR file. code("
         << ret << ")";
      tinyexr::SetErrorMessage(ss.str(), err);
      return ret;
    }

    if (exr_version.multipart || exr_version.non_image) {
      tinyexr::SetErrorMessage(
          "Loading multipart or DeepImage is not supported in LoadEXR() API",
          err);
      return TINYEXR_ERROR_INVALID_DATA;  // @fixme.
    }
  }

  {
    int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
    if (ret != TINYEXR_SUCCESS) {
      FreeEXRHeader(&exr_header);
      return ret;
    }
  }

  // Read HALF channel as FLOAT.
  for (int i = 0; i < exr_header.num_channels; i++) {
    if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
      exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
    }
  }

  // TODO: Probably limit loading to layers (channels) selected by layer index
  {
    int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err);
    if (ret != TINYEXR_SUCCESS) {
      FreeEXRHeader(&exr_header);
      return ret;
    }
  }

  // RGBA channel indices into exr_header.channels; -1 = not present.
  int idxR = -1;
  int idxG = -1;
  int idxB = -1;
  int idxA = -1;

  std::vector<std::string> layer_names;
  tinyexr::GetLayers(exr_header, layer_names);

  std::vector<tinyexr::LayerChannel> channels;
  tinyexr::ChannelsInLayer(
      exr_header, layername == NULL ? "" : std::string(layername), channels);

  if (channels.size() < 1) {
    tinyexr::SetErrorMessage("Layer Not Found", err);
    FreeEXRHeader(&exr_header);
    FreeEXRImage(&exr_image);
    return TINYEXR_ERROR_LAYER_NOT_FOUND;
  }

  // Map the first (up to) four channels of the layer to R/G/B/A by name.
  size_t ch_count = channels.size() < 4 ? channels.size() : 4;
  for (size_t c = 0; c < ch_count; c++) {
    const tinyexr::LayerChannel &ch = channels[c];

    if (ch.name == "R") {
      idxR = int(ch.index);
    } else if (ch.name == "G") {
      idxG = int(ch.index);
    } else if (ch.name == "B") {
      idxB = int(ch.index);
    } else if (ch.name == "A") {
      idxA = int(ch.index);
    }
  }

  if (channels.size() == 1) {
    int chIdx = int(channels.front().index);
    // Grayscale channel only.
    // Replicate the single channel into all four RGBA components.
    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));

    if (exr_header.tiled) {
      // Copy tile-by-tile, clipping tiles that overhang the image border.
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii = exr_image.tiles[it].offset_x *
                               static_cast<int>(exr_header.tile_size_x) +
                           i;
            const int jj = exr_image.tiles[it].offset_y *
                               static_cast<int>(exr_header.tile_size_y) +
                           j;
            const int idx = ii + jj * static_cast<int>(exr_image.width);

            // out of region check.
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[chIdx][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[chIdx][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[chIdx][srcIdx];
            (*out_rgba)[4 * idx + 3] =
                reinterpret_cast<float **>(src)[chIdx][srcIdx];
          }
        }
      }
    } else {
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        const float val =
            reinterpret_cast<float **>(exr_image.images)[chIdx][i];
        (*out_rgba)[4 * i + 0] = val;
        (*out_rgba)[4 * i + 1] = val;
        (*out_rgba)[4 * i + 2] = val;
        (*out_rgba)[4 * i + 3] = val;
      }
    }
  } else {
    // Assume RGB(A)

    if (idxR == -1) {
      tinyexr::SetErrorMessage("R channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    if (idxG == -1) {
      tinyexr::SetErrorMessage("G channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    if (idxB == -1) {
      tinyexr::SetErrorMessage("B channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));
    if (exr_header.tiled) {
      // Copy tile-by-tile, clipping tiles that overhang the image border;
      // missing alpha defaults to 1.0.
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii =
                exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
            const int jj =
                exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
            const int idx = ii + jj * exr_image.width;

            // out of region check.
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[idxR][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[idxG][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[idxB][srcIdx];
            if (idxA != -1) {
              (*out_rgba)[4 * idx + 3] =
                  reinterpret_cast<float **>(src)[idxA][srcIdx];
            } else {
              (*out_rgba)[4 * idx + 3] = 1.0;
            }
          }
        }
      }
    } else {
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        (*out_rgba)[4 * i + 0] =
            reinterpret_cast<float **>(exr_image.images)[idxR][i];
        (*out_rgba)[4 * i + 1] =
            reinterpret_cast<float **>(exr_image.images)[idxG][i];
        (*out_rgba)[4 * i + 2] =
            reinterpret_cast<float **>(exr_image.images)[idxB][i];
        if (idxA != -1) {
          (*out_rgba)[4 * i + 3] =
              reinterpret_cast<float **>(exr_image.images)[idxA][i];
        } else {
          (*out_rgba)[4 * i + 3] = 1.0;
        }
      }
    }
  }

  (*width) = exr_image.width;
  (*height) = exr_image.height;

  FreeEXRHeader(&exr_header);
  FreeEXRImage(&exr_image);

  return TINYEXR_SUCCESS;
}

// Public API: returns TINYEXR_SUCCESS when the file has a parseable EXR
// version header, otherwise the ParseEXRVersionFromFile error code.
int IsEXR(const char *filename) {
  EXRVersion exr_version;

  int ret = ParseEXRVersionFromFile(&exr_version, filename);
  if (ret != TINYEXR_SUCCESS) {
    return ret;
  }

  return TINYEXR_SUCCESS;
}

// Public API: parses the EXR header that follows the version block in
// `memory` into `exr_header`.
int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version,
                             const unsigned char *memory, size_t size,
                             const char **err) {
  if (memory == NULL || exr_header == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument. `memory` or `exr_header` argument is null in "
        "ParseEXRHeaderFromMemory()",
        err);

    // Invalid argument
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (size < tinyexr::kEXRVersionSize) {
    tinyexr::SetErrorMessage("Insufficient header/data size.\n", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Header attributes start right after the magic+version block.
  const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
  size_t marker_size = size - tinyexr::kEXRVersionSize;

  tinyexr::HeaderInfo info;
  info.clear();

  std::string err_str;
  int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size);

  if (ret != TINYEXR_SUCCESS) {
    if (err && !err_str.empty()) {
      tinyexr::SetErrorMessage(err_str, err);
    }
  }

  // NOTE: header is converted (and version flags copied) even when parsing
  // reported an error; the parse result code is still propagated below.
  ConvertHeader(exr_header, info);

  exr_header->multipart = version->multipart ? 1 : 0;
  exr_header->non_image = version->non_image ? 1 : 0;

  return ret;
}

// Public API: in-memory variant of LoadEXR(). Decodes the default layer to
// a malloc'ed RGBA float buffer; HALF channels are promoted to FLOAT.
int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
                      const unsigned char *memory, size_t size,
                      const char **err) {
  if (out_rgba == NULL || memory == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRVersion exr_version;
  EXRImage exr_image;
  EXRHeader exr_header;

  InitEXRHeader(&exr_header);

  int ret = ParseEXRVersionFromMemory(&exr_version, memory, size);
  if (ret != TINYEXR_SUCCESS) {
    std::stringstream ss;
    ss << "Failed to parse EXR version. code(" << ret << ")";
    tinyexr::SetErrorMessage(ss.str(), err);
    return ret;
  }

  ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    return ret;
  }

  // Read HALF channel as FLOAT.
  for (int i = 0; i < exr_header.num_channels; i++) {
    if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
      exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
    }
  }

  InitEXRImage(&exr_image);
  ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    return ret;
  }

  // RGBA channel indices; -1 = channel not present.
  int idxR = -1;
  int idxG = -1;
  int idxB = -1;
  int idxA = -1;
  for (int c = 0; c < exr_header.num_channels; c++) {
    if (strcmp(exr_header.channels[c].name, "R") == 0) {
      idxR = c;
    } else if (strcmp(exr_header.channels[c].name, "G") == 0) {
      idxG = c;
    } else if (strcmp(exr_header.channels[c].name, "B") == 0) {
      idxB = c;
    } else if (strcmp(exr_header.channels[c].name, "A") == 0) {
      idxA = c;
    }
  }

  // TODO(syoyo): Refactor removing same code as used in LoadEXR().
  if (exr_header.num_channels == 1) {
    // Grayscale channel only.
    // Replicate the single channel into all four RGBA components.
    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));

    if (exr_header.tiled) {
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii =
                exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
            const int jj =
                exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
            const int idx = ii + jj * exr_image.width;

            // out of region check.
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[0][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[0][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[0][srcIdx];
            (*out_rgba)[4 * idx + 3] =
                reinterpret_cast<float **>(src)[0][srcIdx];
          }
        }
      }
    } else {
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
        (*out_rgba)[4 * i + 0] = val;
        (*out_rgba)[4 * i + 1] = val;
        (*out_rgba)[4 * i + 2] = val;
        (*out_rgba)[4 * i + 3] = val;
      }
    }
  } else {
    // TODO(syoyo): Support non RGBA image.

    if (idxR == -1) {
      tinyexr::SetErrorMessage("R channel not found", err);
      // @todo { free exr_image }
      return TINYEXR_ERROR_INVALID_DATA;
    }

    if (idxG == -1) {
      tinyexr::SetErrorMessage("G channel not found", err);
      // @todo { free exr_image }
      return TINYEXR_ERROR_INVALID_DATA;
    }

    if (idxB == -1) {
      tinyexr::SetErrorMessage("B channel not found", err);
      // @todo { free exr_image }
      return TINYEXR_ERROR_INVALID_DATA;
    }

    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));
    if (exr_header.tiled) {
      // Missing alpha defaults to 1.0.
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++)
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii =
                exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
            const int jj =
                exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
            const int idx = ii + jj * exr_image.width;

            // out of region check.
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[idxR][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[idxG][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[idxB][srcIdx];
            if (idxA != -1) {
              (*out_rgba)[4 * idx + 3] =
                  reinterpret_cast<float **>(src)[idxA][srcIdx];
            } else {
              (*out_rgba)[4 * idx + 3] = 1.0;
            }
          }
      }
    } else {
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        (*out_rgba)[4 * i + 0] =
            reinterpret_cast<float **>(exr_image.images)[idxR][i];
        (*out_rgba)[4 * i + 1] =
            reinterpret_cast<float **>(exr_image.images)[idxG][i];
        (*out_rgba)[4 * i + 2] =
            reinterpret_cast<float **>(exr_image.images)[idxB][i];
        if (idxA != -1) {
          (*out_rgba)[4 * i + 3] =
              reinterpret_cast<float **>(exr_image.images)[idxA][i];
        } else {
          (*out_rgba)[4 * i + 3] = 1.0;
        }
      }
    }
  }

  (*width) = exr_image.width;
  (*height) = exr_image.height;

  FreeEXRHeader(&exr_header);
  FreeEXRImage(&exr_image);

  return TINYEXR_SUCCESS;
}

// Public API: reads `filename` fully into memory, then decodes it via
// LoadEXRImageFromMemory().
int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header,
                         const char *filename, const char **err) {
  if (exr_image == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__)  // MSVC, MinGW gcc or clang
  // Use the wide-char open so UTF-8 filenames work on Windows.
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    // TODO(syoyo): return wfopen_s erro code
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t
filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize < 16) { tinyexr::SetErrorMessage("File size too short " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize, err); } int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header, const unsigned char *memory, const size_t size, const char **err) { if (exr_image == NULL || memory == NULL || (size < tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->header_len == 0) { tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } const unsigned char *head = memory; const unsigned char *marker = reinterpret_cast<const unsigned char *>( memory + exr_header->header_len + 8); // +8 for magic number + version header. 
  return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size,
                                 err);
}

namespace tinyexr {

// out_data must be allocated initially with the block-header size
// of the current image(-part) type.
// Packs `num_lines` scanlines of all channels (converting pixel types and
// swapping to file byte order), compresses per `compression_type`, and
// appends the result to `out_data`. Returns false on unsupported compression.
static bool EncodePixelData(/* out */ std::vector<unsigned char>& out_data,
                            const unsigned char* const* images,
                            const int* requested_pixel_types,
                            int compression_type,
                            int line_order,
                            int width,      // for tiled : tile.width
                            int height,     // for tiled : header.tile_size_y
                            int x_stride,   // for tiled : header.tile_size_x
                            int line_no,    // for tiled : 0
                            int num_lines,  // for tiled : tile.height
                            size_t pixel_data_size,
                            const std::vector<ChannelInfo>& channels,
                            const std::vector<size_t>& channel_offset_list,
                            const void* compression_param = 0)  // zfp compression param
{
  size_t buf_size = static_cast<size_t>(width) *
                    static_cast<size_t>(num_lines) *
                    static_cast<size_t>(pixel_data_size);
  //int last2bit = (buf_size & 3);
  // buf_size must be multiple of four
  //if(last2bit) buf_size += 4 - last2bit;
  std::vector<unsigned char> buf(buf_size);

  // Scatter each channel into the scanline-interleaved staging buffer,
  // converting source pixel_type -> requested_pixel_type per channel.
  size_t start_y = static_cast<size_t>(line_no);
  for (size_t c = 0; c < channels.size(); c++) {
    if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
        // HALF -> FLOAT
        for (int y = 0; y < num_lines; y++) {
          // Assume increasing Y
          float *line_ptr = reinterpret_cast<float *>(&buf.at(
              static_cast<size_t>(pixel_data_size * y * width) +
              channel_offset_list[c] * static_cast<size_t>(width)));
          for (int x = 0; x < width; x++) {
            tinyexr::FP16 h16;
            h16.u = reinterpret_cast<const unsigned short * const *>(
                images)[c][(y + start_y) * x_stride + x];

            tinyexr::FP32 f32 = half_to_float(h16);

            tinyexr::swap4(&f32.f);

            // line_ptr[x] = f32.f;
            tinyexr::cpy4(line_ptr + x, &(f32.f));
          }
        }
      } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
        // HALF -> HALF (byte-swap only)
        for (int y = 0; y < num_lines; y++) {
          // Assume increasing Y
          unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &buf.at(static_cast<size_t>(pixel_data_size * y * width) +
                      channel_offset_list[c] * static_cast<size_t>(width)));
          for (int x = 0; x < width; x++) {
            unsigned short val = reinterpret_cast<const unsigned short * const *>(
                images)[c][(y + start_y) * x_stride + x];

            tinyexr::swap2(&val);

            // line_ptr[x] = val;
            tinyexr::cpy2(line_ptr + x, &val);
          }
        }
      } else {
        assert(0);
      }
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
      if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
        // FLOAT -> HALF
        for (int y = 0; y < num_lines; y++) {
          // Assume increasing Y
          unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &buf.at(static_cast<size_t>(pixel_data_size * y * width) +
                      channel_offset_list[c] * static_cast<size_t>(width)));
          for (int x = 0; x < width; x++) {
            tinyexr::FP32 f32;
            f32.f = reinterpret_cast<const float * const *>(
                images)[c][(y + start_y) * x_stride + x];

            tinyexr::FP16 h16;
            h16 = float_to_half_full(f32);

            tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u));

            // line_ptr[x] = h16.u;
            tinyexr::cpy2(line_ptr + x, &(h16.u));
          }
        }
      } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
        // FLOAT -> FLOAT (byte-swap only)
        for (int y = 0; y < num_lines; y++) {
          // Assume increasing Y
          float *line_ptr = reinterpret_cast<float *>(&buf.at(
              static_cast<size_t>(pixel_data_size * y * width) +
              channel_offset_list[c] * static_cast<size_t>(width)));
          for (int x = 0; x < width; x++) {
            float val = reinterpret_cast<const float * const *>(
                images)[c][(y + start_y) * x_stride + x];

            tinyexr::swap4(&val);

            // line_ptr[x] = val;
            tinyexr::cpy4(line_ptr + x, &val);
          }
        }
      } else {
        assert(0);
      }
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
      // UINT is stored as-is (byte-swap only).
      for (int y = 0; y < num_lines; y++) {
        // Assume increasing Y
        unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at(
            static_cast<size_t>(pixel_data_size * y * width) +
            channel_offset_list[c] * static_cast<size_t>(width)));
        for (int x = 0; x < width; x++) {
          unsigned int val = reinterpret_cast<const unsigned int * const *>(
              images)[c][(y + start_y) * x_stride + x];

          tinyexr::swap4(&val);

          // line_ptr[x] = val;
          tinyexr::cpy4(line_ptr + x, &val);
        }
      }
    }
  }

  // Compress the staging buffer and append to out_data.
  if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(uncompressed)
    out_data.insert(out_data.end(), buf.begin(), buf.end());
  } else if ((compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
             (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#if TINYEXR_USE_MINIZ
    std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound(
        static_cast<unsigned long>(buf.size())));
#else
    std::vector<unsigned char> block(
        compressBound(static_cast<uLong>(buf.size())));
#endif
    tinyexr::tinyexr_uint64 outSize = block.size();

    tinyexr::CompressZip(&block.at(0), outSize,
                         reinterpret_cast<const unsigned char *>(&buf.at(0)),
                         static_cast<unsigned long>(buf.size()));

    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(compressed)
    unsigned int data_len = static_cast<unsigned int>(outSize);  // truncate

    out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
    // (buf.size() * 3) / 2 would be enough.
    std::vector<unsigned char> block((buf.size() * 3) / 2);

    tinyexr::tinyexr_uint64 outSize = block.size();

    tinyexr::CompressRle(&block.at(0), outSize,
                         reinterpret_cast<const unsigned char *>(&buf.at(0)),
                         static_cast<unsigned long>(buf.size()));

    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(compressed)
    unsigned int data_len = static_cast<unsigned int>(outSize);  // truncate

    out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
    unsigned int bufLen =
        8192 + static_cast<unsigned int>(
                   2 * static_cast<unsigned int>(
                           buf.size()));  // @fixme { compute good bound. }
    std::vector<unsigned char> block(bufLen);
    unsigned int outSize = static_cast<unsigned int>(block.size());

    CompressPiz(&block.at(0), &outSize,
                reinterpret_cast<const unsigned char *>(&buf.at(0)),
                buf.size(), channels, width, num_lines);

    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(compressed)
    unsigned int data_len = outSize;

    out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
#else
    assert(0);
#endif
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
    const ZFPCompressionParam* zfp_compression_param =
        reinterpret_cast<const ZFPCompressionParam*>(compression_param);
    std::vector<unsigned char> block;
    unsigned int outSize;

    tinyexr::CompressZfp(
        &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)), width,
        num_lines, static_cast<int>(channels.size()), *zfp_compression_param);

    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(compressed)
    unsigned int data_len = outSize;

    out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
#else
    assert(0);
#endif
  } else {
    assert(0);
    return false;
  }

  return true;
}

// Encodes every tile of one mip/rip level into `data_list` (starting at
// `start_index`), in parallel when threads/OpenMP are available.
static int EncodeTiledLevel(const EXRImage* level_image, const EXRHeader* exr_header,
                            const std::vector<tinyexr::ChannelInfo>& channels,
                            std::vector<std::vector<unsigned char> >& data_list,
                            size_t start_index,  // for data_list
                            int num_x_tiles, int num_y_tiles,
                            const std::vector<size_t>& channel_offset_list,
                            int pixel_data_size,
                            const void* compression_param,  // must be set if zfp compression is enabled
                            std::string* err) {
  int num_tiles = num_x_tiles * num_y_tiles;
  assert(num_tiles == level_image->num_tiles);

  // Tiles larger than the base-level image are only legal on lower levels.
  if ((exr_header->tile_size_x > level_image->width ||
       exr_header->tile_size_y > level_image->height) &&
      level_image->level_x == 0 && level_image->level_y == 0) {
    if (err) {
      (*err) += "Failed to encode tile data.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  std::atomic<bool> invalid_data(false);
#else
  bool
      invalid_data(false);
#endif

  // NOTE: the loop body below is shared between the std::thread worker pool
  // and the (optionally OpenMP) serial form; braces opened in one #if branch
  // are closed in the matching branch further down.
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  std::vector<std::thread> workers;
  std::atomic<int> tile_count(0);

  int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
  if (num_threads > int(num_tiles)) {
    num_threads = int(num_tiles);
  }

  for (int t = 0; t < num_threads; t++) {
    workers.emplace_back(std::thread([&]() {
      int i = 0;
      while ((i = tile_count++) < num_tiles) {

#else
  // Use signed int since some OpenMP compiler doesn't allow unsigned type for
  // `parallel for`
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < num_tiles; i++) {

#endif
    size_t tile_idx = static_cast<size_t>(i);
    size_t data_idx = tile_idx + start_index;

    int x_tile = i % num_x_tiles;
    int y_tile = i / num_x_tiles;

    EXRTile& tile = level_image->tiles[tile_idx];
    const unsigned char* const* images =
        static_cast<const unsigned char* const*>(tile.images);

    // Reserve room for the 5-int tile chunk header, filled in below.
    data_list[data_idx].resize(5*sizeof(int));
    size_t data_header_size = data_list[data_idx].size();
    bool ret = EncodePixelData(data_list[data_idx],
                               images,
                               exr_header->requested_pixel_types,
                               exr_header->compression_type,
                               0,  // increasing y
                               tile.width,
                               exr_header->tile_size_y,
                               exr_header->tile_size_x,
                               0,
                               tile.height,
                               pixel_data_size,
                               channels,
                               channel_offset_list,
                               compression_param);
    if (!ret) {
      invalid_data = true;
      continue;
    }
    assert(data_list[data_idx].size() > data_header_size);
    int data_len = static_cast<int>(data_list[data_idx].size() - data_header_size);
    //tileX, tileY, levelX, levelY
    // pixel_data_size(int)
    memcpy(&data_list[data_idx][0], &x_tile, sizeof(int));
    memcpy(&data_list[data_idx][4], &y_tile, sizeof(int));
    memcpy(&data_list[data_idx][8], &level_image->level_x, sizeof(int));
    memcpy(&data_list[data_idx][12], &level_image->level_y, sizeof(int));
    memcpy(&data_list[data_idx][16], &data_len, sizeof(int));

    // Header ints are stored in file byte order.
    swap4(reinterpret_cast<int*>(&data_list[data_idx][0]));
    swap4(reinterpret_cast<int*>(&data_list[data_idx][4]));
    swap4(reinterpret_cast<int*>(&data_list[data_idx][8]));
    swap4(reinterpret_cast<int*>(&data_list[data_idx][12]));
    swap4(reinterpret_cast<int*>(&data_list[data_idx][16]));

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
      }
    }));
  }

  for (auto &t : workers) {
    t.join();
  }
#else
  }  // omp parallel
#endif

  if (invalid_data) {
    if (err) {
      (*err) += "Failed to encode tile data.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }
  return TINYEXR_SUCCESS;
}

// Returns the number of scanlines per block for a compression scheme
// (mirrors the table used on the decode side).
static int NumScanlines(int compression_type) {
  int num_scanlines = 1;
  if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanlines = 16;
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanlines = 32;
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanlines = 16;
  }
  return num_scanlines;
}

// Encodes all blocks (tiles or scanline blocks) of one image part, filling
// `data_list` with the encoded chunks and `offset_data` with their
// byte-swapped file offsets.
static int EncodeChunk(const EXRImage* exr_image, const EXRHeader* exr_header,
                       const std::vector<ChannelInfo>& channels,
                       int num_blocks,
                       tinyexr_uint64 chunk_offset,  // starting offset of current chunk
                       bool is_multipart,
                       OffsetData& offset_data,  // output block offsets, must be initialized
                       std::vector<std::vector<unsigned char> >& data_list,  // output
                       tinyexr_uint64& total_size,  // output: ending offset of current chunk
                       std::string* err) {
  int num_scanlines = NumScanlines(exr_header->compression_type);

  data_list.resize(num_blocks);

  std::vector<size_t> channel_offset_list(
      static_cast<size_t>(exr_header->num_channels));

  // Per-channel byte offsets within an interleaved scanline, and the total
  // bytes-per-pixel across all channels.
  int pixel_data_size = 0;
  {
    size_t channel_offset = 0;
    for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels);
         c++) {
      channel_offset_list[c] = channel_offset;
      if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
        pixel_data_size += sizeof(unsigned short);
        channel_offset += sizeof(unsigned short);
      } else if (exr_header->requested_pixel_types[c] ==
                 TINYEXR_PIXELTYPE_FLOAT) {
        pixel_data_size += sizeof(float);
        channel_offset += sizeof(float);
      } else if (exr_header->requested_pixel_types[c] ==
                 TINYEXR_PIXELTYPE_UINT) {
        pixel_data_size += sizeof(unsigned int);
        channel_offset += sizeof(unsigned int);
      } else {
        assert(0);
      }
    }
  }
  const void* compression_param = 0;
#if TINYEXR_USE_ZFP
  tinyexr::ZFPCompressionParam zfp_compression_param;

  // Use ZFP compression parameter from custom attributes(if such a parameter
  // exists)
  {
    std::string e;
    bool ret = tinyexr::FindZFPCompressionParam(
        &zfp_compression_param, exr_header->custom_attributes,
        exr_header->num_custom_attributes, &e);

    if (!ret) {
      // Use predefined compression parameter.
      zfp_compression_param.type = 0;
      zfp_compression_param.rate = 2;
    }
    compression_param = &zfp_compression_param;
  }
#endif

  tinyexr_uint64 offset = chunk_offset;
  // Multi-part files prefix each chunk with a 4-byte part number.
  tinyexr_uint64 doffset = is_multipart ? 4u : 0u;

  if (exr_image->tiles) {
    const EXRImage* level_image = exr_image;
    size_t block_idx = 0;
    tinyexr::tinyexr_uint64 block_data_size = 0;
    // RIPMAP stores num_x_levels * num_y_levels levels; MIPMAP/ONE_LEVEL
    // store num_x_levels (== num_y_levels) levels.
    int num_levels = (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS)
        ? offset_data.num_x_levels
        : (offset_data.num_x_levels * offset_data.num_y_levels);
    for (int level_index = 0; level_index < num_levels; ++level_index) {
      if (!level_image) {
        if (err) {
          (*err) += "Invalid number of tiled levels for EncodeChunk\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      // Levels in the image list must appear in canonical level order.
      int level_index_from_image = LevelIndex(
          level_image->level_x, level_image->level_y,
          exr_header->tile_level_mode, offset_data.num_x_levels);
      if (level_index_from_image != level_index) {
        if (err) {
          (*err) += "Incorrect level ordering in tiled image\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
      int num_y_tiles = (int)offset_data.offsets[level_index].size();
      assert(num_y_tiles);
      int num_x_tiles = (int)offset_data.offsets[level_index][0].size();
      assert(num_x_tiles);

      std::string e;
      int ret = EncodeTiledLevel(level_image,
                                 exr_header,
                                 channels,
                                 data_list,
                                 block_idx,
                                 num_x_tiles,
                                 num_y_tiles,
                                 channel_offset_list,
                                 pixel_data_size,
                                 compression_param,
                                 &e);
      if (ret != TINYEXR_SUCCESS) {
        if (!e.empty() && err) {
          (*err) += e;
        }
        return ret;
      }

      // Record the absolute file offset of every tile, in file byte order.
      for (size_t j = 0; j < static_cast<size_t>(num_y_tiles); ++j)
        for (size_t i = 0; i < static_cast<size_t>(num_x_tiles); ++i) {
          offset_data.offsets[level_index][j][i] = offset;
          // Stored offsets are little-endian in the file; swap in place.
          swap8(reinterpret_cast<tinyexr_uint64*>(
              &offset_data.offsets[level_index][j][i]));
          offset += data_list[block_idx].size() + doffset;
          block_data_size += data_list[block_idx].size();
          ++block_idx;
        }
      level_image = level_image->next_level;
    }
    assert(block_idx == num_blocks);
    total_size = offset;
  } else { // scanlines
    std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
    std::atomic<bool> invalid_data(false);
    std::vector<std::thread> workers;
    std::atomic<int> block_count(0);  // work-stealing counter over blocks

    int num_threads = std::min(
        std::max(1, int(std::thread::hardware_concurrency())), num_blocks);

    for (int t = 0; t < num_threads; t++) {
      workers.emplace_back(std::thread([&]() {
        int i = 0;
        while ((i = block_count++) < num_blocks) {
#else
    bool invalid_data(false);
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
    for (int i = 0; i < num_blocks; i++) {
#endif
      int start_y = num_scanlines * i;
      // Last block may be shorter than num_scanlines.
      int end_Y = (std::min)(num_scanlines * (i + 1), exr_image->height);
      int num_lines = end_Y - start_y;

      const unsigned char* const* images =
          static_cast<const unsigned char* const*>(exr_image->images);

      // Reserve room for the 8-byte scanline chunk header (y, data length).
      data_list[i].resize(2*sizeof(int));
      size_t data_header_size = data_list[i].size();
      bool ret = EncodePixelData(data_list[i],
                                 images,
                                 exr_header->requested_pixel_types,
                                 exr_header->compression_type,
                                 0, // increasing y
                                 exr_image->width,
                                 exr_image->height,
                                 exr_image->width,
                                 start_y,
                                 num_lines,
                                 pixel_data_size,
                                 channels,
                                 channel_offset_list,
                                 compression_param);
      if (!ret) {
        invalid_data = true;
        continue; // "break" cannot be used with OpenMP
      }
      assert(data_list[i].size() > data_header_size);
      int data_len = static_cast<int>(data_list[i].size() - data_header_size);
      // Prepend the chunk header: starting y and payload byte count.
      memcpy(&data_list[i][0], &start_y, sizeof(int));
      memcpy(&data_list[i][4], &data_len, sizeof(int));

      swap4(reinterpret_cast<int*>(&data_list[i][0]));
      swap4(reinterpret_cast<int*>(&data_list[i][4]));
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
        }
})); } for (auto &t : workers) { t.join(); } #else } // omp parallel #endif if (invalid_data) { if (err) { (*err) += "Failed to encode scanline data.\n"; } return TINYEXR_ERROR_INVALID_DATA; } for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) { offsets[i] = offset; tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i])); offset += data_list[i].size() + doffset; } total_size = static_cast<size_t>(offset); } return TINYEXR_SUCCESS; } // can save a single or multi-part image (no deep* formats) static size_t SaveEXRNPartImageToMemory(const EXRImage* exr_images, const EXRHeader** exr_headers, unsigned int num_parts, unsigned char** memory_out, const char** err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0 || memory_out == NULL) { SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory", err); return 0; } { for (unsigned int i = 0; i < num_parts; ++i) { if (exr_headers[i]->compression_type < 0) { SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory", err); return 0; } #if !TINYEXR_USE_PIZ if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { SetErrorMessage("PIZ compression is not supported in this build", err); return 0; } #endif #if !TINYEXR_USE_ZFP if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { SetErrorMessage("ZFP compression is not supported in this build", err); return 0; } #else for (int c = 0; c < exr_header->num_channels; ++c) { if (exr_headers[i]->requested_pixel_types[c] != TINYEXR_PIXELTYPE_FLOAT) { SetErrorMessage("Pixel type must be FLOAT for ZFP compression", err); return 0; } } #endif } } std::vector<unsigned char> memory; // Header { const char header[] = { 0x76, 0x2f, 0x31, 0x01 }; memory.insert(memory.end(), header, header + 4); } // Version // using value from the first header int long_name = exr_headers[0]->long_name; { char marker[] = { 2, 0, 0, 0 }; /* @todo if (exr_header->non_image) { marker[1] |= 0x8; } */ // tiled if (num_parts == 1 && 
        exr_images[0].tiles) {
      marker[1] |= 0x2;  // version flag: single-part tiled
    }
    // long_name
    if (long_name) {
      marker[1] |= 0x4;
    }
    // multipart
    if (num_parts > 1) {
      marker[1] |= 0x10;
    }
    memory.insert(memory.end(), marker, marker + 4);
  }

  // Compute chunk (block) counts and pre-size the offset tables per part.
  int total_chunk_count = 0;
  std::vector<int> chunk_count(num_parts);
  std::vector<OffsetData> offset_data(num_parts);
  for (unsigned int i = 0; i < num_parts; ++i) {
    if (!exr_images[i].tiles) {
      int num_scanlines = NumScanlines(exr_headers[i]->compression_type);
      // ceil(height / num_scanlines)
      chunk_count[i] =
          (exr_images[i].height + num_scanlines - 1) / num_scanlines;
      InitSingleResolutionOffsets(offset_data[i], chunk_count[i]);
      total_chunk_count += chunk_count[i];
    } else {
      {
        std::vector<int> num_x_tiles, num_y_tiles;
        PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]);
        chunk_count[i] =
            InitTileOffsets(offset_data[i], exr_headers[i],
                            num_x_tiles, num_y_tiles);
        total_chunk_count += chunk_count[i];
      }
    }
  }
  // Write attributes to memory buffer.
  std::vector< std::vector<tinyexr::ChannelInfo> > channels(num_parts);
  {
    std::set<std::string> partnames;  // used to enforce unique part names
    for (unsigned int i = 0; i < num_parts; ++i) {
      //channels
      {
        std::vector<unsigned char> data;
        for (int c = 0; c < exr_headers[i]->num_channels; c++) {
          tinyexr::ChannelInfo info;
          info.p_linear = 0;
          info.pixel_type = exr_headers[i]->requested_pixel_types[c];
          info.x_sampling = 1;
          info.y_sampling = 1;
          info.name = std::string(exr_headers[i]->channels[c].name);
          channels[i].push_back(info);
        }
        tinyexr::WriteChannelInfo(data, channels[i]);
        tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist",
                                        &data.at(0),
                                        static_cast<int>(data.size()));
      }
      {
        int comp = exr_headers[i]->compression_type;
        swap4(&comp);
        WriteAttributeToMemory(
            &memory, "compression", "compression",
            reinterpret_cast<const unsigned char*>(&comp), 1);
      }
      {
        // dataWindow: inclusive bounds [0,0]..[w-1,h-1].
        int data[4] = { 0, 0,
                        exr_images[i].width - 1, exr_images[i].height - 1 };
        swap4(&data[0]);
        swap4(&data[1]);
        swap4(&data[2]);
        swap4(&data[3]);
        WriteAttributeToMemory(
            &memory, "dataWindow", "box2i",
            reinterpret_cast<const unsigned char*>(data),
            sizeof(int) * 4);

        int data0[4] = { 0, 0,
                         exr_images[0].width - 1, exr_images[0].height - 1 };
        swap4(&data0[0]);
        swap4(&data0[1]);
        swap4(&data0[2]);
        swap4(&data0[3]);

        // Note: must be the same across parts (currently, using value from the first header)
        WriteAttributeToMemory(
            &memory, "displayWindow", "box2i",
            reinterpret_cast<const unsigned char*>(data0),
            sizeof(int) * 4);
      }
      {
        unsigned char line_order = 0; // @fixme { read line_order from EXRHeader }
        WriteAttributeToMemory(&memory, "lineOrder", "lineOrder",
                               &line_order, 1);
      }
      {
        // Note: must be the same across parts
        float aspectRatio = 1.0f;
        swap4(&aspectRatio);
        WriteAttributeToMemory(
            &memory, "pixelAspectRatio", "float",
            reinterpret_cast<const unsigned char*>(&aspectRatio),
            sizeof(float));
      }
      {
        float center[2] = { 0.0f, 0.0f };
        swap4(&center[0]);
        swap4(&center[1]);
        WriteAttributeToMemory(
            &memory, "screenWindowCenter", "v2f",
            reinterpret_cast<const unsigned char*>(center),
            2 * sizeof(float));
      }
      {
        float w = 1.0f;
        swap4(&w);
        WriteAttributeToMemory(&memory, "screenWindowWidth", "float",
                               reinterpret_cast<const unsigned char*>(&w),
                               sizeof(float));
      }
      if (exr_images[i].tiles) {
        // tiledesc: two low bits = level mode, bit 4 = rounding mode.
        unsigned char tile_mode =
            static_cast<unsigned char>(exr_headers[i]->tile_level_mode & 0x3);
        if (exr_headers[i]->tile_rounding_mode) tile_mode |= (1u << 4u);
        //unsigned char data[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
        // 9-byte payload: xSize(4), ySize(4), mode(1); backed by int storage
        // so the two size fields can be swap4'd in place.
        unsigned int datai[3] = { 0, 0, 0 };
        unsigned char* data = reinterpret_cast<unsigned char*>(&datai[0]);
        datai[0] = static_cast<unsigned int>(exr_headers[i]->tile_size_x);
        datai[1] = static_cast<unsigned int>(exr_headers[i]->tile_size_y);
        data[8] = tile_mode;
        swap4(reinterpret_cast<unsigned int*>(&data[0]));
        swap4(reinterpret_cast<unsigned int*>(&data[4]));
        WriteAttributeToMemory(
            &memory, "tiles", "tiledesc",
            reinterpret_cast<const unsigned char*>(data), 9);
      }
      // must be present for multi-part files - according to spec.
      if (num_parts > 1) {
        // name
        {
          size_t len = 0;
          if ((len = strlen(exr_headers[i]->name)) > 0) {
            // Set growth proves uniqueness: a duplicate name keeps the size
            // at i instead of i + 1.
            partnames.insert(std::string(exr_headers[i]->name));
            if (partnames.size() != i + 1) {
              SetErrorMessage(
                  "'name' attributes must be unique for a multi-part file",
                  err);
              return 0;
            }
            WriteAttributeToMemory(
                &memory, "name", "string",
                reinterpret_cast<const unsigned char*>(exr_headers[i]->name),
                static_cast<int>(len));
          } else {
            SetErrorMessage("Invalid 'name' attribute for a multi-part file",
                            err);
            return 0;
          }
        }
        // type
        {
          const char* type = "scanlineimage";
          if (exr_images[i].tiles) type = "tiledimage";
          WriteAttributeToMemory(
              &memory, "type", "string",
              reinterpret_cast<const unsigned char*>(type),
              static_cast<int>(strlen(type)));
        }
        // chunkCount
        {
          WriteAttributeToMemory(
              &memory, "chunkCount", "int",
              reinterpret_cast<const unsigned char*>(&chunk_count[i]), 4);
        }
      }

      // Custom attributes
      if (exr_headers[i]->num_custom_attributes > 0) {
        for (int j = 0; j < exr_headers[i]->num_custom_attributes; j++) {
          tinyexr::WriteAttributeToMemory(
              &memory, exr_headers[i]->custom_attributes[j].name,
              exr_headers[i]->custom_attributes[j].type,
              reinterpret_cast<const unsigned char*>(
                  exr_headers[i]->custom_attributes[j].value),
              exr_headers[i]->custom_attributes[j].size);
        }
      }

      { // end of header
        memory.push_back(0);
      }
    }
  }
  if (num_parts > 1) {
    // end of header list
    memory.push_back(0);
  }

  // First chunk starts right after all headers plus the offset tables.
  tinyexr_uint64 chunk_offset =
      memory.size() + size_t(total_chunk_count) * sizeof(tinyexr_uint64);

  tinyexr_uint64 total_size = 0;
  std::vector< std::vector< std::vector<unsigned char> > >
      data_lists(num_parts);
  for (unsigned int i = 0; i < num_parts; ++i) {
    std::string e;
    int ret = EncodeChunk(&exr_images[i], exr_headers[i],
                          channels[i],
                          chunk_count[i],
                          // starting offset of current chunk after part-number
                          chunk_offset,
                          num_parts > 1,
                          offset_data[i], // output: block offsets, must be initialized
                          data_lists[i], // output
                          total_size, // output
                          &e);
    if (ret != TINYEXR_SUCCESS) {
      if (!e.empty()) {
        tinyexr::SetErrorMessage(e,
                                 err);
      }
      return 0;
    }
    chunk_offset = total_size;  // next part starts where this one ended
  }

  // Allocating required memory
  if (total_size == 0) { // something went wrong
    tinyexr::SetErrorMessage("Output memory size is zero", err);
    return 0;
  }
  // NOTE(review): malloc result is not NULL-checked before the memcpy below
  // — consider handling allocation failure.
  (*memory_out) = static_cast<unsigned char*>(malloc(total_size));

  // Writing header
  memcpy((*memory_out), &memory[0], memory.size());
  unsigned char* memory_ptr = *memory_out + memory.size();
  size_t sum = memory.size();  // running byte count, checked against total_size

  // Writing offset data for chunks
  for (unsigned int i = 0; i < num_parts; ++i) {
    if (exr_images[i].tiles) {
      const EXRImage* level_image = &exr_images[i];
      int num_levels =
          (exr_headers[i]->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS)
              ? offset_data[i].num_x_levels
              : (offset_data[i].num_x_levels * offset_data[i].num_y_levels);
      for (int level_index = 0; level_index < num_levels; ++level_index) {
        for (size_t j = 0;
             j < offset_data[i].offsets[level_index].size(); ++j) {
          size_t num_bytes = sizeof(tinyexr_uint64) *
              offset_data[i].offsets[level_index][j].size();
          sum += num_bytes;
          assert(sum <= total_size);
          memcpy(memory_ptr,
                 reinterpret_cast<unsigned char*>(
                     &offset_data[i].offsets[level_index][j][0]),
                 num_bytes);
          memory_ptr += num_bytes;
        }
        level_image = level_image->next_level;
      }
    } else {
      size_t num_bytes = sizeof(tinyexr::tinyexr_uint64) *
          static_cast<size_t>(chunk_count[i]);
      sum += num_bytes;
      assert(sum <= total_size);
      std::vector<tinyexr::tinyexr_uint64>& offsets =
          offset_data[i].offsets[0][0];
      memcpy(memory_ptr, reinterpret_cast<unsigned char*>(&offsets[0]),
             num_bytes);
      memory_ptr += num_bytes;
    }
  }

  // Writing chunk data
  for (unsigned int i = 0; i < num_parts; ++i) {
    for (size_t j = 0; j < static_cast<size_t>(chunk_count[i]); ++j) {
      if (num_parts > 1) {
        // Multi-part chunks are prefixed with the 4-byte part number.
        sum += 4;
        assert(sum <= total_size);
        unsigned int part_number = i;
        swap4(&part_number);
        memcpy(memory_ptr, &part_number, 4);
        memory_ptr += 4;
      }
      sum += data_lists[i][j].size();
      assert(sum <= total_size);
      memcpy(memory_ptr, &data_lists[i][j][0], data_lists[i][j].size());
      memory_ptr += data_lists[i][j].size();
    }
  }
assert(sum == total_size); return total_size; // OK } } // tinyexr size_t SaveEXRImageToMemory(const EXRImage* exr_image, const EXRHeader* exr_header, unsigned char** memory_out, const char** err) { return tinyexr::SaveEXRNPartImageToMemory(exr_image, &exr_header, 1, memory_out, err); } int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL || filename == NULL || exr_header->compression_type < 0) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { tinyexr::SetErrorMessage("PIZ compression is not supported in this build", err); return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { tinyexr::SetErrorMessage("ZFP compression is not supported in this build", err); return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } #endif FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename), err); return TINYEXR_ERROR_CANT_WRITE_FILE; } #else // Unknown compiler fp = fopen(filename, "wb"); #endif #else fp = fopen(filename, "wb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename), err); return TINYEXR_ERROR_CANT_WRITE_FILE; } unsigned char *mem = NULL; size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err); if (mem_size == 0) { return TINYEXR_ERROR_SERIALZATION_FAILED; } size_t written_size = 0; if ((mem_size > 0) && mem) { written_size = fwrite(mem, 1, mem_size, fp); } free(mem); fclose(fp); if (written_size != mem_size) { tinyexr::SetErrorMessage("Cannot write a file", err); return 
TINYEXR_ERROR_CANT_WRITE_FILE; } return TINYEXR_SUCCESS; } size_t SaveEXRMultipartImageToMemory(const EXRImage* exr_images, const EXRHeader** exr_headers, unsigned int num_parts, unsigned char** memory_out, const char** err) { if (exr_images == NULL || exr_headers == NULL || num_parts < 2 || memory_out == NULL) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory", err); return 0; } return tinyexr::SaveEXRNPartImageToMemory(exr_images, exr_headers, num_parts, memory_out, err); } int SaveEXRMultipartImageToFile(const EXRImage* exr_images, const EXRHeader** exr_headers, unsigned int num_parts, const char* filename, const char** err) { if (exr_images == NULL || exr_headers == NULL || num_parts < 2) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRMultipartImageToFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename), err); return TINYEXR_ERROR_CANT_WRITE_FILE; } #else // Unknown compiler fp = fopen(filename, "wb"); #endif #else fp = fopen(filename, "wb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename), err); return TINYEXR_ERROR_CANT_WRITE_FILE; } unsigned char *mem = NULL; size_t mem_size = SaveEXRMultipartImageToMemory(exr_images, exr_headers, num_parts, &mem, err); if (mem_size == 0) { return TINYEXR_ERROR_SERIALZATION_FAILED; } size_t written_size = 0; if ((mem_size > 0) && mem) { written_size = fwrite(mem, 1, mem_size, fp); } free(mem); fclose(fp); if (written_size != mem_size) { tinyexr::SetErrorMessage("Cannot write a file", err); return TINYEXR_ERROR_CANT_WRITE_FILE; } return TINYEXR_SUCCESS; } int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) { if (deep_image == NULL) { 
    tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _WIN32
  FILE *fp = NULL;
#if defined(_MSC_VER) || defined(__MINGW32__)  // MSVC, MinGW gcc or clang
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  FILE *fp = fopen(filename, "rb");
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#endif

  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  if (filesize == 0) {
    fclose(fp);
    tinyexr::SetErrorMessage("File size is zero : " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }

  // Slurp the whole file into memory.
  std::vector<char> buf(filesize); // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    (void)ret;
  }
  fclose(fp);

  const char *head = &buf[0];    // base pointer; chunk offsets are relative to it
  const char *marker = &buf[0];  // read cursor

  // Header check.
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};

    if (memcmp(marker, header, 4) != 0) {
      tinyexr::SetErrorMessage("Invalid magic number", err);
      return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
    }
    marker += 4;
  }

  // Version, scanline.
  {
    // ver 2.0, scanline, deep bit on(0x800)
    // must be [2, 0, 0, 0]
    if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 ||
        marker[3] != 0) {
      tinyexr::SetErrorMessage("Unsupported version or scanline", err);
      return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
    }

    marker += 4;
  }

  int dx = -1;
  int dy = -1;
  int dw = -1;
  int dh = -1;
  int num_scanline_blocks = 1; // 16 for ZIP compression.
  int compression_type = -1;
  int num_channels = -1;
  std::vector<tinyexr::ChannelInfo> channels;

  // Read attributes
  size_t size = filesize - tinyexr::kEXRVersionSize;
  for (;;) {
    if (0 == size) {
      // Ran out of data before the header terminator.
      return TINYEXR_ERROR_INVALID_DATA;
    } else if (marker[0] == '\0') {
      // Empty attribute name == end of header.
      marker++;
      size--;
      break;
    }

    std::string attr_name;
    std::string attr_type;
    std::vector<unsigned char> data;
    size_t marker_size;
    if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
                                marker, size)) {
      std::stringstream ss;
      ss << "Failed to parse attribute\n";
      tinyexr::SetErrorMessage(ss.str(), err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    marker += marker_size;
    size -= marker_size;

    if (attr_name.compare("compression") == 0) {
      compression_type = data[0];
      if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) {
        std::stringstream ss;
        ss << "Unsupported compression type : " << compression_type;
        tinyexr::SetErrorMessage(ss.str(), err);
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
      }

      if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
        num_scanline_blocks = 16;
      }

    } else if (attr_name.compare("channels") == 0) {
      // name: zero-terminated string, from 1 to 255 bytes long
      // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
      // pLinear: unsigned char, possible values are 0 and 1
      // reserved: three chars, should be zero
      // xSampling: int
      // ySampling: int

      if (!tinyexr::ReadChannelInfo(channels, data)) {
        tinyexr::SetErrorMessage("Failed to parse channel info", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }

      num_channels = static_cast<int>(channels.size());

      if (num_channels < 1) {
        tinyexr::SetErrorMessage("Invalid channels format", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }

    } else if (attr_name.compare("dataWindow") == 0) {
      // box2i: xMin, yMin, xMax, yMax (little-endian in the file).
      memcpy(&dx, &data.at(0), sizeof(int));
      memcpy(&dy, &data.at(4), sizeof(int));
      memcpy(&dw, &data.at(8), sizeof(int));
      memcpy(&dh, &data.at(12), sizeof(int));

      tinyexr::swap4(&dx);
      tinyexr::swap4(&dy);
      tinyexr::swap4(&dw);
      tinyexr::swap4(&dh);

    } else if (attr_name.compare("displayWindow") ==
               0) {
      // displayWindow is parsed but not used further.
      int x;
      int y;
      int w;
      int h;
      memcpy(&x, &data.at(0), sizeof(int));
      memcpy(&y, &data.at(4), sizeof(int));
      memcpy(&w, &data.at(8), sizeof(int));
      memcpy(&h, &data.at(12), sizeof(int));
      tinyexr::swap4(&x);
      tinyexr::swap4(&y);
      tinyexr::swap4(&w);
      tinyexr::swap4(&h);
    }
  }

  // dataWindow and channels are mandatory; -1 sentinels mean they were absent.
  assert(dx >= 0);
  assert(dy >= 0);
  assert(dw >= 0);
  assert(dh >= 0);
  assert(num_channels >= 1);

  int data_width = dw - dx + 1;
  int data_height = dh - dy + 1;

  // NOTE(review): `image` appears unused in the visible code below — verify
  // against the rest of the function.
  std::vector<float> image(
      static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA

  // Read offset tables.
  int num_blocks = data_height / num_scanline_blocks;
  if (num_blocks * num_scanline_blocks < data_height) {
    num_blocks++;  // partial trailing block
  }

  std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks));

  for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
    tinyexr::tinyexr_int64 offset;
    memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64));
    tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset));
    marker += sizeof(tinyexr::tinyexr_int64); // = 8
    offsets[y] = offset;
  }

#if TINYEXR_USE_PIZ
  if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) {
#else
  if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#endif
    // OK
  } else {
    tinyexr::SetErrorMessage("Unsupported compression format", err);
    return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
  }

  // Allocate per-channel row-pointer arrays; rows themselves are allocated
  // later, once the sample count per row is known.
  deep_image->image = static_cast<float ***>(
      malloc(sizeof(float **) * static_cast<size_t>(num_channels)));
  for (int c = 0; c < num_channels; c++) {
    deep_image->image[c] = static_cast<float **>(
        malloc(sizeof(float *) * static_cast<size_t>(data_height)));
    // NOTE(review): this inner loop has an empty body — presumably leftover
    // scaffolding; row buffers are allocated in the decode loop below.
    for (int y = 0; y < data_height; y++) {
    }
  }
deep_image->offset_table = static_cast<int **>( malloc(sizeof(int *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { deep_image->offset_table[y] = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(data_width))); } for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y]); // int: y coordinate // int64: packed size of pixel offset table // int64: packed size of sample data // int64: unpacked size of sample data // compressed pixel offset table // compressed sample data int line_no; tinyexr::tinyexr_int64 packedOffsetTableSize; tinyexr::tinyexr_int64 packedSampleDataSize; tinyexr::tinyexr_int64 unpackedSampleDataSize; memcpy(&line_no, data_ptr, sizeof(int)); memcpy(&packedOffsetTableSize, data_ptr + 4, sizeof(tinyexr::tinyexr_int64)); memcpy(&packedSampleDataSize, data_ptr + 12, sizeof(tinyexr::tinyexr_int64)); memcpy(&unpackedSampleDataSize, data_ptr + 20, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap4(&line_no); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize)); std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width)); // decode pixel offset table. { unsigned long dstLen = static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int)); if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)), &dstLen, data_ptr + 28, static_cast<unsigned long>(packedOffsetTableSize))) { return false; } assert(dstLen == pixelOffsetTable.size() * sizeof(int)); for (size_t i = 0; i < static_cast<size_t>(data_width); i++) { deep_image->offset_table[y][i] = pixelOffsetTable[i]; } } std::vector<unsigned char> sample_data( static_cast<size_t>(unpackedSampleDataSize)); // decode sample data. 
{ unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize); if (dstLen) { if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen, data_ptr + 28 + packedOffsetTableSize, static_cast<unsigned long>(packedSampleDataSize))) { return false; } assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize)); } } // decode sample int sampleSize = -1; std::vector<int> channel_offset_list(static_cast<size_t>(num_channels)); { int channel_offset = 0; for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) { channel_offset_list[i] = channel_offset; if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT channel_offset += 4; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half channel_offset += 2; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // float channel_offset += 4; } else { assert(0); } } sampleSize = channel_offset; } assert(sampleSize >= 2); assert(static_cast<size_t>( pixelOffsetTable[static_cast<size_t>(data_width - 1)] * sampleSize) == sample_data.size()); int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize; // // Alloc memory // // // pixel data is stored as image[channels][pixel_samples] // { tinyexr::tinyexr_uint64 data_offset = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { deep_image->image[c][y] = static_cast<float *>( malloc(sizeof(float) * static_cast<size_t>(samples_per_line))); if (channels[c].pixel_type == 0) { // UINT for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { unsigned int ui; unsigned int *src_ptr = reinterpret_cast<unsigned int *>( &sample_data.at(size_t(data_offset) + x * sizeof(int))); tinyexr::cpy4(&ui, src_ptr); deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme } data_offset += sizeof(unsigned int) * static_cast<size_t>(samples_per_line); } else if (channels[c].pixel_type == 1) { // half for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { 
            // Widen half-precision samples to float for the output image.
            tinyexr::FP16 f16;
            const unsigned short *src_ptr = reinterpret_cast<unsigned short *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(short)));
            tinyexr::cpy2(&(f16.u), src_ptr);
            tinyexr::FP32 f32 = half_to_float(f16);
            deep_image->image[c][y][x] = f32.f;
          }
          data_offset += sizeof(short) * static_cast<size_t>(samples_per_line);
        } else { // float
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            float f;
            const float *src_ptr = reinterpret_cast<float *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(float)));
            tinyexr::cpy4(&f, src_ptr);
            deep_image->image[c][y][x] = f;
          }
          data_offset += sizeof(float) * static_cast<size_t>(samples_per_line);
        }
      }
    }
  } // y

  deep_image->width = data_width;
  deep_image->height = data_height;

  // Duplicate channel name strings; ownership passes to deep_image.
  deep_image->channel_names = static_cast<const char **>(
      malloc(sizeof(const char *) * static_cast<size_t>(num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
#ifdef _WIN32
    deep_image->channel_names[c] = _strdup(channels[c].name.c_str());
#else
    deep_image->channel_names[c] = strdup(channels[c].name.c_str());
#endif
  }
  deep_image->num_channels = num_channels;

  return TINYEXR_SUCCESS;
}

// Reset an EXRImage to an empty, zeroed state.
void InitEXRImage(EXRImage *exr_image) {
  if (exr_image == NULL) {
    return;
  }

  exr_image->width = 0;
  exr_image->height = 0;
  exr_image->num_channels = 0;

  exr_image->images = NULL;
  exr_image->tiles = NULL;
  exr_image->next_level = NULL;
  exr_image->level_x = 0;
  exr_image->level_y = 0;

  exr_image->num_tiles = 0;
}

// Release an error message allocated by the library.
void FreeEXRErrorMessage(const char *msg) {
  if (msg) {
    free(reinterpret_cast<void *>(const_cast<char *>(msg)));
  }
  return;
}

// Zero-initialize an EXRHeader.
void InitEXRHeader(EXRHeader *exr_header) {
  if (exr_header == NULL) {
    return;
  }

  memset(exr_header, 0, sizeof(EXRHeader));
}

// Free all heap allocations owned by an EXRHeader.
int FreeEXRHeader(EXRHeader *exr_header) {
  if (exr_header == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (exr_header->channels) {
    free(exr_header->channels);
  }

  if (exr_header->pixel_types) {
    free(exr_header->pixel_types);
  }

  if (exr_header->requested_pixel_types) {
    free(exr_header->requested_pixel_types);
  }

  for (int i = 0; i < exr_header->num_custom_attributes; i++) {
    if (exr_header->custom_attributes[i].value) {
      free(exr_header->custom_attributes[i].value);
    }
  }

  if (exr_header->custom_attributes) {
    free(exr_header->custom_attributes);
  }

  // Clears the fixed-size name buffer (NULL name == clear).
  EXRSetNameAttr(exr_header, NULL);

  return TINYEXR_SUCCESS;
}

// Set (or clear, when `name` is NULL) the part name stored in the header.
// The name is truncated to 255 bytes; the 256-byte buffer is zero-filled
// first so the result is always NUL-terminated.
void EXRSetNameAttr(EXRHeader* exr_header, const char* name) {
  if (exr_header == NULL) {
    return;
  }
  memset(exr_header->name, 0, 256);
  if (name != NULL) {
    size_t len = std::min(strlen(name), (size_t)255);
    if (len) {
      memcpy(exr_header->name, name, len);
    }
  }
}

// Count mip/rip levels by walking the next_level chain.
// A scanline image (images != NULL) always reports 1.
int EXRNumLevels(const EXRImage* exr_image) {
  if (exr_image == NULL) return 0;
  if(exr_image->images) return 1; // scanlines
  int levels = 1;
  const EXRImage* level_image = exr_image;
  while((level_image = level_image->next_level)) ++levels;
  return levels;
}

// Free all heap allocations owned by an EXRImage, recursing into lower
// mip/rip levels first.
int FreeEXRImage(EXRImage *exr_image) {
  if (exr_image == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (exr_image->next_level) {
    FreeEXRImage(exr_image->next_level);
    delete exr_image->next_level;
  }

  for (int i = 0; i < exr_image->num_channels; i++) {
    if (exr_image->images && exr_image->images[i]) {
      free(exr_image->images[i]);
    }
  }

  if (exr_image->images) {
    free(exr_image->images);
  }

  if (exr_image->tiles) {
    for (int tid = 0; tid < exr_image->num_tiles; tid++) {
      for (int i = 0; i < exr_image->num_channels; i++) {
        if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) {
          free(exr_image->tiles[tid].images[i]);
        }
      }
      if (exr_image->tiles[tid].images) {
        free(exr_image->tiles[tid].images);
      }
    }
    free(exr_image->tiles);
  }

  return TINYEXR_SUCCESS;
}

// Parse only the header of a (single-part) EXR file on disk.
int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version,
                           const char *filename, const char **err) {
  if (exr_header == NULL || exr_version == NULL || filename == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__)
// MSVC, MinGW gcc or clang errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } #else // Unknown compiler fp = fopen(filename, "rb"); #endif #else fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { tinyexr::SetErrorMessage("fread() error on " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize, err); } int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_headers == NULL || num_headers == NULL || exr_version == NULL) { // Invalid argument tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { tinyexr::SetErrorMessage("Data size too short", err); return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; std::vector<tinyexr::HeaderInfo> infos; for (;;) { tinyexr::HeaderInfo info; info.clear(); std::string err_str; bool empty_header = false; int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage(err_str, err); return ret; } if (empty_header) { marker += 1; // skip '\0' 
break; } // `chunkCount` must exist in the header. if (info.chunk_count == 0) { tinyexr::SetErrorMessage( "`chunkCount' attribute is not found in the header.", err); return TINYEXR_ERROR_INVALID_DATA; } infos.push_back(info); // move to next header. marker += info.header_len; size -= info.header_len; } // allocate memory for EXRHeader and create array of EXRHeader pointers. (*exr_headers) = static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size())); for (size_t i = 0; i < infos.size(); i++) { EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader))); memset(exr_header, 0, sizeof(EXRHeader)); ConvertHeader(exr_header, infos[i]); exr_header->multipart = exr_version->multipart ? 1 : 0; (*exr_headers)[i] = exr_header; } (*num_headers) = static_cast<int>(infos.size()); return TINYEXR_SUCCESS; } int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_headers == NULL || num_headers == NULL || exr_version == NULL || filename == NULL) { tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromFile()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } #else // Unknown compiler fp = fopen(filename, "rb"); #endif #else fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); 
assert(ret == filesize);
    fclose(fp);

    if (ret != filesize) {
      tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }

  // Delegate the actual header parsing to the in-memory variant.
  return ParseEXRMultipartHeaderFromMemory(
      exr_headers, num_headers, exr_version, &buf.at(0), filesize, err);
}

// Parses the 8-byte EXR preamble (4-byte magic number + 4-byte version field)
// from `memory` into `version`. Does not read past the version field.
// Returns TINYEXR_SUCCESS, or an error code for a NULL argument, a too-short
// buffer, a bad magic number, or an unsupported version.
int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory,
                              size_t size) {
  if (version == NULL || memory == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (size < tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_DATA;
  }

  const unsigned char *marker = memory;

  // Magic-number check: 0x76 0x2f 0x31 0x01 ("v/1\x01") per the OpenEXR
  // file layout.
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};

    if (memcmp(marker, header, 4) != 0) {
      return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
    }
    marker += 4;
  }

  version->tiled = false;
  version->long_name = false;
  version->non_image = false;
  version->multipart = false;

  // Parse the 4-byte version field: byte 0 is the version number, byte 1
  // carries the feature flag bits.
  {
    // Only EXR version 2 is supported.
    if (marker[0] != 2) {
      return TINYEXR_ERROR_INVALID_EXR_VERSION;
    }

    // NOTE(review): dead code — `version` was already null-checked at the top
    // of this function, so this branch can never be taken.
    if (version == NULL) {
      return TINYEXR_SUCCESS;  // May OK
    }

    version->version = 2;

    if (marker[1] & 0x2) {  // bit 9 of the version word: single-part tiled
      version->tiled = true;
    }
    if (marker[1] & 0x4) {  // bit 10: long attribute/channel names allowed
      version->long_name = true;
    }
    if (marker[1] & 0x8) {  // bit 11: non-image parts present (deep image)
      version->non_image = true;  // (deep image)
    }
    if (marker[1] & 0x10) {  // bit 12: multi-part file
      version->multipart = true;
    }
  }

  return TINYEXR_SUCCESS;
}

// Reads just the first kEXRVersionSize bytes of `filename` and parses them
// with ParseEXRVersionFromMemory(). Cheap way to sniff whether a file is EXR
// and which features it uses, without loading the whole file.
int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) {
  if (filename == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__)
  // MSVC, MinGW gcc or clang: treat `filename` as UTF-8 and open via the
  // wide-character API.
  errno_t err = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (err != 0) {
    // TODO(syoyo): return wfopen_s erro code
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t file_size;
  // Compute size by seeking to the end; only the small version preamble is
  // actually read afterwards.
  fseek(fp, 0, SEEK_END);
  file_size = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET); if (file_size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } unsigned char buf[tinyexr::kEXRVersionSize]; size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp); fclose(fp); if (ret != tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize); } int LoadEXRMultipartImageFromMemory(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const unsigned char *memory, const size_t size, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0 || memory == NULL || (size <= tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage( "Invalid argument for LoadEXRMultipartImageFromMemory()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } // compute total header size. size_t total_header_size = 0; for (unsigned int i = 0; i < num_parts; i++) { if (exr_headers[i]->header_len == 0) { tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } total_header_size += exr_headers[i]->header_len; } const char *marker = reinterpret_cast<const char *>( memory + total_header_size + 4 + 4); // +8 for magic number and version header. marker += 1; // Skip empty header. // NOTE 1: // In multipart image, There is 'part number' before chunk data. // 4 byte : part number // 4+ : chunk // // NOTE 2: // EXR spec says 'part number' is 'unsigned long' but actually this is // 'unsigned int(4 bytes)' in OpenEXR implementation... // http://www.openexr.com/openexrfilelayout.pdf // Load chunk offset table. 
std::vector<tinyexr::OffsetData> chunk_offset_table_list; chunk_offset_table_list.reserve(num_parts); for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { chunk_offset_table_list.resize(chunk_offset_table_list.size() + 1); tinyexr::OffsetData& offset_data = chunk_offset_table_list.back(); if (!exr_headers[i]->tiled || exr_headers[i]->tile_level_mode == TINYEXR_TILE_ONE_LEVEL) { tinyexr::InitSingleResolutionOffsets(offset_data, exr_headers[i]->chunk_count); std::vector<tinyexr::tinyexr_uint64>& offset_table = offset_data.offsets[0][0]; for (size_t c = 0; c < offset_table.size(); c++) { tinyexr::tinyexr_uint64 offset; memcpy(&offset, marker, 8); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } offset_table[c] = offset + 4; // +4 to skip 'part number' marker += 8; } } else { { std::vector<int> num_x_tiles, num_y_tiles; tinyexr::PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]); int num_blocks = InitTileOffsets(offset_data, exr_headers[i], num_x_tiles, num_y_tiles); if (num_blocks != exr_headers[i]->chunk_count) { tinyexr::SetErrorMessage("Invalid offset table size.", err); return TINYEXR_ERROR_INVALID_DATA; } } for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) { for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) { for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) { tinyexr::tinyexr_uint64 offset; memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64)); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } offset_data.offsets[l][dy][dx] = offset + 4; // +4 to skip 'part number' marker += sizeof(tinyexr::tinyexr_uint64); // = 8 } } } } } // Decode image. 
for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { tinyexr::OffsetData &offset_data = chunk_offset_table_list[i]; // First check 'part number' is identitical to 'i' for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) { const unsigned char *part_number_addr = memory + offset_data.offsets[l][dy][dx] - 4; // -4 to move to 'part number' field. unsigned int part_no; memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4 tinyexr::swap4(&part_no); if (part_no != i) { tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } } std::string e; int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_data, memory, size, &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty()) { tinyexr::SetErrorMessage(e, err); } return ret; } } return TINYEXR_SUCCESS; } int LoadEXRMultipartImageFromFile(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const char *filename, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0) { tinyexr::SetErrorMessage( "Invalid argument for LoadEXRMultipartImageFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #else // Unknown compiler fp = fopen(filename, "rb"); #endif #else fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, 
SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts, &buf.at(0), filesize, err); } int SaveEXR(const float *data, int width, int height, int components, const int save_as_fp16, const char *outfilename, const char **err) { if ((components == 1) || components == 3 || components == 4) { // OK } else { std::stringstream ss; ss << "Unsupported component value : " << components << std::endl; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRHeader header; InitEXRHeader(&header); if ((width < 16) && (height < 16)) { // No compression for small image. header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE; } else { header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP; } EXRImage image; InitEXRImage(&image); image.num_channels = components; std::vector<float> images[4]; if (components == 1) { images[0].resize(static_cast<size_t>(width * height)); memcpy(images[0].data(), data, sizeof(float) * size_t(width * height)); } else { images[0].resize(static_cast<size_t>(width * height)); images[1].resize(static_cast<size_t>(width * height)); images[2].resize(static_cast<size_t>(width * height)); images[3].resize(static_cast<size_t>(width * height)); // Split RGB(A)RGB(A)RGB(A)... 
into R, G and B(and A) layers for (size_t i = 0; i < static_cast<size_t>(width * height); i++) { images[0][i] = data[static_cast<size_t>(components) * i + 0]; images[1][i] = data[static_cast<size_t>(components) * i + 1]; images[2][i] = data[static_cast<size_t>(components) * i + 2]; if (components == 4) { images[3][i] = data[static_cast<size_t>(components) * i + 3]; } } } float *image_ptr[4] = {0, 0, 0, 0}; if (components == 4) { image_ptr[0] = &(images[3].at(0)); // A image_ptr[1] = &(images[2].at(0)); // B image_ptr[2] = &(images[1].at(0)); // G image_ptr[3] = &(images[0].at(0)); // R } else if (components == 3) { image_ptr[0] = &(images[2].at(0)); // B image_ptr[1] = &(images[1].at(0)); // G image_ptr[2] = &(images[0].at(0)); // R } else if (components == 1) { image_ptr[0] = &(images[0].at(0)); // A } image.images = reinterpret_cast<unsigned char **>(image_ptr); image.width = width; image.height = height; header.num_channels = components; header.channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels))); // Must be (A)BGR order, since most of EXR viewers expect this channel order. 
if (components == 4) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); strncpy_s(header.channels[1].name, "B", 255); strncpy_s(header.channels[2].name, "G", 255); strncpy_s(header.channels[3].name, "R", 255); #else strncpy(header.channels[0].name, "A", 255); strncpy(header.channels[1].name, "B", 255); strncpy(header.channels[2].name, "G", 255); strncpy(header.channels[3].name, "R", 255); #endif header.channels[0].name[strlen("A")] = '\0'; header.channels[1].name[strlen("B")] = '\0'; header.channels[2].name[strlen("G")] = '\0'; header.channels[3].name[strlen("R")] = '\0'; } else if (components == 3) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "B", 255); strncpy_s(header.channels[1].name, "G", 255); strncpy_s(header.channels[2].name, "R", 255); #else strncpy(header.channels[0].name, "B", 255); strncpy(header.channels[1].name, "G", 255); strncpy(header.channels[2].name, "R", 255); #endif header.channels[0].name[strlen("B")] = '\0'; header.channels[1].name[strlen("G")] = '\0'; header.channels[2].name[strlen("R")] = '\0'; } else { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); #else strncpy(header.channels[0].name, "A", 255); #endif header.channels[0].name[strlen("A")] = '\0'; } header.pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); header.requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); for (int i = 0; i < header.num_channels; i++) { header.pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image if (save_as_fp16 > 0) { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format } else { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e. 
// no precision reduction) } } int ret = SaveEXRImageToFile(&image, &header, outfilename, err); if (ret != TINYEXR_SUCCESS) { return ret; } free(header.channels); free(header.pixel_types); free(header.requested_pixel_types); return ret; } #ifdef __clang__ // zero-as-null-ppinter-constant #pragma clang diagnostic pop #endif #endif // TINYEXR_IMPLEMENTATION_DEFINED #endif // TINYEXR_IMPLEMENTATION
10_omp_empty.c
// clang-format off // RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -S 2>&1 | %filecheck %s // REQUIRES: openmp // clang-format on #include "omp.h" // CHECK-NOT: {{.*}} __typeart_alloc void foo(int* x) { #pragma omp parallel // transformed to @__kmpc_fork_call { *x = -1; } #pragma omp parallel for for (int i = 0; i < x[10]; ++i) { x[i] = i; } } // Standard filter // CHECK: > Stack Memory // CHECK-NEXT: Alloca : // CHECK-NEXT: Stack call filtered % : 100.00
paint.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP AAA IIIII N N TTTTT % % P P A A I NN N T % % PPPP AAAAA I N N N T % % P A A I N NN T % % P A A IIIII N N T % % % % % % Methods to Paint on an Image % % % % Software Design % % John Cristy % % July 1998 % % % % % % Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/cache.h" #include "magick/channel.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/draw.h" #include "magick/draw-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/paint.h" #include "magick/pixel-private.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/thread-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l o o d f i l l P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FloodfillPaintImage() changes the color value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % By default target must match a particular pixel color exactly. % However, in many cases two colors may differ by a small amount. The % fuzz member of image defines how much tolerance is acceptable to % consider two colors as the same. For example, set fuzz to 10 and the % color red at intensities of 100 and 102 respectively are now % interpreted as the same color for the purposes of the floodfill. % % The format of the FloodfillPaintImage method is: % % MagickBooleanType FloodfillPaintImage(Image *image, % const ChannelType channel,const DrawInfo *draw_info, % const MagickPixelPacket target,const ssize_t x_offset, % const ssize_t y_offset,const MagickBooleanType invert) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel(s). % % o draw_info: the draw info. % % o target: the RGB value of the target color. 
% % o x_offset,y_offset: the starting location of the operation. % % o invert: paint any pixel that does not match the target color. % */ MagickExport MagickBooleanType FloodfillPaintImage(Image *image, const ChannelType channel,const DrawInfo *draw_info, const MagickPixelPacket *target,const ssize_t x_offset,const ssize_t y_offset, const MagickBooleanType invert) { #define MaxStacksize 131072UL #define PushSegmentStack(up,left,right,delta) \ { \ if (s >= (segment_stack+MaxStacksize)) \ ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \ else \ { \ if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \ { \ s->x1=(double) (left); \ s->y1=(double) (up); \ s->x2=(double) (right); \ s->y2=(double) (delta); \ s++; \ } \ } \ } CacheView *floodplane_view, *image_view; ExceptionInfo *exception; Image *floodplane_image; MagickBooleanType skip; MagickPixelPacket fill, pixel; PixelPacket fill_color; register SegmentInfo *s; SegmentInfo *segment_stack; ssize_t offset, start, x, x1, x2, y; /* Check boundary conditions. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickSignature); if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns)) return(MagickFalse); if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows)) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) TransformImageColorspace(image,RGBColorspace); if ((image->matte == MagickFalse) && (draw_info->fill.opacity != OpaqueOpacity)) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); /* Set floodfill state. 
*/ floodplane_image=CloneImage(image,0,0,MagickTrue,&image->exception); if (floodplane_image == (Image *) NULL) return(MagickFalse); (void) SetImageAlphaChannel(floodplane_image,OpaqueAlphaChannel); segment_stack=(SegmentInfo *) AcquireQuantumMemory(MaxStacksize, sizeof(*segment_stack)); if (segment_stack == (SegmentInfo *) NULL) { floodplane_image=DestroyImage(floodplane_image); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } /* Push initial segment on stack. */ exception=(&image->exception); x=x_offset; y=y_offset; start=0; s=segment_stack; PushSegmentStack(y,x,x,1); PushSegmentStack(y+1,x,x,-1); GetMagickPixelPacket(image,&fill); GetMagickPixelPacket(image,&pixel); image_view=AcquireVirtualCacheView(image,exception); floodplane_view=AcquireAuthenticCacheView(floodplane_image,exception); while (s > segment_stack) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ssize_t x; register PixelPacket *restrict q; /* Pop segment off stack. */ s--; x1=(ssize_t) s->x1; x2=(ssize_t) s->x2; offset=(ssize_t) s->y2; y=(ssize_t) s->y1+offset; /* Recolor neighboring pixels. */ p=GetCacheViewVirtualPixels(image_view,0,y,(size_t) (x1+1),1,exception); q=GetCacheViewAuthenticPixels(floodplane_view,0,y,(size_t) (x1+1),1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; indexes=GetCacheViewVirtualIndexQueue(image_view); p+=x1; q+=x1; for (x=x1; x >= 0; x--) { if (q->opacity == (Quantum) TransparentOpacity) break; SetMagickPixelPacket(image,p,indexes+x,&pixel); if (IsMagickColorSimilar(&pixel,target) == invert) break; q->opacity=(Quantum) TransparentOpacity; p--; q--; } if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse) break; skip=x >= x1 ? 
MagickTrue : MagickFalse; if (skip == MagickFalse) { start=x+1; if (start < x1) PushSegmentStack(y,start,x1-1,-offset); x=x1+1; } do { if (skip == MagickFalse) { if (x < (ssize_t) image->columns) { p=GetCacheViewVirtualPixels(image_view,x,y,image->columns-x,1, exception); q=GetCacheViewAuthenticPixels(floodplane_view,x,y, image->columns-x,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; indexes=GetCacheViewVirtualIndexQueue(image_view); for ( ; x < (ssize_t) image->columns; x++) { if (q->opacity == (Quantum) TransparentOpacity) break; SetMagickPixelPacket(image,p,indexes+x,&pixel); if (IsMagickColorSimilar(&pixel,target) == invert) break; q->opacity=(Quantum) TransparentOpacity; p++; q++; } if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse) break; } PushSegmentStack(y,start,x-1,offset); if (x > (x2+1)) PushSegmentStack(y,x2+1,x-1,-offset); } skip=MagickFalse; x++; if (x <= x2) { p=GetCacheViewVirtualPixels(image_view,x,y,(size_t) (x2-x+1),1, exception); q=GetCacheViewAuthenticPixels(floodplane_view,x,y,(size_t) (x2-x+1),1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; indexes=GetCacheViewVirtualIndexQueue(image_view); for ( ; x <= x2; x++) { if (q->opacity == (Quantum) TransparentOpacity) break; SetMagickPixelPacket(image,p,indexes+x,&pixel); if (IsMagickColorSimilar(&pixel,target) != invert) break; p++; q++; } } start=x; } while (x <= x2); } for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *restrict p; register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; /* Tile fill color onto floodplane. 
*/ p=GetCacheViewVirtualPixels(floodplane_view,0,y,image->columns,1, exception); q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelOpacity(p) != OpaqueOpacity) { (void) GetFillColor(draw_info,x,y,&fill_color); SetMagickPixelPacket(image,&fill_color,(IndexPacket *) NULL,&fill); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&fill); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(fill.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(fill.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(fill.blue)); if (((channel & OpacityChannel) != 0) || (draw_info->fill.opacity != OpaqueOpacity)) SetPixelOpacity(q,ClampToQuantum(fill.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum(fill.index)); } p++; q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) break; } floodplane_view=DestroyCacheView(floodplane_view); image_view=DestroyCacheView(image_view); segment_stack=(SegmentInfo *) RelinquishMagickMemory(segment_stack); floodplane_image=DestroyImage(floodplane_image); return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G r a d i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GradientImage() applies a continuously smooth color transitions along a % vector from one color to another. % % Note, the interface of this method will change in the future to support % more than one transistion. 
% % The format of the GradientImage method is: % % MagickBooleanType GradientImage(Image *image,const GradientType type, % const SpreadMethod method,const PixelPacket *start_color, % const PixelPacket *stop_color) % % A description of each parameter follows: % % o image: the image. % % o type: the gradient type: linear or radial. % % o spread: the gradient spread meathod: pad, reflect, or repeat. % % o start_color: the start color. % % o stop_color: the stop color. % % This provides a good example of making use of the DrawGradientImage % function and the gradient structure in draw_info. */ static inline double MagickMax(const double x,const double y) { return(x > y ? x : y); } MagickExport MagickBooleanType GradientImage(Image *image, const GradientType type,const SpreadMethod method, const PixelPacket *start_color,const PixelPacket *stop_color) { DrawInfo *draw_info; GradientInfo *gradient; MagickBooleanType status; register ssize_t i; /* Set gradient start-stop end points. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(start_color != (const PixelPacket *) NULL); assert(stop_color != (const PixelPacket *) NULL); draw_info=AcquireDrawInfo(); gradient=(&draw_info->gradient); gradient->type=type; gradient->bounding_box.width=image->columns; gradient->bounding_box.height=image->rows; gradient->gradient_vector.x2=(double) image->columns-1.0; gradient->gradient_vector.y2=(double) image->rows-1.0; if ((type == LinearGradient) && (gradient->gradient_vector.y2 != 0.0)) gradient->gradient_vector.x2=0.0; gradient->center.x=(double) gradient->gradient_vector.x2/2.0; gradient->center.y=(double) gradient->gradient_vector.y2/2.0; gradient->radius=MagickMax(gradient->center.x,gradient->center.y); gradient->spread=method; /* Define the gradient to fill between the stops. 
*/ gradient->number_stops=2; gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops, sizeof(*gradient->stops)); if (gradient->stops == (StopInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) ResetMagickMemory(gradient->stops,0,gradient->number_stops* sizeof(*gradient->stops)); for (i=0; i < (ssize_t) gradient->number_stops; i++) GetMagickPixelPacket(image,&gradient->stops[i].color); SetMagickPixelPacket(image,start_color,(IndexPacket *) NULL, &gradient->stops[0].color); gradient->stops[0].offset=0.0; SetMagickPixelPacket(image,stop_color,(IndexPacket *) NULL, &gradient->stops[1].color); gradient->stops[1].offset=1.0; /* Draw a gradient on the image. */ status=DrawGradientImage(image,draw_info); draw_info=DestroyDrawInfo(draw_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O i l P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OilPaintImage() applies a special effect filter that simulates an oil % painting. Each pixel is replaced by the most frequent color occurring % in a circular region defined by radius. % % The format of the OilPaintImage method is: % % Image *OilPaintImage(const Image *image,const double radius, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the circular neighborhood. % % o exception: return any errors or warnings in this structure. 
%
*/

/*
  Releases a per-thread histogram set created by AcquireHistogramThreadSet():
  frees each thread's histogram buffer, then the pointer array itself.
  Always returns NULL so callers can reassign in one statement.
*/
static size_t **DestroyHistogramThreadSet(size_t **histogram)
{
  register ssize_t
    i;

  assert(histogram != (size_t **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (histogram[i] != (size_t *) NULL)
      histogram[i]=(size_t *) RelinquishMagickMemory(histogram[i]);
  histogram=(size_t **) RelinquishMagickMemory(histogram);
  return(histogram);
}

/*
  Allocates one histogram of `count` bins per worker thread (one slot per
  ThreadResource limit), so each OpenMP thread in OilPaintImage() can fill
  its own histogram without locking.  Returns NULL on allocation failure,
  after releasing any partially allocated set.
*/
static size_t **AcquireHistogramThreadSet(const size_t count)
{
  register ssize_t
    i;

  size_t
    **histogram,
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  histogram=(size_t **) AcquireQuantumMemory(number_threads,
    sizeof(*histogram));
  if (histogram == (size_t **) NULL)
    return((size_t **) NULL);
  /* Zero the pointer array so a partial-failure cleanup can tell which
     entries were allocated. */
  (void) ResetMagickMemory(histogram,0,number_threads*sizeof(*histogram));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    histogram[i]=(size_t *) AcquireQuantumMemory(count,
      sizeof(**histogram));
    if (histogram[i] == (size_t *) NULL)
      return(DestroyHistogramThreadSet(histogram));
  }
  return(histogram);
}

MagickExport Image *OilPaintImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
#define NumberPaintBins  256
#define OilPaintImageTag  "OilPaint/Image"

  CacheView
    *image_view,
    *paint_view;

  Image
    *linear_image,
    *paint_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    **restrict histograms,
    width;

  ssize_t
    y;

  /*
    Initialize painted image attributes.
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); width=GetOptimalKernelWidth2D(radius,0.5); linear_image=CloneImage(image,0,0,MagickTrue,exception); paint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if ((linear_image == (Image *) NULL) || (paint_image == (Image *) NULL)) { if (linear_image != (Image *) NULL) linear_image=DestroyImage(linear_image); if (paint_image != (Image *) NULL) linear_image=DestroyImage(paint_image); return((Image *) NULL); } if (SetImageStorageClass(paint_image,DirectClass) == MagickFalse) { InheritException(exception,&paint_image->exception); linear_image=DestroyImage(linear_image); paint_image=DestroyImage(paint_image); return((Image *) NULL); } histograms=AcquireHistogramThreadSet(NumberPaintBins); if (histograms == (size_t **) NULL) { linear_image=DestroyImage(linear_image); paint_image=DestroyImage(paint_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Oil paint image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(linear_image,exception); paint_view=AcquireAuthenticCacheView(paint_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(linear_image,paint_image,linear_image->rows,1) #endif for (y=0; y < (ssize_t) linear_image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register IndexPacket *restrict paint_indexes; register ssize_t x; register PixelPacket *restrict q; register size_t *histogram; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) (width/2L),linear_image->columns+width,width,exception); q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); paint_indexes=GetCacheViewAuthenticIndexQueue(paint_view); histogram=histograms[GetOpenMPThreadId()]; for (x=0; x < (ssize_t) linear_image->columns; x++) { register ssize_t i, u; size_t count; ssize_t j, k, v; /* Assign most frequent color. 
*/ i=0; j=0; count=0; (void) ResetMagickMemory(histogram,0,NumberPaintBins*sizeof(*histogram)); for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { k=(ssize_t) ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity( linear_image,p+u+i))); histogram[k]++; if (histogram[k] > count) { j=i+u; count=histogram[k]; } } i+=(ssize_t) (linear_image->columns+width); } *q=(*(p+j)); if (linear_image->colorspace == CMYKColorspace) SetPixelIndex(paint_indexes+x,GetPixelIndex(indexes+x+j)); p++; q++; } if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_OilPaintImage) #endif proceed=SetImageProgress(image,OilPaintImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } paint_view=DestroyCacheView(paint_view); image_view=DestroyCacheView(image_view); histograms=DestroyHistogramThreadSet(histograms); linear_image=DestroyImage(linear_image); if (status == MagickFalse) paint_image=DestroyImage(paint_image); return(paint_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O p a q u e P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpaquePaintImage() changes any pixel that matches color with the color % defined by fill. % % By default color must match a particular pixel color exactly. However, % in many cases two colors may differ by a small amount. Fuzz defines % how much tolerance is acceptable to consider two colors as the same. % For example, set fuzz to 10 and the color red at intensities of 100 and % 102 respectively are now interpreted as the same color. 
%
%  The format of the OpaquePaintImage method is:
%
%      MagickBooleanType OpaquePaintImage(Image *image,
%        const MagickPixelPacket *target,const MagickPixelPacket *fill,
%        const MagickBooleanType invert)
%      MagickBooleanType OpaquePaintImageChannel(Image *image,
%        const ChannelType channel,const MagickPixelPacket *target,
%        const MagickPixelPacket *fill,const MagickBooleanType invert)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel(s).
%
%    o target: the RGB value of the target color.
%
%    o fill: the replacement color.
%
%    o invert: paint any pixel that does not match the target color.
%
*/

MagickExport MagickBooleanType OpaquePaintImage(Image *image,
  const MagickPixelPacket *target,const MagickPixelPacket *fill,
  const MagickBooleanType invert)
{
  /* Convenience wrapper: paint across all composite channels. */
  return(OpaquePaintImageChannel(image,CompositeChannels,target,fill,invert));
}

MagickExport MagickBooleanType OpaquePaintImageChannel(Image *image,
  const ChannelType channel,const MagickPixelPacket *target,
  const MagickPixelPacket *fill,const MagickBooleanType invert)
{
#define OpaquePaintImageTag  "Opaque/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(target != (MagickPixelPacket *) NULL);
  assert(fill != (MagickPixelPacket *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* a non-gray fill on a gray image forces promotion to sRGB */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsMagickGray(fill) == MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace);
  /* a translucent fill requires an alpha channel on the target image */
  if ((fill->opacity != OpaqueOpacity) && (image->matte == MagickFalse))
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Make image color opaque.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      /* invert flips the sense of the fuzzy match */
      if (IsMagickColorSimilar(&pixel,target) != invert)
        {
          /* only the channels selected by the caller are overwritten */
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(fill->red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(fill->green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(fill->blue));
          if ((channel & OpacityChannel) != 0)
            SetPixelOpacity(q,ClampToQuantum(fill->opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(indexes+x,ClampToQuantum(fill->index));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_OpaquePaintImageChannel)
#endif
        proceed=SetImageProgress(image,OpaquePaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T r a n s p a r e n t P a i n t I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransparentPaintImage() changes the opacity value associated with any pixel
%  that matches color to the value defined by opacity.
%
%  By default color must match a particular pixel color exactly.  However,
%  in many cases two colors may differ by a small amount.  Fuzz defines
%  how much tolerance is acceptable to consider two colors as the same.
%  For example, set fuzz to 10 and the color red at intensities of 100 and
%  102 respectively are now interpreted as the same color.
%
%  The format of the TransparentPaintImage method is:
%
%      MagickBooleanType TransparentPaintImage(Image *image,
%        const MagickPixelPacket *target,const Quantum opacity,
%        const MagickBooleanType invert)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o target: the target color.
%
%    o opacity: the replacement opacity value.
%
%    o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType TransparentPaintImage(Image *image,
  const MagickPixelPacket *target,const Quantum opacity,
  const MagickBooleanType invert)
{
#define TransparentPaintImageTag  "Transparent/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(target != (MagickPixelPacket *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* writing opacity requires an alpha channel on the target image */
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Make image color transparent.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      /* invert flips the sense of the fuzzy match */
      if (IsMagickColorSimilar(&pixel,target) != invert)
        q->opacity=opacity;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransparentPaintImage)
#endif
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T r a n s p a r e n t P a i n t I m a g e C h r o m a                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransparentPaintImageChroma() changes the opacity value associated with any
%  pixel that matches color to the value defined by opacity.
%
%  As there is one fuzz value for all the channels, the
%  TransparentPaintImage() API is not suitable for operations like chroma,
%  where the tolerance for similarity of two color components (RGB) can be
%  different.  Thus we define this method to take two target pixels (one
%  low and one high) and all the pixels of an image which are lying between
%  these two pixels are made transparent.
%
%  The format of the TransparentPaintImageChroma method is:
%
%      MagickBooleanType TransparentPaintImageChroma(Image *image,
%        const MagickPixelPacket *low,const MagickPixelPacket *high,
%        const Quantum opacity,const MagickBooleanType invert)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o low: the low target color.
%
%    o high: the high target color.
%
%    o opacity: the replacement opacity value.
%
%    o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image,
  const MagickPixelPacket *low,const MagickPixelPacket *high,
  const Quantum opacity,const MagickBooleanType invert)
{
/* identical redefinition of the macro defined for TransparentPaintImage()
   above; benign per the C standard since the replacement list is the same */
#define TransparentPaintImageTag  "Transparent/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(high != (MagickPixelPacket *) NULL);
  assert(low != (MagickPixelPacket *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* NOTE(review): unlike TransparentPaintImage() this resets (rather than
     opaques) the alpha channel when none exists -- presumed intentional */
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,ResetAlphaChannel);
  /*
    Make image color transparent.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      match;

    MagickPixelPacket
      pixel;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    GetMagickPixelPacket(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      /* per-channel interval test: inside [low,high] on all of R, G, B */
      match=((pixel.red >= low->red) && (pixel.red <= high->red) &&
        (pixel.green >= low->green) && (pixel.green <= high->green) &&
        (pixel.blue  >= low->blue) && (pixel.blue <= high->blue)) ?
        MagickTrue : MagickFalse;
      if (match != invert)
        q->opacity=opacity;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransparentPaintImageChroma)
#endif
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
GB_unaryop__identity_fp64_int64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_fp64_int64
// op(A') function:  GB_tran__identity_fp64_int64

// C type:   double
// A type:   int64_t
// cast:     double cij = (double) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, aij) \
    double z = (double) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise, embarrassingly parallel: each iteration touches only Cx[p]
// and Ax[p], so the in-place (aliased) case is safe.
GrB_Info GB_unop__identity_fp64_int64
(
    double *Cx,         // Cx and Ax may be aliased
    int64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is generated by GB_unaryop_transpose.c using the
// GB_* macros defined above.
GrB_Info GB_tran__identity_fp64_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
HSetMaintainer.h
#ifndef HSET_MAINTAINER_H #define HSET_MAINTAINER_H /************************************************************* * Copyright: (C) 2012 by Markus Schordan * * Author : Markus Schordan * * License : see file LICENSE in the CodeThorn distribution * *************************************************************/ #include <boost/unordered_set.hpp> //#define HSET_MAINTAINER_DEBUG_MODE /*! * \author Markus Schordan * \date 2012. */ template<typename KeyType,typename HashFun, typename EqualToPred> class HSetMaintainer : public boost::unordered_set<KeyType*,HashFun,EqualToPred> { public: typedef pair<bool,const KeyType*> ProcessingResult; /*! * \author Marc Jasper * \date 2016. */ HSetMaintainer() { _keepStatesDuringDeconstruction = false; } /*! * \author Marc Jasper * \date 2016. */ HSetMaintainer(bool keepStates) { _keepStatesDuringDeconstruction = keepStates; } /*! * \author Marc Jasper * \date 2016. */ virtual ~HSetMaintainer() { if (!_keepStatesDuringDeconstruction){ typename HSetMaintainer::iterator i; for (i=this->begin(); i!=this->end(); ++i) { delete (*i); } } } bool exists(KeyType& s) { return determine(s)!=0; } size_t id(const KeyType& s) { typename boost::unordered_set<KeyType*,HashFun,EqualToPred>::const_iterator i; i=HSetMaintainer<KeyType,HashFun,EqualToPred>::find(s); if(i!=HSetMaintainer<KeyType,HashFun,EqualToPred>::end()) { // in lack of operator '-' we compute the distance size_t pos=0; typename boost::unordered_set<KeyType*,HashFun,EqualToPred>::const_iterator b; b=HSetMaintainer<KeyType,HashFun,EqualToPred>::begin(); while(b!=i) { pos++; ++b; } return pos; } else throw "Error: unknown value. 
Maintainer cannot determine an id."; } typename HSetMaintainer<KeyType,HashFun,EqualToPred>::iterator i; KeyType* determine(KeyType& s) { KeyType* ret=0; typename HSetMaintainer<KeyType,HashFun,EqualToPred>::iterator i; #pragma omp critical(HASHSET) { i=HSetMaintainer<KeyType,HashFun,EqualToPred>::find(&s); if(i!=HSetMaintainer<KeyType,HashFun,EqualToPred>::end()) { ret=const_cast<KeyType*>(*i); } else { ret=0; } } return ret; } const KeyType* determine(const KeyType& s) { const KeyType* ret=0; typename HSetMaintainer<KeyType,HashFun,EqualToPred>::iterator i; #pragma omp critical(HASHSET) { i=HSetMaintainer<KeyType,HashFun,EqualToPred>::find(const_cast<KeyType*>(&s)); if(i!=HSetMaintainer<KeyType,HashFun,EqualToPred>::end()) { ret=const_cast<KeyType*>(*i); } else { ret=0; } } return ret; } ProcessingResult process(const KeyType* key) { ProcessingResult res2; #pragma omp critical(HASHSET) { std::pair<typename HSetMaintainer::iterator, bool> res; typename HSetMaintainer::iterator iter=this->find(const_cast<KeyType*>(key)); // TODO: eliminate const_cast if(iter!=this->end()) { // found it! res=make_pair(iter,false); } else { res=this->insert(const_cast<KeyType*>(key)); // TODO: eliminate const_cast } res2=make_pair(res.second,*res.first); } return res2; } const KeyType* processNewOrExisting(const KeyType* s) { ProcessingResult res=process(s); return res.second; } //! <true,const KeyType> if new element was inserted //! <false,const KeyType> if element already existed ProcessingResult process(KeyType key) { ProcessingResult res2; #pragma omp critical(HASHSET) { std::pair<typename HSetMaintainer::iterator, bool> res; typename HSetMaintainer::iterator iter=this->find(&key); if(iter!=this->end()) { // found it! 
res=make_pair(iter,false); } else { // converting the stack allocated object to heap allocated // this copies the entire object // TODO: this can be avoided by providing a process function with a pointer arg // this requires a more detailed result: pointer exists, alternate pointer with equal object exists, does not exist KeyType* keyPtr=new KeyType(); *keyPtr=key; res=this->insert(keyPtr); if (!res.second) { // this case should never occur, condition "iter!=this->end()" above would have been satisfied and // this else branch would have therefore been ignored cerr << "ERROR: HSetMaintainer: Element was not inserted even though it could not be found in the set." << endl; ROSE_ASSERT(0); delete keyPtr; keyPtr = NULL; } } #ifdef HSET_MAINTAINER_DEBUG_MODE std::pair<typename HSetMaintainer::iterator, bool> res1; res1=this->insert(key); std::pair<typename HSetMaintainer::iterator, bool> res2; res2=this->insert(key); if(!(res1==res2)) { cerr<< "Error: HsetMaintainer failed:"<<endl; cerr<< "res1:"<<(*res1.first).toString()<<":"<<res1.second<<endl; cerr<< "res2:"<<(*res2.first).toString()<<":"<<res2.second<<endl; exit(1); } cerr << "HSET insert OK"<<endl; #endif res2=make_pair(res.second,*res.first); } return res2; } const KeyType* processNew(KeyType& s) { //std::pair<typename HSetMaintainer::iterator, bool> res=process(s); ProcessingResult res=process(s); if(res.first!=true) { cerr<< "Error: HsetMaintainer::processNew failed:"<<endl; cerr<< "res:"; cout <<":"<<res.first<<endl; cout <<res.second->toString(); exit(1); } return res.second; } const KeyType* processNewOrExisting(KeyType& s) { ProcessingResult res=process(s); return res.second; } long numberOf() { return HSetMaintainer<KeyType,HashFun,EqualToPred>::size(); } long maxCollisions() { size_t max=0; for(size_t i=0; i<HSetMaintainer<KeyType,HashFun,EqualToPred>::bucket_count();++i) { if(HSetMaintainer<KeyType,HashFun,EqualToPred>::bucket_size(i)>max) { max=HSetMaintainer<KeyType,HashFun,EqualToPred>::bucket_size(i); 
} } return max; } double loadFactor() { return HSetMaintainer<KeyType,HashFun,EqualToPred>::load_factor(); } long memorySize() const { long mem=0; for(typename HSetMaintainer<KeyType,HashFun,EqualToPred>::const_iterator i =HSetMaintainer<KeyType,HashFun,EqualToPred>::begin(); i!=HSetMaintainer<KeyType,HashFun,EqualToPred>::end(); ++i) { mem+=(*i)->memorySize(); mem+=sizeof(*i); } return mem+sizeof(*this); } private: //const KeyType* ptr(KeyType& s) {} bool _keepStatesDuringDeconstruction; }; #endif
zero_length_array_section_exit.c
// RUN: %libomptarget-compile-aarch64-unknown-linux-gnu -fopenmp-version=51 // RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \ // RUN: | %fcheck-aarch64-unknown-linux-gnu // RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu -fopenmp-version=51 // RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64-ibm-linux-gnu // RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu -fopenmp-version=51 // RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64le-ibm-linux-gnu // RUN: %libomptarget-compile-x86_64-pc-linux-gnu -fopenmp-version=51 // RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \ // RUN: | %fcheck-x86_64-pc-linux-gnu #include <stdio.h> int main() { int arr[5]; // CHECK: addr=0x[[#%x,HOST_ADDR:]] fprintf(stderr, "addr=%p\n", arr); // CHECK-NOT: Libomptarget #pragma omp target enter data map(alloc: arr[0:5]) #pragma omp target exit data map(present, release: arr[0:0]) // CHECK: arr is present fprintf(stderr, "arr is present\n"); // arr[0:0] doesn't create an actual mapping in the first directive. // // CHECK: Libomptarget message: device mapping required by 'present' map type modifier does not exist for host address 0x{{0*}}[[#HOST_ADDR]] (0 bytes) // CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory #pragma omp target enter data map(alloc: arr[0:0]) #pragma omp target exit data map(present, release: arr[0:0]) // CHECK-NOT: arr is present fprintf(stderr, "arr is present\n"); return 0; }
GB_unaryop__identity_int8_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_int8_uint64
// op(A') function:  GB_tran__identity_int8_uint64

// C type:   int8_t
// A type:   uint64_t
// cast:     int8_t cij = (int8_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    int8_t z = (int8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise, embarrassingly parallel: iteration p touches only Cx[p]
// and Ax[p].
GrB_Info GB_unop__identity_int8_uint64
(
    int8_t *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is generated by GB_unaryop_transpose.c using the
// GB_* macros defined above.
GrB_Info GB_tran__identity_int8_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
test.c
#include <stdio.h>
#include <omp.h>
#include <math.h>

/* Offload smoke test: evaluate powi() inside a target region and print the
   result on the host.  Note powi is a non-standard (device math) function,
   used here deliberately; the powif variant is kept disabled. */
int main(void)
{
  double a = 2.0;
  float b = 2.0;

#pragma omp target map(a, b)
  {
    a = powi(a, 4);
    //b = powif(b, 4);
  }

  printf("%lf\n", a);
  //printf("%f\n", b);
  return 0;
}
deprecate.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%            DDDD   EEEEE  PPPP   RRRR   EEEEE   CCCC   AAA   TTTTT  EEEEE    %
%            D   D  E      P   P  R   R  E      C      A   A    T    E        %
%            D   D  EEE    PPPPP  RRRR   EEE    C      AAAAA    T    EEE      %
%            D   D  E      P      R R    E      C      A   A    T    E        %
%            DDDD   EEEEE  P      R  R   EEEEE   CCCC  A   A    T    EEEEE    %
%                                                                             %
%                                                                             %
%                       MagickWand Deprecated Methods                         %
%                                                                             %
%                              Software Design                                %
%                                John Cristy                                  %
%                               October 2002                                  %
%                                                                             %
%                                                                             %
%  Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.           %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    http://www.imagemagick.org/script/license.php                            %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/

/*
  Include declarations.
*/
#include "wand/studio.h"
#include "wand/MagickWand.h"
#include "wand/magick-wand-private.h"
#include "wand/wand.h"
#include "magick/monitor-private.h"
#include "magick/thread-private.h"

/*
  Define declarations.
*/
#define PixelViewId  "PixelView"
#define ThrowWandException(severity,tag,context) \
{ \
  (void) ThrowMagickException(wand->exception,GetMagickModule(),severity, \
    tag,"`%s'",context); \
  return(MagickFalse); \
}

/*
  Typedef declarations.
*/
struct _PixelView
{
  size_t
    id;

  char
    name[MaxTextExtent];

  ExceptionInfo
    *exception;

  MagickWand
    *wand;

  CacheView
    *view;

  RectangleInfo
    region;

  size_t
    number_threads;

  PixelWand
    ***pixel_wands;  /* one row of PixelWands per worker thread */

  MagickBooleanType
    debug;

  size_t
    signature;
};

#if !defined(MAGICKCORE_EXCLUDE_DEPRECATED)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k A v e r a g e I m a g e s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickAverageImages() average a set of images.
%
%  The format of the MagickAverageImages method is:
%
%      MagickWand *MagickAverageImages(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/

/*
  CloneMagickWandFromImages() builds a new wand that shares the given wand's
  settings but owns the supplied image list.
*/
static MagickWand *CloneMagickWandFromImages(const MagickWand *wand,
  Image *images)
{
  MagickWand
    *clone_wand;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  clone_wand=(MagickWand *) AcquireMagickMemory(sizeof(*clone_wand));
  if (clone_wand == (MagickWand *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      images->filename);
  (void) ResetMagickMemory(clone_wand,0,sizeof(*clone_wand));
  clone_wand->id=AcquireWandId();
  (void) FormatLocaleString(clone_wand->name,MaxTextExtent,"%s-%.20g",
    MagickWandId,(double) clone_wand->id);
  clone_wand->exception=AcquireExceptionInfo();
  InheritException(clone_wand->exception,wand->exception);
  clone_wand->image_info=CloneImageInfo(wand->image_info);
  clone_wand->quantize_info=CloneQuantizeInfo(wand->quantize_info);
  clone_wand->images=images;
  clone_wand->debug=IsEventLogging();
  if (clone_wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_wand->name);
  clone_wand->signature=WandSignature;
  return(clone_wand);
}

WandExport MagickWand *MagickAverageImages(MagickWand *wand)
{
  Image
    *average_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  average_image=EvaluateImages(wand->images,MeanEvaluateOperator,
    wand->exception);
  if (average_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,average_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e P i x e l V i e w                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClonePixelView() makes a copy of the specified pixel view.
%
%  The format of the ClonePixelView method is:
%
%      PixelView *ClonePixelView(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport PixelView *ClonePixelView(const PixelView *pixel_view)
{
  PixelView
    *clone_view;

  register ssize_t
    i;

  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  if (pixel_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name);
  clone_view=(PixelView *) AcquireMagickMemory(sizeof(*clone_view));
  if (clone_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      pixel_view->name);
  (void) ResetMagickMemory(clone_view,0,sizeof(*clone_view));
  clone_view->id=AcquireWandId();
  (void) FormatLocaleString(clone_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) clone_view->id);
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,pixel_view->exception);
  clone_view->view=CloneCacheView(pixel_view->view);
  clone_view->region=pixel_view->region;
  clone_view->number_threads=pixel_view->number_threads;
  /*
    FIX: allocate the per-thread pixel_wands array before populating it; it
    was previously left NULL (zeroed by ResetMagickMemory above), so the
    assignments below dereferenced a null pointer.
  */
  clone_view->pixel_wands=(PixelWand ***) AcquireQuantumMemory(
    clone_view->number_threads,sizeof(*clone_view->pixel_wands));
  if (clone_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      pixel_view->name);
  (void) ResetMagickMemory(clone_view->pixel_wands,0,
    clone_view->number_threads*sizeof(*clone_view->pixel_wands));
  for (i=0; i < (ssize_t) pixel_view->number_threads; i++)
    clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **)
      pixel_view->pixel_wands[i],pixel_view->region.width);
  clone_view->debug=pixel_view->debug;
  if (clone_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name);
  clone_view->signature=WandSignature;
  return(clone_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y P i x e l V i e w                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyPixelView() deallocates memory associated with a pixel view.
%
%  The format of the DestroyPixelView method is:
%
%      PixelView *DestroyPixelView(PixelView *pixel_view,
%        const size_t number_wands,const size_t number_threads)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
%    o number_wand: the number of pixel wands.
%
%    o number_threads: number of threads.
%
*/

/*
  DestroyPixelsThreadSet() releases each thread's row of PixelWands and then
  the array itself; returns NULL.
*/
static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands,
  const size_t number_wands,const size_t number_threads)
{
  register ssize_t
    i;

  assert(pixel_wands != (PixelWand ***) NULL);
  for (i=0; i < (ssize_t) number_threads; i++)
    if (pixel_wands[i] != (PixelWand **) NULL)
      pixel_wands[i]=DestroyPixelWands(pixel_wands[i],number_wands);
  pixel_wands=(PixelWand ***) RelinquishMagickMemory(pixel_wands);
  return(pixel_wands);
}

WandExport PixelView *DestroyPixelView(PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  pixel_view->pixel_wands=DestroyPixelsThreadSet(pixel_view->pixel_wands,
    pixel_view->region.width,pixel_view->number_threads);
  pixel_view->view=DestroyCacheView(pixel_view->view);
  pixel_view->exception=DestroyExceptionInfo(pixel_view->exception);
  pixel_view->signature=(~WandSignature);  /* invalidate before freeing */
  RelinquishWandId(pixel_view->id);
  pixel_view=(PixelView *) RelinquishMagickMemory(pixel_view);
  return(pixel_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D u p l e x T r a n s f e
r   P i x e l   V i e w   I t e r a t o r                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DuplexTransferPixelViewIterator() iterates over three pixel views in
%  parallel and calls your transfer method for each scanline of the view.  The
%  source and duplex pixel region is not confined to the image canvas-- that is
%  you can include negative offsets or widths or heights that exceed the image
%  dimension.  However, the destination pixel view is confined to the image
%  canvas-- that is no negative offsets or widths or heights that exceed the
%  image dimension are permitted.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the DuplexTransferPixelViewIterator method is:
%
%      MagickBooleanType DuplexTransferPixelViewIterator(PixelView *source,
%        PixelView *duplex,PixelView *destination,
%        DuplexTransferPixelViewMethod transfer,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source pixel view.
%
%    o duplex: the duplex pixel view.
%
%    o destination: the destination pixel view.
%
%    o transfer: the transfer callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType DuplexTransferPixelViewIterator(
  PixelView *source,PixelView *duplex,PixelView *destination,
  DuplexTransferPixelViewMethod transfer,void *context)
{
#define DuplexTransferPixelViewTag  "PixelView/DuplexTransfer"

  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *duplex_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    NOTE(review): only `source' is assert-checked; `duplex' and `destination'
    are dereferenced below without NULL/signature asserts -- confirm callers
    guarantee all three views.
  */
  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (transfer == (DuplexTransferPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  duplex_image=duplex->wand->images;
  destination_image=destination->wand->images;
  /*
    Destination pixels are modified in place, so it must be DirectClass.
  */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  /*
    NOTE(review): the loop runs y = region.y .. region.height-1, i.e. the
    bound is `height', not `y+height' -- for a non-zero region.y this visits
    fewer rows than the region spans.  Looks like a latent off-by-region bug
    in this deprecated path; confirm against current upstream before changing.
  */
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict duplex_indexes,
      *restrict indexes;

    register const PixelPacket
      *restrict duplex_pixels,
      *restrict pixels;

    register IndexPacket
      *restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict destination_pixels;

    if (status == MagickFalse)
      continue;
    /*
      Load one source scanline into this thread's row of pixel wands.
    */
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /*
      Load the matching duplex scanline.
    */
    duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->region.x,y,
      duplex->region.width,1,duplex->exception);
    if (duplex_pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    duplex_indexes=GetCacheViewVirtualIndexQueue(duplex->view);
    for (x=0; x < (ssize_t) duplex->region.width; x++)
      PixelSetQuantumColor(duplex->pixel_wands[id][x],duplex_pixels+x);
    if (duplex_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) duplex->region.width; x++)
        PixelSetBlackQuantum(duplex->pixel_wands[id][x],
          GetPixelIndex(duplex_indexes+x));
    if (duplex_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) duplex->region.width; x++)
        PixelSetIndex(duplex->pixel_wands[id][x],
          GetPixelIndex(duplex_indexes+x));
    /*
      Load the destination scanline for authentic (writable) access.
    */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->region.x,y,destination->region.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          GetPixelIndex(destination_indexes+x));
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          GetPixelIndex(destination_indexes+x));
    /*
      User callback; it mutates the destination wands in place.
    */
    if (transfer(source,duplex,destination,context) == MagickFalse)
      status=MagickFalse;
    /*
      Write the (possibly modified) destination wands back to the cache.
      NOTE(review): only the color and CMYK black channels are written back;
      no PseudoClass index write-back here -- presumably intentional since the
      destination was forced to DirectClass above.
    */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        SetPixelIndex(destination_indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        /*
          NOTE(review): on a destination sync failure the exception is pulled
          from source->view, not destination->view -- looks like a copy/paste
          slip; confirm against upstream before changing.
        */
        InheritException(destination->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_DuplexTransferPixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,DuplexTransferPixelViewTag,
          progress++,source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l V i e w E x c e p t i o n                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewException() returns the severity, reason, and description of any
%  error that occurs when utilizing a pixel view.
%
%  The format of the GetPixelViewException method is:
%
%      char *GetPixelViewException(const PixelWand *pixel_view,
%        ExceptionType *severity)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel pixel_view.
%
%    o severity: the severity of the error is returned here.
%
*/
WandExport char *GetPixelViewException(const PixelView *pixel_view,
  ExceptionType *severity)
{
  char
    *description;

  assert(pixel_view != (const PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  if (pixel_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name);
  assert(severity != (ExceptionType *) NULL);
  *severity=pixel_view->exception->severity;
  /*
    Caller owns the returned buffer (2*MaxTextExtent chars); free it with
    RelinquishMagickMemory()/MagickRelinquishMemory().
  */
  description=(char *) AcquireQuantumMemory(2UL*MaxTextExtent,
    sizeof(*description));
  if (description == (char *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      pixel_view->name);
  *description='\0';
  if (pixel_view->exception->reason != (char *) NULL)
    (void) CopyMagickString(description,GetLocaleExceptionMessage(
      pixel_view->exception->severity,pixel_view->exception->reason),
      MaxTextExtent);
  if (pixel_view->exception->description != (char *) NULL)
    {
      /*
        Append " (localized description)" after the localized reason.
      */
      (void) ConcatenateMagickString(description," (",MaxTextExtent);
      (void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
        pixel_view->exception->severity,pixel_view->exception->description),
        MaxTextExtent);
      (void) ConcatenateMagickString(description,")",MaxTextExtent);
    }
  return(description);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l V i e w H e i g h t                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewHeight() returns the pixel view height.
%
%  The format of the GetPixelViewHeight method is:
%
%      size_t GetPixelViewHeight(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport size_t GetPixelViewHeight(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.height);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l V i e w I t e r a t o r                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewIterator() iterates over the pixel view in parallel and calls
%  your get method for each scanline of the view.  The pixel region is
%  not confined to the image canvas-- that is you can include negative offsets
%  or widths or heights that exceed the image dimension.  Any updates to
%  the pixels in your callback are ignored.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback get method that must be
%  executed by a single thread at a time.
%
%  The format of the GetPixelViewIterator method is:
%
%      MagickBooleanType GetPixelViewIterator(PixelView *source,
%        GetPixelViewMethod get,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source pixel view.
%
%    o get: the get callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType GetPixelViewIterator(PixelView *source,
  GetPixelViewMethod get,void *context)
{
#define GetPixelViewTag  "PixelView/Get"

  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (get == (GetPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  /*
    NOTE(review): bound is `region.height', not `region.y+region.height' --
    same off-by-region pattern as DuplexTransferPixelViewIterator; confirm
    before changing.
  */
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *pixels;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Read-only scanline: virtual pixels, so out-of-canvas regions are legal.
    */
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (get(source,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_GetPixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,GetPixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l V i e w P i x e l s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewPixels() returns the pixel view pixel_wands.
%
%  The format of the GetPixelViewPixels method is:
%
%      PixelWand *GetPixelViewPixels(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport PixelWand **GetPixelViewPixels(const PixelView *pixel_view)
{
  /*
    Returns the row of wands belonging to the calling OpenMP thread; intended
    to be called from inside a view-iterator callback.
  */
  const int
    id = GetOpenMPThreadId();

  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->pixel_wands[id]);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l V i e w W a n d                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewWand() returns the magick wand associated with the pixel view.
%
%  The format of the GetPixelViewWand method is:
%
%      MagickWand *GetPixelViewWand(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport MagickWand *GetPixelViewWand(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l V i e w W i d t h                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewWidth() returns the pixel view width.
%
%  The format of the GetPixelViewWidth method is:
%
%      size_t GetPixelViewWidth(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport size_t GetPixelViewWidth(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.width);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l V i e w X                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewX() returns the pixel view x offset.
%
%  The format of the GetPixelViewX method is:
%
%      ssize_t GetPixelViewX(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport ssize_t GetPixelViewX(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.x);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l V i e w Y                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewY() returns the pixel view y offset.
%
%  The format of the GetPixelViewY method is:
%
%      ssize_t GetPixelViewY(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport ssize_t GetPixelViewY(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.y);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s P i x e l V i e w                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsPixelView() returns MagickTrue if the parameter is verified as a pixel
%  view container.
%
%  The format of the IsPixelView method is:
%
%      MagickBooleanType IsPixelView(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport MagickBooleanType IsPixelView(const PixelView *pixel_view)
{
  size_t
    length;

  /*
    Unlike the accessors above this is a predicate, so a NULL or unsigned
    view returns MagickFalse instead of asserting.
  */
  if (pixel_view == (const PixelView *) NULL)
    return(MagickFalse);
  if (pixel_view->signature != WandSignature)
    return(MagickFalse);
  length=strlen(PixelViewId);
  /*
    Names are generated as "PixelViewId-<id>", so a prefix match confirms the
    container type.
  */
  if (LocaleNCompare(pixel_view->name,PixelViewId,length) != 0)
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k C l i p P a t h I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickClipPathImage() clips along the named paths from the 8BIM profile, if
%  present.  Later operations take effect inside the path.  Id may be a number
%  if preceded with #, to work on a numbered path, e.g., "#1" to use the first
%  path.
%
%  The format of the MagickClipPathImage method is:
%
%      MagickBooleanType MagickClipPathImage(MagickWand *wand,
%        const char *pathname,const MagickBooleanType inside)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o pathname: name of clipping path resource.  If name is preceded by #, use
%      clipping path numbered by name.
%
%    o inside: if non-zero, later operations take effect inside clipping path.
%      Otherwise later operations take effect outside clipping path.
%
*/
WandExport MagickBooleanType MagickClipPathImage(MagickWand *wand,
  const char *pathname,const MagickBooleanType inside)
{
  /* Deprecated alias for MagickClipImagePath(). */
  return(MagickClipImagePath(wand,pathname,inside));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w G e t F i l l A l p h a                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawGetFillAlpha() returns the alpha used when drawing using the fill
%  color or fill texture.  Fully opaque is 1.0.
%
%  The format of the DrawGetFillAlpha method is:
%
%      double DrawGetFillAlpha(const DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
*/
WandExport double DrawGetFillAlpha(const DrawingWand *wand)
{
  /* Deprecated alias for DrawGetFillOpacity(). */
  return(DrawGetFillOpacity(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w G e t S t r o k e A l p h a                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawGetStrokeAlpha() returns the alpha of stroked object outlines.
%
%  The format of the DrawGetStrokeAlpha method is:
%
%      double DrawGetStrokeAlpha(const DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
*/
WandExport double DrawGetStrokeAlpha(const DrawingWand *wand)
{
  /* Deprecated alias for DrawGetStrokeOpacity(). */
  return(DrawGetStrokeOpacity(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P e e k G r a p h i c W a n d                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPeekGraphicWand() returns the current drawing wand.
%
%  The format of the PeekDrawingWand method is:
%
%      DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
*/
WandExport DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand)
{
  /* Deprecated alias for PeekDrawingWand(). */
  return(PeekDrawingWand(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P o p G r a p h i c C o n t e x t                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPopGraphicContext() destroys the current drawing wand and returns to the
%  previously pushed drawing wand.  Multiple drawing wands may exist.  It is an
%  error to attempt to pop more drawing wands than have been pushed, and it is
%  proper form to pop all drawing wands which have been pushed.
%
%  The format of the DrawPopGraphicContext method is:
%
%      MagickBooleanType DrawPopGraphicContext(DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
*/
WandExport void DrawPopGraphicContext(DrawingWand *wand)
{
  /* Deprecated alias for PopDrawingWand(); the status is discarded. */
  (void) PopDrawingWand(wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P u s h G r a p h i c C o n t e x t                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPushGraphicContext() clones the current drawing wand to create a new
%  drawing wand.  The original drawing wand(s) may be returned to by
%  invoking PopDrawingWand().  The drawing wands are stored on a drawing wand
%  stack.  For every Pop there must have already been an equivalent Push.
%
%  The format of the DrawPushGraphicContext method is:
%
%      MagickBooleanType DrawPushGraphicContext(DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
*/
WandExport void DrawPushGraphicContext(DrawingWand *wand)
{
  /* Deprecated alias for PushDrawingWand(); the status is discarded. */
  (void) PushDrawingWand(wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w S e t F i l l A l p h a                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawSetFillAlpha() sets the alpha to use when drawing using the fill
%  color or fill texture.  Fully opaque is 1.0.
%
%  The format of the DrawSetFillAlpha method is:
%
%      void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
%    o fill_alpha: fill alpha
%
*/
WandExport void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha)
{
  /* Deprecated alias for DrawSetFillOpacity(). */
  DrawSetFillOpacity(wand,fill_alpha);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w S e t S t r o k e A l p h a                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawSetStrokeAlpha() specifies the alpha of stroked object outlines.
%
%  The format of the DrawSetStrokeAlpha method is:
%
%      void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
%    o stroke_alpha: stroke alpha.  The value 1.0 is opaque.
%
*/
WandExport void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha)
{
  /* Deprecated alias for DrawSetStrokeOpacity(). */
  DrawSetStrokeOpacity(wand,stroke_alpha);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k C o l o r F l o o d f i l l I m a g e                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickColorFloodfillImage() changes the color value of any pixel that matches
%  target and is an immediate neighbor.  If the method FillToBorderMethod is
%  specified, the color value is changed for any neighbor pixel that does not
%  match the bordercolor member of image.
%
%  The format of the MagickColorFloodfillImage method is:
%
%      MagickBooleanType MagickColorFloodfillImage(MagickWand *wand,
%        const PixelWand *fill,const double fuzz,const PixelWand *bordercolor,
%        const ssize_t x,const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o fill: the floodfill color pixel wand.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small amount.
%      The fuzz member of image defines how much tolerance is acceptable to
%      consider two colors as the same.
For example, set fuzz to 10 and the
%      color red at intensities of 100 and 102 respectively are now interpreted
%      as the same color for the purposes of the floodfill.
%
%    o bordercolor: the border color pixel wand.
%
%    o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickColorFloodfillImage(MagickWand *wand,
  const PixelWand *fill,const double fuzz,const PixelWand *bordercolor,
  const ssize_t x,const ssize_t y)
{
  DrawInfo
    *draw_info;

  MagickBooleanType
    status;

  PixelPacket
    target;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  draw_info=CloneDrawInfo(wand->image_info,(DrawInfo *) NULL);
  PixelGetQuantumColor(fill,&draw_info->fill);
  /*
    Seed the target color from the pixel at (x,y), wrapped into the canvas.
    NOTE(review): `%' on a negative ssize_t x/y yields a negative remainder in
    C99 -- presumably callers pass non-negative coordinates; confirm.
  */
  (void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
    y % wand->images->rows,&target,wand->exception);
  /*
    With a border color, fill-to-border semantics replace the seed target.
  */
  if (bordercolor != (PixelWand *) NULL)
    PixelGetQuantumColor(bordercolor,&target);
  wand->images->fuzz=fuzz;
  status=ColorFloodfillImage(wand->images,draw_info,target,x,y,
    bordercolor != (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k D e s c r i b e I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickDescribeImage() identifies an image by printing its attributes to the
%  file.  Attributes include the image width, height, size, and others.
%
%  The format of the MagickDescribeImage method is:
%
%      char *MagickDescribeImage(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport char *MagickDescribeImage(MagickWand *wand)
{
  /* Deprecated alias for MagickIdentifyImage(); caller frees the string. */
  return(MagickIdentifyImage(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k F l a t t e n I m a g e s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickFlattenImages() merges a sequence of images.  This useful for
%  combining Photoshop layers into a single image.
%
%  The format of the MagickFlattenImages method is:
%
%      MagickWand *MagickFlattenImages(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickWand *MagickFlattenImages(MagickWand *wand)
{
  Image
    *flatten_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  flatten_image=FlattenImages(wand->images,wand->exception);
  if (flatten_image == (Image *) NULL)
    return((MagickWand *) NULL);
  /* Result is wrapped in a new wand; the input wand is left untouched. */
  return(CloneMagickWandFromImages(wand,flatten_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k G e t I m a g e A t t r i b u t e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageAttribute() returns a value associated with the specified
%  property.  Use MagickRelinquishMemory() to free the value when you are
%  finished with it.
%
%  The format of the MagickGetImageAttribute method is:
%
%      char *MagickGetImageAttribute(MagickWand *wand,const char *property)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o property: the property.
%
*/
WandExport char *MagickGetImageAttribute(MagickWand *wand,const char *property)
{
  /* Deprecated alias for MagickGetImageProperty(). */
  return(MagickGetImageProperty(wand,property));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   M a g i c k G e t I m a g e I n d e x                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageIndex() returns the index of the current image.
%
%  The format of the MagickGetImageIndex method is:
%
%      ssize_t MagickGetImageIndex(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport ssize_t MagickGetImageIndex(MagickWand *wand)
{
  /* Deprecated alias for MagickGetIteratorIndex(). */
  return(MagickGetIteratorIndex(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   M a g i c k G e t I m a g e C h a n n e l E x t r e m a                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageChannelExtrema() gets the extrema for one or more image
%  channels.
%
%  The format of the MagickGetImageChannelExtrema method is:
%
%      MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand,
%        const ChannelType channel,size_t *minima,size_t *maxima)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the image channel(s).
%
%    o minima: The minimum pixel value for the specified channel(s).
%
%    o maxima: The maximum pixel value for the specified channel(s).
%
*/
WandExport MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand,
  const ChannelType channel,size_t *minima,size_t *maxima)
{
  MagickBooleanType
    status;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  status=GetImageChannelExtrema(wand->images,channel,minima,maxima,
    wand->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   M a g i c k G e t I m a g e E x t r e m a                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageExtrema() gets the extrema for the image.
%
%  The format of the MagickGetImageExtrema method is:
%
%      MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
%        size_t *minima,size_t *maxima)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o minima: The minimum pixel value for the specified channel(s).
%
%    o maxima: The maximum pixel value for the specified channel(s).
%
*/
WandExport MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
  size_t *minima,size_t *maxima)
{
  MagickBooleanType
    status;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  status=GetImageExtrema(wand->images,minima,maxima,wand->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k G e t I m a g e M a t t e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageMatte() returns MagickTrue if the image has a matte channel
%  otherwise MagickFalse.
%
%  The format of the MagickGetImageMatte method is:
%
%      MagickBooleanType MagickGetImageMatte(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickBooleanType MagickGetImageMatte(MagickWand *wand)
{
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  return(wand->images->matte);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k G e t I m a g e P i x e l s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImagePixels() extracts pixel data from an image and returns it to
%  you.  The method returns MagickTrue on success otherwise MagickFalse if an
%  error is encountered.  The data is returned as char, short int, int, ssize_t,
%  float, or double in the order specified by map.
% % Suppose you want to extract the first scanline of a 640x480 image as % character data in red-green-blue order: % % MagickGetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels); % % The format of the MagickGetImagePixels method is: % % MagickBooleanType MagickGetImagePixels(MagickWand *wand, % const ssize_t x,const ssize_t y,const size_t columns, % const size_t rows,const char *map,const StorageType storage, % void *pixels) % % A description of each parameter follows: % % o wand: the magick wand. % % o x, y, columns, rows: These values define the perimeter % of a region of pixels you want to extract. % % o map: This string reflects the expected ordering of the pixel array. % It can be any combination or order of R = red, G = green, B = blue, % A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan, % Y = yellow, M = magenta, K = black, I = intensity (for grayscale), % P = pad. % % o storage: Define the data type of the pixels. Float and double types are % expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose from % these types: CharPixel, DoublePixel, FloatPixel, IntegerPixel, % LongPixel, QuantumPixel, or ShortPixel. % % o pixels: This array of values contain the pixel components as defined by % map and type. You must preallocate this array where the expected % length varies depending on the values of width, height, map, and type. % */ WandExport MagickBooleanType MagickGetImagePixels(MagickWand *wand, const ssize_t x,const ssize_t y,const size_t columns, const size_t rows,const char *map,const StorageType storage, void *pixels) { return(MagickExportImagePixels(wand,x,y,columns,rows,map,storage,pixels)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k G e t I m a g e S i z e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickGetImageSize() returns the image length in bytes. 
%
%  The format of the MagickGetImageSize method is:
%
%      MagickSizeType MagickGetImageSize(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
/* Deprecated: returns the blob length in bytes of the current image.
   Note the actual return type is MagickSizeType (not a status boolean). */
WandExport MagickSizeType MagickGetImageSize(MagickWand *wand)
{
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  return(GetBlobSize(wand->images));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k M a p I m a g e                                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMapImage() replaces the colors of an image with the closest color
%  from a reference image.
%
%  The format of the MagickMapImage method is:
%
%      MagickBooleanType MagickMapImage(MagickWand *wand,
%        const MagickWand *map_wand,const MagickBooleanType dither)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o map: the map wand.
%
%    o dither: Set this integer value to something other than zero to dither
%      the mapped image.
%
*/
/* Deprecated: remaps the wand image's colors to the closest colors of the
   reference image held by map_wand. */
WandExport MagickBooleanType MagickMapImage(MagickWand *wand,
  const MagickWand *map_wand,const MagickBooleanType dither)
{
  MagickBooleanType
    status;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /* Both wands must hold at least one image. */
  if ((wand->images == (Image *) NULL) || (map_wand->images == (Image *) NULL))
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  status=MapImage(wand->images,map_wand->images,dither);
  /* MapImage() records errors on the image; copy them to the wand. */
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k M a t t e F l o o d f i l l I m a g e                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMatteFloodfillImage() changes the transparency value of any pixel
%  that matches target and is an immediate neighbor.  If the method
%  FillToBorderMethod is specified, the transparency value is changed for any
%  neighbor pixel that does not match the bordercolor member of image.
%
%  The format of the MagickMatteFloodfillImage method is:
%
%      MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
%        const double alpha,const double fuzz,const PixelWand *bordercolor,
%        const ssize_t x,const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
%      transparent.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
%
%    o bordercolor: the border color pixel wand.
%
%    o x,y: the starting location of the operation.
%
*/
/* Deprecated: flood-fills transparency starting at (x,y).  If bordercolor is
   given, FillToBorderMethod is used; otherwise FloodfillMethod. */
WandExport MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
  const double alpha,const double fuzz,const PixelWand *bordercolor,
  const ssize_t x,const ssize_t y)
{
  MagickBooleanType
    status;

  PixelPacket
    target;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /*
    Fix: the previous implementation cloned a DrawInfo here and destroyed it
    without ever using it -- a pointless allocation/free pair, now removed.
  */
  /* Sample the seed pixel; coordinates wrap via modulo so out-of-range
     offsets still map onto the canvas. */
  (void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
    y % wand->images->rows,&target,wand->exception);
  if (bordercolor != (PixelWand *) NULL)
    PixelGetQuantumColor(bordercolor,&target);
  wand->images->fuzz=fuzz;
  /* alpha is caller-facing opacity (1.0 opaque); convert to quantum opacity. */
  status=MatteFloodfillImage(wand->images,target,ClampToQuantum(
    (MagickRealType) QuantumRange-QuantumRange*alpha),x,y,bordercolor !=
    (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k M e d i a n F i l t e r I m a g e                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMedianFilterImage() applies a digital filter that improves the
%  quality of a noisy image.  Each pixel is replaced by the median in a set
%  of neighboring pixels as defined by radius.
%
%  The format of the MagickMedianFilterImage method is:
%
%      MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
%        const double radius)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o radius: the radius of the pixel neighborhood.
%
*/
/* Deprecated: median-filters the current image in place (the filtered image
   replaces the original in the wand's image list). */
WandExport MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
  const double radius)
{
  Image
    *median_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  median_image=MedianFilterImage(wand->images,radius,wand->exception);
  if (median_image == (Image *) NULL)
    return(MagickFalse);
  ReplaceImageInList(&wand->images,median_image);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k M i n i m u m I m a g e s                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMinimumImages() returns the minimum intensity of an image sequence.
%
%  The format of the MagickMinimumImages method is:
%
%      MagickWand *MagickMinimumImages(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
/* Deprecated: evaluates the sequence with MinEvaluateOperator and returns
   the result as a new wand (NULL on failure or empty wand). */
WandExport MagickWand *MagickMinimumImages(MagickWand *wand)
{
  Image
    *minimum_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  minimum_image=EvaluateImages(wand->images,MinEvaluateOperator,
    wand->exception);
  if (minimum_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,minimum_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k M o d e I m a g e                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickModeImage() makes each pixel the 'predominant color' of the
%  neighborhood of the specified radius.
%
%  The format of the MagickModeImage method is:
%
%      MagickBooleanType MagickModeImage(MagickWand *wand,
%        const double radius)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o radius: the radius of the pixel neighborhood.
%
*/
/* Deprecated: replaces each pixel with the predominant color of its
   neighborhood; the filtered image replaces the current image. */
WandExport MagickBooleanType MagickModeImage(MagickWand *wand,
  const double radius)
{
  Image
    *filtered_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  filtered_image=ModeImage(wand->images,radius,wand->exception);
  if (filtered_image != (Image *) NULL)
    {
      ReplaceImageInList(&wand->images,filtered_image);
      return(MagickTrue);
    }
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k M o s a i c I m a g e s                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMosaicImages() inlays an image sequence to form a single coherent
%  picture.  It returns a wand with each image in the sequence composited at
%  the location defined by the page offset of the image.
%
%  The format of the MagickMosaicImages method is:
%
%      MagickWand *MagickMosaicImages(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
/* Deprecated: composites the image sequence into one picture at each image's
   page offset; returns a new wand, or NULL on failure or empty wand. */
WandExport MagickWand *MagickMosaicImages(MagickWand *wand)
{
  Image
    *composite;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  composite=MosaicImages(wand->images,wand->exception);
  if (composite != (Image *) NULL)
    return(CloneMagickWandFromImages(wand,composite));
  return((MagickWand *) NULL);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k O p a q u e I m a g e                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickOpaqueImage() changes any pixel that matches color with the color
%  defined by fill.
%
%  The format of the MagickOpaqueImage method is:
%
%      MagickBooleanType MagickOpaqueImage(MagickWand *wand,
%        const PixelWand *target,const PixelWand *fill,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the channel(s).
%
%    o target: Change this target color to the fill color within the image.
%
%    o fill: the fill pixel wand.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
%
*/
/* Deprecated alias for MagickPaintOpaqueImage(). */
WandExport MagickBooleanType MagickOpaqueImage(MagickWand *wand,
  const PixelWand *target,const PixelWand *fill,const double fuzz)
{
  MagickBooleanType
    status;

  status=MagickPaintOpaqueImage(wand,target,fill,fuzz);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k P a i n t F l o o d f i l l I m a g e                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickPaintFloodfillImage() changes the color value of any pixel that
%  matches target and is an immediate neighbor.  If the method
%  FillToBorderMethod is specified, the color value is changed for any
%  neighbor pixel that does not match the bordercolor member of image.
%
%  The format of the MagickPaintFloodfillImage method is:
%
%      MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
%        const ChannelType channel,const PixelWand *fill,const double fuzz,
%        const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the channel(s).
%
%    o fill: the floodfill color pixel wand.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
%
%    o bordercolor: the border color pixel wand.
%
%    o x,y: the starting location of the operation.
%
*/
/* Deprecated: forwards to MagickFloodfillPaintImage() with invert=MagickFalse
   (fill pixels that DO match the target). */
WandExport MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
  const ChannelType channel,const PixelWand *fill,const double fuzz,
  const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
{
  return(MagickFloodfillPaintImage(wand,channel,fill,fuzz,bordercolor,x,y,
    MagickFalse));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k P a i n t O p a q u e I m a g e                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickPaintOpaqueImage() changes any pixel that matches color with the
%  color defined by fill.
%
%  The format of the MagickPaintOpaqueImage method is:
%
%      MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
%        const PixelWand *target,const PixelWand *fill,const double fuzz)
%      MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
%        const ChannelType channel,const PixelWand *target,
%        const PixelWand *fill,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the channel(s).
%
%    o target: Change this target color to the fill color within the image.
%
%    o fill: the fill pixel wand.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
%
*/
/* Deprecated: paints all channels; delegates to the channel variant. */
WandExport MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
  const PixelWand *target,const PixelWand *fill,const double fuzz)
{
  return(MagickPaintOpaqueImageChannel(wand,DefaultChannels,target,fill,fuzz));
}

/* Deprecated: forwards to MagickOpaquePaintImageChannel() with
   invert=MagickFalse (repaint pixels that DO match the target). */
WandExport MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
  const ChannelType channel,const PixelWand *target,const PixelWand *fill,
  const double fuzz)
{
  return(MagickOpaquePaintImageChannel(wand,channel,target,fill,fuzz,
    MagickFalse));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k P a i n t T r a n s p a r e n t I m a g e                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickPaintTransparentImage() changes any pixel that matches color with
%  the color defined by fill.
%
%  The format of the MagickPaintTransparentImage method is:
%
%      MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
%        const PixelWand *target,const double alpha,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o target: Change this target color to specified opacity value within
%      the image.
%
%    o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
%      transparent.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
%
*/
/* Deprecated: forwards to MagickTransparentPaintImage() with
   invert=MagickFalse. */
WandExport MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
  const PixelWand *target,const double alpha,const double fuzz)
{
  MagickBooleanType
    status;

  status=MagickTransparentPaintImage(wand,target,alpha,fuzz,MagickFalse);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k R e c o l o r I m a g e                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickRecolorImage() apply color transformation to an image.  The method
%  permits saturation changes, hue rotation, luminance to alpha, and various
%  other effects.  Although variable-sized transformation matrices can be
%  used, typically one uses a 5x5 matrix for an RGBA image and a 6x6 for
%  CMYKA (or RGBA with offsets).  The matrix is similar to those used by
%  Adobe Flash except offsets are in column 6 rather than 5 (in support of
%  CMYKA images) and offsets are normalized (divide Flash offset by 255).
%
%  The format of the MagickRecolorImage method is:
%
%      MagickBooleanType MagickRecolorImage(MagickWand *wand,
%        const size_t order,const double *color_matrix)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o order: the number of columns and rows in the color matrix.
%
%    o color_matrix: An array of doubles representing the color matrix.
%
*/
/* Deprecated: applies an order x order color transformation matrix to the
   current image; the transformed image replaces the original in the list. */
WandExport MagickBooleanType MagickRecolorImage(MagickWand *wand,
  const size_t order,const double *color_matrix)
{
  Image
    *transform_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /* A NULL matrix is rejected quietly (no exception is raised). */
  if (color_matrix == (const double *) NULL)
    return(MagickFalse);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  transform_image=RecolorImage(wand->images,order,color_matrix,
    wand->exception);
  if (transform_image == (Image *) NULL)
    return(MagickFalse);
  ReplaceImageInList(&wand->images,transform_image);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k R e d u c e N o i s e I m a g e                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickReduceNoiseImage() smooths the contours of an image while still
%  preserving edge information.  The algorithm works by replacing each pixel
%  with its neighbor closest in value.  A neighbor is defined by radius.  Use
%  a radius of 0 and ReduceNoise() selects a suitable radius for you.
%
%  The format of the MagickReduceNoiseImage method is:
%
%      MagickBooleanType MagickReduceNoiseImage(MagickWand *wand,
%        const double radius)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o radius: the radius of the pixel neighborhood.
%
*/
/* Deprecated: noise-reduces the current image in place (the filtered image
   replaces the original in the wand's image list). */
WandExport MagickBooleanType MagickReduceNoiseImage(MagickWand *wand,
  const double radius)
{
  Image
    *noise_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  noise_image=ReduceNoiseImage(wand->images,radius,wand->exception);
  if (noise_image == (Image *) NULL)
    return(MagickFalse);
  ReplaceImageInList(&wand->images,noise_image);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k M a x i m u m I m a g e s                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMaximumImages() returns the maximum intensity of an image sequence.
%
%  The format of the MagickMaximumImages method is:
%
%      MagickWand *MagickMaximumImages(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
/* Deprecated: evaluates the sequence with MaxEvaluateOperator and returns
   the result as a new wand (NULL on failure or empty wand). */
WandExport MagickWand *MagickMaximumImages(MagickWand *wand)
{
  Image
    *maximum_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  maximum_image=EvaluateImages(wand->images,MaxEvaluateOperator,
    wand->exception);
  if (maximum_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,maximum_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k S e t I m a g e A t t r i b u t e                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickSetImageAttribute() associates a property with an image.
%
%  The format of the MagickSetImageAttribute method is:
%
%      MagickBooleanType MagickSetImageAttribute(MagickWand *wand,
%        const char *property,const char *value)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o property: the property.
%
%    o value: the value.
%
*/
/* Deprecated alias for SetImageProperty() on the current image.
   NOTE(review): unlike sibling methods this performs no NULL check on
   wand->images before dereferencing it -- confirm callers guarantee a
   non-empty wand. */
WandExport MagickBooleanType MagickSetImageAttribute(MagickWand *wand,
  const char *property,const char *value)
{
  return(SetImageProperty(wand->images,property,value));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k S e t I m a g e I n d e x                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickSetImageIndex() set the current image to the position of the list
%  specified with the index parameter.
%
%  The format of the MagickSetImageIndex method is:
%
%      MagickBooleanType MagickSetImageIndex(MagickWand *wand,
%        const ssize_t index)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o index: the scene number.
%
*/
/* Deprecated alias for MagickSetIteratorIndex(). */
WandExport MagickBooleanType MagickSetImageIndex(MagickWand *wand,
  const ssize_t index)
{
  return(MagickSetIteratorIndex(wand,index));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   M a g i c k S e t I m a g e O p t i o n                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickSetImageOption() associates one or options with a particular image
%  format (.e.g MagickSetImageOption(wand,"jpeg","perserve","yes").
%
%  The format of the MagickSetImageOption method is:
%
%      MagickBooleanType MagickSetImageOption(MagickWand *wand,
%        const char *format,const char *key,const char *value)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o format: the image format.
%
%    o key:  The key.
%
%    o value:  The value.
%
*/
/* Deprecated: defines an image option on the wand's image_info, formatted
   as "format:key=value" (e.g. "jpeg:preserve=yes"). */
WandExport MagickBooleanType MagickSetImageOption(MagickWand *wand,
  const char *format,const char *key,const char *value)
{
  char
    option[MaxTextExtent];

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /* NOTE(review): options longer than MaxTextExtent are silently
     truncated by FormatLocaleString. */
  (void) FormatLocaleString(option,MaxTextExtent,"%s:%s=%s",format,key,value);
  return(DefineImageOption(wand->image_info,option));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k T r a n s p a r e n t I m a g e                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickTransparentImage() changes any pixel that matches color with the
%  color defined by fill.
%
%  The format of the MagickTransparentImage method is:
%
%      MagickBooleanType MagickTransparentImage(MagickWand *wand,
%        const PixelWand *target,const double alpha,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o target: Change this target color to specified opacity value within
%      the image.
%
%    o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
%      transparent.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
%
*/
/* Deprecated alias for MagickPaintTransparentImage(). */
WandExport MagickBooleanType MagickTransparentImage(MagickWand *wand,
  const PixelWand *target,const double alpha,const double fuzz)
{
  MagickBooleanType
    status;

  status=MagickPaintTransparentImage(wand,target,alpha,fuzz);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k R e g i o n O f I n t e r e s t I m a g e                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickRegionOfInterestImage() extracts a region of the image and returns
%  it as a new wand.
%
%  The format of the MagickRegionOfInterestImage method is:
%
%      MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
%        const size_t width,const size_t height,const ssize_t x,
%        const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o width: the region width.
%
%    o height: the region height.
%
%    o x: the region x offset.
%
%    o y: the region y offset.
%
*/
/* Deprecated alias for MagickGetImageRegion(). */
WandExport MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
  const size_t width,const size_t height,const ssize_t x,
  const ssize_t y)
{
  MagickWand
    *region_wand;

  region_wand=MagickGetImageRegion(wand,width,height,x,y);
  return(region_wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k S e t I m a g e P i x e l s                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickSetImagePixels() accepts pixel data and stores it in the image at
%  the location you specify.  The method returns MagickFalse on success
%  otherwise MagickTrue if an error is encountered.  The pixel data can be
%  either char, short int, int, ssize_t, float, or double in the order
%  specified by map.
%
%  Suppose your want to upload the first scanline of a 640x480 image from
%  character data in red-green-blue order:
%
%      MagickSetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels);
%
%  The format of the MagickSetImagePixels method is:
%
%      MagickBooleanType MagickSetImagePixels(MagickWand *wand,
%        const ssize_t x,const ssize_t y,const size_t columns,
%        const size_t rows,const char *map,const StorageType storage,
%        const void *pixels)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x, y, columns, rows:  These values define the perimeter of a region
%      of pixels you want to define.
%
%    o map:  This string reflects the expected ordering of the pixel array.
%      It can be any combination or order of R = red, G = green, B = blue,
%      A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
%      Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
%      P = pad.
%
%    o storage: Define the data type of the pixels.  Float and double types
%      are expected to be normalized [0..1] otherwise [0..QuantumRange].
%      Choose from these types: CharPixel, ShortPixel, IntegerPixel,
%      LongPixel, FloatPixel, or DoublePixel.
%
%    o pixels: This array of values contain the pixel components as defined
%      by map and type.  You must preallocate this array where the expected
%      length varies depending on the values of width, height, map, and type.
%
*/
/* Deprecated alias for MagickImportImagePixels(). */
WandExport MagickBooleanType MagickSetImagePixels(MagickWand *wand,
  const ssize_t x,const ssize_t y,const size_t columns,
  const size_t rows,const char *map,const StorageType storage,
  const void *pixels)
{
  return(MagickImportImagePixels(wand,x,y,columns,rows,map,storage,pixels));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k W r i t e I m a g e B l o b                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickWriteImageBlob() implements direct to memory image formats.  It
%  returns the image as a blob and its length.  Use MagickSetFormat() to
%  set the format of the returned blob (GIF, JPEG, PNG, etc.).
%
%  Use MagickRelinquishMemory() to free the blob when you are done with it.
%
%  The format of the MagickWriteImageBlob method is:
%
%      unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o length: the length of the blob.
%
*/
/* Deprecated alias for MagickGetImageBlob(); caller frees the result with
   MagickRelinquishMemory(). */
WandExport unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
{
  return(MagickGetImageBlob(wand,length));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   N e w P i x e l V i e w                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewPixelView() returns a pixel view required for all other methods in the
%  Pixel View API.
%
%  The format of the NewPixelView method is:
%
%      PixelView *NewPixelView(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the wand.
%
*/
/* Allocates one array of pixel wands per worker thread (number_wands wands
   each).  On partial failure the already-allocated rows are released via
   DestroyPixelsThreadSet() (defined elsewhere in this file) and NULL is
   returned. */
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands,
  const size_t number_threads)
{
  PixelWand
    ***pixel_wands;

  register ssize_t
    i;

  pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_wands));
  if (pixel_wands == (PixelWand ***) NULL)
    return((PixelWand ***) NULL);
  /* Zero the row pointers so a partial tear-down can tell which rows exist. */
  (void) ResetMagickMemory(pixel_wands,0,number_threads*sizeof(*pixel_wands));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixel_wands[i]=NewPixelWands(number_wands);
    if (pixel_wands[i] == (PixelWand **) NULL)
      return(DestroyPixelsThreadSet(pixel_wands,number_wands,number_threads));
  }
  return(pixel_wands);
}

/* Deprecated: creates a pixel view spanning the entire current image, with
   one row of pixel wands per OpenMP thread. */
WandExport PixelView *NewPixelView(MagickWand *wand)
{
  PixelView
    *pixel_view;

  assert(wand != (MagickWand *) NULL);
  /* NOTE(review): this uses MagickSignature where sibling methods check
     WandSignature -- confirm both macros share the same value. */
  assert(wand->signature == MagickSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(pixel_view,0,sizeof(*pixel_view));
  pixel_view->id=AcquireWandId();
  (void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  /* wand must be assigned before the cache view is acquired from it. */
  pixel_view->wand=wand;
  pixel_view->view=AcquireVirtualCacheView(pixel_view->wand->images,
    pixel_view->exception);
  pixel_view->region.width=wand->images->columns;
  pixel_view->region.height=wand->images->rows;
  pixel_view->number_threads=GetOpenMPMaximumThreads();
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  pixel_view->debug=IsEventLogging();
  pixel_view->signature=WandSignature;
  return(pixel_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   N e w P i x e l V i e w R e g i o n                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewPixelViewRegion() returns a pixel view required for all other methods
%  in the Pixel View API.
%
%  The format of the NewPixelViewRegion method is:
%
%      PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
%        const ssize_t y,const size_t width,const size_t height)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixel_wands view.
%
*/
WandExport PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  PixelView
    *pixel_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(pixel_view,0,sizeof(*pixel_view));
  pixel_view->id=AcquireWandId();
  (void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  /*
    Bug fix: assign the wand member *before* acquiring the cache view.  The
    view is acquired from pixel_view->wand->images, and the struct was just
    zeroed by ResetMagickMemory(), so the previous statement order
    dereferenced a NULL wand pointer.  This now matches the initialization
    order used by NewPixelView().
  */
  pixel_view->wand=wand;
  pixel_view->view=AcquireVirtualCacheView(pixel_view->wand->images,
    pixel_view->exception);
  /* the view is restricted to the caller-supplied region */
  pixel_view->region.width=width;
  pixel_view->region.height=height;
  pixel_view->region.x=x;
  pixel_view->region.y=y;
  pixel_view->number_threads=GetOpenMPMaximumThreads();
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  pixel_view->debug=IsEventLogging();
  pixel_view->signature=WandSignature;
  return(pixel_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P i x e l G e t N e x t R o w                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PixelGetNextRow() returns the next row as an array of pixel wands from the
%  pixel iterator.
%
%  The format of the PixelGetNextRow method is:
%
%      PixelWand **PixelGetNextRow(PixelIterator *iterator)
%
%  A description of each parameter follows:
%
%    o iterator: the pixel iterator.  (The row width, formerly returned via a
%      number_wands argument, is discarded by this deprecated variant.)
%
*/
WandExport PixelWand **PixelGetNextRow(PixelIterator *iterator)
{
  size_t
    number_wands;

  /* deprecated alias: forward and drop the row-width out-parameter */
  return(PixelGetNextIteratorRow(iterator,&number_wands));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P i x e l I t e r a t o r G e t E x c e p t i o n                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PixelIteratorGetException() returns the severity, reason, and description of
%  any error that occurs when using other methods in this API.
%
%  The format of the PixelIteratorGetException method is:
%
%      char *PixelIteratorGetException(const PixelIterator *iterator,
%        ExceptionType *severity)
%
%  A description of each parameter follows:
%
%    o iterator: the pixel iterator.
%
%    o severity: the severity of the error is returned here.
%
*/
WandExport char *PixelIteratorGetException(const PixelIterator *iterator,
  ExceptionType *severity)
{
  /* deprecated alias for PixelGetIteratorException() */
  return(PixelGetIteratorException(iterator,severity));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t P i x e l V i e w I t e r a t o r                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelViewIterator() iterates over the pixel view in parallel and calls
%  your set method for each scanline of the view.  The pixel region is
%  confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension.
The pixels are initially
%  undefined and any settings you make in the callback method are automagically
%  synced back to your image.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback set method that must be
%  executed by a single thread at a time.
%
%  The format of the SetPixelViewIterator method is:
%
%      MagickBooleanType SetPixelViewIterator(PixelView *destination,
%        SetPixelViewMethod set,void *context)
%
%  A description of each parameter follows:
%
%    o destination: the pixel view.
%
%    o set: the set callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType SetPixelViewIterator(PixelView *destination,
  SetPixelViewMethod set,void *context)
{
#define SetPixelViewTag  "PixelView/Set"

  ExceptionInfo
    *exception;

  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(destination != (PixelView *) NULL);
  assert(destination->signature == WandSignature);
  if (set == (SetPixelViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->wand->images;
  /* callback writes arbitrary pixel values, so force DirectClass storage */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  /* NOTE(review): the loop bound is region.height, not region.y+height --
     TODO confirm this matches the intended region semantics (offset views
     shorten the iteration) */
  for (y=destination->region.y; y < (ssize_t) destination->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict pixels;

    /* a failure in any thread cancels the remaining rows */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->region.x,
      y,destination->region.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /* let the user callback populate this thread's private wand row */
    if (set(destination,context) == MagickFalse)
      status=MagickFalse;
    /* copy the wand values back into the authentic pixel row */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        SetPixelIndex(indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_SetPixelViewIterator)
#endif
        proceed=SetImageProgress(destination_image,SetPixelViewTag,progress++,
          destination->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f e r P i x e l V i e w I t e r a t o r                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransferPixelViewIterator() iterates over two pixel views in parallel and
%  calls your transfer method for each scanline of the view.  The source pixel
%  region is not confined to the image canvas-- that is you can include
%  negative offsets or widths or heights that exceed the image dimension.
%  However, the destination pixel view is confined to the image canvas-- that
%  is no negative offsets or widths or heights that exceed the image dimension
%  are permitted.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the TransferPixelViewIterator method is:
%
%      MagickBooleanType TransferPixelViewIterator(PixelView *source,
%        PixelView *destination,TransferPixelViewMethod transfer,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source pixel view.
%
%    o destination: the destination pixel view.
%
%    o transfer: the transfer callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType TransferPixelViewIterator(PixelView *source,
  PixelView *destination,TransferPixelViewMethod transfer,void *context)
{
#define TransferPixelViewTag  "PixelView/Transfer"

  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /* NOTE(review): only the source view is asserted; destination is
     dereferenced below without a matching assert -- confirm intended */
  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (transfer == (TransferPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  destination_image=destination->wand->images;
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict pixels;

    register IndexPacket
      *restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict destination_pixels;

    if (status == MagickFalse)
      continue;
    /* read-only source row (virtual pixels allow out-of-canvas offsets) */
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    /* seed this thread's source wand row from the source pixels */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /* writable destination row */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->region.x,y,destination->region.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /* NOTE(review): the destination wand row is seeded from the *source*
       pixels/indexes rather than destination_pixels/destination_indexes --
       looks suspicious; confirm against the upstream implementation */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /* user callback maps source wands to destination wands */
    if (transfer(source,destination,context) == MagickFalse)
      status=MagickFalse;
    /* flush destination wands back into the authentic pixel row */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        SetPixelIndex(destination_indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        /* NOTE(review): exception is pulled from source->view although the
           failed sync was on destination->view -- confirm intended */
        InheritException(destination->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_TransferPixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,TransferPixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U p d a t e P i x e l V i e w I t e r a t o r                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UpdatePixelViewIterator() iterates over the pixel view in parallel and calls
%  your update method for each scanline of the view.  The pixel region is
%  confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension are permitted.  Updates to pixels
%  in your callback are automagically synced back to the image.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback update method that must be
%  executed by a single thread at a time.
%
%  The format of the UpdatePixelViewIterator method is:
%
%      MagickBooleanType UpdatePixelViewIterator(PixelView *source,
%        UpdatePixelViewMethod update,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source pixel view.
%
%    o update: the update callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType UpdatePixelViewIterator(PixelView *source,
  UpdatePixelViewMethod update,void *context)
{
#define UpdatePixelViewTag  "PixelView/Update"

  ExceptionInfo
    *exception;

  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (update == (UpdatePixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  /* callback may write arbitrary pixel values; require DirectClass */
  if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=source->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict pixels;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(source->view,source->region.x,y,
      source->region.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(source->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(source->view);
    /* load the row into this thread's wand set ... */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /* ... let the callback mutate it in place ... */
    if (update(source,context) == MagickFalse)
      status=MagickFalse;
    /* ... and write the (possibly modified) wands back to the image */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelGetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        SetPixelIndex(indexes+x,PixelGetBlackQuantum(
          source->pixel_wands[id][x]));
    if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
      {
        InheritException(source->exception,GetCacheViewException(source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_UpdatePixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,UpdatePixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
#endif
GB_unaryop__identity_int64_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_int64_fp32
// op(A') function:  GB_tran__identity_int64_fp32

// C type:   int64_t
// A type:   float
// cast:     int64_t cij ; GB_CAST_SIGNED(cij,aij,64)
// unaryop:  cij = aij

// type of the A matrix entries
#define GB_ATYPE \
    float

// type of the C matrix entries
#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: pass the casted value through)
#define GB_OP(z, x) \
    z = x ;

// casting (float -> int64_t, with saturation at the int64 range)
#define GB_CASTING(z, x) \
    int64_t z ; GB_CAST_SIGNED(z,x,64) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__identity_int64_fp32
(
    int64_t *restrict Cx,       // output array, length anz
    const float *restrict Ax,   // input array, length anz
    int64_t anz,                // number of entries
    int nthreads                // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // entries are independent; apply the cast+op elementwise in parallel
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__identity_int64_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,                // Rowcounts [naslice]
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,    // slice boundaries per task
    int naslice                         // # of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the numeric (second) phase of the transpose template does the work
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__bor_int16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bor_int16)
// A.*B function (eWiseMult):       GB (_AemultB_01__bor_int16)
// A.*B function (eWiseMult):       GB (_AemultB_02__bor_int16)
// A.*B function (eWiseMult):       GB (_AemultB_03__bor_int16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bor_int16)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__bor_int16)
// C+=b function (dense accum):     GB (_Cdense_accumb__bor_int16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bor_int16)
// C=scalar+B                       GB (_bind1st__bor_int16)
// C=scalar+B'                      GB (_bind1st_tran__bor_int16)
// C=A+scalar                       GB (_bind2nd__bor_int16)
// C=A'+scalar                      GB (_bind2nd_tran__bor_int16)

// C type:   int16_t
// A type:   int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij) | (bij)

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int16_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator (bitwise OR)
#define GB_BINOP(z, x, y, i, j) \
    z = (x) | (y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BOR || GxB_NO_INT16 || GxB_NO_BOR_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__bor_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bor_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bor_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((node)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bor_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__bor_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict 
C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bor_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__bor_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bor_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bor_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) 
for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = Bx [p] ; Cx [p] = (x) | (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bor_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = Ax [p] ; Cx [p] = (aij) | (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = (x) | (aij) ; \ } GrB_Info GB (_bind1st_tran__bor_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = (aij) | (y) ; \ } GrB_Info GB (_bind2nd_tran__bor_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
special_ops.h
#pragma once #include <ops/ops.h> #include <loops/reduce.h> #include <loops/scalar.h> #include <loops/indexreduce.h> #include <loops/broadcasting.h> namespace functions { namespace broadcast { template <typename T> class Broadcast; } namespace transform { template <typename T> class Transform; } namespace scalar { } namespace reduce { template <typename T> class ReduceFunction; } } namespace simdOps { template<typename T> class Pooling2D { public: static const bool requiresSpecial = true; #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif static int outSize(int size, int k, int s, int p, bool coverAll) { if (coverAll) return (size + p * 2 - k + s - 1) / s + 1; else return (size + p * 2 - k) / s + 1; } #ifdef __CUDACC__ /** * Based on: https://github.com/pjreddie/darknet/blob/master/src/im2col_kernels.cu */ static inline __device__ void execSpecialCuda( T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { __shared__ int kH; __shared__ int kW; __shared__ int sH; __shared__ int sW; __shared__ int pH; __shared__ int pW; __shared__ int dH; __shared__ int dW; __shared__ int poolingMode; __shared__ T extraParam0; __shared__ int batchSize; __shared__ int inChannels; __shared__ int outH; __shared__ int outW; __shared__ int inH; __shared__ int inW; //__shared__ int *strideIn; //__shared__ int *strideOut; __shared__ int strideB; __shared__ int strideC; __shared__ int strideY; __shared__ int strideX; __shared__ int strideOB; __shared__ int strideOC; __shared__ int strideOY; __shared__ int strideOX; __shared__ int length; __shared__ int kHEff; __shared__ int kWEff; __shared__ bool fOrder; if (threadIdx.x == 0) { kH = (int)extraParams[0]; kW = (int)extraParams[1]; sH = (int)extraParams[2]; sW = (int)extraParams[3]; pH = (int)extraParams[4]; pW = (int)extraParams[5]; dH = (int)extraParams[6]; 
//Dilation, height dimension dW = (int)extraParams[7]; //Dilation, width dimension poolingMode = (int)extraParams[9]; extraParam0 = extraParams[10]; batchSize = shape::sizeAt(xShapeBuffer, 0); inChannels = shape::sizeAt(xShapeBuffer, 1); outH = shape::sizeAt(resultShapeBuffer, 2); outW = shape::sizeAt(resultShapeBuffer, 3); inH = shape::sizeAt(xShapeBuffer, 2); inW = shape::sizeAt(xShapeBuffer, 3); strideB = shape::stride(xShapeBuffer)[0]; strideC = shape::stride(xShapeBuffer)[1]; strideY = shape::stride(xShapeBuffer)[2]; strideX = shape::stride(xShapeBuffer)[3]; strideOB = shape::stride(resultShapeBuffer)[0]; strideOC = shape::stride(resultShapeBuffer)[1]; strideOY = shape::stride(resultShapeBuffer)[2]; strideOX = shape::stride(resultShapeBuffer)[3]; length = shape::length(resultShapeBuffer); //Replace kernel H/W with *effective* kernel H/W accounting for dilatyon kHEff = kH + (kH-1)*(dH-1); kWEff = kW + (kW-1)*(dW-1); fOrder = shape::order(resultShapeBuffer) == 'f'; /* if (blockIdx.x == 0) { printf("kH: %i; kW: %i; sH: %i; sW: %i; pH: %i; pW: %i; dH: %i; dW: %i; poolingMode: %i; extraParam0: %f;\n", kH, kW, sH, sW, pH, pW, dH, dW, poolingMode, (float) extraParam0); printf("batchSize: %i; inChannels: %i; outH: %i; outW: %i; inH: %i; inW: %i; strideB: %i; strideC: %i; strideY: %i; strideX: %i;\n", batchSize, inChannels, outH, outW, inH, inW, strideB, strideC, strideY, strideX); } */ } __syncthreads(); int tid = blockIdx.x * gridDim.x + threadIdx.x; for (int index = tid; index < length; index += blockDim.x * gridDim.x) { const int pw = index % outW; const int ph = (index / outW) % outH; const int c = (index / outW / outH) % inChannels; const int n = index / outW / outH / inChannels; int hstart = sH * ph - pH; int wstart = sW * pw - pW; int hend = hstart + kHEff; int wend = wstart + kWEff; // const int hSO = hstart; // const int hEO = hend; if(hstart < 0){ int f = (int)nd4j::math::nd4j_ceil<T>((T) -hstart / (T)dH); hstart += f * dH; } if(wstart < 0){ int f = 
(int)nd4j::math::nd4j_ceil<T>((T) -wstart / (T) dW); wstart += f * dW; } if(hend > inH){ int f = (int)nd4j::math::nd4j_ceil<T>((T) (hend-inH) / (T) dH); hend -= f * dH; } if(wend > inW){ int f = (int)nd4j::math::nd4j_ceil<T>((T) (wend-inW) / (T) dW); wend -= f * dW; } int pool_size = (int)(nd4j::math::nd4j_ceil<T>((T) (hend-hstart) / (T) dH) * (int) nd4j::math::nd4j_ceil<T>((T) (wend-wstart) / (T) dW)); //Accounts for dilation T sum = poolingMode == 0 ? (T) -MAX_FLOAT : (T) 0; T *input_slice = dx + (n * strideB + c * strideC); if (poolingMode == 0) { for (int h = hstart; h < hend; h += dH) { for (int w = wstart; w < wend; w += dW) { T v = input_slice[h * strideY + w * strideX]; if (v > sum) sum = v; } } } else if (poolingMode == 1) { for (int h = hstart; h < hend; h += dH) { for (int w = wstart; w < wend; w += dW) { sum += input_slice[h * strideY + w * strideX]; } } } else if (poolingMode == 2) { for (int h = hstart; h < hend; h += dH) { for (int w = wstart; w < wend; w += dW) { sum += nd4j::math::nd4j_pow<T>(nd4j::math::nd4j_abs<T>(input_slice[h * strideY + w * strideX]), extraParam0); } } } T res; if (poolingMode == 0) { res = sum; } else if (poolingMode == 1) { int divide_factor = pool_size; //Case 0: exclude padding if ((int) extraParam0 == 1) //Case 1: include padding divide_factor = kH * kW; res = sum / divide_factor; } else if (poolingMode == 2) { res = nd4j::math::nd4j_pow<T>(sum, (T) 1.0f / extraParam0); } if (!fOrder) { result[index] = res; } else { result[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = res; } /* if (index >= 0 && index < 400000) { printf("index: %i; hstart: %i; hend: %i; wstart: %i; wend: %i; ph: %i; pw: %i; hstart_orig: %i; hend_orig: %i;\n", index, hstart, hend, wstart, wend, ph, pw, hSO, hEO); } */ } } #endif static void execSpecial( T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { int kH = (int)extraParams[0]; int kW = 
(int)extraParams[1];
            int sH = (int)extraParams[2];
            int sW = (int)extraParams[3];
            int pH = (int)extraParams[4];
            int pW = (int)extraParams[5];
            int dH = (int)extraParams[6];           //Dilation, height dimension
            int dW = (int)extraParams[7];           //Dilation, width dimension
            int poolingMode = (int)extraParams[9];
            T extraParam0 = extraParams[10];

            // Effective kernel extent once dilation gaps are included.
            const int kHEff = kH + (kH-1)*(dH-1);
            const int kWEff = kW + (kW-1)*(dW-1);

            const int batchSize = (int) shape::sizeAt(xShapeBuffer, 0);
            const int inChannels = (int) shape::sizeAt(xShapeBuffer, 1);
            const int outH = (int) shape::sizeAt(resultShapeBuffer, 2);
            const int outW = (int) shape::sizeAt(resultShapeBuffer, 3);
            const int inH = (int) shape::sizeAt(xShapeBuffer, 2);
            const int inW = (int) shape::sizeAt(xShapeBuffer, 3);

            auto strideIn = shape::stride(xShapeBuffer);
            auto strideOut = shape::stride(resultShapeBuffer);
            const bool fOrder = shape::order(resultShapeBuffer) == 'f';

            // NOTE(review): zLength, zRank, indices and idx below appear unused
            // in this function (indices is only named in the omp shared clause).
            const Nd4jLong zLength = shape::length(resultShapeBuffer);
            const int zRank = shape::rank(resultShapeBuffer);
            int indices[6];
            int idx = 0;

            // Parallelize over (channel, batch) pairs; each iteration owns a
            // disjoint output slice, so no synchronization is needed on writes.
            #pragma omp parallel for collapse(2) schedule(guided) shared(indices)
            for(int k = 0; k < inChannels; k++) {
                for(int p = 0; p < batchSize; p++) {
                    int xx, yy;
                    /* For all output pixels... */
                    const int _b = p * strideOut[0];
                    const int _k = k * strideOut[1];
                    T *ptr_output = result + _b + _k;
                    T *ptr_input = dx + p * strideIn[0] + k * strideIn[1];
                    for(yy = 0; yy < outH; yy++) {
                        for(xx = 0; xx < outW; xx++) {
                            /* Compute the mean of the input image... */
                            // Window bounds, clipped to the image on a
                            // dilation-aligned grid (mirrors the CUDA path).
                            int hstart = yy * sH - pH;
                            int wstart = xx * sW - pW;
                            int hend = hstart + kHEff;
                            int wend = wstart + kWEff;

                            // NOTE(review): hSO/hEO are unused leftovers from debug code.
                            const int hSO = hstart;
                            const int hEO = hend;

                            if(hstart < 0){
                                int n = (int)nd4j::math::nd4j_ceil<T>((T) -hstart / ((T)dH));
                                hstart += n * dH;
                            }
                            if(wstart < 0){
                                int n = (int)nd4j::math::nd4j_ceil<T>((T) -wstart / ((T)dW));
                                wstart += n * dW;
                            }
                            if(hend > inH){
                                int n = (int)nd4j::math::nd4j_ceil<T>((T)(hend-inH)/((T)dH));
                                hend -= n * dH;
                            }
                            if(wend > inW){
                                int n = (int)nd4j::math::nd4j_ceil<T>((T)(wend-inW)/((T)dW));
                                wend -= n * dW;
                            }
                            int pool_size = (int)(nd4j::math::nd4j_ceil<T>((T) (hend-hstart)/((T)dH)) * (int)nd4j::math::nd4j_ceil<T>((T)(wend-wstart)/((T)dW)));    //Accounts for dilation

                            // max pooling accumulates from -MAX_FLOAT, the others from 0.
                            T sum = poolingMode == 0 ? (T) -MAX_FLOAT : (T) 0;

                            // we need this only for avg pooling
                            int divide_factor = 0;
                            if (poolingMode == 1) {
                                if ((int) extraParam0 == 0)         //Exclude padding
                                    divide_factor = pool_size;
                                else if ((int) extraParam0 == 1)    //Include padding
                                    divide_factor = kH * kW;
                            }

                            long kx, ky;
                            // poolingMode: 0 = max, 1 = sum (avg after divide), 2 = pnorm.
                            // maxT/sumT are presumably user-declared OMP reductions — defined elsewhere in the project.
                            if (poolingMode == 0) {
                                #pragma omp simd reduction(maxT:sum) collapse(2)
                                for (ky = hstart; ky < hend; ky += dH) {
                                    for (kx = wstart; kx < wend; kx += dW)
                                        if (ptr_input[ky * strideIn[2] + kx * strideIn[3]] > sum)
                                            sum = ptr_input[ky * strideIn[2] + kx * strideIn[3]];
                                }
                            } else if (poolingMode == 1) {
                                #pragma omp simd reduction(sumT:sum) collapse(2)
                                for (ky = hstart; ky < hend; ky += dH) {
                                    for (kx = wstart; kx < wend; kx += dW)
                                        sum += ptr_input[ky * strideIn[2] + kx * strideIn[3]];
                                }
                            } else if (poolingMode == 2) {
                                #pragma omp simd reduction(sumT:sum) collapse (2)
                                for (ky = hstart; ky < hend; ky += dH) {
                                    for (kx = wstart; kx < wend; kx += dW)
                                        sum += nd4j::math::nd4j_pow<T>(nd4j::math::nd4j_abs<T>(ptr_input[ky * strideIn[2] + kx * strideIn[3]]), extraParam0);
                                }
                            }

                            /* Update output */
                            T res = sum;
                            if (poolingMode == 1) {
                                res /= divide_factor;
                            } else if (poolingMode == 2)
                                res = nd4j::math::nd4j_pow<T>(res, (T) 1.0f / extraParam0);

                            // c-order output advances sequentially; f-order is
                            // written through explicit strides.
                            if (!fOrder) {
                                *ptr_output++ = res;
                            } else {
                                result[_b + _k + yy * strideOut[2] + xx * strideOut[3]] = res;
                            }
                        }
                    }
                }
            }
        }

        // Identity element-wise op (the real work happens in execSpecial*).
        op_def static T op(T d1, T *params) {
            return d1;
        }

        /** Calculate buffer offset (like Shape.getOffset) without checking on input for negative indices etc
         * normally negative indices are bad, OK here because of other checks on input indices
         * Uses unrolled loop specifically for length 4
         */
        static _CUDA_HD int getOffsetUnsafe4(int baseOffset, int *shape, int *stride, int *indices) {
            int offset = baseOffset;
            if (shape[0] != 1) offset += indices[0] * stride[0];
            if (shape[1] != 1) offset += indices[1] * stride[1];
            if (shape[2] != 1) offset += indices[2] * stride[2];
            if (shape[3] != 1) offset += indices[3] * stride[3];
            return offset;
        }

        /**
         * A version of Shape.getOffset without checking on input for negative indices etc
         * normally negative indices are bad, OK here because of other checks on input indices
         * Uses unrolled loop specifically for length 6, where indices[2] and indices[3] are zero (always are here)
         */
        static _CUDA_HD int getOffsetUnsafe6(int baseOffset, int *shape, int *stride, int *indices) {
            int offset = baseOffset;
            if (shape[0] != 1) offset += indices[0] * stride[0];
            if (shape[1] != 1) offset += indices[1] * stride[1];
            if (shape[4] != 1) offset += indices[4] * stride[4];
            if (shape[5] != 1) offset += indices[5] * stride[5];
            return offset;
        }
    };

    // true iff 0 <= a < b, in a single unsigned comparison (negative a wraps high).
    FORCEINLINE bool is_a_ge_zero_and_a_lt_b(int a, int b) {
        return static_cast<unsigned>(a) < static_cast<unsigned>(b);
    }

    /**
     * im2col: unfold a 4d image into a 6d column buffer
     * [bS, iC, kH, kW, oH, oW] so convolution becomes a matmul.
     */
    template<typename T>
    class Im2col {
    public:
        static const bool requiresSpecial = true;

        // Output spatial extent for the given kernel/stride/padding.
        static _CUDA_HD int outSize(int size, int k, int s, int p, bool coverAll) {
            if (coverAll)
                return (size + p * 2 - k + s - 1) / s + 1;
            else
                return (size + p * 2 - k) / s + 1;
        }

#ifdef __CUDACC__
        /**
         * Based on: https://github.com/pjreddie/darknet/blob/master/src/im2col_kernels.cu
         */
        static inline __device__ void execSpecialCuda(
                T *dx, Nd4jLong *xShapeBuffer,
                T *result, Nd4jLong *resultShapeBuffer,
                T *extraParams,
                int *allocationPointer, T *reductionPointer,
UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
            /*kernel[0], kernel[1], stride[0], stride[1], padding[0], padding[1], 0, false*/
            int kernelHeight = (int)extraParams[0];
            int kernelWidth = (int)extraParams[1];
            int strideY = (int)extraParams[2];
            int strideX = (int)extraParams[3];
            int padHeight = (int)extraParams[4];
            int padWidth = (int)extraParams[5];
            int dY = (int)extraParams[6];           //Dilation, height/y dimension
            int dX = (int)extraParams[7];           //Dilation, width/x dimension
            int kSize = kernelWidth * kernelHeight;
            T zeroPadVal = (T)extraParams[9];       //Value to use when value is padding. Usually 0 but not always

            auto outShape = shape::shapeOf(resultShapeBuffer);
            auto resultOrder = shape::order(resultShapeBuffer);
            auto outStride = shape::stride(resultShapeBuffer);

            auto inShape = shape::shapeOf(xShapeBuffer);
            auto inStride = shape::stride(xShapeBuffer);

            int samples = inShape[0];
            int depth = inShape[1];
            int height = inShape[2];
            int width = inShape[3];

            int strideex = inStride[0];
            int stridech = inStride[1];
            int strideh = inStride[2];
            int stridew = inStride[3];

            // (height + 2 * padHeight - kernelHeight) / strideX + 1; //
            // (width + 2 * padWidth - kernelWidth) / strideY + 1; //
            int height_col = outShape[4];
            int width_col = outShape[5];

            // One thread per (sample, channel, out-row, out-col) patch position.
            int n = samples * depth * height_col * width_col;

            /*
            if (threadIdx.x == 0)
                printf("Kernel h: [%i], w: [%i]; Col h: [%i], w: [%i]; Stride x: [%i], y: [%i]; Height: [%i], Width: [%i], Depth: [%i], N: [%i], Samples: [%i]\n", kernelHeight, kernelWidth, height_col, width_col, strideX, strideY, height, width, depth, n, samples);
            */

            int index = blockIdx.x * blockDim.x + threadIdx.x;
            for (; index < n; index += blockDim.x*gridDim.x) {
                // Decompose linear index into patch coordinates.
                int h_index = index / width_col;
                int h_col = h_index % height_col;
                int w_col = index % width_col;

                int c_im = h_index / height_col;
                int c_col = c_im * kSize;

                int depth_im = c_im % depth;
                int num_im = c_im / depth;
                // Top-left corner of the receptive field in the input image.
                int h_offset = h_col * strideY - padHeight;
                int w_offset = w_col * strideX - padWidth;

                T* data_col_ptr = result;

                int i_c = (c_col * height_col + h_col) * width_col + w_col;
                data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;

                T* data_im_ptr = dx;

                data_im_ptr += num_im * strideex + depth_im * stridech + h_offset * strideh + w_offset*stridew;

                // Walk the kernel window; i_f maps the logical col index onto
                // the (possibly non-contiguous) 6d output strides.
                for (int i = 0; i < kernelHeight; ++i) {
                    for (int j = 0; j < kernelWidth; ++j) {
                        int h_im = h_offset + i * dY;
                        int w_im = w_offset + j * dX;
                        int i_f = 0;
                        int i_c_temp = i_c;
                        for (int dim = 5; dim >= 0; dim--) {
                            i_f += (i_c_temp % outShape[dim])  * outStride[dim];
                            i_c_temp = i_c_temp / outShape[dim];
                        }
                        // Out-of-image taps produce the padding value.
                        if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width){
                            result[i_f] = data_im_ptr[i * dY * strideh + j * dX * stridew];
                        } else result[i_f] = zeroPadVal;

                        //result[i_f] = (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ? data_im_ptr[i * strideh + j*stridew] : 0;
                        data_col_ptr += height_col * width_col;
                        i_c += height_col * width_col;
                    }
                }
            }
        }
#endif

        // CPU im2col: 4d image -> 6d col buffer [bS, iC, kH, kW, oH, oW].
        static void execSpecial(
                T *dx, Nd4jLong *xShapeBuffer,
                T *result, Nd4jLong *resultShapeBuffer,
                T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
            /*kernel[0], kernel[1], stride[0], stride[1], padding[0], padding[1], 0, false*/

            // NOTE(review): unlike the CUDA path, the pad value here is fixed
            // to 0 rather than read from extraParams[9] — confirm intended.
            T zeroPadVal = (T) 0.0f;

            int kH = (int)extraParams[0];
            int kW = (int)extraParams[1];
            int sH = (int)extraParams[2];
            int sW = (int)extraParams[3];
            int pH = (int)extraParams[4];
            int pW = (int)extraParams[5];
            int dH = (int)extraParams[6];           //Dilation, height/y dimension
            int dW = (int)extraParams[7];           //Dilation, width/x dimension

            auto outShape = shape::shapeOf(resultShapeBuffer);
            auto outStride = shape::stride(resultShapeBuffer);

            auto inShape = shape::shapeOf(xShapeBuffer);
            auto inStride = shape::stride(xShapeBuffer);

            const int bS = inShape[0];
            const int iC = inShape[1];
            const int iH = inShape[2];
            const int iW = inShape[3];
            const int oH = outShape[4];
            const int oW = outShape[5];

            // Hoist strides into scalars for the hot loops below.
            const int outStride0  = outStride[0];
            const int outStride1  = outStride[1];
            const int outStride2  = outStride[2];
            const int outStride3  =
outStride[3];
            const int outStride4  = outStride[4];
            const int outStride5  = outStride[5];

            const int inStride0   = inStride[0];
            const int inStride1   = inStride[1];
            const int inStride2   = inStride[2];
            const int inStride3   = inStride[3];

            // NOTE(review): several of the precomputed bounds below (in0End,
            // kRowEnd, kColEnd, oHW, inRowEnd, inColEnd) appear unused here.
            const T* in0End = dx + inStride1 * iC;
            const int kRowEnd = -pH + kH * dH;
            const int kColEnd = -pW + kW * dW;
            const int oHW = oH * oW;
            const int inRowEnd = oH * sH;
            const int inColEnd = oW * sW;

            int inRowStart, inColStart, inRow, inCol;
            T *in0, *in1;

            // Fast path: both buffers are plain c-order, so the output can be
            // written with a single sequentially advancing pointer.
            if (shape::order(xShapeBuffer) == 'c' && shape::order(resultShapeBuffer) == 'c' &&
                shape::strideDescendingCAscendingF(xShapeBuffer) && shape::strideDescendingCAscendingF(resultShapeBuffer)) {

                #pragma omp parallel for schedule(static) proc_bind(close) private(in0, in1, inRowStart, inColStart, inRow, inCol)
                for (int b = 0; b < bS; b++) {
                    in0 = dx + (b * inStride0);
                    T *output = result + (b * outStride0);

                    for (int channel = 0; channel < iC; ++channel, in0 += inStride1) {
                        for (int kRow = 0; kRow < kH; kRow++) {
                            inRowStart = -pH + kRow * dH;

                            for (int kCol = 0; kCol < kW; kCol++) {
                                inRow = inRowStart;
                                inColStart = -pW + kCol * dW;

                                for (int outRow = 0; outRow < oH; ++outRow, inRow += sH) {
                                    // Whole row out of bounds -> fill with the pad value.
                                    if (!is_a_ge_zero_and_a_lt_b(inRow, iH))
                                        for (int outCol = 0; outCol < oW; ++outCol, ++output) {
                                            *output = zeroPadVal;
                                        }
                                    else {
                                        inCol = inColStart;
                                        in1 = in0 + inRow * inStride2;

                                        for (int outCol = 0; outCol < oW; ++outCol, inCol += sW, ++output)
                                            if (is_a_ge_zero_and_a_lt_b(inCol, iW))
                                                *output = *(in1 + inCol * inStride3);
                                            else
                                                *output = zeroPadVal;
                                    }
                                }
                            }
                        }
                    }
                }
            }
            else {
                // Generic path: walk the 6d output through one pointer per axis.
                T *out0, *out1, *out2, *out3, *out4;

                #pragma omp parallel for schedule(static) proc_bind(close) private(in0, in1, out0, out1, out2, out3, out4, inRowStart, inColStart, inRow, inCol)
                for (int b = 0; b < bS; b++) {
                    in0 = dx + (b * inStride0);
                    out0 = result + b * outStride0;

                    for (int channel = 0; channel < iC; ++channel, in0 += inStride1, out0+=outStride1) {
                        out1 = out0;

                        for (int kRow = 0; kRow < kH; kRow++, out1 += outStride2) {
                            out2 = out1;
                            inRowStart = -pH + kRow * dH;

                            for (int kCol = 0; kCol < kW; kCol++, out2 += outStride3) {
                                out3 = out2;
                                inRow = inRowStart;
                                inColStart = -pW + kCol * dW;

                                for (int outRow = 0; outRow < oH; ++outRow, inRow += sH, out3 += outStride4) {
                                    out4 = out3;

                                    if (!is_a_ge_zero_and_a_lt_b(inRow, iH))
                                        for (int outCol = 0; outCol < oW; ++outCol, out4 += outStride5) {
                                            *out4 = zeroPadVal;
                                        }
                                    else {
                                        inCol = inColStart;
                                        in1 = in0 + inRow * inStride2;

                                        for (int outCol = 0; outCol < oW; ++outCol, inCol += sW, out4 += outStride5) {
                                            if (is_a_ge_zero_and_a_lt_b(inCol, iW))
                                                *out4 = *(in1 + inCol * inStride3);
                                            else
                                                *out4 = zeroPadVal;
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }

        // Identity element-wise op (the real work happens in execSpecial*).
        op_def static T op(T d1, T *params) {
            return d1;
        }

        /** Calculate buffer offset (like Shape.getOffset) without checking on input for negative indices etc
         * normally negative indices are bad, OK here because of other checks on input indices
         * Uses unrolled loop specifically for length 4
         */
        static _CUDA_HD int getOffsetUnsafe4(int baseOffset, int *shape, int *stride, int *indices) {
            int offset = baseOffset;
            if (shape[0] != 1) offset += indices[0] * stride[0];
            if (shape[1] != 1) offset += indices[1] * stride[1];
            if (shape[2] != 1) offset += indices[2] * stride[2];
            if (shape[3] != 1) offset += indices[3] * stride[3];
            return offset;
        }

        /**
         * A version of Shape.getOffset without checking on input for negative indices etc
         * normally negative indices are bad, OK here because of other checks on input indices
         * Uses unrolled loop specifically for length 6, where indices[2] and indices[3] are zero (always are here)
         */
        static _CUDA_HD int getOffsetUnsafe6(int baseOffset, int *shape, int *stride, int *indices) {
            int offset = baseOffset;
            if (shape[0] != 1) offset += indices[0] * stride[0];
            if (shape[1] != 1) offset += indices[1] * stride[1];
            if (shape[4] != 1) offset += indices[4] * stride[4];
            if (shape[5] != 1) offset += indices[5] * stride[5];
            return offset;
        }
    };

    /**
     * Histogram of the flat input into numBins (extraParams[0]) buckets over
     * [min_val, max_val] (extraParams[1], extraParams[2]); out-of-range values
     * are clamped into the first/last bin.
     */
    template<typename T>
    class Histogram {
    public:
        static const bool requiresSpecial = true;

#ifdef __CUDACC__
        // Per-block histogram in shared memory, then (if multi-block) a
        // ticket-based last-block reduction through allocationPointer.
        static inline __device__ void execSpecialCuda(
                T *dx, Nd4jLong *xShapeBuffer,
                T *result, Nd4jLong *resultShapeBuffer,
                T *extraParams,
                int *allocationPointer, T *reductionPointer,
                UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
            int numBins = (int) extraParams[0];
            T min_val = extraParams[1];
            T max_val = extraParams[2];

            int tid = blockIdx.x * blockDim.x + threadIdx.x;

            __shared__ T *bins;
            __shared__ int length;
            __shared__ T *reductor;
            if (threadIdx.x == 0) {
                extern __shared__ unsigned char shmem[];
                bins = (T *) shmem;
                reductor = ((T *) allocationPointer) + (numBins * blockIdx.x);

                length = shape::length(xShapeBuffer);
            }
            __syncthreads();

            T binSize = (max_val - min_val) / (numBins);

            // Zero this block's shared-memory bins.
            for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
                bins[e] = (T) 0.0f;
            }
            __syncthreads();

            // Grid-stride accumulation; clamp out-of-range values to edge bins.
            for (int e = tid; e < length; e+= blockDim.x * gridDim.x) {
                int idx = (int) ((dx[e] - min_val) / binSize);
                if (idx < 0)
                    idx = 0;
                else if (idx >= numBins)
                    idx = numBins - 1;

                nd4j::math::atomics::nd4j_atomicAdd(&bins[idx], (T) 1.0f);
            }
            __syncthreads();

            // transfer shared memory to reduction memory
            if (gridDim.x > 1) {
                unsigned int *tc = (unsigned int *)reductionPointer;
                __shared__ bool amLast;

                for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
                    reductor[e] = bins[e];
                }
                __threadfence();
                __syncthreads();

                // The block that takes the last ticket folds all partials.
                if (threadIdx.x == 0) {
                    unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
                    amLast = (ticket == gridDim.x - 1);
                }
                __syncthreads();

                if (amLast) {
                    tc[16384] = 0;

                    // nullify shared memory for future accumulation
                    for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
                        bins[e] = (T) 0.0f;
                    }

                    // accumulate reduced bins
                    for (int r = 0; r < gridDim.x; r++) {
                        T *ptrBuf = ((T *)allocationPointer) + (r * numBins);

                        for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
                            bins[e] += ptrBuf[e];
                        }
                    }
                    __syncthreads();

                    // write them out to Z
                    for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
                        result[e] = bins[e];
                    }
                }
            } else {
                // if there's only 1 block - just write away data
                for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
                    result[e] = bins[e];
                }
            }
        };
#endif

        // CPU histogram: each thread fills a private bin array, then merges
        // into result under a critical section.
        static void execSpecial(
                T *dx, Nd4jLong *xShapeBuffer,
                T *result, Nd4jLong *resultShapeBuffer,
                T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
            int length = shape::length(xShapeBuffer);
            // NOTE(review): thread count is hard-coded to 2 — presumably a
            // placeholder; confirm whether it should track omp_get_max_threads().
            int _threads = 2;

            int numBins = (int) extraParams[0];
            int span = (length / _threads) + 8;

            // get min over input
            T min_val = extraParams[1];
            T max_val = extraParams[2];

            /*
            #pragma omp parallel for simd num_threads(_threads) if (_threads > 1) reduction(min:min_val) proc_bind(close)
            for (int x = 0; x < length; x++) {
                if (min_val > dx[x])
                    min_val = dx[x];
            }

            // get max over input
            T max_val = (T) MIN_FLOAT;
            #pragma omp parallel for simd num_threads(_threads) if (_threads > 1) reduction(max:max_val) proc_bind(close)
            for (int x = 0; x < length; x++) {
                if (max_val < dx[x])
                    max_val = dx[x];
            }
            */

            T binSize = (max_val - min_val) / (numBins);

            #pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(close) default(shared)
            {
                int tid, start, end;

                // Private bins avoid contention on the shared result.
                int *bins = new int[numBins];
                std::memset(bins, 0, sizeof(int) * numBins);
                tid = omp_get_thread_num();
                start = span * tid;
                end = span * (tid + 1);
                if (end > length) end = length;

                #pragma omp simd
                for (int x = start; x < end; x++) {
                    int idx = (int) ((dx[x] - min_val) / binSize);
                    if (idx < 0)
                        idx = 0;
                    else if (idx >= numBins)
                        idx = numBins - 1;

                    bins[idx]++;
                }

                // Merge private bins into the shared output one thread at a time.
                #pragma omp critical
                {
                    #pragma omp simd
                    for (int x = 0; x < numBins; x++) {
                        result[x] += bins[x];
                    }
                }

                delete[] bins;
            }
        }

        // Identity element-wise op (the real work happens in execSpecial*).
        op_def static T op(T d1, T *params) {
            return d1;
        }
    };

    /**
     * col2im: fold a 6d column buffer [bS, iC, kH, kW, oH, oW] back into a 4d
     * image, summing overlapping contributions (gradient of im2col).
     */
    template<typename T>
    class Col2Im {
    public:
        static const bool requiresSpecial = true;

#ifdef __CUDACC__
        /**
         * https://github.com/pjreddie/darknet/blob/master/src/col2im_kernels.cu
         */
        static inline __device__ void execSpecialCuda(
                T *dx, Nd4jLong *xShapeBuffer,
                T *result, Nd4jLong *resultShapeBuffer,
                T *extraParams,
                int *allocationPointer, T *reductionPointer,
                UnifiedSharedMemory *manager, Nd4jLong
*tadShapeInfo, Nd4jLong *tadOffsets) { auto inShape = shape::shapeOf(xShapeBuffer); auto inStride = shape::stride(xShapeBuffer); int strideex = inStride[0]; int stridech = inStride[1]; int stridekrow = inStride[2]; int stridekcol = inStride[3]; int striderow = inStride[4]; int stridecol = inStride[5]; int kernelHeight = inShape[2]; int kernelWidth = inShape[3]; // C int strideY = (int)extraParams[0]; int strideX = (int)extraParams[1]; int padHeight = (int)extraParams[2]; int padWidth = (int)extraParams[3]; int imgHeight = (int)extraParams[4]; int imgWidth = (int)extraParams[5]; int dY = (int)extraParams[6]; //Dilation in height/y dimension int dX = (int)extraParams[7]; //Dilation in width/x dimension auto outShape = shape::shapeOf(resultShapeBuffer); auto resultOrder = shape::order(resultShapeBuffer); auto outStride = shape::stride(resultShapeBuffer); int samples = outShape[0]; int depth = outShape[1]; int imgH = outShape[2]; int imgW = outShape[3]; int height_col = inShape[4];//(imgHeight + 2 * padHeight - kernelHeight) / strideX + 1; int width_col = inShape[5];//(imgWidth + 2 * padWidth - kernelWidth) / strideY + 1; int n = samples * depth * imgHeight * imgWidth; /*if (threadIdx.x == 0) printf("Kernel h: [%i], w: [%i]; Col h: [%i], w: [%i]; Stride x: [%i], y: [%i]; Height: [%i], Width: [%i], Depth: [%i], N: [%i], Samples: [%i]\n", kernelHeight, kernelWidth, height_col, width_col, strideX, strideY, imgHeight, imgWidth, depth, n, samples);*/ //Effective kernel size, accounting for dilation int kEffectiveW = kernelWidth + (kernelWidth - 1) * (dX - 1); int kEffectiveH = kernelHeight + (kernelHeight - 1) * (dY - 1); for (int i = (blockDim.x * blockIdx.x) + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { T val = 0; int w_im = i % imgWidth + padWidth; int h_im = (i / imgWidth) % imgHeight + padHeight; int c_im = i / (imgWidth * imgHeight); int num_im = c_im / depth; int depth_im = c_im % depth; // compute the start and end of the output // These are the indexes for 
dimensions ??? in the 6d col matrix int w_col_start = (w_im < kEffectiveW) ? 0 : (w_im - kEffectiveW) / strideX + 1; int w_col_end = nd4j::math::nd4j_min<int>(w_im / strideX + 1, width_col); int h_col_start = (h_im < kEffectiveH) ? 0 : (h_im - kEffectiveH) / strideY + 1; int h_col_end = nd4j::math::nd4j_min<int>(h_im / strideY + 1, height_col); //Iterate over col entries in the 6d array... these are added up for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) { for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) { int h_k = (h_im - h_col * strideY); int w_k = (w_im - w_col * strideX); if(h_k % dY == 0 && w_k % dX == 0){ h_k /= dY; w_k /= dX; int data_col_index = num_im * strideex + depth_im * stridech + h_k * stridekrow + w_k * stridekcol + h_col * striderow + w_col * stridecol; val += dx[data_col_index]; } } } int i_f = 0; int i_c = i; for (int dim = 3; dim >= 0; dim--) { i_f += (i_c % outShape[dim]) * outStride[dim]; i_c = i_c / outShape[dim]; } result[i_f] = val; } } #endif static void execSpecial( T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { const Nd4jLong *inShape = shape::shapeOf(xShapeBuffer); const Nd4jLong *inStride = shape::stride(xShapeBuffer); const Nd4jLong *outShape = shape::shapeOf(xShapeBuffer); const Nd4jLong *outStride = shape::stride(resultShapeBuffer); const int kH = inShape[2]; const int kW = inShape[3]; const int bS = outShape[0]; const int iC = outShape[1]; const int oH = inShape[4]; const int oW = inShape[5]; const int sH = (int)extraParams[0]; const int sW = (int)extraParams[1]; const int pH = (int)extraParams[2]; const int pW = (int)extraParams[3]; const int iH = (int)extraParams[4]; const int iW = (int)extraParams[5]; const int dH = (int)extraParams[6]; const int dW = (int)extraParams[7]; const int inStride0 = inStride[0]; const int inStride1 = inStride[1]; const int inStride2 = inStride[2]; const int inStride3 = inStride[3]; const 
int inStride4 = inStride[4]; const int inStride5 = inStride[5]; const int outStride0 = outStride[0]; const int outStride1 = outStride[1]; const int outStride2 = outStride[2]; const int outStride3 = outStride[3]; const T* out0End = result + outStride1 * iC; const int kRowEnd = -pH + kH * dH; const int inStepOW = oW * inStride5; const int kColEnd = -pW + kW * dW; const int inRowEnd = oH * sH; const int inColEnd = oW * sW; int inRowStart, inColStart, inRow, inCol; T *out0, *out1, *out2; memset(result, 0, shape::length(resultShapeBuffer) * sizeof(T)); if (shape::order(xShapeBuffer) == 'c' && shape::order(resultShapeBuffer) == 'c' && shape::strideDescendingCAscendingF(xShapeBuffer) && shape::strideDescendingCAscendingF(resultShapeBuffer)) { #pragma omp parallel for schedule(guided) proc_bind(close) private(out0, out1, out2, inRowStart, inColStart, inRow, inCol) for (int b = 0; b < bS; b++) { T *input = dx + (b * inStride0); out0 = result + (b * outStride0); for (int channel = 0; channel < iC; ++channel, out0 += outStride1) { for (int kRow = 0; kRow < kH; ++kRow) { inRowStart = -pH + kRow * dH; for (int kCol = 0; kCol < kW; ++kCol) { inRow = inRowStart; inColStart = -pW + kCol * dW; for (int outRow = 0; outRow < oH; ++outRow, inRow += sH) { if (!is_a_ge_zero_and_a_lt_b(inRow, iH)) { input += inStepOW; } else { inCol = inColStart; out1 = out0 + inRow * outStride2; for (int outCol = 0; outCol < oW; ++outCol, inCol += sW, input += inStride5) { if (is_a_ge_zero_and_a_lt_b(inCol, iW)) { out2 = out1 + inCol * outStride3; *out2 += *input; } } } } } } } } } else { T *in0, *in1, *in2, *in3, *in4; #pragma omp parallel for schedule(guided) proc_bind(close) private(in0, in1, in2, in3, in4, out0, out1, out2, inRowStart, inColStart, inRow, inCol) for (int b = 0; b < bS; b++) { out0 = result + (b * outStride0); in0 = dx + b * inStride0; for (int channel = 0; channel < iC; ++channel, out0+=outStride1, in0+=inStride1) { in1 = in0; for (int kRow = 0; kRow < kH; ++kRow, in1+=inStride2) { 
in2 = in1; inRowStart = -pH + kRow * dH; for (int kCol = 0; kCol < kW; ++kCol, in2+=inStride3) { in3 = in2; inRow = inRowStart; inColStart = -pW + kCol * dW; for (int outRow = 0; outRow < oH; ++outRow, inRow+=sH, in3+=inStride4) { in4 = in3; if (!is_a_ge_zero_and_a_lt_b(inRow, iH)) { in4 += inStepOW; } else { inCol = inColStart; out1 = out0 + inRow * outStride2; for (int outCol = 0; outCol < oW; ++outCol, inCol+=sW, in4+=inStride5) { if (is_a_ge_zero_and_a_lt_b(inCol, iW)) { out2 = out1 + inCol * outStride3; *out2 += *in4; } } } } } } } } } } op_def static T op(T d1, T *params) { return d1; } /** Calculate buffer offset (like Shape.getOffset) without checking on input for negative indices etc * normally negative indices are bad, OK here because of other checks on input indices * Uses unrolled loop specifically for length 4 */ static _CUDA_HD int getOffsetUnsafe4(int baseOffset, int *shape, int *stride, int *indices) { int offset = baseOffset; if (shape[0] != 1) offset += indices[0] * stride[0]; if (shape[1] != 1) offset += indices[1] * stride[1]; if (shape[2] != 1) offset += indices[2] * stride[2]; if (shape[3] != 1) offset += indices[3] * stride[3]; return offset; } /** A version of Shape.getOffset without checking on input for negative indices etc * normally negative indices are bad, OK here because of other checks on input indices * Uses unrolled loop specifically for length 6, where indices[2] and indices[3] are zero (always are here) */ static _CUDA_HD int getOffsetUnsafe6(int baseOffset, int *shape, int *stride, int *indices) { int offset = baseOffset; if (shape[0] != 1) offset += indices[0] * stride[0]; if (shape[1] != 1) offset += indices[1] * stride[1]; if (shape[4] != 1) offset += indices[4] * stride[4]; if (shape[5] != 1) offset += indices[5] * stride[5]; return offset; } }; template<typename T> class Reverse { public: static const bool requiresSpecial = true; #ifdef __CUDACC__ static inline __device__ void execSpecialCuda(T *dx, Nd4jLong *xShapeBuffer, 
T *result, Nd4jLong *zShapeBuffer, T *extraParams, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { __shared__ Nd4jLong xLength; __shared__ int xEWS; __shared__ char xOrder; __shared__ Nd4jLong sLength; __shared__ T *shmem; int tid = threadIdx.x + blockIdx.x * blockDim.x; if (threadIdx.x == 0) { xLength = shape::length(xShapeBuffer); xEWS = shape::elementWiseStride(xShapeBuffer); xOrder = shape::order(xShapeBuffer); sLength = xLength - 1; extern __shared__ unsigned char shrd[]; shmem = (T *) shrd; } __syncthreads(); if (dx == result) { if (xEWS == 1) { for (int e = tid; e < xLength / 2; e += blockDim.x * gridDim.x) { Nd4jLong idx = sLength - e; T tmp = dx[e]; dx[e] = dx[idx]; dx[idx] = tmp; } } else if (xEWS >= 1) { for (int e = tid; e < xLength / 2; e += blockDim.x * gridDim.x) { Nd4jLong idx1 = (sLength - e) * xEWS; Nd4jLong idx2 = e * xEWS; T tmp = dx[idx2]; dx[idx2] = dx[idx1]; dx[idx1] = tmp; } } else { __shared__ int xRank; __shared__ Nd4jLong *xShape; __shared__ Nd4jLong *xStride; if (threadIdx.x == 0) { xRank = shape::rank(xShapeBuffer); xShape = shape::shapeOf(xShapeBuffer); xStride = shape::stride(xShapeBuffer); } __syncthreads(); Nd4jLong xCoord[MAX_RANK]; Nd4jLong zCoord[MAX_RANK]; for (int e = tid; e < xLength / 2; e += blockDim.x * gridDim.x) { if (xOrder == 'c') { shape::ind2subC(xRank, xShape, e, xCoord); shape::ind2subC(xRank, xShape, sLength - e, zCoord); } else { shape::ind2sub(xRank, xShape, e, xCoord); shape::ind2sub(xRank, xShape, sLength - e, zCoord); } auto xOffset = shape::getOffset(0, xShape, xStride, xCoord, xRank); auto zOffset = shape::getOffset(0, xShape, xStride, zCoord, xRank); result[zOffset] = dx[xOffset]; } } } else { __shared__ int zEWS; __shared__ char zOrder; if (threadIdx.x == 0) { zEWS = shape::elementWiseStride(zShapeBuffer); zOrder = shape::order(zShapeBuffer); } __syncthreads(); if (xEWS == 1 && zEWS == 1 && xOrder == zOrder) { // loop for whole 
array for (int e = tid; e < xLength; e += blockDim.x * gridDim.x) { result[sLength - e] = dx[e]; } } else if (xEWS >= 1 && zEWS >= 1 && xOrder == zOrder) { for (int e = tid; e < xLength; e += blockDim.x * gridDim.x) { result[(sLength - e) * zEWS] = dx[e * xEWS]; } } else { __shared__ int xRank; __shared__ Nd4jLong *xShape; __shared__ Nd4jLong *xStride; __shared__ int zRank; __shared__ Nd4jLong *zShape; __shared__ Nd4jLong *zStride; if (threadIdx.x == 0) { xRank = shape::rank(xShapeBuffer); xShape = shape::shapeOf(xShapeBuffer); xStride = shape::stride(xShapeBuffer); zRank = shape::rank(zShapeBuffer); zShape = shape::shapeOf(zShapeBuffer); zStride = shape::stride(zShapeBuffer); } __syncthreads(); Nd4jLong xCoord[MAX_RANK]; Nd4jLong zCoord[MAX_RANK]; for (int e = tid; e < xLength; e += blockDim.x * gridDim.x) { if (xOrder == 'c') { shape::ind2subC(xRank, xShape, e, xCoord); shape::ind2subC(xRank, xShape, sLength - e, zCoord); } else { shape::ind2sub(xRank, xShape, e, xCoord); shape::ind2sub(xRank, xShape, sLength - e, zCoord); } auto xOffset = shape::getOffset(0, xShape, xStride, xCoord, xRank); auto zOffset = shape::getOffset(0, xShape, xStride, zCoord, xRank); result[zOffset] = dx[xOffset]; } } } } #endif static void execSpecial(T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *zShapeBuffer, T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { auto xLength = shape::length(xShapeBuffer); auto xEWS = shape::elementWiseStride(xShapeBuffer); auto xOrder = shape::order(xShapeBuffer); auto sLength = xLength - 1; // two step phase here if (dx == result) { if (xEWS == 1) { #pragma omp parallel for schedule(guided) for (Nd4jLong e = 0; e < xLength / 2; e++) { Nd4jLong idx = sLength - e; T tmp = dx[e]; dx[e] = dx[idx]; dx[idx] = tmp; } } else if (xEWS > 1) { #pragma omp parallel for schedule(guided) for (Nd4jLong e = 0; e < xLength / 2; e++) { Nd4jLong idx1 = (sLength - e) * xEWS; Nd4jLong idx2 = e * xEWS; T tmp = dx[idx2]; dx[idx2] = dx[idx1]; dx[idx1] = tmp; } 
        } else {
            // non-EWS in-place path: resolve both offsets through shape info
            int xRank = shape::rank(xShapeBuffer);
            auto xShape = shape::shapeOf(xShapeBuffer);
            auto xStride = shape::stride(xShapeBuffer);
            Nd4jLong xCoord[MAX_RANK];
            Nd4jLong zCoord[MAX_RANK];
#pragma omp parallel for private(xCoord, zCoord) schedule(guided)
            for (Nd4jLong e = 0; e < xLength / 2; e++) {
                if (xOrder == 'c') {
                    shape::ind2subC(xRank, xShape, e, xCoord);
                    shape::ind2subC(xRank, xShape, sLength - e, zCoord);
                } else {
                    shape::ind2sub(xRank, xShape, e, xCoord);
                    shape::ind2sub(xRank, xShape, sLength - e, zCoord);
                }
                auto xOffset = shape::getOffset(0, xShape, xStride, xCoord, xRank);
                auto zOffset = shape::getOffset(0, xShape, xStride, zCoord, xRank);
                // NOTE(review): this copies only one direction (no temp swap) although
                // dx == result on this branch — looks like the mirrored element is
                // overwritten before being read; confirm against upstream libnd4j.
                result[zOffset] = dx[xOffset];
            }
        }
    } else {
        // single step phase here
        auto zEWS = shape::elementWiseStride(zShapeBuffer);
        auto zOrder = shape::order(zShapeBuffer);
        if (xEWS == 1 && zEWS == 1 && xOrder == zOrder) {
            // contiguous out-of-place mirror
#pragma omp parallel for schedule(guided)
            for (Nd4jLong e = 0; e < xLength; e++) {
                result[sLength - e] = dx[e];
            }
        } else if (xEWS >= 1 && zEWS >= 1 && xOrder == zOrder) {
            // strided out-of-place mirror
#pragma omp parallel for schedule(guided)
            for (Nd4jLong e = 0; e < xLength; e++) {
                result[(sLength - e) * zEWS] = dx[e * xEWS];
            }
        } else {
            // generic out-of-place path: independent coords for input and output
            auto xRank = shape::rank(xShapeBuffer);
            auto xShape = shape::shapeOf(xShapeBuffer);
            auto xStride = shape::stride(xShapeBuffer);
            auto zRank = shape::rank(zShapeBuffer);
            auto zShape = shape::shapeOf(zShapeBuffer);
            auto zStride = shape::stride(zShapeBuffer);
            Nd4jLong xCoord[MAX_RANK];
            Nd4jLong zCoord[MAX_RANK];
#pragma omp parallel for private(xCoord, zCoord) schedule(guided)
            for (Nd4jLong e = 0; e < xLength; e++) {
                if (xOrder == 'c')
                    shape::ind2subC(xRank, xShape, e, xCoord);
                else
                    shape::ind2sub(xRank, xShape, e, xCoord);
                if (zOrder == 'c')
                    shape::ind2subC(zRank, zShape, (sLength - e), zCoord);
                else
                    shape::ind2sub(zRank, zShape, (sLength - e), zCoord);
                auto xOffset = shape::getOffset(0, xShape, xStride, xCoord, xRank);
                auto zOffset = shape::getOffset(0, zShape, zStride, zCoord, zRank);
                result[zOffset] =
dx[xOffset];
            }
        }
    }
}

// element-wise fallback: identity (the special path above does the real work)
op_def static T op(T d1, T *params) {
    return d1;
}
};

template<typename T>
class SoftMax {
public:
    // softmax is implemented via execSpecial/execSpecialCuda, not via op()
    static const bool requiresSpecial = true;

#ifdef __CUDACC__
    /**
     * Device-side softmax: max-subtract, exp, sum, divide — each step delegated
     * to the generic reduce/scalar/transform kernels with a barrier in between.
     */
    static inline __device__ void execSpecialCuda(
        T *dx, Nd4jLong *xShapeBuffer,
        T *result, Nd4jLong *resultShapeBuffer,
        T *extraParams,
        int *allocationPointer, T *reductionPointer,
        UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {

        auto shape = shape::shapeOf(xShapeBuffer);
        __shared__ T maxResult;
        __shared__ Nd4jLong *maxResultShapeBuffer;
        auto length = shape::length(xShapeBuffer);
        auto stride = shape::stride(xShapeBuffer);
        //compute the row wise maxes
        __shared__ Nd4jLong maxShape[2];
        // it's always 2d here
        __shared__ Nd4jLong tempBuffer[8];
        if (threadIdx.x == 0) {
            // thread 0 builds the {rows, 1} shape descriptor in shared memory
            maxResult = (T) 0.0;
            maxShape[0] = shape[0];
            maxShape[1] = 1;
            maxResultShapeBuffer = shape::shapeBuffer(2, maxShape, tempBuffer);
        }
        __syncthreads();
        functions::reduce::ReduceFunction<T>::template execScalarCuda<simdOps::Max<T>>(dx, xShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer, reductionPointer, manager, nullptr);
        __syncthreads();
        //subtract max of each row
        functions::scalar::ScalarTransform<T>::template transformCuda<simdOps::Subtract<T>>(maxResult, dx, xShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, manager);
        __syncthreads();
        //after subtracting the row wise maxes take the exp
        functions::transform::Transform<T>::template transformCuda<simdOps::Exp<T>>(result, resultShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, reductionPointer, manager, tadShapeInfo, tadOffsets);
        __syncthreads();
        //take the sum for the exponential
        functions::reduce::ReduceFunction<T>::template execScalarCuda<simdOps::Sum<T>>(result, resultShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer, reductionPointer, manager, nullptr);
        __syncthreads();
        //divide by the sum
        functions::scalar::ScalarTransform<T>::template transformCuda<simdOps::Divide<T>>(maxResult,
result, resultShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, manager);
    }
#endif

    // Host-side softmax. Matrix input: row-wise softmax via reduce/broadcast/
    // transform primitives. Vector input: handled in the branch below.
    static void execSpecial(
        T *dx, Nd4jLong *xShapeBuffer,
        T *result, Nd4jLong *resultShapeBuffer,
        T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
        if (shape::isMatrix(xShapeBuffer)) {
            auto shape = shape::shapeOf(xShapeBuffer);
            //iterate along rows
            int dimension[1] = { 0 };
            int maxDimension[1] = { 1 };
            //compute the row wise maxes
            std::vector<T> maxResult(shape[0]);
            for (int i = 0; i < shape[0]; i++)
                maxResult[i] = 0.0;
            // {rows, 1} shape buffer for the per-row reduction results
            Nd4jLong maxShape[2] = { shape[0], 1 };
            auto maxResultShapeBuffer = shape::shapeBuffer(2, maxShape);
            functions::reduce::ReduceFunction<T>::template exec<simdOps::Max<T>>(dx, xShapeBuffer, extraParams, maxResult.data(), maxResultShapeBuffer, maxDimension, 1, nullptr, nullptr);
            //subtract max of each row
            functions::broadcast::Broadcast<T>::template exec<simdOps::Subtract<T>>(dx, xShapeBuffer, maxResult.data(), maxResultShapeBuffer, result, resultShapeBuffer, dimension, 1, nullptr, nullptr, nullptr, nullptr);
            //after subtracting the row wise maxes take the exp
            functions::transform::Transform<T>::template exec<simdOps::Exp<T>>(result, resultShapeBuffer, result, resultShapeBuffer, extraParams, tadShapeInfo, tadOffsets);
            //take the sum for the exponential
            functions::reduce::ReduceFunction<T>::template exec<simdOps::Sum<T>>(result, resultShapeBuffer, extraParams, maxResult.data(), maxResultShapeBuffer, maxDimension, 1, nullptr, nullptr);
            //divide by the sum
            functions::broadcast::Broadcast<T>::template exec<simdOps::Divide<T>>(result, resultShapeBuffer, maxResult.data(), maxResultShapeBuffer, result, resultShapeBuffer, dimension, 1, nullptr, nullptr, nullptr, nullptr);
            // shape::shapeBuffer allocates; release it here
            delete[] maxResultShapeBuffer;
        } else if (shape::isVector(xShapeBuffer)) {
            T max = -FLOAT_MAX_VALUE;
            T sum = 0;
            int elementWiseStride = shape::elementWiseStride(xShapeBuffer);
            int resultElementWiseStride = shape::elementWiseStride(resultShapeBuffer);
            int length =
shape::length(xShapeBuffer);
            // vector softmax: three passes — max, exp(x - max) + sum, normalize
            if (elementWiseStride >= 1 && resultElementWiseStride >= 1) {
                if (elementWiseStride == 1 && resultElementWiseStride == 1) {
#pragma omp simd reduction(maxT:max)
                    for (int i = 0; i < length; i++) {
                        max = nd4j::math::nd4j_max<T>(max, dx[i]);
                    }
#pragma omp parallel for simd reduction(sumT:sum)
                    for (int i = 0; i < length; i++) {
                        result[i] = nd4j::math::nd4j_exp<T>(dx[i] - max);
                        sum += result[i];
                    }
#pragma omp simd
                    for (int i = 0; i < length; i++) {
                        result[i] /= sum;
                    }
                } else {
                    // strided variant of the same three passes
#pragma omp simd reduction(maxT:max)
                    for (int i = 0; i < length; i++) {
                        max = nd4j::math::nd4j_max<T>(max, dx[i * elementWiseStride]);
                    }
#pragma omp parallel for simd reduction(sumT:sum)
                    for (int i = 0; i < length; i++) {
                        T r = nd4j::math::nd4j_exp<T>(dx[i * elementWiseStride] - max);
                        result[i * resultElementWiseStride] = r;
                        sum += r;
                    }
#pragma omp simd
                    for (int i = 0; i < length; i++) {
                        result[i * resultElementWiseStride] /= sum;
                    }
                }
            }
        }
    }

    // element-wise fallback; not the softmax itself (requiresSpecial == true)
    op_def static T op(T d1, T *params) {
        return nd4j::math::softplus<T>(d1);
    }
};

template<typename T>
class LogSoftMax {
public:
    // log-softmax runs through execSpecial/execSpecialCuda
    static const bool requiresSpecial = true;

#ifdef __CUDACC__
    /**
     * Device-side log-softmax: softmax pipeline (max, subtract, exp, sum,
     * divide) followed by a Log transform — continued on the next chunk.
     */
    static inline __device__ void execSpecialCuda(
        T *dx, Nd4jLong *xShapeBuffer,
        T *result, Nd4jLong *resultShapeBuffer,
        T *extraParams,
        int *allocationPointer, T *reductionPointer,
        UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
        auto shape = shape::shapeOf(xShapeBuffer);
        auto stride = shape::stride(xShapeBuffer);
        //iterate along rows
        __shared__ T maxResult;
        __shared__ Nd4jLong *maxResultShapeBuffer;
        if (threadIdx.x == 0) {
            maxResult = (T) 0.0;
        }
        __syncthreads();
        //compute the row wise maxes
        Nd4jLong maxShape[2] = { shape[0], 1 };
        __shared__ Nd4jLong tempBuffer[8];
        if (threadIdx.x == 0)
            maxResultShapeBuffer = shape::shapeBuffer(2, maxShape, tempBuffer);
        __syncthreads();
        functions::reduce::ReduceFunction<T>::template execScalarCuda<simdOps::Max<T>>(dx, xShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer,
reductionPointer, manager, nullptr);
        __syncthreads();
        //subtract max of each row
        functions::scalar::ScalarTransform<T>::template transformCuda<simdOps::Subtract<T>>(maxResult, dx, xShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, manager);
        __syncthreads();
        //after subtracting the row wise maxes take the exp
        functions::transform::Transform<T>::template transformCuda<simdOps::Exp<T>>(result, resultShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, reductionPointer, manager, tadShapeInfo, tadOffsets);
        __syncthreads();
        //take the sum for the exponential
        functions::reduce::ReduceFunction<T>::template execScalarCuda<simdOps::Sum<T>>(result, resultShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer, reductionPointer, manager, nullptr);
        __syncthreads();
        //divide by the sum
        functions::scalar::ScalarTransform<T>::template transformCuda<simdOps::Divide<T>>(maxResult, result, resultShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, manager);
        __syncthreads();
        // final Log turns the softmax into log-softmax
        functions::transform::Transform<T>::template transformCuda<simdOps::Log<T>>(result, resultShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, reductionPointer, manager, tadShapeInfo, tadOffsets);
    }
#endif

    // Host-side log-softmax; matrix path mirrors SoftMax::execSpecial plus a
    // trailing Log transform.
    static void execSpecial(
        T *dx, Nd4jLong *xShapeBuffer,
        T *result, Nd4jLong *resultShapeBuffer,
        T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
        if (shape::isMatrix(xShapeBuffer, 2)) {
            auto shape = shape::shapeOf(xShapeBuffer);
            //iterate along rows
            int dimension[1] = { 0 };
            int maxDimension[1] = { 1 };
            //compute the row wise maxes
            std::vector <T> maxResult(shape[0]);
#pragma omp simd
            for (int i = 0; i < shape[0]; i++)
                maxResult[i] = 0.0;
            Nd4jLong maxShape[2] = { shape[0], 1 };
            auto maxResultShapeBuffer = shape::shapeBuffer(2, maxShape);
            functions::reduce::ReduceFunction<T>::template exec<simdOps::Max<T>>(dx, xShapeBuffer, extraParams, maxResult.data(), maxResultShapeBuffer, maxDimension, 1, nullptr, nullptr);
//subtract max of each row functions::broadcast::Broadcast<T>::template exec<simdOps::Subtract<T>>(dx, xShapeBuffer, maxResult.data(), maxResultShapeBuffer, result, resultShapeBuffer, dimension, 1, nullptr, nullptr, nullptr, nullptr); //after subtracting the row wise maxes take the exp functions::transform::Transform<T>::template exec<simdOps::Exp<T>>(result, resultShapeBuffer, result, resultShapeBuffer, extraParams, tadShapeInfo, tadOffsets); //take the sum for the exponential functions::reduce::ReduceFunction<T>::template exec<simdOps::Sum<T>>(result, resultShapeBuffer, extraParams, maxResult.data(), maxResultShapeBuffer, maxDimension, 1, nullptr, nullptr); //divide by the sum functions::broadcast::Broadcast<T>::template exec<simdOps::Divide<T>>(result, resultShapeBuffer, maxResult.data(), maxResultShapeBuffer, result, resultShapeBuffer, dimension, 1, nullptr, nullptr, nullptr, nullptr); functions::transform::Transform<T>::template exec<simdOps::Log<T>>(result, resultShapeBuffer, result, resultShapeBuffer, extraParams, tadShapeInfo, tadOffsets); delete[] maxResultShapeBuffer; } else if (shape::isVector(xShapeBuffer, 2)) { T max = -FLOAT_MAX_VALUE; T sum = 0; auto elementWiseStride = shape::elementWiseStride(xShapeBuffer); auto length = shape::length(xShapeBuffer); if (elementWiseStride == 1) { #pragma omp simd reduction(maxT:max) for (int i = 0; i < length; i++) { max = nd4j::math::nd4j_max<T>(max, result[i]); } #pragma omp simd reduction(sumT:sum) for (int i = 0; i < length; i++) { result[i] = nd4j::math::nd4j_exp<T>(dx[i] - max); sum += result[i]; } #pragma omp simd for (int i = 0; i < length; i++) { result[i] /= sum; result[i] = nd4j::math::nd4j_log<T>(result[i]); } } else if (elementWiseStride > 1) { #pragma omp simd reduction(maxT:max) for (int i = 0; i < length; i++) { max = nd4j::math::nd4j_max<T>(max, result[i * elementWiseStride]); } #pragma omp simd reduction(sumT:sum) for (int i = 0; i < length; i++) { result[i * elementWiseStride] = 
nd4j::math::nd4j_exp<T>(dx[i * elementWiseStride] - max);
                    sum += result[i * elementWiseStride];
                }
#pragma omp simd
                for (int i = 0; i < length; i++) {
                    result[i * elementWiseStride] /= sum;
                    result[i * elementWiseStride] = nd4j::math::nd4j_log<T>(result[i * elementWiseStride]);
                }
            }
        }
    }

    // element-wise fallback; the real work happens in execSpecial
    op_def static T op(T d1, T *params) {
        return nd4j::math::softplus<T>(d1);
    }
};

/**
 * softmax(x)
 */
template<typename T>
class SoftMaxDerivative {
public:
    static const bool requiresSpecial = true;

#ifdef __CUDACC__
    /**
     * Device-side: computes softmax, then s * (1 - s) element-wise (see the
     * continuation in the next chunk).
     */
    static inline __device__ void execSpecialCuda(
        T *dx, Nd4jLong *xShapeBuffer,
        T *result, Nd4jLong *resultShapeBuffer,
        T *extraParams,
        int *allocationPointer, T *reductionPointer,
        UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
        auto shape = shape::shapeOf(xShapeBuffer);
        __shared__ T maxResult;
        __shared__ Nd4jLong *maxResultShapeBuffer;
        __shared__ Nd4jLong resultEWS;
        auto length = shape::length(xShapeBuffer);
        if (threadIdx.x == 0) {
            resultEWS = shape::elementWiseStride(resultShapeBuffer);
            maxResult = (T) 0.0;
        }
        __syncthreads();
        // NOTE(review): 'tride' looks like a typo for 'stride'; unused below — confirm.
        auto tride = shape::stride(xShapeBuffer);
        Nd4jLong maxShape[2] = { shape[0], 1 };
        __shared__ Nd4jLong tempBuffer[8];
        if (threadIdx.x == 0)
            maxResultShapeBuffer = shape::shapeBuffer(2, maxShape, tempBuffer);
        __syncthreads();
        functions::reduce::ReduceFunction<T>::template execScalarCuda<simdOps::Max<T>>(dx, xShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer, reductionPointer, manager, nullptr);
        __syncthreads();
        //subtract max of each row
        functions::scalar::ScalarTransform<T>::template transformCuda<simdOps::Subtract<T>>(maxResult, dx, xShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, manager);
        __syncthreads();
        //after subtracting the row wise maxes take the exp
        functions::transform::Transform<T>::template transformCuda<simdOps::Exp<T>>(result, resultShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, reductionPointer, manager, tadShapeInfo, tadOffsets);
        __syncthreads();
        //take the sum for the exponential
        functions::reduce::ReduceFunction<T>::template execScalarCuda<simdOps::Sum<T>>(result, resultShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer, reductionPointer, manager, nullptr);
        __syncthreads();
        //divide by the sum
        functions::scalar::ScalarTransform<T>::template transformCuda<simdOps::Divide<T>>(maxResult, result, resultShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, manager);
        __syncthreads();
        if (resultEWS >= 1) {
            // derivative: s * (1 - s) applied in place over the softmax output
            for (int i = threadIdx.x; i < length; i += blockDim.x) {
                result[i * resultEWS] = result[i * resultEWS] * ((T) 1.0 - result[i * resultEWS]);
            }
        } else {
            printf("Non element wise stride not supported right now\n");
        }
    }
#endif

    // Host-side softmax derivative.
    static void execSpecial(
        T *dx, Nd4jLong *xShapeBuffer,
        T *result, Nd4jLong *resultShapeBuffer,
        T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
        if (shape::isMatrix(xShapeBuffer, 2)) {
            auto shape = shape::shapeOf(xShapeBuffer);
            auto resultEleStide = shape::elementWiseStride(resultShapeBuffer);
            //iterate along rows
            int dimension[1] = { 0 };
            int maxDimension[1] = { 1 };
            auto len = shape::length(xShapeBuffer);
            //compute the row wise maxes
            std::vector <T> maxResult(shape[0]);
#pragma omp simd
            for (int i = 0; i < shape[0]; i++)
                maxResult[i] = 0.0;
            Nd4jLong maxShape[2] = { shape[0], 1 };
            auto maxResultShapeBuffer = shape::shapeBuffer(2, maxShape);
            functions::reduce::ReduceFunction<T>::template exec<simdOps::Max<T>>(dx, xShapeBuffer, extraParams, maxResult.data(), maxResultShapeBuffer, maxDimension, 1, nullptr, nullptr);
            //subtract max of each row
            // NOTE(review): subtraction reads from result, not dx — presumably the
            // caller pre-copies the input into result; confirm against upstream.
            functions::broadcast::Broadcast<T>::template exec<simdOps::Subtract<T>>(result, resultShapeBuffer, maxResult.data(), maxResultShapeBuffer, result, resultShapeBuffer, dimension, 1, nullptr, nullptr, nullptr, nullptr);
            //after subtracting the row wise maxes take the exp
            functions::transform::Transform<T>::template exec<simdOps::Exp<T>>(result, resultShapeBuffer, result, resultShapeBuffer,
extraParams, tadShapeInfo, tadOffsets);
            //take the sum for the exponential
            functions::reduce::ReduceFunction<T>::template exec<simdOps::Sum<T>>(result, resultShapeBuffer, extraParams, maxResult.data(), maxResultShapeBuffer, maxDimension, 1, nullptr, nullptr);
            //divide by the sum
            functions::broadcast::Broadcast<T>::template exec<simdOps::Divide<T>>(result, resultShapeBuffer, maxResult.data(), maxResultShapeBuffer, result, resultShapeBuffer, dimension, 1, nullptr, nullptr, nullptr, nullptr);
            if (resultEleStide >= 1) {
                if (resultEleStide == 1) {
                    // derivative s * (1 - s), contiguous output
#pragma omp simd
                    for (int i = 0; i < len; i++) {
                        result[i] = result[i] * ((T) 1.0f - result[i]);
                    }
                } else {
                    // strided output
#pragma omp simd
                    for (int i = 0; i < len; i++) {
                        result[i * resultEleStide] = result[i * resultEleStide] * ((T) 1.0f - result[i * resultEleStide]);
                    }
                }
            } else {
                // no usable element-wise stride: resolve each offset via coords
                auto zShape = shape::shapeOf(resultShapeBuffer);
                auto zStride = shape::stride(resultShapeBuffer);
                auto zRank = shape::rank(resultShapeBuffer);
                Nd4jLong zCoord[MAX_RANK];
                for (int i = 0; i < len; i++) {
                    shape::ind2subC(zRank,zShape, i, zCoord);
                    Nd4jLong zOffset = shape::getOffset(0, zShape, zStride, zCoord, zRank);
                    result[zOffset] = result[zOffset] * ((T) 1.0f - result[zOffset]);
                }
            }
            delete[] maxResultShapeBuffer;
        } else if (shape::isVector(xShapeBuffer, 2)) {
            T max = -FLOAT_MAX_VALUE;
            T sum = 0;
            auto elementWiseStride = shape::elementWiseStride(xShapeBuffer);
            auto length = shape::length(xShapeBuffer);
            if (elementWiseStride == 1) {
                // NOTE(review): max over result[i], not dx[i] — same pre-copy
                // assumption as the matrix branch; confirm.
#pragma omp simd reduction(maxT:max)
                for (int i = 0; i < length; i++) {
                    max = nd4j::math::nd4j_max<T>(max, result[i]);
                }
#pragma omp simd reduction(sumT:sum)
                for (int i = 0; i < length; i++) {
                    result[i] -= max;
                    result[i] = nd4j::math::nd4j_exp<T>(result[i]);
                    sum += result[i];
                }
#pragma omp simd
                for (int i = 0; i < length; i++) {
                    result[i] /= sum;
                }
#pragma omp simd
                for (int i = 0; i < length; i++) {
                    result[i] = result[i] * ((T) 1.0f - result[i]);
                }
            } else if (elementWiseStride >= 1) {
#pragma omp simd reduction(maxT:max)
                for (int i = 0;
i < length; i++) {
                    max = nd4j::math::nd4j_max<T>(max, result[i * elementWiseStride]);
                }
#pragma omp simd reduction(sumT:sum)
                for (int i = 0; i < length; i++) {
                    result[i * elementWiseStride] -= max;
                    result[i * elementWiseStride] = nd4j::math::nd4j_exp<T>(result[i * elementWiseStride]);
                    sum += result[i * elementWiseStride];
                }
#pragma omp simd
                for (int i = 0; i < length; i++) {
                    result[i * elementWiseStride] /= sum;
                }
                // derivative pass: s * (1 - s)
#pragma omp simd
                for (int i = 0; i < length; i++) {
                    result[i * elementWiseStride] = result[i * elementWiseStride] * ((T) 1.0f - result[i * elementWiseStride]);
                }
            } else {
                printf("non-ews access on row not implemented yet");
            }
        }
    }

    // element-wise fallback; not the derivative itself
    op_def static T op(T d1, T *params) {
        return nd4j::math::softplus<T>(d1);
    }
};

template<typename T>
class IsMax {
public:
    static const bool requiresSpecial = true;

#ifdef __CUDACC__
    static inline __device__ void doAllCuda(
        T *dx,
        Nd4jLong *xShapeBuffer,
        T *result,
        Nd4jLong *resultShapeBuffer,
        T *extraParams,
        int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager) {

        // this code is safe to delete, it's never used
        /*
        __shared__ int maxIdx;
        __shared__ int length;
        if (threadIdx.x == 0) {
            length = shape::length(resultShapeBuffer);
        }
        __syncthreads();
        functions::indexreduce::IndexReduce<T>::template transform<simdOps::IndexMax<T>>(
            dx, xShapeBuffer, extraParams, result, resultShapeBuffer, nullptr, 1, 1, allocationPointer, reductionPointer, manager, nullptr, nullptr);
        __syncthreads();
        if (threadIdx.x == 0)
            maxIdx = (int)result[0];
        __syncthreads();
        for (int i = threadIdx.x; i < length; i += blockDim.x)
            result[i] = 0;
        __syncthreads();
        if (threadIdx.x == 0) {
            result[maxIdx] = 1.0;
        }
        */
    }
#endif

#ifdef __CUDACC__
    inline __host__
#elif defined(__GNUC__)
#endif
    // Whole-array argmax: writes a one-hot mask into result (1.0 at the max,
    // 0.0 elsewhere).
    static void doAll(
        T *dx,
        Nd4jLong *xShapeBuffer,
        T *result,
        Nd4jLong *resultShapeBuffer,
        T *extraParams) {

        auto length = shape::length(xShapeBuffer);
        auto eleStride = shape::elementWiseStride(xShapeBuffer);
        auto resultEleStride =
shape::elementWiseStride(resultShapeBuffer);
        auto xOrder = shape::order(xShapeBuffer);
        auto resultOrder = shape::order(resultShapeBuffer);
        /*
        int tadsPerThread = tads / TAD_THRESHOLD;
        int num_threads = nd4j::math::nd4j_max<int>(1, tadsPerThread);
        num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads());
        */
        if (xOrder == resultOrder && xOrder == 'c') {
            if (eleStride == 1 && resultEleStride == 1) {
                if (length < ELEMENT_THRESHOLD) {
                    // small array: single-threaded scan, zero as we go
                    int maxIdx = 0;
                    T currMax = dx[0];
//#pragma omp simd reduction (max:maxIdx,currMax)
                    for (int i = 0; i < length; i++) {
                        if (currMax < dx[i]) {
                            currMax = dx[i];
                            maxIdx = i;
                        }
                        result[i] = 0.0;
                    }
                    result[maxIdx] = 1.0;
                } else {
                    // large array: per-thread local maxima merged under a critical
                    int maxIdx = 0;
                    T currMax = dx[0];
#pragma omp parallel proc_bind(AFFINITY)
                    {
                        int maxIdxLocal = maxIdx;
                        T currMaxLocal = currMax;
//#pragma omp simd reduction(max:maxIdxLocal,currMaxLocal)
                        for (int i = 0; i < length; i++) {
                            if (currMaxLocal < dx[i]) {
                                currMaxLocal = dx[i];
                                maxIdxLocal = i;
                            }
                            result[i] = 0.0;
                        }
#pragma omp critical
                        {
                            if (currMax < currMaxLocal) {
                                currMax = currMaxLocal;
                                maxIdx = maxIdxLocal;
                            }
                        }
                    }
                    result[maxIdx] = 1.0;
                }
            } else {
                if (length < ELEMENT_THRESHOLD) {
                    // strided small-array scan
                    int maxIdx = 0;
                    T currMax = dx[0];
//#pragma omp simd reduction(max:maxIdx,currMax)
                    for (int i = 0; i < length; i++) {
                        result[i * resultEleStride] = 0.0;
                        if (currMax < dx[i * eleStride]) {
                            currMax = dx[i * eleStride];
                            maxIdx = i;
                        }
                    }
                    result[maxIdx * resultEleStride] = 1.0;
                } else {
                    // strided large-array scan, same local/critical merge pattern
                    int maxIdx = 0;
                    T currMax = dx[0];
#pragma omp parallel proc_bind(AFFINITY) default(shared)
                    {
                        int maxIdxLocal = maxIdx;
                        T currMaxLocal = currMax;
//#pragma omp simd reduction(max:maxIdxLocal,currMaxLocal)
                        for (int i = 0; i < length; i++) {
                            result[i * resultEleStride] = 0.0;
                            if (currMaxLocal < dx[i * eleStride]) {
                                currMaxLocal = dx[i * eleStride];
                                maxIdxLocal = i;
                            }
                        }
#pragma omp critical
                        {
                            if (currMax < currMaxLocal) {
                                currMax = currMaxLocal;
                                maxIdx = maxIdxLocal;
                            }
                        }
                    }
                    result[maxIdx * resultEleStride] = 1.0;
                }
            }
        } else {
            Nd4jLong
shapeIter[MAX_RANK];
            // raw-iteration fallback for mismatched orders / non-EWS buffers
            Nd4jLong coord[MAX_RANK];
            int dim;
            Nd4jLong xStridesIter[MAX_RANK];
            Nd4jLong resultStridesIter[MAX_RANK];
            auto xShape = shape::shapeOf(xShapeBuffer);
            auto xStride = shape::stride(xShapeBuffer);
            auto resultStride = shape::stride(resultShapeBuffer);
            auto rank = shape::rank(xShapeBuffer);
            T *originalResult = result;
            if (PrepareTwoRawArrayIter<T>(rank, xShape, dx, xStride, result, resultStride, &rank, shapeIter, &dx, xStridesIter, &result, resultStridesIter) >= 0) {
                T value = dx[0];
                int idx = 0;
                int maxIdx = 0;
                ND4J_RAW_ITER_START(dim, rank, coord, shapeIter); {
                    if (dx[0] > value) {
                        value = dx[0];
                        maxIdx = idx;
                    }
                    idx++;
                    result[0] = 0.0;
                }
                ND4J_RAW_ITER_TWO_NEXT(
                    dim, rank, coord, shapeIter, dx, xStridesIter, result, resultStridesIter);
                //pointer to where max value would be
                if (shape::order(resultShapeBuffer) == 'c' || (shape::order(resultShapeBuffer) == 'f' && maxIdx * shape::stride(resultShapeBuffer)[shape::rank(resultShapeBuffer) - 1] >= shape::length(resultShapeBuffer)))
                    originalResult[maxIdx] = 1.0;
                else
                    originalResult[maxIdx * shape::stride(resultShapeBuffer)[shape::rank(resultShapeBuffer) - 1]] = 1.0;
            }
        }
    }

public:

#ifdef __CUDACC__
    /**
     * Device entry: only the whole-array case is handled; dimensional IsMax is
     * not implemented here.
     */
    static inline __device__ void execSpecialCuda(
        T *dx, Nd4jLong *xShapeBuffer,
        T *result, Nd4jLong *resultShapeBuffer,
        T *extraParams,
        int *allocationPointer, T *reductionPointer,
        UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
        // FIXME: MAX_DIMENSION is lower then FP16 frame
        if (extraParams == nullptr || (int) extraParams[0] == MAX_DIMENSION) {
            doAllCuda(dx, xShapeBuffer, result, resultShapeBuffer, extraParams, allocationPointer, reductionPointer, manager);
        }
    }
#endif

    // Host entry: dispatches on extraParams (none/0/MAX_DIMENSION -> whole
    // array; otherwise per-dimension / per-TAD one-hot argmax).
    static void execSpecial(
        T *dx, Nd4jLong *xShapeBuffer,
        T *result, Nd4jLong *resultShapeBuffer,
        T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
        //FIXME: this op should be moved to CustomOps
        if (extraParams == nullptr || (int)extraParams[0] == 0 || ((int)extraParams[0] == 1 && (int)extraParams[1]
== MAX_DIMENSION)) {
            doAll(dx, xShapeBuffer, result, resultShapeBuffer, extraParams);
        } else if (shape::isVector(xShapeBuffer)) {
            // vector case: argmax along the requested dimension of a 1d input
            auto dimensionLength = (int)extraParams[0];
            auto dimension = new int[dimensionLength];
            auto length = shape::length(xShapeBuffer);
            for (int i = 0; i < dimensionLength; i++) {
                dimension[i] = (int)extraParams[i + 1];
            }
            if (shape::shapeOf(xShapeBuffer)[dimension[0]] == 1) {
                // degenerate dimension of size 1: every element is its own max
                for (int i = 0; i < length; i++) {
                    result[i] = 1.0;
                }
            } else {
                auto eleStride = shape::elementWiseStride(xShapeBuffer);
                if (eleStride == 1) {
                    int maxIdx = 0;
                    T currMax = dx[0];
                    if (length < ELEMENT_THRESHOLD) {
                        // small vector: single-threaded scan, zeroing as we go
//#pragma omp simd reduction(max:maxIdx,currMax)
                        for (int i = 0; i < length; i++) {
                            if (currMax < dx[i]) {
                                currMax = dx[i];
                                maxIdx = i;
                            }
                            result[i] = 0.0;
                        }
                    } else {
                        // large vector: per-thread local maxima merged in a critical
#pragma omp parallel proc_bind(AFFINITY) default(shared)
                        {
                            int maxIdxLocal = maxIdx;
                            T currMaxLocal = currMax;
//#pragma omp simd reduction(max:maxIdxLocal,currMaxLocal)
                            for (int i = 0; i < length; i++) {
                                if (currMaxLocal < dx[i]) {
                                    currMaxLocal = dx[i];
                                    maxIdxLocal = i;
                                }
                                result[i] = 0.0;
                            }
#pragma omp critical
                            {
                                if (currMax < currMaxLocal) {
                                    currMax = currMaxLocal;
                                    maxIdx = maxIdxLocal;
                                }
                            }
                        }
                    }
                    result[maxIdx] = 1.0;
                } else {
                    // strided vector, same structure as the EWS==1 path
                    int maxIdx = 0;
                    T currMax = dx[0];
                    if (length < ELEMENT_THRESHOLD) {
//#pragma omp parallel for reduction(max:maxIdx,currMax) proc_bind(AFFINITY)
                        for (int i = 0; i < length; i++) {
                            if (currMax < dx[i * eleStride]) {
                                currMax = dx[i * eleStride];
                                maxIdx = i;
                            }
                            result[i] = 0.0;
                        }
                    } else {
#pragma omp parallel proc_bind(AFFINITY) default(shared)
                        {
                            int maxIdxLocal = maxIdx;
                            T currMaxLocal = currMax;
//#pragma omp parallel for reduction(max:maxIdx,currMax) proc_bind(AFFINITY)
                            for (int i = 0; i < length; i++) {
                                if (currMaxLocal < dx[i * eleStride]) {
                                    currMaxLocal = dx[i * eleStride];
                                    maxIdxLocal = i;
                                }
                                result[i] = 0.0;
                            }
#pragma omp critical
                            {
                                if (currMax < currMaxLocal) {
                                    currMax = currMaxLocal;
                                    maxIdx = maxIdxLocal;
                                }
                            }
                        }
                    }
                    result[maxIdx] = 1.0;
                }
            }
            // fix: release the dimension buffer allocated above — the original
            // leaked it on this branch (the TAD branch below does free its copy)
            delete[] dimension;
        } else {
            auto dimensionLength =
(int) extraParams[0];
            auto dimension = new int[dimensionLength];
#pragma omp simd
            for (int i = 0; i < dimensionLength; i++) {
                dimension[i] = (int) extraParams[i + 1];
            }
            //decompose in to several sub tads after
            //moving all dimensions (in sorted order)
            //to the back.
            //permuted version of the x shape info for setting up the tad problem
            auto tadShapeShapeInfo = tadShapeInfo;
            shape::TAD tad (xShapeBuffer, dimension, dimensionLength);
            if(tadShapeInfo==nullptr) {
                // no precomputed TAD info supplied: build shape info and offsets
                tad.createTadOnlyShapeInfo();
                tad.createOffsets();
                tadShapeShapeInfo = tad.tadOnlyShapeInfo;
                tadOffsets = tad.tadOffsets;
            }
            auto tadLength = shape::tadLength(xShapeBuffer, dimension, dimensionLength);
            auto tads = shape::length(xShapeBuffer) / tadLength;
            int tadsPerThread = tads / TAD_THRESHOLD;
            int num_threads = nd4j::math::nd4j_max<int>(1, tadsPerThread);
            num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads());
            auto tadEWS = shape::elementWiseStride(tadShapeShapeInfo);
            auto zEWS = tadEWS;
            // manual work split: each thread handles a contiguous span of TADs
            int span = (tads / num_threads) + 8;
#pragma omp parallel num_threads(num_threads) if (num_threads>1) proc_bind(AFFINITY)
            {
                int tid = omp_get_thread_num();
                int start = span * tid;
                int end = span * (tid + 1);
                if (end > tads)
                    end = tads;
                for (int r = start; r < end; r++) {
                    if (tadEWS > 0 && zEWS > 0 && dimensionLength == 1) {
                        // fast path: argmax over one TAD, then write one-hot mask
                        T *rX = dx + tadOffsets[r];
                        T *rZ = result + tadOffsets[r];
                        T maxValue = rX[0];
                        int maxIdx = 0;
                        if (tadEWS == 1 && zEWS == 1) {
//#pragma omp simd reduction(max:maxValue,maxIdx)
                            for (int i = 0; i < tadLength; i++) {
                                if (rX[i] > maxValue) {
                                    maxIdx = i;
                                    maxValue = rX[i];
                                }
                            }
#pragma omp simd
                            for (int i = 0; i < tadLength; i++) {
                                rZ[i] = maxIdx == i ? (T) 1.0 : (T) 0.0;
                            }
                        } else {
//#pragma omp parallel for reduction(max:maxValue,maxIdx) default(shared)
                            for (int i = 0; i < tadLength; i++) {
                                if (rX[i * tadEWS] > maxValue) {
                                    maxIdx = i;
                                    maxValue = rX[i * tadEWS];
                                }
                            }
#pragma omp simd
                            for (int i = 0; i < tadLength; i++) {
                                rZ[i * zEWS] = maxIdx == i ?
(T) 1.0 : (T) 0.0;
                            }
                        }
                    } else {
                        // slow path: raw iteration over one TAD with full shape info
                        int tadsPerThread = tads / TAD_THRESHOLD;
                        int num_threads = nd4j::math::nd4j_max<int>(1, tadsPerThread);
                        num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads());
                        auto offset = tadOffsets[r];
                        Nd4jLong shapeIter[MAX_RANK];
                        Nd4jLong coord[MAX_RANK];
                        int dim;
                        Nd4jLong xStridesIter[MAX_RANK];
                        Nd4jLong resultStridesIter[MAX_RANK];
                        auto xShape = shape::shapeOf(tadShapeShapeInfo);
                        auto xStride = shape::stride(tadShapeShapeInfo);
                        auto resultStride = shape::stride(tadShapeShapeInfo);
                        int rank = shape::rank(tadShapeShapeInfo);
                        T *xPointer = dx + offset;
                        T *resultPointer = result + offset;
                        T maxValue = xPointer[0];
                        // remember where the max lives in the output buffer; the
                        // iterator advances resultPointer, so keep a stable copy
                        T *maxCursor = resultPointer;
                        Nd4jPointer maxCursorLong = reinterpret_cast<Nd4jPointer>(maxCursor);
                        if (PrepareTwoRawArrayIter<T>(rank, xShape, xPointer, xStride, resultPointer, resultStride, &rank, shapeIter, &xPointer, xStridesIter, &resultPointer, resultStridesIter) >= 0) {
                            ND4J_RAW_ITER_START(dim, rank, coord, shapeIter); {
                                if (maxValue < xPointer[0]) {
                                    maxCursor = resultPointer;
                                    maxCursorLong = reinterpret_cast<Nd4jPointer>(resultPointer);
                                    maxValue = xPointer[0];
                                }
                                resultPointer[0] = 0.0;
                            }
                            ND4J_RAW_ITER_TWO_NEXT(dim, rank, coord, shapeIter, xPointer, xStridesIter, resultPointer, resultStridesIter);
                            maxCursor = reinterpret_cast<T *>(maxCursorLong);
                            maxCursor[0] = 1.0;
                        }
                    }
                }
            }
            delete[] dimension;
        }
    }

    // element-wise fallback; IsMax always goes through execSpecial
    op_def static T op(T d1, T *params) {
        return nd4j::math::softplus<T>(d1);
    }
};
}
dqp3.c
/*! @copyright (c) 2017 King Abdullah University of Science and
 * Technology (KAUST). All rights reserved.
 *
 * STARS-H is a software package, provided by King Abdullah
 * University of Science and Technology (KAUST)
 *
 * @file src/backends/mpi/blrm/dqp3.c
 * @version 0.3.0
 * @author Aleksandr Mikhalev
 * @date 2017-11-07
 * */

#include "common.h"
#include "starsh.h"
#include "starsh-mpi.h"

int starsh_blrm__dqp3_mpi(STARSH_blrm **matrix, STARSH_blrf *format,
        int maxrank, double tol, int onfly)
//! Approximate each tile of BLR matrix with RRQR (GEQP3 function).
/*! @param[out] matrix: Address of pointer to @ref STARSH_blrm object.
 * @param[in] format: Block low-rank format.
 * @param[in] maxrank: Maximum possible rank.
 * @param[in] tol: Relative error tolerance.
 * @param[in] onfly: Whether not to store dense blocks.
 * @return Error code @ref STARSH_ERRNO.
 * @ingroup blrm
 * */
{
    // Shortcuts into the format descriptor
    STARSH_blrf *F = format;
    STARSH_problem *P = F->problem;
    STARSH_kernel *kernel = P->kernel;
    STARSH_int nblocks_far = F->nblocks_far;
    STARSH_int nblocks_near = F->nblocks_near;
    STARSH_int nblocks_far_local = F->nblocks_far_local;
    STARSH_int nblocks_near_local = F->nblocks_near_local;
    // Shortcuts to information about clusters
    STARSH_cluster *RC = F->row_cluster;
    STARSH_cluster *CC = F->col_cluster;
    void *RD = RC->data, *CD = CC->data;
    // Following values default to given block low-rank format F, but they are
    // changed when there are false far-field blocks.
    STARSH_int new_nblocks_far = F->nblocks_far;
    STARSH_int new_nblocks_near = F->nblocks_near;
    STARSH_int new_nblocks_far_local = F->nblocks_far_local;
    STARSH_int new_nblocks_near_local = F->nblocks_near_local;
    STARSH_int *block_far = F->block_far;
    STARSH_int *block_near = F->block_near;
    STARSH_int *block_far_local = F->block_far_local;
    STARSH_int *block_near_local = F->block_near_local;
    // Places to store low-rank factors, dense blocks and ranks
    // (near_D/alloc_D/offset_D are declared here for code past this chunk)
    Array **far_U = NULL, **far_V = NULL, **near_D = NULL;
    int *far_rank = NULL;
    double *alloc_U = NULL, *alloc_V = NULL, *alloc_D = NULL;
    size_t offset_U = 0, offset_V = 0, offset_D = 0;
    STARSH_int lbi, lbj, bi, bj = 0;
    double drsdd_time = 0, kernel_time = 0;
    const int oversample = starsh_params.oversample;
    // Init buffers to store low-rank factors of far-field blocks if needed
    if(nblocks_far > 0)
    {
        STARSH_MALLOC(far_U, nblocks_far_local);
        STARSH_MALLOC(far_V, nblocks_far_local);
        STARSH_MALLOC(far_rank, nblocks_far_local);
        size_t size_U = 0, size_V = 0;
        // Simple cycle over all far-field blocks
        // First pass: accumulate total row/column sizes for one big allocation
        for(lbi = 0; lbi < nblocks_far_local; lbi++)
        {
            STARSH_int bi = block_far_local[lbi];
            // Get indexes of corresponding block row and block column
            STARSH_int i = block_far[2*bi];
            STARSH_int j = block_far[2*bi+1];
            // Get corresponding sizes and minimum of them
            size_U += RC->size[i];
            size_V += CC->size[j];
        }
        size_U *= maxrank;
        size_V *= maxrank;
        STARSH_MALLOC(alloc_U, size_U);
        STARSH_MALLOC(alloc_V, size_V);
        // Second pass: carve per-block U/V views out of the shared buffers
        for(lbi = 0; lbi < nblocks_far_local; lbi++)
        {
            STARSH_int bi = block_far_local[lbi];
            // Get indexes of corresponding block row and block column
            STARSH_int i = block_far[2*bi];
            STARSH_int j = block_far[2*bi+1];
            // Get corresponding sizes and minimum of them
            size_t nrows = RC->size[i], ncols = CC->size[j];
            // NOTE(review): int shape arrays truncate size_t dims — presumably
            // block sizes fit in int; confirm with Array API.
            int shape_U[] = {nrows, maxrank};
            int shape_V[] = {ncols, maxrank};
            double *U = alloc_U+offset_U, *V = alloc_V+offset_V;
            offset_U += nrows*maxrank;
            offset_V += ncols*maxrank;
            array_from_buffer(far_U+lbi, 2, shape_U, 'd', 'F', U);
array_from_buffer(far_V+lbi, 2, shape_V, 'd', 'F', V); } offset_U = 0; offset_V = 0; } // Work variables int info; // Simple cycle over all far-field admissible blocks #pragma omp parallel for schedule(dynamic, 1) for(lbi = 0; lbi < nblocks_far_local; lbi++) { STARSH_int bi = block_far_local[lbi]; // Get indexes of corresponding block row and block column STARSH_int i = block_far[2*bi]; STARSH_int j = block_far[2*bi+1]; // Get corresponding sizes and minimum of them int nrows = RC->size[i]; int ncols = CC->size[j]; int mn = nrows < ncols ? nrows : ncols; int mn2 = maxrank+oversample; if(mn2 > mn) mn2 = mn; // Get size of temporary arrays int lwork = 3*ncols+1, lwork_sdd = (4*(size_t)mn2+7)*mn2; if(lwork_sdd > lwork) lwork = lwork_sdd; lwork += (size_t)mn2*(2*ncols+mn2+1)+mn; int liwork = ncols, liwork_sdd = 8*mn2; if(liwork_sdd > liwork) liwork = liwork_sdd; double *D, *work; int *iwork; int info; // Allocate temporary arrays STARSH_PMALLOC(D, (size_t)nrows*(size_t)ncols, info); STARSH_PMALLOC(iwork, liwork, info); STARSH_PMALLOC(work, lwork, info); // Compute elements of a block #ifdef OPENMP double time0 = omp_get_wtime(); #endif kernel(nrows, ncols, RC->pivot+RC->start[i], CC->pivot+CC->start[j], RD, CD, D, nrows); #ifdef OPENMP double time1 = omp_get_wtime(); #endif starsh_dense_dlrqp3(nrows, ncols, D, nrows, far_U[lbi]->data, nrows, far_V[lbi]->data, ncols, far_rank+lbi, maxrank, oversample, tol, work, lwork, iwork); #ifdef OPENMP double time2 = omp_get_wtime(); #pragma omp critical { drsdd_time += time2-time1; kernel_time += time1-time0; } #endif // Free temporary arrays free(D); free(work); free(iwork); } // Get number of false far-field blocks STARSH_int nblocks_false_far_local = 0; STARSH_int *false_far_local = NULL; for(lbi = 0; lbi < nblocks_far_local; lbi++) if(far_rank[lbi] == -1) nblocks_false_far_local++; if(nblocks_false_far_local > 0) { // IMPORTANT: `false_far` and `false_far_local` must be in // ascending order for later code to work normally 
STARSH_MALLOC(false_far_local, nblocks_false_far_local); lbj = 0; for(lbi = 0; lbi < nblocks_far_local; lbi++) if(far_rank[lbi] == -1) false_far_local[lbj++] = block_far_local[lbi]; } // Sync list of all false far-field blocks STARSH_int nblocks_false_far = 0; int int_nblocks_false_far_local = nblocks_false_far_local; int *mpi_recvcount, *mpi_offset; int mpi_size, mpi_rank; MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); STARSH_MALLOC(mpi_recvcount, mpi_size); STARSH_MALLOC(mpi_offset, mpi_size); MPI_Allgather(&int_nblocks_false_far_local, 1, MPI_INT, mpi_recvcount, 1, MPI_INT, MPI_COMM_WORLD); for(bi = 0; bi < mpi_size; bi++) nblocks_false_far += mpi_recvcount[bi]; mpi_offset[0] = 0; for(bi = 1; bi < mpi_size; bi++) mpi_offset[bi] = mpi_offset[bi-1]+mpi_recvcount[bi-1]; STARSH_int *false_far = NULL; if(nblocks_false_far > 0) STARSH_MALLOC(false_far, nblocks_false_far); MPI_Allgatherv(false_far_local, nblocks_false_far_local, my_MPI_SIZE_T, false_far, mpi_recvcount, mpi_offset, my_MPI_SIZE_T, MPI_COMM_WORLD); free(mpi_recvcount); free(mpi_offset); // Make false_far be in ascending order qsort(false_far, nblocks_false_far, sizeof(*false_far), cmp_size_t); if(nblocks_false_far > 0) { // Update list of near-field blocks new_nblocks_near = nblocks_near+nblocks_false_far; new_nblocks_near_local = nblocks_near_local+nblocks_false_far_local; STARSH_MALLOC(block_near, 2*new_nblocks_near); if(new_nblocks_near_local > 0) STARSH_MALLOC(block_near_local, new_nblocks_near_local); // At first get all near-field blocks, assumed to be dense #pragma omp parallel for schedule(static) for(bi = 0; bi < 2*nblocks_near; bi++) block_near[bi] = F->block_near[bi]; #pragma omp parallel for schedule(static) for(lbi = 0; lbi < nblocks_near_local; lbi++) block_near_local[lbi] = F->block_near_local[lbi]; // Add false far-field blocks #pragma omp parallel for schedule(static) for(bi = 0; bi < nblocks_false_far; bi++) { STARSH_int bj = false_far[bi]; 
block_near[2*(bi+nblocks_near)] = F->block_far[2*bj]; block_near[2*(bi+nblocks_near)+1] = F->block_far[2*bj+1]; } bi = 0; for(lbi = 0; lbi < nblocks_false_far_local; lbi++) { lbj = false_far_local[lbi]; while(bi < nblocks_false_far && false_far[bi] < lbj) bi++; block_near_local[nblocks_near_local+lbi] = nblocks_near+bi; } // Update list of far-field blocks new_nblocks_far = nblocks_far-nblocks_false_far; new_nblocks_far_local = nblocks_far_local-nblocks_false_far_local; if(new_nblocks_far > 0) { STARSH_MALLOC(block_far, 2*new_nblocks_far); if(new_nblocks_far_local > 0) STARSH_MALLOC(block_far_local, new_nblocks_far_local); bj = 0; lbi = 0; lbj = 0; for(bi = 0; bi < nblocks_far; bi++) { // `false_far` must be in ascending order for this to work if(bj < nblocks_false_far && false_far[bj] == bi) { if(nblocks_false_far_local > lbj && false_far_local[lbj] == bi) { lbi++; lbj++; } bj++; } else { block_far[2*(bi-bj)] = F->block_far[2*bi]; block_far[2*(bi-bj)+1] = F->block_far[2*bi+1]; if(nblocks_far_local > lbi && F->block_far_local[lbi] == bi) { block_far_local[lbi-lbj] = bi-bj; lbi++; } } } } // Update format by creating new format STARSH_blrf *F2; info = starsh_blrf_new_from_coo_mpi(&F2, P, F->symm, RC, CC, new_nblocks_far, block_far, new_nblocks_far_local, block_far_local, new_nblocks_near, block_near, new_nblocks_near_local, block_near_local, F->type); // Swap internal data of formats and free unnecessary data STARSH_blrf tmp_blrf = *F; *F = *F2; *F2 = tmp_blrf; if(mpi_rank == 0) STARSH_WARNING("`F` was modified due to false far-field blocks"); starsh_blrf_free(F2); } // Compute near-field blocks if needed if(onfly == 0 && new_nblocks_near > 0) { STARSH_MALLOC(near_D, new_nblocks_near_local); size_t size_D = 0; // Simple cycle over all near-field blocks for(lbi = 0; lbi < new_nblocks_near_local; lbi++) { STARSH_int bi = block_near_local[lbi]; // Get indexes of corresponding block row and block column STARSH_int i = block_near[2*bi]; STARSH_int j = block_near[2*bi+1]; 
// Get corresponding sizes and minimum of them size_t nrows = RC->size[i]; size_t ncols = CC->size[j]; // Update size_D size_D += nrows*ncols; } STARSH_MALLOC(alloc_D, size_D); // For each near-field block compute its elements #pragma omp parallel for schedule(dynamic, 1) for(lbi = 0; lbi < new_nblocks_near_local; lbi++) { STARSH_int bi = block_near_local[lbi]; // Get indexes of corresponding block row and block column STARSH_int i = block_near[2*bi]; STARSH_int j = block_near[2*bi+1]; // Get corresponding sizes and minimum of them int nrows = RC->size[i]; int ncols = CC->size[j]; int shape[2] = {nrows, ncols}; double *D; #pragma omp critical { D = alloc_D+offset_D; offset_D += nrows*ncols; //array_from_buffer(near_D+lbi, 2, shape, 'd', 'F', D); //offset_D += near_D[lbi]->size; } array_from_buffer(near_D+lbi, 2, shape, 'd', 'F', D); #ifdef OPENMP double time0 = omp_get_wtime(); #endif kernel(nrows, ncols, RC->pivot+RC->start[i], CC->pivot+CC->start[j], RD, CD, D, nrows); #ifdef OPENMP double time1 = omp_get_wtime(); #pragma omp critical kernel_time += time1-time0; #endif } } // Change sizes of far_rank, far_U and far_V if there were false // far-field blocks lbj = 0; for(lbi = 0; lbi < nblocks_far_local; lbi++) { if(far_rank[lbi] == -1) lbj++; else { int shape_U[2] = {far_U[lbi]->shape[0], far_rank[lbi]}; int shape_V[2] = {far_V[lbi]->shape[0], far_rank[lbi]}; array_from_buffer(far_U+lbi-lbj, 2, shape_U, 'd', 'F', far_U[lbi]->data); array_from_buffer(far_V+lbi-lbj, 2, shape_V, 'd', 'F', far_V[lbi]->data); far_rank[lbi-lbj] = far_rank[lbi]; } } if(nblocks_false_far_local > 0 && new_nblocks_far_local > 0) { STARSH_REALLOC(far_rank, new_nblocks_far_local); STARSH_REALLOC(far_U, new_nblocks_far_local); STARSH_REALLOC(far_V, new_nblocks_far_local); } // If all far-field blocks are false, then dealloc buffers if(new_nblocks_far_local == 0 && nblocks_far_local > 0) { block_far = NULL; free(far_rank); far_rank = NULL; free(far_U); far_U = NULL; free(far_V); far_V = NULL; 
free(alloc_U); alloc_U = NULL; free(alloc_V); alloc_V = NULL; } // Dealloc list of false far-field blocks if it is not empty if(nblocks_false_far > 0) free(false_far); if(nblocks_false_far_local > 0) free(false_far_local); // Finish with creating instance of Block Low-Rank Matrix with given // buffers #ifdef OPENMP double mpi_drsdd_time = 0, mpi_kernel_time = 0; MPI_Reduce(&drsdd_time, &mpi_drsdd_time, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); MPI_Reduce(&kernel_time, &mpi_kernel_time, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); if(mpi_rank == 0) { //STARSH_WARNING("DRSDD kernel total time: %e secs", mpi_drsdd_time); //STARSH_WARNING("MATRIX kernel total time: %e secs", mpi_kernel_time); } #endif return starsh_blrm_new_mpi(matrix, F, far_rank, far_U, far_V, onfly, near_D, alloc_U, alloc_V, alloc_D, '1'); }
omp_master_3.c
<ompts:test>
<ompts:testdescription>Test which checks the omp master directive by counting up a variable in a omp master section. It also checks that the master thread has the thread number 0 as specified in the Open MP standard version 3.0.</ompts:testdescription>
<ompts:ompversion>3.0</ompts:ompversion>
<ompts:directive>omp master</ompts:directive>
<ompts:dependences>omp critical</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include "omp_testsuite.h"

/* Verifies that the region guarded by #pragma omp master is executed exactly
 * once (nthreads ends up 1) and only by the thread with id 0, as required by
 * the OpenMP 3.0 specification. Returns nonzero on success. */
int <ompts:testcode:functionname>omp_master_3</ompts:testcode:functionname>(FILE * logFile)
{
    <ompts:orphan:vars>
	int nthreads;          /* number of threads that entered the master region; must be 1 */
	int executing_thread;  /* id of the thread that executed the master region; must be 0 */
        int tid_result = 0; /* counts up the number of wrong thread no. for the master thread. (Must be 0) */
    </ompts:orphan:vars>

    nthreads = 0;
    executing_thread = -1;

#pragma omp parallel
    {
	<ompts:orphan>
	    <ompts:check>#pragma omp master </ompts:check>
	    {
                int tid = omp_get_thread_num();
                /* A non-zero thread id inside a master region violates the
                 * spec; count each occurrence under a critical section. */
                if (tid != 0) {
#pragma omp critical
                    { tid_result++; }
                }
		#pragma omp critical
		{
		    nthreads++;
		}
		executing_thread = omp_get_thread_num ();
	    } /* end of master*/
	</ompts:orphan>
    } /* end of parallel*/
    return ((nthreads == 1) && (executing_thread == 0) && (tid_result == 0));
}
</ompts:testcode>
</ompts:test>
time.h
/*===---- time.h - OpenMP time header wrapper ------------------------ c ---===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __CLANG_OPENMP_TIME_H__
#define __CLANG_OPENMP_TIME_H__

#ifndef _OPENMP
#error "This file is for OpenMP compilation only."
#endif

/* In C++ the device overload may additionally be constexpr. */
#if defined(__cplusplus)
#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow))
#else
#define __DEVICE__ static __attribute__((always_inline, nothrow))
#endif

/* Pull in the real system <time.h> first; the declare-variant block below
 * only overrides clock() when compiling for an NVPTX OpenMP target device. */
#include_next <time.h>

#pragma omp begin declare variant match(                                       \
    device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})

/* Device-side clock(): reads the PTX clock special register. */
__DEVICE__ clock_t clock() { return __nvvm_read_ptx_sreg_clock(); }

#pragma omp end declare variant

#endif
Matrix.h
#ifndef PARALLELPROGRAM2_MATRIX_H #define PARALLELPROGRAM2_MATRIX_H #include <cstdlib> #include <cstdio> #include <algorithm> #include <cstring> #include <type_traits> #include <omp.h> #include "Timer.h" template <class T, size_t Dim, class = std::enable_if_t<std::is_arithmetic_v<T>>> class Matrix { public: Matrix() { Data = new T[Size]; } Matrix(T* data) { Data = data; } Matrix(Matrix<T, Dim>& m) { Data = new T[Size]; std::copy(m.Data, m.Data + Size, Data); } ~Matrix() { delete [] Data; deleteMatT(); } constexpr size_t getDim() { return Dim; } constexpr size_t getSize() { return Size; } void zero() { fill(0); } void fill(T value) { std::fill(Data, Data + Size, value); } void randFill() { for(size_t i = 0; i < Size; ++i) Data[i] = std::rand() & 0xfff; } void print() const { for(size_t i = 0; i < Dim; ++i) { for(size_t j = 0; j < Dim; ++j) { printf("%2d ", operator()(i, j)); } printf("\n"); } } const Matrix<T, Dim>& transpose() { if(MatT == nullptr) { #if RunTimers == 1 Timer t("transpose"); #endif T *DataT = new T[Size]; transposeData(Data, DataT); MatT = new Matrix<T, Dim>(DataT); } return *MatT; } // Умножение с вертикальным разложением (на столбцы) static void multiplyV(Matrix<T, Dim>& left, Matrix<T, Dim>& right, Matrix<T, Dim>& result) { multiplyDataVertical(left.Data, right.transpose().Data, result.Data); } // Умножение с горизонтальным разложением (на строки) static void multiplyH(const Matrix<T, Dim>& left, const Matrix<T, Dim>& right, Matrix<T, Dim>& result) { multiplyDataHorizontal(left.Data, right.Data, result.Data); } // Умножение с вертикальным разложением (на столбцы) в многопоточном режиме static void multiplyVMT(Matrix<T, Dim>& left, Matrix<T, Dim>& right, Matrix<T, Dim>& result) { multiplyDataVerticalMT(left.Data, right.transpose().Data, result.Data); } // Умножение с горизонтальным разложением (на строки) в многопоточном режиме static void multiplyHMT(const Matrix<T, Dim>& left, const Matrix<T, Dim>& right, Matrix<T, Dim>& result) { 
multiplyDataHorizontalMT(left.Data, right.Data, result.Data); } const Matrix<T, Dim>& operator*=(const Matrix<T, Dim>& other) { T* newData = new T[Size]; std::fill(newData, newData + Size, 0); // multiplyDataVerticalMT(Data, other.transpose().Data, newData); multiplyDataHorizontalMT(Data, other.Data, newData); deleteMatT(); delete[] Data; Data = newData; return *this; } Matrix<T, Dim> operator*(const Matrix<T, Dim>& right) const { T* newData = new T[Size]; std::fill(newData, newData + Size, 0); multiplyDataHorizontalMT(Data, right.Data, newData); return Matrix<T, Dim>(newData); } T& operator()(const size_t index) const { return Data[index]; } T& operator()(const size_t row, const size_t column) const { return Data[row * Dim + column]; } void deleteMatT() { if (MatT) { delete MatT; MatT = nullptr; } } private: T* Data = nullptr; static constexpr size_t Size = Dim*Dim; Matrix<T, Dim>* MatT = nullptr; static void transposeData(const T* data, T* result) { for(size_t i = 0; i < Dim; ++i) { for(size_t j = 0; j < Dim; ++j) { result[j*Dim + i] = data[i*Dim + j]; } } } static void multiplyDataVertical(const T* left, const T* rightT, T* result) { // i - row, j - column for(size_t i = 0; i < Dim; ++i) { for (size_t j = 0; j < Dim; ++j) { T sum = 0; for (size_t k = 0; k < Dim; ++k) { sum += left[i * Dim + k] * rightT[j * Dim + k]; } result[i * Dim + j] = sum; } } } static void multiplyDataHorizontal(const T* left, const T* right, T* result) { for(size_t a = 0; a < Dim; ++a) { for (size_t b = 0; b < Dim; ++b) { for (size_t c = 0; c < Dim; ++c) { result[a * Dim + c] += left[a * Dim + b] * right[b * Dim + c]; } } } } static void multiplyDataVerticalMT(const T* left, const T* rightT, T* result) { #pragma omp parallel for // i - row, j - column for(size_t i = 0; i < Dim; ++i) { for (size_t j = 0; j < Dim; ++j) { T sum = 0; for (size_t k = 0; k < Dim; ++k) { sum += left[i * Dim + k] * rightT[j * Dim + k]; } result[i * Dim + j] = sum; } } } static void multiplyDataHorizontalMT(const 
T* left, const T* right, T* result) { #pragma omp parallel for for(size_t a = 0; a < Dim; ++a) { for (size_t b = 0; b < Dim; ++b) { for (size_t c = 0; c < Dim; ++c) { result[a * Dim + c] += left[a * Dim + b] * right[b * Dim + c]; } } } } }; #endif //PARALLELPROGRAM2_MATRIX_NOPE_H
convolution_7x7_pack1to4_int8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv7x7s2_pack1to4_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; const int size = outw * outh; const int maxk = 49; // im2col Mat bottom_im2col(size, maxk, inch, 1u, 1, opt.workspace_allocator); { const int gap = w * 2 - outw * 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < inch; p++) { const Mat img = bottom_blob.channel(p); signed char* ptr = bottom_im2col.channel(p); for (int u = 0; u < 7; u++) { for (int v = 0; v < 7; v++) { const signed char* sptr = img.row<const signed char>(u) + v; for (int i = 0; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { ptr[0] = sptr[0]; ptr[1] = sptr[2]; ptr[2] = sptr[4]; ptr[3] = sptr[6]; sptr += 8; ptr += 4; } for (; j + 1 < outw; j += 2) { ptr[0] = sptr[0]; ptr[1] = sptr[2]; sptr += 4; ptr += 2; } for (; j < outw; j++) { ptr[0] = sptr[0]; sptr += 2; ptr += 1; } sptr += gap; } } } } } im2col_sgemm_pack1to4_int8_neon(bottom_im2col, top_blob, kernel, opt); }
tensor.h
/**
 * Copyright (c) 2015, Jozef Stefan Institute, Quintelligence d.o.o. and contributors
 * All rights reserved.
 *
 * This source code is licensed under the FreeBSD license found in the
 * LICENSE file in the root directory of this source tree.
 */

#ifndef tensor_h
#define tensor_h

#include "base.h"
#ifdef GLib_OPENMP
#include <omp.h>
#endif

//TDTensor dense tensor
//TSTensor sparse tensor
//TKTensor Kruskal tensor
//TTTensor Tucker tensor

namespace TTensor {

// Forward declaration of the tensor-operation helper defined at the bottom
// of this file.
template <class TVal = TFlt, class TSizeMdTy = TInt, class TSizeNzTy = int>
class TTensorOp;

// Sparse tensor in coordinate (COO) form: one value plus one coordinate row
// per nonzero element.
// high number of nonzero elements: TSizeNzTy = int64, high dimensionality of modes: TSizeMdTy = int64
template <class TVal = TFlt, class TSizeMdTy = TInt, class TSizeNzTy = int>
class TSTensor {
private:
    TInt Modes; // number of modes
    TVec<TSizeMdTy> DimV; // dimensions of each mode
    TVec<TVal, TSizeNzTy> Values; //values of nonzero elements
    TVVec<TSizeMdTy, TSizeNzTy> Coordinates; //coordinates of nonzero elements (X coordinate ranges over data points)
public:
    // Sets dimensions
    TSTensor(const TVec<TSizeMdTy>& DimV_) {
        Modes = DimV_.Len();
        DimV = DimV_;
    }
    // Sets dimensions and reserves space
    TSTensor(const TVec<TSizeMdTy>& DimV_, const TSizeNzTy& NNonZero) {
        Modes = DimV_.Len();
        DimV = DimV_;
        Values.Gen(NNonZero);
        Coordinates.Gen(NNonZero, Modes);
    }
    // The input is assumed to come from saveSparse.m or similar (not robust)
    // Assumes double for values, int for dimensions and int or int64 for nnz
    // File layout: line 1 = mode dimensions, line 2 = number of nonzeros,
    // then one "<coord_0> ... <coord_{Modes-1}> <value>" line per nonzero.
    TSTensor(const TStr& FileNm, const bool& BigIndex = false) {
        TFIn Reader(FileNm);
        TStr Line;
        Reader.GetNextLn(Line);
        TStrV StrVec;
        Line.SplitOnAllAnyCh(" ", StrVec, true);
        DimV.Gen(StrVec.Len());
        Modes = DimV.Len();
        for (int ModeN = 0; ModeN < Modes; ModeN++) {
            DimV[ModeN] = StrVec[ModeN].GetInt();
        }
        Reader.GetNextLn(Line);
        // BigIndex selects a 64-bit nonzero count.
        if (!BigIndex) {
            int NNZ = Line.GetInt();
            Values.Gen(NNZ);
            Coordinates.Gen(NNZ, Modes);
        } else {
            int64 NNZ = Line.GetInt64();
            Values.Gen(NNZ);
            Coordinates.Gen(NNZ, Modes);
        }
        int64 LineN = 0;
        while (Reader.GetNextLn(Line)) {
            Line.SplitOnAllAnyCh(" ", StrVec, true);
            if (StrVec.Len() == 0) continue; // skip blank lines
            Assert(StrVec.Len() == Modes + 1);
            Assert(LineN < Values.Len());
            for (int ModeN = 0; ModeN < Modes; ModeN++) {
                Coordinates.PutXY(LineN, ModeN, StrVec[ModeN].GetInt());
            }
            Values[LineN] = StrVec[Modes].GetFlt();
            LineN++;
        }
    }
    // Fills the tensor with up to NNZ random nonzeros at unique random
    // coordinates. Duplicate coordinates are retried in place (Offset counts
    // duplicates), so the final nonzero count is NNZ minus duplicates.
    void GenRandom(const TVec<TSizeMdTy>& DimV_, const TSizeNzTy& NNZ) {
        Modes = DimV_.Len();
        DimV = DimV_;
        TRnd Rand;
        TVec<TVal, TSizeNzTy> Values2; //values of nonzero elements
        Values2.Gen(NNZ);
        TVVec<TSizeMdTy, TSizeNzTy> Coordinates2;
        Coordinates2.Gen(NNZ, Modes);
        if (NNZ > TInt::Mx) {
            IAssertR(false, "Int64 NNZ not implemented!");
        }
        THashSet<TStr> Keys; // for duplicates
        Keys.Gen(NNZ); // for duplicates
        TSizeNzTy Offset = 0;
        for (TSizeNzTy ElN = 0; ElN < NNZ; ElN++) {
            Values2[ElN - Offset] = TFlt::GetRnd();
            // Coordinates are joined into a string key for duplicate checks.
            TStr Coordinate;
            for (int ModeN = 0; ModeN < Modes; ModeN++) {
                if (DimV[ModeN] < TInt::Mx) {
                    Coordinates2.At(ElN - Offset, ModeN) = Rand.GetUniDevInt(DimV[ModeN]);
                } else {
                    Coordinates2.At(ElN - Offset, ModeN) = Rand.GetUniDevInt64(DimV[ModeN]);
                }
                Coordinate += TInt::GetStr(Coordinates2.At(ElN - Offset, ModeN)) + "_"; // duplicates
            }
            // duplicates: repeat step or add key
            if (Keys.IsKey(Coordinate)) {
                Offset++;
            } else {
                Keys.AddKey(Coordinate);
            }
            //printf("%s, offset %d\n", Coordinate.CStr(), Offset);
        }
        // Copy the unique prefix into the member arrays.
        Values.Gen(NNZ - Offset); //values of nonzero elements
        Coordinates.Gen(NNZ - Offset, Modes);
        for (TSizeNzTy ElN = 0; ElN < NNZ - Offset; ElN++) {
            Values[ElN] = Values2[ElN];
            for (int ModeN = 0; ModeN < Modes; ModeN++) {
                Coordinates.At(ElN, ModeN) = Coordinates2.At(ElN, ModeN);
            }
        }
    }
    // Debug dump to stdout.
    // NOTE(review): "%I64d" is an MSVC-only length modifier; this output is
    // not portable to other compilers — confirm the intended platforms.
    void Display() {
        printf("Number of modes %d\n", Modes);
        printf("Dimensions: ");
        for (int ModeN = 0; ModeN < Modes; ModeN++) {
            printf("%I64d ", DimV[ModeN]);
        }
        printf("\n");
        printf("NNZ: %I64d\n", Values.Len());
        for (int64 LineN = 0; LineN < Values.Len(); LineN++) {
            for (int ModeN = 0; ModeN < Modes; ModeN++) {
                printf("%I64d ", Coordinates.At(LineN, ModeN));
            }
            printf("%f\n", Values[LineN]);
        }
    }
    int GetModes() const {return Modes;}
    const TVec<TSizeMdTy>& GetDimV() const {return DimV;}
    TSizeMdTy GetDim(const int& DimN) const {Assert((DimN >= 0) && (DimN < Modes) && (DimV.Len() == Modes)); return DimV[DimN];}
    TSizeNzTy GetNNZ() const {return Values.Len();}
    const TVVec<TSizeMdTy, TSizeNzTy>& GetCoordinates() const {return Coordinates;}
    const TVec<TVal, TSizeNzTy>& GetValues() const {return Values;}
    // Frobenius norm: sqrt of the sum of squared nonzero values.
    TVal GetNorm() const {
        TVal norm = 0.0;
        for (TSizeNzTy ElN = 0; ElN < Values.Len(); ElN++){
            norm += Values[ElN] * Values[ElN];
        }
        return sqrt(norm);
    }
    // Internal invariant check: matching sizes and in-bounds coordinates.
    bool IsConsistent() const {
        if (DimV.Len() != Modes) return false;
        if (DimV.Len() != Coordinates.GetYDim()) return false;
        if (Values.Len() != Coordinates.GetXDim()) return false;
        //read coordinates and check for bounds
        for (TSizeNzTy RowN = 0; RowN < Coordinates.GetXDim(); RowN++) {
            for (TSizeNzTy ColN = 0; ColN < Coordinates.GetYDim(); ColN++) {
                if ((Coordinates.At(RowN,ColN) < 0) || (Coordinates.At(RowN,ColN) >= DimV[(int)ColN])) return false;
            }
        }
        return true;
    }
};

// Kruskal (CP) tensor: a weighted sum of R rank-one tensors, stored as a
// coefficient vector Lambda plus one factor matrix per mode.
// high dimensionality of modes: TSizeMdTy = int64
template <class TVal = TFlt, class TSizeMdTy = TInt, class TSizeNzTy = int>
class TKTensor {
private:
    TInt Modes; // number of modes
    TVec<TSizeMdTy> DimV; // dimensions of each mode (number of modes is an int!)
    TInt R; // number of components
    TVec<TVal> Lambda; // coefficient vector (length R; number of components is an int!)
    TVec<TVVec<TVal, TSizeMdTy> > U; //basis matrices, columns should be normalized (x = rowIdx, y = colIdx), ydim = R
public:
    // Allocates zeroed factor matrices of size DimV_[ModeN] x R.
    TKTensor(const TVec<TSizeMdTy>& DimV_, const int& R_) {
        Modes = DimV_.Len();
        R = R_;
        Lambda.Gen(R);
        //Lambda.PutAll(0.0);
        DimV = DimV_;
        U.Gen(Modes, 0);
        for (int ModeN = 0; ModeN < Modes; ModeN++) {
            // Create DimV[ModeN] x R matrix
            TVVec<TVal, TSizeMdTy> Factor;
            Factor.Gen(DimV[ModeN], R);
            U.Add(Factor);
        }
    }
    // File layout: line 1 = mode dimensions, line 2 = lambda vector, then
    // each factor matrix row by row.
    TKTensor(const TStr& FileNm, const bool& BigIndex = false) {
        TFIn Reader(FileNm);
        TStr Line;
        // dimensions
        Reader.GetNextLn(Line);
        TStrV StrVec;
        Line.SplitOnAllAnyCh(" ", StrVec, true);
        DimV.Gen(StrVec.Len());
        Modes = DimV.Len();
        for (int ModeN = 0; ModeN < Modes; ModeN++) {
            if (!BigIndex) {
                DimV[ModeN] = StrVec[ModeN].GetInt();
            } else {
                DimV[ModeN] = StrVec[ModeN].GetInt64();
            }
        }
        // lambda
        Reader.GetNextLn(Line);
        Line.SplitOnAllAnyCh(" ", StrVec, true);
        R = StrVec.Len();
        Lambda.Gen(R);
        for (int FacN = 0; FacN < R; FacN++) {
            Lambda[FacN] = StrVec[FacN].GetFlt();
        }
        printf("startt U\n");
        // U matrices
        U.Gen(Modes, 0);
        for (int ModeN = 0; ModeN < Modes; ModeN++) {
            TVVec<TVal, TSizeMdTy> Factor;
            Factor.Gen(DimV[ModeN], R);
            for (TSizeMdTy RowN = 0; RowN < DimV[ModeN]; RowN++) {
                Reader.GetNextLn(Line);
                Line.SplitOnAllAnyCh(" ", StrVec, true);
                for (int FacN = 0; FacN < R; FacN++) {
                    Factor.PutXY(RowN, FacN, StrVec[FacN].GetFlt());
                }
            }
            U.Add(Factor);
        }
    }
    // Fills lambda and all factor matrices with normal random deviates.
    void GenRandom(const TVec<TSizeMdTy>& DimV_, const int& R_) {
        Modes = DimV_.Len();
        R = R_;
        Lambda.Gen(R);
        TRnd Rand;
        for (int FacN = 0; FacN < R; FacN++) {
            Lambda[FacN] = Rand.GetNrmDev();
        }
        DimV = DimV_;
        U.Gen(Modes, 0);
        for (int ModeN = 0; ModeN < Modes; ModeN++) {
            // Create DimV[ModeN] x R matrix
            TVVec<TVal, TSizeMdTy> Factor;
            Factor.Gen(DimV[ModeN], R);
            for (TSizeMdTy RowN = 0; RowN < DimV[ModeN]; RowN++) {
                for (int ColN = 0; ColN < R; ColN++) {
                    Factor.At(RowN, ColN) = Rand.GetNrmDev();;
                }
            }
            U.Add(Factor);
        }
    }
    // Debug dump to stdout ("%I64d" is MSVC-only, see TSTensor::Display).
    void Display() {
        printf("Number of modes %d\n", Modes);
        printf("Dimensions: ");
        for (int ModeN = 0; ModeN < Modes; ModeN++) {
            printf("%I64d ", DimV[ModeN]);
        }
        printf("\n");
        printf("Number of factors %d\n", R);
        printf("lambda:\n");
        for (int FacN = 0; FacN < R; FacN++) {
            printf("%f ", Lambda[FacN]);
        }
        printf("\n");
        for (int ModeN = 0; ModeN < Modes; ModeN++) {
            printf("Mode %d factor:\n", ModeN);
            for (TSizeMdTy RowN = 0; RowN < DimV[ModeN]; RowN++) {
                for (int FacN = 0; FacN < R; FacN++) {
                    printf("%f ", U[ModeN].At(RowN, FacN));
                }
                printf("\n");
            }
        }
    }
    int GetModes() const {return Modes;}
    // NOTE(review): returns TIntV while the member is TVec<TSizeMdTy> —
    // this only compiles for the default TSizeMdTy = TInt; confirm whether
    // other instantiations are intended (TSTensor::GetDimV is generic).
    const TIntV& GetDimV() const {return DimV;}
    TSizeMdTy GetDim(const int& DimN) const {Assert((DimN >= 0) && (DimN < Modes) && (DimV.Len() == Modes)); return DimV[DimN];}
    const TVVec<TVal, TSizeMdTy>& GetFactor(const int& DimN) const {Assert((DimN >= 0) && (DimN < Modes) && (U.Len() == Modes)); return U[DimN];}
    int GetR() const {return R;}
    const TVec<TVal>& GetLambda() const {return Lambda;}
    // Norm of the Kruskal tensor via Hadamard products of factor Gramians.
    TVal GetNorm() const {
        // 5.2.5. in http://prod.sandia.gov/techlib/access-control.cgi/2006/067592.pdf
        TVal norm = 0.0;
        TVVec<TVal> HadGram(R, R);
        HadGram.PutAll(1.0);
        for (int ModeN = 0; ModeN < Modes; ModeN++) {
            // Temp accumulates the upper triangle of U[ModeN]' * U[ModeN].
            TVVec<TVal> Temp(R, R);
            Temp.PutAll(0.0);
            for (TSizeMdTy RowN = 0; RowN < DimV[ModeN]; RowN++) {
                for (int Col1N = 0; Col1N < R; Col1N++) {
                    for (int Col2N = Col1N; Col2N < R; Col2N++) {
                        TVal Prod = U[ModeN].At(RowN, Col1N) * U[ModeN].At(RowN, Col2N);
                        Temp.At(Col1N, Col2N) += Prod;
                    }
                }
            }
            for (int Col1N = 0; Col1N < R; Col1N++) {
                for (int Col2N = Col1N; Col2N < R; Col2N++) {
                    HadGram.At(Col1N, Col2N) *= Temp.At(Col1N, Col2N);
                }
            }
        }
        // Off-diagonal terms are counted twice (the Gramians are symmetric).
        for (int RowN = 0; RowN < R; RowN++) {
            for (int ColN = RowN; ColN < R; ColN++) {
                if (RowN != ColN) {
                    norm += 2 * HadGram.At(RowN, ColN) * Lambda[RowN] * Lambda[ColN];
                } else {
                    norm += HadGram.At(RowN, ColN) * Lambda[RowN] * Lambda[ColN];
                }
            }
        }
        return sqrt(norm);
    }
    // Internal invariant check: matching mode count, factor shapes and R.
    bool IsConsistent() const {
        if (DimV.Len() != Modes) return false;
        if (Modes != U.Len()) return false;
        if (Lambda.Len() != R) return false;
        for (int ModeN = 0; ModeN < Modes; ModeN++) {
            if (DimV[ModeN] != U[ModeN].GetXDim()) return false;
            if (Lambda.Len() != U[ModeN].GetYDim()) return false;
        }
        return true;
    }
    //pointer to sparse tensor, pointer to current CP, index of mode to update
    // One ALS step: recomputes factor matrix U[UpdateIdx], then renormalizes
    // its columns into Lambda.
    void CP_ALS_Update(const TSTensor<TVal, TSizeMdTy, TSizeNzTy>& X, const int& UpdateIdx, const int& nThreads = 1) {
        //printf("iter start, compute flat * khatri rao\n");
        // U[UpdateIdx] = (X_(UpdateIdx) * KhatriRao_{i != UpdateIdx}U_i) * pseudoinv(had_prod_{i != UpdateIdx} (U_i'U_i))
        U[UpdateIdx].PutAll(0);
        TSizeNzTy NNZ = X.GetNNZ();
        TTmStopWatch Sw;
        Sw.Start();
        // Parallelized over columns: each thread writes only its own column
        // ColN of U[UpdateIdx], so the (commented-out) critical section is
        // unnecessary.
        #pragma omp parallel for num_threads(nThreads)
        for (int ColN = 0; ColN < R; ColN++) {
            for (TSizeNzTy ElN = 0; ElN < NNZ; ElN++) {
                TVal Temp = X.GetValues().GetVal(ElN);
                for (int ModeN = 0; ModeN < Modes; ModeN++) {
                    if (ModeN == UpdateIdx) continue;
                    Temp *= U[ModeN].At(X.GetCoordinates().At(ElN, ModeN) , ColN);
                }
                //#pragma omp critical
                U[UpdateIdx].At(X.GetCoordinates().At(ElN, UpdateIdx), ColN) += Temp;
            }
        }
        Sw.Stop();
        printf("%f\n", Sw.GetSec());
        //printf("X_(UpdateIdx) * KhatriRao_{i != UpdateIdx}U_i) computed\n", UpdateIdx);
        // hadamard product of matrices U[i]'*U[i], i != UpdateIdx
        TVVec<TVal> HadGram(R, R);
        HadGram.PutAll(1.0);
        for (int ModeN = 0; ModeN < Modes; ModeN++) {
            if (ModeN == UpdateIdx) continue;
            TVVec<TVal> Temp(R, R);
            Temp.PutAll(0.0);
            for (TSizeMdTy RowN = 0; RowN < DimV[ModeN]; RowN++) {
                for (int Col1N = 0; Col1N < R; Col1N++) {
                    for (int Col2N = Col1N; Col2N < R; Col2N++) {
                        TVal Prod = U[ModeN].At(RowN, Col1N) * U[ModeN].At(RowN, Col2N);
                        Temp.At(Col1N, Col2N) += Prod;
                    }
                }
            }
            for (int Col1N = 0; Col1N < R; Col1N++) {
                for (int Col2N = Col1N; Col2N < R; Col2N++) {
                    HadGram.At(Col1N, Col2N) *= Temp.At(Col1N, Col2N);
                }
            }
        }
        // Mirror the upper triangle into the lower one (symmetric matrix).
        for (int Col1N = 0; Col1N < R; Col1N++) {
            for (int Col2N = 0; Col2N < Col1N; Col2N++) {
                HadGram.At(Col1N, Col2N) *= HadGram.At(Col2N, Col1N);
            }
        }
        //printf("Hadamard computed\n");
        // psuedoinverse of hadamard
        TFltVV HadGramInv;
        HadGramInv.Gen(R, R);
        TLinAlg::InverseSVD(HadGram, HadGramInv);
        //printf("Pseudo inverse computed\n");
        // multiply with A.U[UpdateIdx] * IHadGram (row-by-row, in place)
        TVec<TVal> Temp(R);
        for (TSizeMdTy RowN = 0; RowN < DimV[UpdateIdx]; RowN++) {
            Temp.PutAll(0.0);
            for (int ColN = 0; ColN < R; ColN++) {
                for (int k = 0; k < R; k++) {
                    Temp[ColN] += U[UpdateIdx].At(RowN, k) * HadGramInv.At(k, ColN);
                }
            }
            for (int ColN = 0; ColN < R; ColN++) {
                U[UpdateIdx].At(RowN, ColN) = Temp[ColN];
            }
        }
        //printf("Multiply completed\n");
        // norm A.U[UpdateIdx][:, ColN], set lambda[ColN] to norm and normalize A.U[UpdateIdx][:, ColN]
        for (int ColN = 0; ColN < R; ColN++) {
            double norm = 0.0;
            for (TSizeMdTy RowN = 0; RowN < DimV[UpdateIdx]; RowN++) {
                norm += U[UpdateIdx].At(RowN, ColN) * U[UpdateIdx].At(RowN, ColN);
            }
            Lambda[ColN] = sqrt(norm);
            for (TSizeMdTy RowN = 0; RowN < DimV[UpdateIdx]; RowN++) {
                U[UpdateIdx].At(RowN, ColN) /= Lambda[ColN];
            }
        }
        //printf("Normalization, lambda finished\n");
    };
    //pointer to sparse tensor, pointer to initial CP, stopping criterion (number of iterations, tolerance)
    // CP decomposition via alternating least squares. Each iteration updates
    // one mode (cycling through them); with Tol > 0 it stops early when the
    // relative residual change drops below Tol.
    void CP_ALS(const TSTensor<TVal, TSizeMdTy, TSizeNzTy>& X, const int& NumIter, const double& Tol = 0.0, const int& nThreads = 1) {
        //Check if each of this and X are consistent
        Assert(IsConsistent());
        Assert(X.IsConsistent());
        //Check if this and X are compatible (modes and dimensions)
        Assert(X.GetModes() == GetModes());
        for (int ModeN = 0; ModeN < X.GetModes(); ModeN++) {
            Assert(X.GetDim(ModeN) == GetDim(ModeN));
        }
        TVal OldRelRes = 1.0;
        TVal RelRes = 1.0;
        for (int IterN = 0; IterN < NumIter; IterN++) {
            // Update
            CP_ALS_Update(X, IterN % X.GetModes(), nThreads);
            if (Tol > 0.0) {
                // Tolerance check: ||X - A||^2 = ||X||^2 - 2<X,A> + ||A||^2.
                TVal normX = X.GetNorm();
                TVal normA = GetNorm();
                TVal innerXA = TTensorOp<TVal, TSizeMdTy, TSizeNzTy>::InnerProduct(X, *this);
                OldRelRes = RelRes;
                RelRes = sqrt(normX * normX - 2 * innerXA + normA * normA)/normX;
                printf("Iter: %d, res: %f, fit: %f\n", IterN, RelRes, 1.0-RelRes);
                if (IterN > 0) {
                    // NOTE(review): unqualified abs() on a floating value may
                    // resolve to the int overload here, truncating the
                    // difference to 0 and stopping too early — confirm which
                    // abs is picked up; fabs would be unambiguous.
                    if (abs(RelRes - OldRelRes) < Tol) {
                        break;
                    }
                }
            } else {
                printf("Iter: %d\n", IterN);
            }
        }
    };
};

// Free-standing operations combining tensor representations.
template <class TVal, class TSizeMdTy, class TSizeNzTy>
class TTensorOp {
public:
    // Inner product <X, A> of a sparse tensor and a Kruskal tensor: for each
    // nonzero of X, sums lambda-weighted products of the matching factor
    // entries across all R components.
    static TVal InnerProduct(const TSTensor<TVal, TSizeMdTy, TSizeNzTy>& X, const TKTensor<TVal, TSizeMdTy>& A) {
        TVal innerp = 0.0;
        TSizeNzTy NNZ = X.GetNNZ();
        int R = A.GetR();
        int Modes = A.GetModes();
        for (TSizeNzTy ElN = 0; ElN < NNZ; ElN++) {
            TVal sum = 0.0;
            for (int ColN = 0; ColN < R; ColN++) {
                TVal prod = A.GetLambda().GetVal(ColN);
                for (int ModeN = 0; ModeN < Modes; ModeN++) {
                    prod *= A.GetFactor(ModeN).At(X.GetCoordinates().At(ElN, ModeN), ColN);
                }
                sum += prod;
            }
            innerp += X.GetValues().GetVal(ElN) * sum;
        }
        return innerp;
    }
};

}

#endif
compare.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP AAA RRRR EEEEE % % C O O MM MM P P A A R R E % % C O O M M M PPPP AAAAA RRRR EEE % % C O O M M P A A R R E % % CCCC OOO M M P A A R R EEEEE % % % % % % MagickCore Image Comparison Methods % % % % Software Design % % Cristy % % December 2003 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/compare.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/statistic.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p a r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompareImages() compares one or more pixel channels of an image to a % reconstructed image and returns the difference image. % % The format of the CompareImages method is: % % Image *CompareImages(const Image *image,const Image *reconstruct_image, % const MetricType metric,double *distortion,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o metric: the metric. % % o distortion: the computed distortion between the images. % % o exception: return any errors or warnings in this structure. 
% */

/*
  GetImageChannels() returns the number of pixel channels of 'image' whose
  traits include UpdatePixelTrait, i.e. the channels the compare machinery
  actually examines.  Never returns 0 (falls back to 1) because callers use
  the result as a divisor when averaging per-channel distortions.
*/
static size_t GetImageChannels(const Image *image)
{
  register ssize_t
    i;

  size_t
    channels;

  channels=0;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits & UpdatePixelTrait) != 0)
      channels++;
  }
  /* guard against a zero divisor downstream */
  return(channels == 0 ? (size_t) 1 : channels);
}

MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *highlight_view,
    *image_view,
    *reconstruct_view;

  const char
    *artifact;

  double
    fuzz;

  Image
    *clone_image,
    *difference_image,
    *highlight_image;

  MagickBooleanType
    status;

  PixelInfo
    highlight,
    lowlight,
    masklight;

  RectangleInfo
    geometry;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Compute the requested metric first; bail out if it cannot be computed.
  */
  status=GetImageDistortion(image,reconstruct_image,metric,distortion,
    exception);
  if (status == MagickFalse)
    return((Image *) NULL);
  /*
    The difference image covers the union of both geometries; the source
    image is extended (sans read mask) to that size and made fully opaque.
  */
  columns=MagickMax(image->columns,reconstruct_image->columns);
  rows=MagickMax(image->rows,reconstruct_image->rows);
  SetGeometry(image,&geometry);
  geometry.width=columns;
  geometry.height=rows;
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageMask(clone_image,ReadPixelMask,(Image *) NULL,exception);
  difference_image=ExtentImage(clone_image,&geometry,exception);
  clone_image=DestroyImage(clone_image);
  if (difference_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel,exception);
  highlight_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (highlight_image == (Image *) NULL)
    {
      difference_image=DestroyImage(difference_image);
      return((Image *) NULL);
    }
  status=SetImageStorageClass(highlight_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      difference_image=DestroyImage(difference_image);
      highlight_image=DestroyImage(highlight_image);
      return((Image *) NULL);
    }
  (void) SetImageMask(highlight_image,ReadPixelMask,(Image *) NULL,exception);
  (void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel,exception);
  /*
    Default highlight/lowlight/masklight colors; each may be overridden by
    the corresponding "compare:*-color" image artifact.
  */
  (void) QueryColorCompliance("#f1001ecc",AllCompliance,&highlight,exception);
  artifact=GetImageArtifact(image,"compare:highlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&highlight,exception);
  (void) QueryColorCompliance("#ffffffcc",AllCompliance,&lowlight,exception);
  artifact=GetImageArtifact(image,"compare:lowlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&lowlight,exception);
  (void) QueryColorCompliance("#888888cc",AllCompliance,&masklight,exception);
  artifact=GetImageArtifact(image,"compare:masklight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&masklight,exception);
  /*
    Generate difference image.
  */
  status=MagickTrue;
  fuzz=GetFuzzyColorDistance(image,reconstruct_image);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  highlight_view=AcquireAuthenticCacheView(highlight_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,highlight_image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register Quantum
      *magick_restrict r;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) ||
        (r == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      MagickStatusType
        difference;

      register ssize_t
        i;

      /* pixels excluded by either read mask are painted with masklight */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          SetPixelViaPixelInfo(highlight_image,&masklight,r);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          r+=GetPixelChannels(highlight_image);
          continue;
        }
      difference=MagickFalse;
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* non-alpha channels are compared alpha-premultiplied */
        if (channel == AlphaPixelChannel)
          distance=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
        else
          distance=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
        /* a pixel differs when any channel's squared distance exceeds fuzz */
        if ((distance*distance) > fuzz)
          {
            difference=MagickTrue;
            break;
          }
      }
      if (difference == MagickFalse)
        SetPixelViaPixelInfo(highlight_image,&lowlight,r);
      else
        SetPixelViaPixelInfo(highlight_image,&highlight,r);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
      r+=GetPixelChannels(highlight_image);
    }
    sync=SyncCacheViewAuthenticPixels(highlight_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  highlight_view=DestroyCacheView(highlight_view);
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* merge the highlight overlay onto the extended source image */
  (void) CompositeImage(difference_image,highlight_image,image->compose,
    MagickTrue,0,0,exception);
  highlight_image=DestroyImage(highlight_image);
  if (status == MagickFalse)
    difference_image=DestroyImage(difference_image);
  return(difference_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e D i s t o r t i o n                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageDistortion() compares one or more pixel channels of an image to a
%  reconstructed image and returns the specified distortion metric.
%
%  The format of the GetImageDistortion method is:
%
%      MagickBooleanType GetImageDistortion(const Image *image,
%        const Image *reconstruct_image,const MetricType metric,
%        double *distortion,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o reconstruct_image: the reconstruct image.
%
%    o metric: the metric.
%
%    o distortion: the computed distortion between the images.
%
%    o exception: return any errors or warnings in this structure.
% */

/*
  AE metric: channel_distortion[i] counts channel samples once the pixel's
  accumulated squared difference exceeds the fuzz threshold;
  CompositePixelChannel counts differing pixels.  Results are summed into
  'distortion' (caller must zero it).
*/
static MagickBooleanType GetAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    fuzz;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Compute the absolute difference in pixels between two images.
  */
  status=MagickTrue;
  /* fuzz scales with the channel count because 'distance' accumulates the
     squared differences of all channels of the pixel */
  fuzz=(double) MagickMin(GetPixelChannels(image),
    GetPixelChannels(reconstruct_image))*
    GetFuzzyColorDistance(image,reconstruct_image);
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      j,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        distance,
        Sa;

      MagickBooleanType
        difference;

      register ssize_t
        i;

      difference=MagickFalse;
      distance=0.0;
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          pixel=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
        else
          pixel=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
        distance+=pixel*pixel;
        /* note: 'distance' is cumulative across channels, so every channel
           from the first threshold crossing onward is counted */
        if (distance > fuzz)
          {
            channel_distortion[i]++;
            difference=MagickTrue;
          }
      }
      if (difference != MagickFalse)
        channel_distortion[CompositePixelChannel]++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetAbsoluteDistortion)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
  Fuzz metric: mean of the QuantumScale-normalized squared channel
  differences over unmasked pixels; the composite channel is averaged over
  the update channels and square-rooted.
*/
static MagickBooleanType GetFuzzDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  register ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    /* NOTE(review): cast here lacks 'const' unlike the sibling metrics --
       harmless, but inconsistent */
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      register ssize_t
        i;

      /* skip pixels excluded by either read mask */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image,
            channel,q));
        else
          distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
            channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetFuzzDistortion)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* normalize by the number of unmasked pixels (safe for area == 0) */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]);
  return(status);
}

/*
  MAE metric: mean of the QuantumScale-normalized absolute channel
  differences over unmasked pixels.
*/
static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  register ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      register ssize_t
        i;

      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*fabs((double) p[i]-
            GetPixelChannel(reconstruct_image,channel,q));
        else
          distance=QuantumScale*fabs(Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q));
        channel_distortion[i]+=distance;
        channel_distortion[CompositePixelChannel]+=distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanAbsoluteError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  return(status);
}

/*
  MEPP metric: accumulates absolute channel differences directly into
  'distortion' and stores mean/normalized-mean/maximum error summaries on
  image->error.  Runs single-threaded.
*/
static MagickBooleanType GetMeanErrorPerPixel(Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  double
    area,
    maximum_error,
    mean_error;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  area=0.0;
  maximum_error=0.0;
  mean_error=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      register ssize_t
        i;

      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          distance=fabs((double) p[i]-
            GetPixelChannel(reconstruct_image,channel,q));
        else
          distance=fabs(Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q));
        distortion[i]+=distance;
        distortion[CompositePixelChannel]+=distance;
        mean_error+=distance*distance;
        if (distance > maximum_error)
          maximum_error=distance;
        /* NOTE(review): 'area' increments once per channel sample, not per
           pixel, so the averages below are per-sample despite the metric's
           "per pixel" name -- confirm this is intended */
        area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  image->error.mean_error_per_pixel=distortion[CompositePixelChannel]/area;
  image->error.normalized_mean_error=QuantumScale*QuantumScale*mean_error/area;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  return(status);
}

/*
  MSE metric: mean of the QuantumScale-normalized squared channel
  differences over unmasked pixels.
*/
static MagickBooleanType GetMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  register ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      register ssize_t
        i;

      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image,
            channel,q));
        else
          distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
            channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanSquaredError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=GetImageChannels(image);
  return(status);
}

/*
  NCC metric: two passes -- first counts the unmasked pixel area, then
  accumulates the mean-subtracted cross-products; finally each channel is
  divided by the product of the two standard deviations.
*/
static MagickBooleanType GetNormalizedCrossCorrelationDistortion(
  const Image *image,const Image *reconstruct_image,double *distortion,
  ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *image_view,
    *reconstruct_view;

  ChannelStatistics
    *image_statistics,
    *reconstruct_statistics;

  double
    area;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Normalize to account for variation due to lighting and exposure
    condition.
  */
  image_statistics=GetImageStatistics(image,exception);
  reconstruct_statistics=GetImageStatistics(reconstruct_image,exception);
  if ((image_statistics == (ChannelStatistics *) NULL) ||
      (reconstruct_statistics == (ChannelStatistics *) NULL))
    {
      if (image_statistics != (ChannelStatistics *) NULL)
        image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          image_statistics);
      if (reconstruct_statistics != (ChannelStatistics *) NULL)
        reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          reconstruct_statistics);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  for (i=0; i <= MaxPixelChannels; i++)
    distortion[i]=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  /* first pass: count the pixels not excluded by a read mask */
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  area=PerceptibleReciprocal(area);
  /* second pass: accumulate mean-subtracted cross-products, scaled by area */
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          {
            distortion[i]+=area*QuantumScale*(p[i]-
              image_statistics[channel].mean)*(GetPixelChannel(
              reconstruct_image,channel,q)-
              reconstruct_statistics[channel].mean);
          }
        else
          {
            distortion[i]+=area*QuantumScale*(Sa*p[i]-
              image_statistics[channel].mean)*(Da*GetPixelChannel(
              reconstruct_image,channel,q)-
              reconstruct_statistics[channel].mean);
          }
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SimilarityImageTag,progress++,rows);
        if (proceed == MagickFalse)
          {
            status=MagickFalse;
            break;
          }
      }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Divide by the standard deviation.
  */
  distortion[CompositePixelChannel]=0.0;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      gamma;

    PixelChannel channel = GetPixelChannelChannel(image,i);
    gamma=image_statistics[channel].standard_deviation*
      reconstruct_statistics[channel].standard_deviation;
    gamma=PerceptibleReciprocal(gamma);
    distortion[i]=QuantumRange*gamma*distortion[i];
    distortion[CompositePixelChannel]+=distortion[i]*distortion[i];
  }
  distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]/
    GetImageChannels(image));
  /*
    Free resources.
  */
  reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    reconstruct_statistics);
  image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    image_statistics);
  return(status);
}

/*
  PAE metric: maximum QuantumScale-normalized absolute channel difference
  over unmasked pixels (per channel, and overall in the composite slot).
*/
static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      j,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      register ssize_t
        i;

      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*fabs((double) p[i]-
            GetPixelChannel(reconstruct_image,channel,q));
        else
          distance=QuantumScale*fabs(Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q));
        if (distance > channel_distortion[i])
          channel_distortion[i]=distance;
        if (distance > channel_distortion[CompositePixelChannel])
          channel_distortion[CompositePixelChannel]=distance;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPeakAbsoluteError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      if (channel_distortion[j] > distortion[j])
        distortion[j]=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
  log10 of |x|, clamped away from zero so the result is always finite.
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon  (1.0e-11)

  if (fabs(x) < Log10Epsilon)
    return(log10(Log10Epsilon));
  return(log10(fabs(x)));
}

/*
  PSNR metric: derived from the mean squared error; a zero MSE channel maps
  to INFINITY.
*/
static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register ssize_t
    i;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,
    exception);
  for (i=0; i <= MaxPixelChannels; i++)
    if (fabs(distortion[i]) < MagickEpsilon)
      distortion[i]=INFINITY;
    else
      distortion[i]=20.0*MagickLog10(1.0/sqrt(distortion[i]));
  return(status);
}

/*
  PHASH metric: squared (or, with "phash:normalize", normalized) distance
  between the perceptual-hash image moments of the two images.
*/
static MagickBooleanType GetPerceptualHashDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  ChannelPerceptualHash
    *channel_phash,
    *reconstruct_phash;

  const char
    *artifact;

  MagickBooleanType
    normalize;

  ssize_t
    channel;

  /*
    Compute perceptual hash in the sRGB colorspace.
  */
  channel_phash=GetImagePerceptualHash(image,exception);
  if (channel_phash == (ChannelPerceptualHash *) NULL)
    return(MagickFalse);
  reconstruct_phash=GetImagePerceptualHash(reconstruct_image,exception);
  if (reconstruct_phash == (ChannelPerceptualHash *) NULL)
    {
      channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        channel_phash);
      return(MagickFalse);
    }
  artifact=GetImageArtifact(image,"phash:normalize");
  normalize=(artifact == (const char *) NULL) ||
    (IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
  for (channel=0; channel < MaxPixelChannels; channel++)
  {
    double
      difference;

    register ssize_t
      i;

    difference=0.0;
    for (i=0; i < MaximumNumberOfImageMoments; i++)
    {
      double
        alpha,
        beta;

      register ssize_t
        j;

      for (j=0; j < (ssize_t) channel_phash[0].number_colorspaces; j++)
      {
        alpha=channel_phash[channel].phash[j][i];
        beta=reconstruct_phash[channel].phash[j][i];
        /* NOTE(review): the normalized branch assigns (=) rather than
           accumulates (+=), so only the last moment/colorspace survives --
           confirm intent */
        if (normalize == MagickFalse)
          difference+=(beta-alpha)*(beta-alpha);
        else
          difference=sqrt((beta-alpha)*(beta-alpha)/
            channel_phash[0].number_channels);
      }
    }
    distortion[channel]+=difference;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPerceptualHashDistortion)
#endif
    distortion[CompositePixelChannel]+=difference;
  }
  /*
    Free resources.
  */
  reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
    reconstruct_phash);
  channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(channel_phash);
  return(MagickTrue);
}

/*
  RMSE metric: square root of the mean squared error, per channel.
*/
static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register ssize_t
    i;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,
    exception);
  for (i=0; i <= MaxPixelChannels; i++)
    distortion[i]=sqrt(distortion[i]);
  return(status);
}

static MagickBooleanType GetStructuralSimilarityDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
#define SSIMRadius  5.0
#define SSIMSigma  1.5
#define SSIMBlocksize  8
#define SSIMK1  0.01
#define SSIMK2  0.03
#define SSIML  1.0

  CacheView
    *image_view,
    *reconstruct_view;

  char
    geometry[MagickPathExtent];

  const char
    *artifact;

  double
    c1,
    c2,
    radius,
    sigma;

  KernelInfo
    *kernel_info;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Compute structural similarity index @
    https://en.wikipedia.org/wiki/Structural_similarity.
*/ radius=SSIMRadius; artifact=GetImageArtifact(image,"compare:ssim-radius"); if (artifact != (const char *) NULL) radius=StringToDouble(artifact,(char **) NULL); sigma=SSIMSigma; artifact=GetImageArtifact(image,"compare:ssim-sigma"); if (artifact != (const char *) NULL) sigma=StringToDouble(artifact,(char **) NULL); (void) FormatLocaleString(geometry,MagickPathExtent,"gaussian:%.20gx%.20g", radius,sigma); kernel_info=AcquireKernelInfo(geometry,exception); if (kernel_info == (KernelInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); c1=pow(SSIMK1*SSIML,2.0); artifact=GetImageArtifact(image,"compare:ssim-k1"); if (artifact != (const char *) NULL) c1=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0); c2=pow(SSIMK2*SSIML,2.0); artifact=GetImageArtifact(image,"compare:ssim-k2"); if (artifact != (const char *) NULL) c2=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0); status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,reconstruct_image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t i, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) kernel_info->width/2L),y- ((ssize_t) kernel_info->height/2L),columns+kernel_info->width, kernel_info->height,exception); q=GetCacheViewVirtualPixels(reconstruct_view,-((ssize_t) kernel_info->width/ 2L),y-((ssize_t) kernel_info->height/2L),columns+kernel_info->width, kernel_info->height,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; 
continue; } (void) memset(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double x_pixel_mu[MaxPixelChannels+1], x_pixel_sigma_squared[MaxPixelChannels+1], xy_sigma[MaxPixelChannels+1], y_pixel_mu[MaxPixelChannels+1], y_pixel_sigma_squared[MaxPixelChannels+1]; register const Quantum *magick_restrict reference, *magick_restrict target; register double *k; ssize_t v; (void) memset(x_pixel_mu,0,sizeof(x_pixel_mu)); (void) memset(x_pixel_sigma_squared,0, sizeof(x_pixel_sigma_squared)); (void) memset(xy_sigma,0,sizeof(xy_sigma)); (void) memset(x_pixel_sigma_squared,0, sizeof(y_pixel_sigma_squared)); (void) memset(y_pixel_mu,0,sizeof(y_pixel_mu)); (void) memset(y_pixel_sigma_squared,0, sizeof(y_pixel_sigma_squared)); k=kernel_info->values; reference=p; target=q; for (v=0; v < (ssize_t) kernel_info->height; v++) { register ssize_t u; for (u=0; u < (ssize_t) kernel_info->width; u++) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double x_pixel, y_pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits( reconstruct_image,channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; x_pixel=QuantumScale*reference[i]; x_pixel_mu[i]+=(*k)*x_pixel; x_pixel_sigma_squared[i]+=(*k)*x_pixel*x_pixel; y_pixel=QuantumScale* GetPixelChannel(reconstruct_image,channel,target); y_pixel_mu[i]+=(*k)*y_pixel; y_pixel_sigma_squared[i]+=(*k)*y_pixel*y_pixel; xy_sigma[i]+=(*k)*x_pixel*y_pixel; } k++; reference+=GetPixelChannels(image); target+=GetPixelChannels(reconstruct_image); } reference+=GetPixelChannels(image)*columns; target+=GetPixelChannels(reconstruct_image)*columns; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double ssim, x_pixel_mu_squared, x_pixel_sigmas_squared, xy_mu, xy_sigmas, y_pixel_mu_squared, 
y_pixel_sigmas_squared; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits( reconstruct_image,channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; x_pixel_mu_squared=x_pixel_mu[i]*x_pixel_mu[i]; y_pixel_mu_squared=y_pixel_mu[i]*y_pixel_mu[i]; xy_mu=x_pixel_mu[i]*y_pixel_mu[i]; xy_sigmas=xy_sigma[i]-xy_mu; x_pixel_sigmas_squared=x_pixel_sigma_squared[i]-x_pixel_mu_squared; y_pixel_sigmas_squared=y_pixel_sigma_squared[i]-y_pixel_mu_squared; ssim=((2.0*xy_mu+c1)*(2.0*xy_sigmas+c2))/ ((x_pixel_mu_squared+y_pixel_mu_squared+c1)* (x_pixel_sigmas_squared+y_pixel_sigmas_squared+c2)); channel_distortion[i]+=ssim; channel_distortion[CompositePixelChannel]+=ssim; } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetStructuralSimilarityDistortion) #endif for (i=0; i <= MaxPixelChannels; i++) distortion[i]+=channel_distortion[i]; } image_view=DestroyCacheView(image_view); reconstruct_view=DestroyCacheView(reconstruct_view); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0)) continue; distortion[i]/=((double) columns*rows); } distortion[CompositePixelChannel]/=((double) columns*rows); distortion[CompositePixelChannel]/=(double) GetImageChannels(image); kernel_info=DestroyKernelInfo(kernel_info); return(status); } static MagickBooleanType GetStructuralDisimilarityDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { MagickBooleanType status; register ssize_t i; status=GetStructuralSimilarityDistortion(image,reconstruct_image, 
distortion,exception); for (i=0; i <= MaxPixelChannels; i++) distortion[i]=(1.0-(distortion[i]))/2.0; return(status); } MagickExport MagickBooleanType GetImageDistortion(Image *image, const Image *reconstruct_image,const MetricType metric,double *distortion, ExceptionInfo *exception) { double *channel_distortion; MagickBooleanType status; size_t length; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); assert(distortion != (double *) NULL); *distortion=0.0; if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); /* Get image distortion. */ length=MaxPixelChannels+1; channel_distortion=(double *) AcquireQuantumMemory(length, sizeof(*channel_distortion)); if (channel_distortion == (double *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) memset(channel_distortion,0,length* sizeof(*channel_distortion)); switch (metric) { case AbsoluteErrorMetric: { status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion, exception); break; } case FuzzErrorMetric: { status=GetFuzzDistortion(image,reconstruct_image,channel_distortion, exception); break; } case MeanAbsoluteErrorMetric: { status=GetMeanAbsoluteDistortion(image,reconstruct_image, channel_distortion,exception); break; } case MeanErrorPerPixelErrorMetric: { status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion, exception); break; } case MeanSquaredErrorMetric: { status=GetMeanSquaredDistortion(image,reconstruct_image, channel_distortion,exception); break; } case NormalizedCrossCorrelationErrorMetric: default: { status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image, channel_distortion,exception); break; } case PeakAbsoluteErrorMetric: { 
status=GetPeakAbsoluteDistortion(image,reconstruct_image, channel_distortion,exception); break; } case PeakSignalToNoiseRatioErrorMetric: { status=GetPeakSignalToNoiseRatio(image,reconstruct_image, channel_distortion,exception); break; } case PerceptualHashErrorMetric: { status=GetPerceptualHashDistortion(image,reconstruct_image, channel_distortion,exception); break; } case RootMeanSquaredErrorMetric: { status=GetRootMeanSquaredDistortion(image,reconstruct_image, channel_distortion,exception); break; } case StructuralSimilarityErrorMetric: { status=GetStructuralSimilarityDistortion(image,reconstruct_image, channel_distortion,exception); break; } case StructuralDissimilarityErrorMetric: { status=GetStructuralDisimilarityDistortion(image,reconstruct_image, channel_distortion,exception); break; } } *distortion=channel_distortion[CompositePixelChannel]; channel_distortion=(double *) RelinquishMagickMemory(channel_distortion); (void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(), *distortion); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e D i s t o r t i o n s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageDistortions() compares the pixel channels of an image to a % reconstructed image and returns the specified distortion metric for each % channel. % % The format of the GetImageDistortions method is: % % double *GetImageDistortions(const Image *image, % const Image *reconstruct_image,const MetricType metric, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o metric: the metric. % % o exception: return any errors or warnings in this structure. 
%
*/
/*
  GetImageDistortions(): as GetImageDistortion(), but returns the full
  per-channel distortion vector (caller frees) instead of the composite
  value; returns NULL on failure.
*/
MagickExport double *GetImageDistortions(Image *image,
  const Image *reconstruct_image,const MetricType metric,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Get image distortion.
  */
  length=MaxPixelChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_distortion,0,length*
    sizeof(*channel_distortion));
  status=MagickTrue;
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      /*
        Bug fix: this case previously called GetRootMeanSquaredDistortion(),
        a copy/paste of the case below; GetImageDistortion() dispatches this
        metric to GetPerceptualHashDistortion(), so do the same here.
      */
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralSimilarityErrorMetric:
    {
      status=GetStructuralSimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralDissimilarityErrorMetric:
    {
      status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  if (status == MagickFalse)
    {
      channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
      return((double *) NULL);
    }
  return(channel_distortion);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  I s I m a g e s E q u a l                                                  %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImagesEqual() compare the pixels of two images and returns immediately
%  if any pixel is not identical.
%
%  The format of the IsImagesEqual method is:
%
%      MagickBooleanType IsImagesEqual(const Image *image,
%        const Image *reconstruct_image,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o reconstruct_image: the reconstruct image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/*
  IsImagesEqual(): pixel-exact comparison; bails out at the first channel
  whose absolute difference reaches MagickEpsilon.
*/
MagickExport MagickBooleanType IsImagesEqual(const Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  /* Iterate over the union of the two geometries; virtual pixels fill
     any area one image lacks, so a size mismatch reads as inequality. */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q));
        if (distance >= MagickEpsilon)
          break;
      }
      /* Early loop exit means a differing channel was found. */
      if (i < (ssize_t) GetPixelChannels(image))
        break;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    if (x < (ssize_t) columns)
      break;
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Finishing all rows (y == rows) means no difference was found. */
  return(y < (ssize_t) rows ?
    MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  S e t I m a g e C o l o r M e t r i c                                      %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageColorMetric() measures the difference between colors at each pixel
%  location of two images.  A value other than 0 means the colors match
%  exactly.  Otherwise an error measure is computed by summing over all
%  pixels in an image the distance squared in RGB space between each image
%  pixel and its corresponding pixel in the reconstruct image.  The error
%  measure is assigned to these image members:
%
%    o mean_error_per_pixel:  The mean error for any single pixel in
%      the image.
%
%    o normalized_mean_error:  The normalized mean quantization error for
%      any single pixel in the image.  This distance measure is normalized to
%      a range between 0 and 1.  It is independent of the range of red, green,
%      and blue values in the image.
%
%    o normalized_maximum_error:  The normalized maximum quantization
%      error for any single pixel in the image.  This distance measure is
%      normalized to a range between 0 and 1.  It is independent of the range
%      of red, green, and blue values in your image.
%
%  A small normalized mean square error, accessed as
%  image->normalized_mean_error, suggests the images are very similar in
%  spatial layout and color.
%
%  The format of the SetImageColorMetric method is:
%
%      MagickBooleanType SetImageColorMetric(Image *image,
%        const Image *reconstruct_image,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o reconstruct_image: the reconstruct image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/*
  SetImageColorMetric(): accumulates per-channel absolute differences and
  stores the mean/normalized-mean/maximum error members on `image`.
  Returns MagickTrue only when the mean error per pixel is exactly zero.
*/
MagickExport MagickBooleanType SetImageColorMetric(Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  area=0.0;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q));
        if (distance >= MagickEpsilon)
          {
            mean_error_per_pixel+=distance;
            mean_error+=distance*distance;
            if (distance > maximum_error)
              maximum_error=distance;
          }
        /* area counts every compared channel sample, matched or not. */
        area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* NOTE(review): if no channel is comparable, area stays 0.0 and these
     divisions yield NaN/inf -- confirm callers guarantee a comparable
     channel exists. */
  image->error.mean_error_per_pixel=(double) (mean_error_per_pixel/area);
  image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale*
    mean_error/area);
  image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error);
  status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S i m i l a r i t y I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SimilarityImage() compares the reference image of the image and returns the
%  best match offset.  In addition, it returns a similarity image such that an
%  exact match location is completely white and if none of the pixels match,
%  black, otherwise some gray level in-between.
%
%  The format of the SimilarityImageImage method is:
%
%      Image *SimilarityImage(const Image *image,const Image *reference,
%        const MetricType metric,const double similarity_threshold,
%        RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o reference: find an area of the image that closely resembles this image.
%
%    o metric: the metric.
%
%    o similarity_threshold: minimum distortion for (sub)image match.
%
%    o offset: the best match offset of the reference image within the image.
%
%    o similarity: the computed similarity between the images.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  GetSimilarityMetric(): crop the candidate window at (x_offset,y_offset)
  and measure its distortion against the reference; returns 0.0 on error.
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  double
    distortion;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  RectangleInfo
    geometry;

  SetGeometry(reference,&geometry);
  geometry.x=x_offset;
  geometry.y=y_offset;
  similarity_image=CropImage(image,&geometry,exception);
  if (similarity_image == (Image *) NULL)
    return(0.0);
  distortion=0.0;
  status=GetImageDistortion(similarity_image,reference,metric,&distortion,
    exception);
  similarity_image=DestroyImage(similarity_image);
  if (status == MagickFalse)
    return(0.0);
  return(distortion);
}

MagickExport Image *SimilarityImage(const Image *image,const Image *reference,
  const MetricType metric,const double similarity_threshold,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *similarity_view;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(offset != (RectangleInfo *) NULL);
  SetGeometry(reference,offset);
  *similarity_metric=MagickMaximumValue;
  /* One result pixel per possible top-left placement of the reference. */
  similarity_image=CloneImage(image,image->columns-reference->columns+1,
    image->rows-reference->rows+1,MagickTrue,exception);
  if (similarity_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(similarity_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      similarity_image=DestroyImage(similarity_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel,
    exception);
  /*
    Measure similarity of reference image against image.
  */
  status=MagickTrue;
  progress=0;
  similarity_view=AcquireAuthenticCacheView(similarity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    shared(progress,status,similarity_metric) \
    magick_number_threads(image,image,image->rows-reference->rows+1,1)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
  {
    double
      similarity;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp flush(similarity_metric)
#endif
    /* Stop scanning once the best match is already good enough. */
    if (*similarity_metric <= similarity_threshold)
      continue;
    q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns,
      1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
    {
      register ssize_t
        i;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp flush(similarity_metric)
#endif
      if (*similarity_metric <= similarity_threshold)
        break;
      similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_SimilarityImage)
#endif
      /* NCC reports correlation (1 is best); convert to a distortion so
         smaller is always better below. */
      if ((metric == NormalizedCrossCorrelationErrorMetric) ||
          (metric == UndefinedErrorMetric))
        similarity=1.0-similarity;
      if (similarity < *similarity_metric)
        {
          offset->x=x;
          offset->y=y;
          *similarity_metric=similarity;
        }
      if (metric == PerceptualHashErrorMetric)
        similarity=MagickMin(0.01*similarity,1.0);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait similarity_traits=GetPixelChannelTraits(similarity_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (similarity_traits == UndefinedPixelTrait) ||
            ((similarity_traits & UpdatePixelTrait) == 0))
          continue;
        /* Map distortion to intensity: perfect match -> white. */
        SetPixelChannel(similarity_image,channel,ClampToQuantum(QuantumRange-
          QuantumRange*similarity),q);
      }
      q+=GetPixelChannels(similarity_image);
    }
    if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SimilarityImage)
#endif
        proceed=SetImageProgress(image,SimilarityImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  similarity_view=DestroyCacheView(similarity_view);
  if (status == MagickFalse)
    similarity_image=DestroyImage(similarity_image);
  return(similarity_image);
}
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 16; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
enhance.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE N N H H AAA N N CCCC EEEEE % % E NN N H H A A NN N C E % % EEE N N N HHHHH AAAAA N N N C EEE % % E N NN H H A A N NN C E % % EEEEE N N H H A A N N CCCC EEEEE % % % % % % MagickCore Image Enhancement Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/accelerate-private.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/cache.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite-private.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/histogram.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/opencl.h" #include "magick/opencl-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/resample.h" #include "magick/resample-private.h" #include "magick/resource_.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/token.h" #include "magick/xml-tree.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoGammaImage() extract the 'mean' from the image and adjust the image % to try make set its gamma appropriatally. % % The format of the AutoGammaImage method is: % % MagickBooleanType AutoGammaImage(Image *image) % MagickBooleanType AutoGammaImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: The image to auto-level % % o channel: The channels to auto-level. 
If the special 'SyncChannels' % flag is set all given channels is adjusted in the same way using the % mean average of those channels. % */ MagickExport MagickBooleanType AutoGammaImage(Image *image) { return(AutoGammaImageChannel(image,DefaultChannels)); } MagickExport MagickBooleanType AutoGammaImageChannel(Image *image, const ChannelType channel) { double gamma, mean, logmean, sans; MagickStatusType status; logmean=log(0.5); if ((channel & SyncChannels) != 0) { /* Apply gamma correction equally accross all given channels */ (void) GetImageChannelMean(image,channel,&mean,&sans,&image->exception); gamma=log(mean*QuantumScale)/logmean; return(LevelImageChannel(image,channel,0.0,(double) QuantumRange,gamma)); } /* Auto-gamma each channel separateally */ status = MagickTrue; if ((channel & RedChannel) != 0) { (void) GetImageChannelMean(image,RedChannel,&mean,&sans, &image->exception); gamma=log(mean*QuantumScale)/logmean; status&=LevelImageChannel(image,RedChannel,0.0,(double) QuantumRange, gamma); } if ((channel & GreenChannel) != 0) { (void) GetImageChannelMean(image,GreenChannel,&mean,&sans, &image->exception); gamma=log(mean*QuantumScale)/logmean; status&=LevelImageChannel(image,GreenChannel,0.0,(double) QuantumRange, gamma); } if ((channel & BlueChannel) != 0) { (void) GetImageChannelMean(image,BlueChannel,&mean,&sans, &image->exception); gamma=log(mean*QuantumScale)/logmean; status&=LevelImageChannel(image,BlueChannel,0.0,(double) QuantumRange, gamma); } if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) { (void) GetImageChannelMean(image,OpacityChannel,&mean,&sans, &image->exception); gamma=log(mean*QuantumScale)/logmean; status&=LevelImageChannel(image,OpacityChannel,0.0,(double) QuantumRange, gamma); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { (void) GetImageChannelMean(image,IndexChannel,&mean,&sans, &image->exception); gamma=log(mean*QuantumScale)/logmean; 
status&=LevelImageChannel(image,IndexChannel,0.0,(double) QuantumRange, gamma); } return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o L e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoLevelImage() adjusts the levels of a particular image channel by % scaling the minimum and maximum values to the full quantum range. % % The format of the LevelImage method is: % % MagickBooleanType AutoLevelImage(Image *image) % MagickBooleanType AutoLevelImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: The image to auto-level % % o channel: The channels to auto-level. If the special 'SyncChannels' % flag is set the min/max/mean value of all given channels is used for % all given channels, to all channels in the same way. % */ MagickExport MagickBooleanType AutoLevelImage(Image *image) { return(AutoLevelImageChannel(image,DefaultChannels)); } MagickExport MagickBooleanType AutoLevelImageChannel(Image *image, const ChannelType channel) { /* Convenience method for a min/max histogram stretch. */ return(MinMaxStretchImage(image,channel,0.0,0.0)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B r i g h t n e s s C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BrightnessContrastImage() changes the brightness and/or contrast of an % image. It converts the brightness and contrast parameters into slope and % intercept and calls a polynomical function to apply to the image. 
%
%  The format of the BrightnessContrastImage method is:
%
%      MagickBooleanType BrightnessContrastImage(Image *image,
%        const double brightness,const double contrast)
%      MagickBooleanType BrightnessContrastImageChannel(Image *image,
%        const ChannelType channel,const double brightness,
%        const double contrast)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o brightness: the brightness percent (-100 .. 100).
%
%    o contrast: the contrast percent (-100 .. 100).
%
*/
MagickExport MagickBooleanType BrightnessContrastImage(Image *image,
  const double brightness,const double contrast)
{
  /*
    Adjust brightness/contrast on all the default channels.
  */
  return(BrightnessContrastImageChannel(image,DefaultChannels,brightness,
    contrast));
}

MagickExport MagickBooleanType BrightnessContrastImageChannel(Image *image,
  const ChannelType channel,const double brightness,const double contrast)
{
#define BrightnessContastImageTag  "BrightnessContast/Image"

  double
    coefficients[2],
    intercept,
    slope;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Map the contrast percentage onto a slope: 0% gives slope 1.0 (identity),
    positive percentages steepen the transfer curve, negative percentages
    flatten it; negative slopes are clamped to zero.
  */
  slope=tan((double) (MagickPI*(contrast/100.0+1.0)/4.0));
  if (slope < 0.0)
    slope=0.0;
  /*
    Choose the intercept so the brightness offset is applied about the
    midpoint of the normalized intensity range.
  */
  intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope);
  /*
    Apply the linear transfer function v' = slope*v + intercept as a
    first-degree polynomial over the selected channels.
  */
  coefficients[0]=slope;
  coefficients[1]=intercept;
  return(FunctionImageChannel(image,channel,PolynomialFunction,2,coefficients,
    &image->exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o l o r D e c i s i o n L i s t I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ColorDecisionListImage() accepts a lightweight Color Correction Collection
%  (CCC) file which solely contains one or more color corrections and applies
%  the correction to the image.  Here is a sample CCC file:
%
%    <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
%    <ColorCorrection id="cc03345">
%          <SOPNode>
%               <Slope> 0.9 1.2 0.5 </Slope>
%               <Offset> 0.4 -0.5 0.6 </Offset>
%               <Power> 1.0 0.8 1.5 </Power>
%          </SOPNode>
%          <SATNode>
%               <Saturation> 0.85 </Saturation>
%          </SATNode>
%    </ColorCorrection>
%    </ColorCorrectionCollection>
%
%  which includes the slope, offset, and power for each of the RGB channels
%  as well as the saturation.
%
%  The format of the ColorDecisionListImage method is:
%
%      MagickBooleanType ColorDecisionListImage(Image *image,
%        const char *color_correction_collection)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o color_correction_collection: the color correction collection in XML.
% */ MagickExport MagickBooleanType ColorDecisionListImage(Image *image, const char *color_correction_collection) { #define ColorDecisionListCorrectImageTag "ColorDecisionList/Image" typedef struct _Correction { double slope, offset, power; } Correction; typedef struct _ColorCorrection { Correction red, green, blue; double saturation; } ColorCorrection; CacheView *image_view; char token[MaxTextExtent]; ColorCorrection color_correction; const char *content, *p; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; PixelPacket *cdl_map; ssize_t i; ssize_t y; XMLTreeInfo *cc, *ccc, *sat, *sop; /* Allocate and initialize cdl maps. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (color_correction_collection == (const char *) NULL) return(MagickFalse); exception=(&image->exception); ccc=NewXMLTree((const char *) color_correction_collection,&image->exception); if (ccc == (XMLTreeInfo *) NULL) return(MagickFalse); cc=GetXMLTreeChild(ccc,"ColorCorrection"); if (cc == (XMLTreeInfo *) NULL) { ccc=DestroyXMLTree(ccc); return(MagickFalse); } color_correction.red.slope=1.0; color_correction.red.offset=0.0; color_correction.red.power=1.0; color_correction.green.slope=1.0; color_correction.green.offset=0.0; color_correction.green.power=1.0; color_correction.blue.slope=1.0; color_correction.blue.offset=0.0; color_correction.blue.power=1.0; color_correction.saturation=0.0; sop=GetXMLTreeChild(cc,"SOPNode"); if (sop != (XMLTreeInfo *) NULL) { XMLTreeInfo *offset, *power, *slope; slope=GetXMLTreeChild(sop,"Slope"); if (slope != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(slope); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MaxTextExtent,token); switch (i) { case 0: { 
color_correction.red.slope=StringToDouble(token,(char **) NULL); break; } case 1: { color_correction.green.slope=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.slope=StringToDouble(token, (char **) NULL); break; } } } } offset=GetXMLTreeChild(sop,"Offset"); if (offset != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(offset); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MaxTextExtent,token); switch (i) { case 0: { color_correction.red.offset=StringToDouble(token, (char **) NULL); break; } case 1: { color_correction.green.offset=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.offset=StringToDouble(token, (char **) NULL); break; } } } } power=GetXMLTreeChild(sop,"Power"); if (power != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(power); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MaxTextExtent,token); switch (i) { case 0: { color_correction.red.power=StringToDouble(token,(char **) NULL); break; } case 1: { color_correction.green.power=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.power=StringToDouble(token, (char **) NULL); break; } } } } } sat=GetXMLTreeChild(cc,"SATNode"); if (sat != (XMLTreeInfo *) NULL) { XMLTreeInfo *saturation; saturation=GetXMLTreeChild(sat,"Saturation"); if (saturation != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(saturation); p=(const char *) content; (void) GetNextToken(p,&p,MaxTextExtent,token); color_correction.saturation=StringToDouble(token,(char **) NULL); } } ccc=DestroyXMLTree(ccc); if (image->debug != MagickFalse) { (void) LogMagickEvent(TransformEvent,GetMagickModule(), " Color Correction Collection:"); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.slope: 
%g",color_correction.red.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.offset: %g",color_correction.red.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.power: %g",color_correction.red.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.slope: %g",color_correction.green.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.offset: %g",color_correction.green.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.power: %g",color_correction.green.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.slope: %g",color_correction.blue.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.offset: %g",color_correction.blue.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.power: %g",color_correction.blue.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.saturation: %g",color_correction.saturation); } cdl_map=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map)); if (cdl_map == (PixelPacket *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); for (i=0; i <= (ssize_t) MaxMap; i++) { cdl_map[i].red=ClampToQuantum((MagickRealType) ScaleMapToQuantum(( MagickRealType) (MaxMap*(pow(color_correction.red.slope*i/MaxMap+ color_correction.red.offset,color_correction.red.power))))); cdl_map[i].green=ClampToQuantum((MagickRealType) ScaleMapToQuantum(( MagickRealType) (MaxMap*(pow(color_correction.green.slope*i/MaxMap+ color_correction.green.offset,color_correction.green.power))))); cdl_map[i].blue=ClampToQuantum((MagickRealType) ScaleMapToQuantum(( MagickRealType) (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+ color_correction.blue.offset,color_correction.blue.power))))); } if (image->storage_class == 
PseudoClass) { /* Apply transfer function to colormap. */ for (i=0; i < (ssize_t) image->colors; i++) { double luma; luma=0.212656*image->colormap[i].red+0.715158*image->colormap[i].green+ 0.072186*image->colormap[i].blue; image->colormap[i].red=ClampToQuantum(luma+color_correction.saturation* cdl_map[ScaleQuantumToMap(image->colormap[i].red)].red-luma); image->colormap[i].green=ClampToQuantum(luma+ color_correction.saturation*cdl_map[ScaleQuantumToMap( image->colormap[i].green)].green-luma); image->colormap[i].blue=ClampToQuantum(luma+color_correction.saturation* cdl_map[ScaleQuantumToMap(image->colormap[i].blue)].blue-luma); } } /* Apply transfer function to image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double luma; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { luma=0.212656*GetPixelRed(q)+0.715158*GetPixelGreen(q)+ 0.072186*GetPixelBlue(q); SetPixelRed(q,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelRed(q))].red-luma))); SetPixelGreen(q,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelGreen(q))].green-luma))); SetPixelBlue(q,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelBlue(q))].blue-luma))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; 
proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag, progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); cdl_map=(PixelPacket *) RelinquishMagickMemory(cdl_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l u t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClutImage() replaces each color value in the given image, by using it as an % index to lookup a replacement color value in a Color Look UP Table in the % form of an image. The values are extracted along a diagonal of the CLUT % image so either a horizontal or vertial gradient image can be used. % % Typically this is used to either re-color a gray-scale image according to a % color gradient in the CLUT image, or to perform a freeform histogram % (level) adjustment according to the (typically gray-scale) gradient in the % CLUT image. % % When the 'channel' mask includes the matte/alpha transparency channel but % one image has no such channel it is assumed that that image is a simple % gray-scale image that will effect the alpha channel values, either for % gray-scale coloring (with transparent or semi-transparent colors), or % a histogram adjustment of existing alpha channel values. If both images % have matte channels, direct and normal indexing is applied, which is rarely % used. % % The format of the ClutImage method is: % % MagickBooleanType ClutImage(Image *image,Image *clut_image) % MagickBooleanType ClutImageChannel(Image *image, % const ChannelType channel,Image *clut_image) % % A description of each parameter follows: % % o image: the image, which is replaced by indexed CLUT values % % o clut_image: the color lookup table image for replacement color values. % % o channel: the channel. 
%
*/
MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image)
{
  /* Apply the CLUT to all the default channels. */
  return(ClutImageChannel(image,DefaultChannels,clut_image));
}

MagickExport MagickBooleanType ClutImageChannel(Image *image,
  const ChannelType channel,const Image *clut_image)
{
#define ClutImageTag  "Clut/Image"

  CacheView
    *clut_view,
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    *clut_map;

  ssize_t
    i;

  ssize_t
    adjust,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clut_image != (Image *) NULL);
  assert(clut_image->signature == MagickCoreSignature);
  exception=(&image->exception);
  /* CLUT results vary per pixel, so the image must be DirectClass. */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* A colored CLUT applied to a gray image produces color: go to sRGB. */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsGrayColorspace(clut_image->colorspace) == MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace);
  clut_map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*clut_map));
  if (clut_map == (MagickPixelPacket *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Clut image.  Sample the CLUT along its diagonal into a MaxMap+1 entry
    lookup table; 'adjust' appears to compensate for interpolation at the
    last row/column edge (0 for integer interpolation, 1 otherwise) --
    NOTE(review): semantics inferred from its use below, confirm against
    InterpolateMagickPixelPacket.
  */
  status=MagickTrue;
  progress=0;
  adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1);
  clut_view=AcquireAuthenticCacheView(clut_image,exception);
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    GetMagickPixelPacket(clut_image,clut_map+i);
    status=InterpolateMagickPixelPacket(clut_image,clut_view,
      UndefinedInterpolatePixel,(double) i*(clut_image->columns-adjust)/MaxMap,
      (double) i*(clut_image->rows-adjust)/MaxMap,clut_map+i,exception);
    if (status == MagickFalse)
      break;
  }
  clut_view=DestroyCacheView(clut_view);
  /* Replace each selected channel value via the lookup table. */
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    IndexPacket
      *magick_restrict indexes;

    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    GetMagickPixelPacket(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Snapshot the source pixel before any channel is overwritten. */
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampPixelRed(clut_map+
          ScaleQuantumToMap(GetPixelRed(q))));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampPixelGreen(clut_map+
          ScaleQuantumToMap(GetPixelGreen(q))));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampPixelBlue(clut_map+
          ScaleQuantumToMap(GetPixelBlue(q))));
      if ((channel & OpacityChannel) != 0)
        {
          /*
            Three alpha cases: CLUT has no matte (use CLUT intensity as the
            new alpha), image has no matte (index the CLUT's opacity by the
            pixel's intensity), or both have matte (direct indexing).
          */
          if (clut_image->matte == MagickFalse)
            SetPixelAlpha(q,MagickPixelIntensityToQuantum(clut_map+
              ScaleQuantumToMap((Quantum) GetPixelAlpha(q))));
          else
            if (image->matte == MagickFalse)
              SetPixelOpacity(q,ClampPixelOpacity(clut_map+
                ScaleQuantumToMap((Quantum) MagickPixelIntensity(&pixel))));
            else
              SetPixelOpacity(q,ClampPixelOpacity(
                clut_map+ScaleQuantumToMap(GetPixelOpacity(q))));
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampToQuantum((clut_map+(ssize_t)
          GetPixelIndex(indexes+x))->index));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ClutImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  clut_map=(MagickPixelPacket *) RelinquishMagickMemory(clut_map);
  /* Enable the matte channel when the CLUT carried transparency. */
  if ((clut_image->matte != MagickFalse) && ((channel & OpacityChannel) != 0))
    (void) SetImageAlphaChannel(image,ActivateAlphaChannel);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o n t r a s t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ContrastImage() enhances the intensity differences between the lighter and
%  darker elements of the image.  Set sharpen to a MagickTrue to increase the
%  image contrast otherwise the contrast is reduced.
%
%  The format of the ContrastImage method is:
%
%      MagickBooleanType ContrastImage(Image *image,
%        const MagickBooleanType sharpen)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o sharpen: Increase or decrease image contrast.
%
*/

/*
  Apply a sinusoidal contrast nudge to one RGB triplet in HSB space: the
  brightness moves toward (sign=+1) or away from (sign=-1) the extremes
  while hue and saturation are preserved.
*/
static void Contrast(const int sign,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    brightness,
    hue,
    saturation;

  /*
    Enhance contrast: dark color become darker, light color become lighter.
  */
  assert(red != (Quantum *) NULL);
  assert(green != (Quantum *) NULL);
  assert(blue != (Quantum *) NULL);
  hue=0.0;
  saturation=0.0;
  brightness=0.0;
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  /* Sine-shaped adjustment centered on mid-brightness 0.5. */
  brightness+=0.5*sign*(0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0)-
    brightness);
  if (brightness > 1.0)
    brightness=1.0;
  else
    if (brightness < 0.0)
      brightness=0.0;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}

MagickExport MagickBooleanType ContrastImage(Image *image,
  const MagickBooleanType sharpen)
{
#define ContrastImageTag  "Contrast/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  int
    sign;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  sign=sharpen != MagickFalse ? 1 : -1;
  if (image->storage_class == PseudoClass)
    {
      /*
        Contrast enhance colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
        Contrast(sign,&image->colormap[i].red,&image->colormap[i].green,
          &image->colormap[i].blue);
    }
  /*
    Contrast enhance image.
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Try the accelerated (OpenCL) path first; fall through on failure. */
  status=AccelerateContrastImage(image,sharpen,&image->exception);
  if (status != MagickFalse)
    return status;
#endif
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      blue,
      green,
      red;

    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      red=GetPixelRed(q);
      green=GetPixelGreen(q);
      blue=GetPixelBlue(q);
      Contrast(sign,&red,&green,&blue);
      SetPixelRed(q,red);
      SetPixelGreen(q,green);
      SetPixelBlue(q,blue);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ContrastImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o n t r a s t S t r e t c h I m a g e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ContrastStretchImage() is a simple image enhancement technique that attempts
%  to improve the contrast in an image by `stretching' the range of intensity
%  values it contains to span a desired range of values.  It differs from the
%  more sophisticated histogram equalization in that it can only apply a
%  linear scaling function to the image pixel values.
As a result the
%  `enhancement' is less harsh.
%
%  The format of the ContrastStretchImage method is:
%
%      MagickBooleanType ContrastStretchImage(Image *image,
%        const char *levels)
%      MagickBooleanType ContrastStretchImageChannel(Image *image,
%        const size_t channel,const double black_point,
%        const double white_point)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o black_point: the black point.
%
%    o white_point: the white point.
%
%    o levels: Specify the levels where the black and white points have the
%      range of 0 to number-of-pixels (e.g. 1%, 10x90%, etc.).
%
*/
MagickExport MagickBooleanType ContrastStretchImage(Image *image,
  const char *levels)
{
  double
    black_point,
    white_point;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Parse levels.  "black" alone means stretch symmetrically:
    white_point defaults to total-pixels minus black_point.
  */
  if (levels == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(levels,&geometry_info);
  black_point=geometry_info.rho;
  white_point=(double) image->columns*image->rows;
  if ((flags & SigmaValue) != 0)
    white_point=geometry_info.sigma;
  if ((flags & PercentValue) != 0)
    {
      /*
        NOTE(review): percent values scale by QuantumRange here while the
        non-percent defaults are in pixel counts -- looks inconsistent;
        confirm against the histogram thresholds used below.
      */
      black_point*=(double) QuantumRange/100.0;
      white_point*=(double) QuantumRange/100.0;
    }
  if ((flags & SigmaValue) == 0)
    white_point=(double) image->columns*image->rows-black_point;
  status=ContrastStretchImageChannel(image,DefaultChannels,black_point,
    white_point);
  return(status);
}

MagickExport MagickBooleanType ContrastStretchImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point)
{
#define MaxRange(color)  ((MagickRealType) ScaleQuantumToMap((Quantum) (color)))
#define ContrastStretchImageTag  "ContrastStretch/Image"

  CacheView
    *image_view;

  double
    intensity;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    black,
    *histogram,
    white;

  QuantumPixelPacket
    *stretch_map;

  ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate histogram and stretch map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  exception=(&image->exception);
#if defined(MAGICKCORE_OPENCL_SUPPORT) && 0
  /* Call OpenCL version (deliberately disabled by the "&& 0" guard). */
  status=AccelerateContrastStretchImageChannel(image,channel,black_point,
    white_point,&image->exception);
  if (status != MagickFalse)
    return status;
#endif
  histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*histogram));
  stretch_map=(QuantumPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*stretch_map));
  if ((histogram == (MagickPixelPacket *) NULL) ||
      (stretch_map == (QuantumPixelPacket *) NULL))
    {
      /* Release whichever of the two buffers did get allocated. */
      if (stretch_map != (QuantumPixelPacket *) NULL)
        stretch_map=(QuantumPixelPacket *) RelinquishMagickMemory(stretch_map);
      if (histogram != (MagickPixelPacket *) NULL)
        histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.  This pass is sequential; the same cache view is reused
    by the parallel stretch pass further below.
  */
  if (SetImageGray(image,exception) != MagickFalse)
    (void) SetImageColorspace(image,GRAYColorspace);
  status=MagickTrue;
  (void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram));
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const PixelPacket
      *magick_restrict p;

    IndexPacket
      *magick_restrict indexes;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if ((channel & SyncChannels) != 0)
      /* SyncChannels: one intensity histogram shared by all channels. */
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        Quantum
          intensity;

        intensity=ClampToQuantum(GetPixelIntensity(image,p));
        histogram[ScaleQuantumToMap(intensity)].red++;
        histogram[ScaleQuantumToMap(intensity)].green++;
        histogram[ScaleQuantumToMap(intensity)].blue++;
        histogram[ScaleQuantumToMap(intensity)].index++;
        p++;
      }
    else
      /* Otherwise a separate histogram per requested channel. */
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        if ((channel & RedChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelRed(p))].red++;
        if ((channel & GreenChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++;
        if ((channel & BlueChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++;
        if ((channel & OpacityChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++;
        if (((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++;
        p++;
      }
  }
  /*
    Find the histogram boundaries by locating the black/white levels:
    accumulate counts upward until black_point pixels are passed, and
    downward (note: the reverse scans stop before bin 0) until
    total-white_point pixels are passed.
  */
  black.red=0.0;
  white.red=MaxRange(QuantumRange);
  if ((channel & RedChannel) != 0)
    {
      intensity=0.0;
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        intensity+=histogram[i].red;
        if (intensity > black_point)
          break;
      }
      black.red=(MagickRealType) i;
      intensity=0.0;
      for (i=(ssize_t) MaxMap; i != 0; i--)
      {
        intensity+=histogram[i].red;
        if (intensity > ((double) image->columns*image->rows-white_point))
          break;
      }
      white.red=(MagickRealType) i;
    }
  black.green=0.0;
  white.green=MaxRange(QuantumRange);
  if ((channel & GreenChannel) != 0)
    {
      intensity=0.0;
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        intensity+=histogram[i].green;
        if (intensity > black_point)
          break;
      }
      black.green=(MagickRealType) i;
      intensity=0.0;
      for (i=(ssize_t) MaxMap; i != 0; i--)
      {
        intensity+=histogram[i].green;
        if (intensity > ((double) image->columns*image->rows-white_point))
          break;
      }
      white.green=(MagickRealType) i;
    }
  black.blue=0.0;
  white.blue=MaxRange(QuantumRange);
  if ((channel & BlueChannel) != 0)
    {
      intensity=0.0;
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        intensity+=histogram[i].blue;
        if (intensity > black_point)
          break;
      }
      black.blue=(MagickRealType) i;
      intensity=0.0;
      for (i=(ssize_t) MaxMap; i != 0; i--)
      {
        intensity+=histogram[i].blue;
        if (intensity > ((double) image->columns*image->rows-white_point))
          break;
      }
      white.blue=(MagickRealType) i;
    }
  black.opacity=0.0;
  white.opacity=MaxRange(QuantumRange);
  if ((channel & OpacityChannel) != 0)
    {
      intensity=0.0;
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        intensity+=histogram[i].opacity;
        if (intensity > black_point)
          break;
      }
      black.opacity=(MagickRealType) i;
      intensity=0.0;
      for (i=(ssize_t) MaxMap; i != 0; i--)
      {
        intensity+=histogram[i].opacity;
        if (intensity > ((double) image->columns*image->rows-white_point))
          break;
      }
      white.opacity=(MagickRealType) i;
    }
  black.index=0.0;
  white.index=MaxRange(QuantumRange);
  if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
    {
      intensity=0.0;
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        intensity+=histogram[i].index;
        if (intensity > black_point)
          break;
      }
      black.index=(MagickRealType) i;
      intensity=0.0;
      for (i=(ssize_t) MaxMap; i != 0; i--)
      {
        intensity+=histogram[i].index;
        if (intensity > ((double) image->columns*image->rows-white_point))
          break;
      }
      white.index=(MagickRealType) i;
    }
  histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
  /*
    Stretch the histogram to create the stretched image mapping: values at
    or below the black level map to 0, at or above the white level to
    QuantumRange, and values between are scaled linearly.  A channel whose
    black and white levels coincide is left untouched (its map stays 0 but
    is never applied below).
  */
  (void) memset(stretch_map,0,(MaxMap+1)*sizeof(*stretch_map));
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    if ((channel & RedChannel) != 0)
      {
        if (i < (ssize_t) black.red)
          stretch_map[i].red=(Quantum) 0;
        else
          if (i > (ssize_t) white.red)
            stretch_map[i].red=QuantumRange;
          else
            if (black.red != white.red)
              stretch_map[i].red=ScaleMapToQuantum((MagickRealType) (MaxMap*
                (i-black.red)/(white.red-black.red)));
      }
    if ((channel & GreenChannel) != 0)
      {
        if (i < (ssize_t) black.green)
          stretch_map[i].green=0;
        else
          if (i > (ssize_t) white.green)
            stretch_map[i].green=QuantumRange;
          else
            if (black.green != white.green)
              stretch_map[i].green=ScaleMapToQuantum((MagickRealType) (MaxMap*
                (i-black.green)/(white.green-black.green)));
      }
    if ((channel & BlueChannel) != 0)
      {
        if (i < (ssize_t) black.blue)
          stretch_map[i].blue=0;
        else
          if (i > (ssize_t) white.blue)
            stretch_map[i].blue= QuantumRange;
          else
            if (black.blue != white.blue)
              stretch_map[i].blue=ScaleMapToQuantum((MagickRealType) (MaxMap*
                (i-black.blue)/(white.blue-black.blue)));
      }
    if ((channel & OpacityChannel) != 0)
      {
        if (i < (ssize_t) black.opacity)
          stretch_map[i].opacity=0;
        else
          if (i > (ssize_t) white.opacity)
            stretch_map[i].opacity=QuantumRange;
          else
            if (black.opacity != white.opacity)
              stretch_map[i].opacity=ScaleMapToQuantum((MagickRealType)
                (MaxMap*(i-black.opacity)/(white.opacity-black.opacity)));
      }
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace))
      {
        if (i < (ssize_t) black.index)
          stretch_map[i].index=0;
        else
          if (i > (ssize_t) white.index)
            stretch_map[i].index=QuantumRange;
          else
            if (black.index != white.index)
              stretch_map[i].index=ScaleMapToQuantum((MagickRealType) (MaxMap*
                (i-black.index)/(white.index-black.index)));
      }
  }
  /*
    Stretch the image.  Touching opacity or the CMYK index forces
    DirectClass.  NOTE(review): this assigns storage_class directly rather
    than calling SetImageStorageClass -- confirm this is intentional.
  */
  if (((channel & OpacityChannel) != 0) || (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace)))
    image->storage_class=DirectClass;
  if (image->storage_class == PseudoClass)
    {
      /*
        Stretch colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((channel & RedChannel) != 0)
          {
            if (black.red != white.red)
              image->colormap[i].red=stretch_map[
                ScaleQuantumToMap(image->colormap[i].red)].red;
          }
        if ((channel & GreenChannel) != 0)
          {
            if (black.green != white.green)
              image->colormap[i].green=stretch_map[
                ScaleQuantumToMap(image->colormap[i].green)].green;
          }
        if ((channel & BlueChannel) != 0)
          {
            if (black.blue != white.blue)
              image->colormap[i].blue=stretch_map[
                ScaleQuantumToMap(image->colormap[i].blue)].blue;
          }
        if ((channel & OpacityChannel) != 0)
          {
            if (black.opacity != white.opacity)
              image->colormap[i].opacity=stretch_map[
                ScaleQuantumToMap(image->colormap[i].opacity)].opacity;
          }
      }
    }
  /*
    Stretch image.
  */
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      *magick_restrict indexes;

    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        {
          if (black.red != white.red)
            SetPixelRed(q,stretch_map[
              ScaleQuantumToMap(GetPixelRed(q))].red);
        }
      if ((channel & GreenChannel) != 0)
        {
          if (black.green != white.green)
            SetPixelGreen(q,stretch_map[
              ScaleQuantumToMap(GetPixelGreen(q))].green);
        }
      if ((channel & BlueChannel) != 0)
        {
          if (black.blue != white.blue)
            SetPixelBlue(q,stretch_map[
              ScaleQuantumToMap(GetPixelBlue(q))].blue);
        }
      if ((channel & OpacityChannel) != 0)
        {
          if (black.opacity != white.opacity)
            SetPixelOpacity(q,stretch_map[
              ScaleQuantumToMap(GetPixelOpacity(q))].opacity);
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          if (black.index != white.index)
            SetPixelIndex(indexes+x,stretch_map[
              ScaleQuantumToMap(GetPixelIndex(indexes+x))].index);
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ContrastStretchImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  stretch_map=(QuantumPixelPacket *) RelinquishMagickMemory(stretch_map);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
E n h a n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EnhanceImage() applies a digital filter that improves the quality of a % noisy image. % % The format of the EnhanceImage method is: % % Image *EnhanceImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception) { #define EnhancePixel(weight) \ mean=QuantumScale*((double) GetPixelRed(r)+pixel.red)/2.0; \ distance=QuantumScale*((double) GetPixelRed(r)-pixel.red); \ distance_squared=(4.0+mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelGreen(r)+pixel.green)/2.0; \ distance=QuantumScale*((double) GetPixelGreen(r)-pixel.green); \ distance_squared+=(7.0-mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelBlue(r)+pixel.blue)/2.0; \ distance=QuantumScale*((double) GetPixelBlue(r)-pixel.blue); \ distance_squared+=(5.0-mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelOpacity(r)+pixel.opacity)/2.0; \ distance=QuantumScale*((double) GetPixelOpacity(r)-pixel.opacity); \ distance_squared+=(5.0-mean)*distance*distance; \ if (distance_squared < 0.069) \ { \ aggregate.red+=(weight)*GetPixelRed(r); \ aggregate.green+=(weight)*GetPixelGreen(r); \ aggregate.blue+=(weight)*GetPixelBlue(r); \ aggregate.opacity+=(weight)*GetPixelOpacity(r); \ total_weight+=(weight); \ } \ r++; #define EnhanceImageTag "Enhance/Image" CacheView *enhance_view, *image_view; Image *enhance_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; ssize_t y; /* Initialize enhanced image attributes. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((image->columns < 5) || (image->rows < 5)) return((Image *) NULL); enhance_image=CloneImage(image,0,0,MagickTrue,exception); if (enhance_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(enhance_image,DirectClass) == MagickFalse) { InheritException(exception,&enhance_image->exception); enhance_image=DestroyImage(enhance_image); return((Image *) NULL); } /* Enhance image. */ status=MagickTrue; progress=0; (void) memset(&zero,0,sizeof(zero)); image_view=AcquireAuthenticCacheView(image,exception); enhance_view=AcquireAuthenticCacheView(enhance_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,enhance_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const PixelPacket *magick_restrict p; PixelPacket *magick_restrict q; ssize_t x; /* Read another scan line. */ if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception); q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double distance, distance_squared, mean, total_weight; MagickPixelPacket aggregate; PixelPacket pixel; const PixelPacket *magick_restrict r; /* Compute weighted average of target pixel color components. 
*/ aggregate=zero; total_weight=0.0; r=p+2*(image->columns+4)+2; pixel=(*r); r=p; EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0); EnhancePixel(8.0); EnhancePixel(5.0); r=p+(image->columns+4); EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0); EnhancePixel(20.0); EnhancePixel(8.0); r=p+2*(image->columns+4); EnhancePixel(10.0); EnhancePixel(40.0); EnhancePixel(80.0); EnhancePixel(40.0); EnhancePixel(10.0); r=p+3*(image->columns+4); EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0); EnhancePixel(20.0); EnhancePixel(8.0); r=p+4*(image->columns+4); EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0); EnhancePixel(8.0); EnhancePixel(5.0); if (total_weight > MagickEpsilon) { SetPixelRed(q,(aggregate.red+(total_weight/2)-1)/total_weight); SetPixelGreen(q,(aggregate.green+(total_weight/2)-1)/total_weight); SetPixelBlue(q,(aggregate.blue+(total_weight/2)-1)/total_weight); SetPixelOpacity(q,(aggregate.opacity+(total_weight/2)-1)/ total_weight); } p++; q++; } if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,EnhanceImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } enhance_view=DestroyCacheView(enhance_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) enhance_image=DestroyImage(enhance_image); return(enhance_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E q u a l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EqualizeImage() applies a histogram equalization to the image. 
%
%  The format of the EqualizeImage method is:
%
%      MagickBooleanType EqualizeImage(Image *image)
%      MagickBooleanType EqualizeImageChannel(Image *image,
%        const ChannelType channel)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
*/

MagickExport MagickBooleanType EqualizeImage(Image *image)
{
  return(EqualizeImageChannel(image,DefaultChannels));
}

MagickExport MagickBooleanType EqualizeImageChannel(Image *image,
  const ChannelType channel)
{
#define EqualizeImageTag  "Equalize/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    black,
    *histogram,
    intensity,
    *map,
    white;

  QuantumPixelPacket
    *equalize_map;

  ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  exception=(&image->exception);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Call OpenCL version; fall through to the CPU path only when it declines.
  */
  status=AccelerateEqualizeImage(image,channel,&image->exception);
  if (status != MagickFalse)
    return status;
#endif
  /*
    Allocate and initialize histogram arrays.
  */
  equalize_map=(QuantumPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*equalize_map));
  histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*histogram));
  map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*map));
  if ((equalize_map == (QuantumPixelPacket *) NULL) ||
      (histogram == (MagickPixelPacket *) NULL) ||
      (map == (MagickPixelPacket *) NULL))
    {
      /*
        Release whichever buffers were successfully acquired before throwing.
      */
      if (map != (MagickPixelPacket *) NULL)
        map=(MagickPixelPacket *) RelinquishMagickMemory(map);
      if (histogram != (MagickPixelPacket *) NULL)
        histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
      if (equalize_map != (QuantumPixelPacket *) NULL)
        equalize_map=(QuantumPixelPacket *) RelinquishMagickMemory(
          equalize_map);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  (void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const IndexPacket
      *magick_restrict indexes;

    const PixelPacket
      *magick_restrict p;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    if ((channel & SyncChannels) != 0)
      /*
        Sync mode: a single intensity histogram, kept in the red bins.
      */
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        MagickRealType intensity=GetPixelIntensity(image,p);
        histogram[ScaleQuantumToMap(ClampToQuantum(intensity))].red++;
        p++;
      }
    else
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        if ((channel & RedChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelRed(p))].red++;
        if ((channel & GreenChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++;
        if ((channel & BlueChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++;
        if ((channel & OpacityChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++;
        if (((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++;
        p++;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Integrate the histogram to get the equalization map.
  */
  (void) memset(&intensity,0,sizeof(intensity));
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    if ((channel & SyncChannels) != 0)
      {
        intensity.red+=histogram[i].red;
        map[i]=intensity;
        continue;
      }
    if ((channel & RedChannel) != 0)
      intensity.red+=histogram[i].red;
    if ((channel & GreenChannel) != 0)
      intensity.green+=histogram[i].green;
    if ((channel & BlueChannel) != 0)
      intensity.blue+=histogram[i].blue;
    if ((channel & OpacityChannel) != 0)
      intensity.opacity+=histogram[i].opacity;
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace))
      intensity.index+=histogram[i].index;
    map[i]=intensity;
  }
  black=map[0];
  white=map[(int) MaxMap];
  (void) memset(equalize_map,0,(MaxMap+1)*sizeof(*equalize_map));
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    if ((channel & SyncChannels) != 0)
      {
        /*
          Sync mode uses only the red entry of the map for all channels.
        */
        if (white.red != black.red)
          equalize_map[i].red=ScaleMapToQuantum((MagickRealType) ((MaxMap*
            (map[i].red-black.red))/(white.red-black.red)));
        continue;
      }
    if (((channel & RedChannel) != 0) && (white.red != black.red))
      equalize_map[i].red=ScaleMapToQuantum((MagickRealType) ((MaxMap*
        (map[i].red-black.red))/(white.red-black.red)));
    if (((channel & GreenChannel) != 0) && (white.green != black.green))
      equalize_map[i].green=ScaleMapToQuantum((MagickRealType) ((MaxMap*
        (map[i].green-black.green))/(white.green-black.green)));
    if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
      equalize_map[i].blue=ScaleMapToQuantum((MagickRealType) ((MaxMap*
        (map[i].blue-black.blue))/(white.blue-black.blue)));
    if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity))
      equalize_map[i].opacity=ScaleMapToQuantum((MagickRealType) ((MaxMap*
        (map[i].opacity-black.opacity))/(white.opacity-black.opacity)));
    if ((((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace)) &&
        (white.index != black.index))
      equalize_map[i].index=ScaleMapToQuantum((MagickRealType) ((MaxMap*
        (map[i].index-black.index))/(white.index-black.index)));
  }
  histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
  map=(MagickPixelPacket *) RelinquishMagickMemory(map);
  if (image->storage_class == PseudoClass)
    {
      /*
        Equalize colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((channel & SyncChannels) != 0)
          {
            /*
              Sync mode: apply the shared (red) map to every component.
            */
            if (white.red != black.red)
              {
                image->colormap[i].red=equalize_map[
                  ScaleQuantumToMap(image->colormap[i].red)].red;
                image->colormap[i].green=equalize_map[
                  ScaleQuantumToMap(image->colormap[i].green)].red;
                image->colormap[i].blue=equalize_map[
                  ScaleQuantumToMap(image->colormap[i].blue)].red;
                image->colormap[i].opacity=equalize_map[
                  ScaleQuantumToMap(image->colormap[i].opacity)].red;
              }
            continue;
          }
        if (((channel & RedChannel) != 0) && (white.red != black.red))
          image->colormap[i].red=equalize_map[
            ScaleQuantumToMap(image->colormap[i].red)].red;
        if (((channel & GreenChannel) != 0) && (white.green != black.green))
          image->colormap[i].green=equalize_map[
            ScaleQuantumToMap(image->colormap[i].green)].green;
        if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
          image->colormap[i].blue=equalize_map[
            ScaleQuantumToMap(image->colormap[i].blue)].blue;
        if (((channel & OpacityChannel) != 0) &&
            (white.opacity != black.opacity))
          image->colormap[i].opacity=equalize_map[
            ScaleQuantumToMap(image->colormap[i].opacity)].opacity;
      }
    }
  /*
    Equalize image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      *magick_restrict indexes;

    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & SyncChannels) != 0)
        {
          /*
            Sync mode: apply the shared (red) map to every component.
          */
          if (white.red != black.red)
            {
              SetPixelRed(q,equalize_map[
                ScaleQuantumToMap(GetPixelRed(q))].red);
              SetPixelGreen(q,equalize_map[
                ScaleQuantumToMap(GetPixelGreen(q))].red);
              SetPixelBlue(q,equalize_map[
                ScaleQuantumToMap(GetPixelBlue(q))].red);
              SetPixelOpacity(q,equalize_map[
                ScaleQuantumToMap(GetPixelOpacity(q))].red);
              if (image->colorspace == CMYKColorspace)
                SetPixelIndex(indexes+x,equalize_map[
                  ScaleQuantumToMap(GetPixelIndex(indexes+x))].red);
            }
          q++;
          continue;
        }
      if (((channel & RedChannel) != 0) && (white.red != black.red))
        SetPixelRed(q,equalize_map[
          ScaleQuantumToMap(GetPixelRed(q))].red);
      if (((channel & GreenChannel) != 0) && (white.green != black.green))
        SetPixelGreen(q,equalize_map[
          ScaleQuantumToMap(GetPixelGreen(q))].green);
      if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
        SetPixelBlue(q,equalize_map[
          ScaleQuantumToMap(GetPixelBlue(q))].blue);
      if (((channel & OpacityChannel) != 0) &&
          (white.opacity != black.opacity))
        SetPixelOpacity(q,equalize_map[
          ScaleQuantumToMap(GetPixelOpacity(q))].opacity);
      if ((((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace)) &&
          (white.index != black.index))
        SetPixelIndex(indexes+x,equalize_map[
          ScaleQuantumToMap(GetPixelIndex(indexes+x))].index);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,EqualizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  equalize_map=(QuantumPixelPacket *) RelinquishMagickMemory(equalize_map);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G a m m a I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GammaImage() gamma-corrects a particular image channel.  The same
%  image viewed on different devices will have perceptual differences in the
%  way the image's intensities are represented on the screen.  Specify
%  individual gamma levels for the red, green, and blue channels, or adjust
%  all three with the gamma parameter.  Values typically range from 0.8 to
%  2.3.
%
%  You can also reduce the influence of a particular channel with a gamma
%  value of 0.
%
%  The format of the GammaImage method is:
%
%      MagickBooleanType GammaImage(Image *image,const char *level)
%      MagickBooleanType GammaImageChannel(Image *image,
%        const ChannelType channel,const double gamma)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o level: the image gamma as a string (e.g. 1.6,1.2,1.0).
%
%    o gamma: the image gamma.
%
*/

/*
  Raise value to the given power; negative values pass through unchanged so
  HDRI out-of-range pixels are not fed to pow().
*/
static inline double gamma_pow(const double value,const double gamma)
{
  return(value < 0.0 ? value : pow(value,gamma));
}

MagickExport MagickBooleanType GammaImage(Image *image,const char *level)
{
  GeometryInfo
    geometry_info;

  MagickPixelPacket
    gamma;

  MagickStatusType
    flags,
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (level == (char *) NULL)
    return(MagickFalse);
  /*
    Parse "rho,sigma,xi" as red,green,blue gammas; missing values default to
    the red (first) gamma.
  */
  flags=ParseGeometry(level,&geometry_info);
  gamma.red=geometry_info.rho;
  gamma.green=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    gamma.green=gamma.red;
  gamma.blue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    gamma.blue=gamma.red;
  if ((gamma.red == 1.0) && (gamma.green == 1.0) && (gamma.blue == 1.0))
    return(MagickTrue);
  if ((gamma.red == gamma.green) && (gamma.green == gamma.blue))
    status=GammaImageChannel(image,(ChannelType) (RedChannel | GreenChannel |
      BlueChannel),(double) gamma.red);
  else
    {
      status=GammaImageChannel(image,RedChannel,(double) gamma.red);
      status&=GammaImageChannel(image,GreenChannel,(double) gamma.green);
      status&=GammaImageChannel(image,BlueChannel,(double) gamma.blue);
    }
  return(status != 0 ? MagickTrue : MagickFalse);
}

MagickExport MagickBooleanType GammaImageChannel(Image *image,
  const ChannelType channel,const double gamma)
{
#define GammaImageTag  "Gamma/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    *gamma_map;

  ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize gamma maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  exception=(&image->exception);
  if (gamma == 1.0)
    return(MagickTrue);
  gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map));
  if (gamma_map == (Quantum *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map));
  if (gamma != 0.0)
    for (i=0; i <= (ssize_t) MaxMap; i++)
      gamma_map[i]=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
        MagickRealType) (MaxMap*pow((double) i/MaxMap,
        PerceptibleReciprocal(gamma)))));
  if (image->storage_class == PseudoClass)
    {
      /*
        Gamma-correct colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
#if !defined(MAGICKCORE_HDRI_SUPPORT)
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=gamma_map[ScaleQuantumToMap(
            image->colormap[i].red)];
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=gamma_map[ScaleQuantumToMap(
            image->colormap[i].green)];
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=gamma_map[ScaleQuantumToMap(
            image->colormap[i].blue)];
        if ((channel & OpacityChannel) != 0)
          {
            if (image->matte == MagickFalse)
              image->colormap[i].opacity=gamma_map[ScaleQuantumToMap(
                image->colormap[i].opacity)];
            else
              /*
                With a matte channel, opacity is inverted (alpha) before the
                lookup and inverted back afterwards.
              */
              image->colormap[i].opacity=QuantumRange-gamma_map[
                ScaleQuantumToMap((Quantum) (QuantumRange-
                image->colormap[i].opacity))];
          }
#else
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=QuantumRange*gamma_pow(QuantumScale*
            image->colormap[i].red,PerceptibleReciprocal(gamma));
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=QuantumRange*gamma_pow(QuantumScale*
            image->colormap[i].green,PerceptibleReciprocal(gamma));
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=QuantumRange*gamma_pow(QuantumScale*
            image->colormap[i].blue,PerceptibleReciprocal(gamma));
        if ((channel & OpacityChannel) != 0)
          {
            if (image->matte == MagickFalse)
              image->colormap[i].opacity=QuantumRange*gamma_pow(QuantumScale*
                image->colormap[i].opacity,PerceptibleReciprocal(gamma));
            else
              image->colormap[i].opacity=QuantumRange-QuantumRange*gamma_pow(
                QuantumScale*(QuantumRange-image->colormap[i].opacity),1.0/
                gamma);
          }
#endif
      }
    }
  /*
    Gamma-correct image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      *magick_restrict indexes;

    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
#if !defined(MAGICKCORE_HDRI_SUPPORT)
      if ((channel & SyncChannels) != 0)
        {
          SetPixelRed(q,gamma_map[ScaleQuantumToMap(GetPixelRed(q))]);
          SetPixelGreen(q,gamma_map[ScaleQuantumToMap(GetPixelGreen(q))]);
          SetPixelBlue(q,gamma_map[ScaleQuantumToMap(GetPixelBlue(q))]);
        }
      else
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,gamma_map[ScaleQuantumToMap(GetPixelRed(q))]);
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,gamma_map[ScaleQuantumToMap(GetPixelGreen(q))]);
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,gamma_map[ScaleQuantumToMap(GetPixelBlue(q))]);
          if ((channel & OpacityChannel) != 0)
            {
              if (image->matte == MagickFalse)
                SetPixelOpacity(q,gamma_map[ScaleQuantumToMap(
                  GetPixelOpacity(q))]);
              else
                SetPixelAlpha(q,gamma_map[ScaleQuantumToMap((Quantum)
                  GetPixelAlpha(q))]);
            }
        }
#else
      if ((channel & SyncChannels) != 0)
        {
          SetPixelRed(q,QuantumRange*gamma_pow(QuantumScale*GetPixelRed(q),
            PerceptibleReciprocal(gamma)));
          SetPixelGreen(q,QuantumRange*gamma_pow(QuantumScale*GetPixelGreen(q),
            PerceptibleReciprocal(gamma)));
          SetPixelBlue(q,QuantumRange*gamma_pow(QuantumScale*GetPixelBlue(q),
            PerceptibleReciprocal(gamma)));
        }
      else
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,QuantumRange*gamma_pow(QuantumScale*GetPixelRed(q),
              PerceptibleReciprocal(gamma)));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,QuantumRange*gamma_pow(QuantumScale*
              GetPixelGreen(q),PerceptibleReciprocal(gamma)));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,QuantumRange*gamma_pow(QuantumScale*GetPixelBlue(q),
              PerceptibleReciprocal(gamma)));
          if ((channel & OpacityChannel) != 0)
            {
              if (image->matte == MagickFalse)
                SetPixelOpacity(q,QuantumRange*gamma_pow(QuantumScale*
                  GetPixelOpacity(q),PerceptibleReciprocal(gamma)));
              else
                SetPixelAlpha(q,QuantumRange*gamma_pow(QuantumScale*
                  GetPixelAlpha(q),PerceptibleReciprocal(gamma)));
            }
        }
#endif
      q++;
    }
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace))
      for (x=0; x < (ssize_t) image->columns; x++)
        SetPixelIndex(indexes+x,gamma_map[ScaleQuantumToMap(
          GetPixelIndex(indexes+x))]);
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,GammaImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map);
  if (image->gamma != 0.0)
    image->gamma*=gamma;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G r a y s c a l e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GrayscaleImage() converts the colors in the reference image to gray.
%
%  The format of the GrayscaleImage method is:
%
%      MagickBooleanType GrayscaleImage(Image *image,
%        const PixelIntensityMethod method)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
*/
MagickExport MagickBooleanType GrayscaleImage(Image *image,
  const PixelIntensityMethod method)
{
#define GrayscaleImageTag  "Grayscale/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /*
        Colormapped images are synced and promoted to DirectClass before the
        per-pixel writes below.
      */
      if (SyncImage(image) == MagickFalse)
        return(MagickFalse);
      if (SetImageStorageClass(image,DirectClass) == MagickFalse)
        return(MagickFalse);
    }
  /*
    Grayscale image: try the OpenCL path first; fall through to the CPU loop
    if it declines.
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  if (AccelerateGrayscaleImage(image,method,&image->exception) != MagickFalse)
    {
      image->intensity=method;
      image->type=GrayscaleType;
      if ((method == Rec601LuminancePixelIntensityMethod) ||
          (method == Rec709LuminancePixelIntensityMethod))
        return(SetImageColorspace(image,LinearGRAYColorspace));
      return(SetImageColorspace(image,GRAYColorspace));
    }
#endif
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        blue,
        green,
        intensity,
        red;

      red=(MagickRealType) q->red;
      green=(MagickRealType) q->green;
      blue=(MagickRealType) q->blue;
      intensity=0.0;
      switch (method)
      {
        case AveragePixelIntensityMethod:
        {
          intensity=(red+green+blue)/3.0;
          break;
        }
        case BrightnessPixelIntensityMethod:
        {
          intensity=MagickMax(MagickMax(red,green),blue);
          break;
        }
        case LightnessPixelIntensityMethod:
        {
          intensity=(MagickMin(MagickMin(red,green),blue)+
            MagickMax(MagickMax(red,green),blue))/2.0;
          break;
        }
        case MSPixelIntensityMethod:
        {
          /* mean of squares, rescaled back into the quantum range */
          intensity=(MagickRealType) (((double) red*red+green*green+
            blue*blue)/(3.0*QuantumRange));
          break;
        }
        case Rec601LumaPixelIntensityMethod:
        {
          /* luma expects gamma-encoded components */
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec601LuminancePixelIntensityMethod:
        {
          /* luminance expects linear components */
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec709LumaPixelIntensityMethod:
        default:
        {
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case Rec709LuminancePixelIntensityMethod:
        {
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case RMSPixelIntensityMethod:
        {
          intensity=(MagickRealType) (sqrt((double) red*red+green*green+
            blue*blue)/sqrt(3.0));
          break;
        }
      }
      SetPixelGray(q,ClampToQuantum(intensity));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,GrayscaleImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  image->intensity=method;
  image->type=GrayscaleType;
  if ((method == Rec601LuminancePixelIntensityMethod) ||
      (method == Rec709LuminancePixelIntensityMethod))
    return(SetImageColorspace(image,LinearGRAYColorspace));
  return(SetImageColorspace(image,GRAYColorspace));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     H a l d C l u t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  HaldClutImage() applies a Hald color lookup table to the image.  A Hald
%  color lookup table is a 3-dimensional color cube mapped to 2 dimensions.
%  Create it with the HALD coder.  You can apply any color transformation to
%  the Hald image and then use this method to apply the transform to the
%  image.
%
%  The format of the HaldClutImage method is:
%
%      MagickBooleanType HaldClutImage(Image *image,const Image *hald_image)
%      MagickBooleanType HaldClutImageChannel(Image *image,
%        const ChannelType channel,const Image *hald_image)
%
%  A description of each parameter follows:
%
%    o image: the image, which is replaced by indexed CLUT values
%
%    o hald_image: the color lookup table image for replacement color values.
%
%    o channel: the channel.
% */ MagickExport MagickBooleanType HaldClutImage(Image *image, const Image *hald_image) { return(HaldClutImageChannel(image,DefaultChannels,hald_image)); } MagickExport MagickBooleanType HaldClutImageChannel(Image *image, const ChannelType channel,const Image *hald_image) { #define HaldClutImageTag "Clut/Image" typedef struct _HaldInfo { MagickRealType x, y, z; } HaldInfo; CacheView *hald_view, *image_view; double width; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; size_t cube_size, length, level; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(hald_image != (Image *) NULL); assert(hald_image->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); /* Hald clut image. 
*/ status=MagickTrue; progress=0; length=(size_t) MagickMin((MagickRealType) hald_image->columns, (MagickRealType) hald_image->rows); for (level=2; (level*level*level) < length; level++) ; level*=level; cube_size=level*level; width=(double) hald_image->columns; GetMagickPixelPacket(hald_image,&zero); exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); hald_view=AcquireAuthenticCacheView(hald_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,hald_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double area, offset; HaldInfo point; MagickPixelPacket pixel, pixel1, pixel2, pixel3, pixel4; IndexPacket *magick_restrict indexes; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(hald_view); pixel=zero; pixel1=zero; pixel2=zero; pixel3=zero; pixel4=zero; for (x=0; x < (ssize_t) image->columns; x++) { point.x=QuantumScale*(level-1.0)*GetPixelRed(q); point.y=QuantumScale*(level-1.0)*GetPixelGreen(q); point.z=QuantumScale*(level-1.0)*GetPixelBlue(q); offset=(double) (point.x+level*floor(point.y)+cube_size*floor(point.z)); point.x-=floor(point.x); point.y-=floor(point.y); point.z-=floor(point.z); status=InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width), &pixel1,exception); if (status == MagickFalse) break; status=InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset+level,width),floor((offset+level)/ width),&pixel2,exception); if (status == MagickFalse) break; area=point.y; if (hald_image->interpolate == NearestNeighborInterpolatePixel) area=(point.y < 0.5) ? 
0.0 : 1.0; MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2, pixel2.opacity,area,&pixel3); offset+=cube_size; status=InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width), &pixel1,exception); if (status == MagickFalse) break; status=InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset+level,width),floor((offset+level)/ width),&pixel2,exception); if (status == MagickFalse) break; MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2, pixel2.opacity,area,&pixel4); area=point.z; if (hald_image->interpolate == NearestNeighborInterpolatePixel) area=(point.z < 0.5)? 0.0 : 1.0; MagickPixelCompositeAreaBlend(&pixel3,pixel3.opacity,&pixel4, pixel4.opacity,area,&pixel); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(pixel.blue)); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum(pixel.index)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,HaldClutImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } hald_view=DestroyCacheView(hald_view); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelImage() adjusts the levels of 
a particular image channel by
%  scaling the colors falling between specified white and black points to
%  the full available quantum range.
%
%  The parameters provided represent the black, and white points.  The black
%  point specifies the darkest color in the image.  Colors darker than the
%  black point are set to zero.  White point specifies the lightest color in
%  the image.  Colors brighter than the white point are set to the maximum
%  quantum value.
%
%  If a '!' flag is given, map black and white colors to the given levels
%  rather than mapping those levels to black and white.  See
%  LevelizeImage() and LevelizeImageChannel(), below.
%
%  Gamma specifies a gamma correction to apply to the image.
%
%  The format of the LevelImage method is:
%
%      MagickBooleanType LevelImage(Image *image,const char *levels)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o levels: Specify the levels where the black and white points have the
%      range of 0-QuantumRange, and gamma has the range 0-10 (e.g. 10x90%+2).
%      A '!' flag inverts the re-mapping.
%
*/
MagickExport MagickBooleanType LevelImage(Image *image,const char *levels)
{
  double
    black_point,
    gamma,
    white_point;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Parse levels (rho=black point, sigma=white point, xi=gamma).
  */
  if (levels == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(levels,&geometry_info);
  black_point=geometry_info.rho;
  white_point=(double) QuantumRange;
  if ((flags & SigmaValue) != 0)
    white_point=geometry_info.sigma;
  gamma=1.0;
  if ((flags & XiValue) != 0)
    gamma=geometry_info.xi;
  if ((flags & PercentValue) != 0)
    {
      /*
        Levels are percentages of the quantum range (see the parameter doc
        above), so scale by QuantumRange/100 -- not by the pixel count
        columns*rows, which is only meaningful for histogram-count
        arguments such as ContrastStretchImage()'s.
      */
      black_point*=(double) QuantumRange/100.0;
      white_point*=(double) QuantumRange/100.0;
    }
  if ((flags & SigmaValue) == 0)
    white_point=(double) QuantumRange-black_point;
  if ((flags & AspectValue) == 0)
    status=LevelImageChannel(image,DefaultChannels,black_point,white_point,
      gamma);
  else
    status=LevelizeImage(image,black_point,white_point,gamma);  /* '!' flag */
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l   I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelImage() applies the normal level operation to the image, spreading
%  out the values between the black and white points over the entire range of
%  values.  Gamma correction is also applied after the values have been mapped.
%
%  It is typically used to improve image contrast, or to provide a controlled
%  linear threshold for the image.  If the black and white points are set to
%  the minimum and maximum values found in the image, the image can be
%  normalized, or by swapping black and white values, negate the image.
%
%  The format of the LevelImage method is:
%
%      MagickBooleanType LevelImage(Image *image,const double black_point,
%        const double white_point,const double gamma)
%      MagickBooleanType LevelImageChannel(Image *image,
%        const ChannelType channel,const double black_point,
%        const double white_point,const double gamma)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o black_point: The level which is to be mapped to zero (black)
%
%    o white_point: The level which is to be mapped to QuantumRange (white)
%
%    o gamma: adjust gamma by this factor before mapping values.
%      use 1.0 for purely linear stretching of image color values
%
*/

/*
  LevelPixel(): linearly map pixel from [black_point,white_point] onto
  [0,QuantumRange], then gamma-correct with exponent 1/gamma.
  PerceptibleReciprocal() guards both divisions against a zero denominator
  (white_point == black_point, or gamma == 0).  The result is NOT clamped
  here; callers wrap it in ClampToQuantum().
*/
static inline double LevelPixel(const double black_point,
  const double white_point,const double gamma,const MagickRealType pixel)
{
  double
    level_pixel,
    scale;

  scale=PerceptibleReciprocal(white_point-black_point);
  level_pixel=QuantumRange*gamma_pow(scale*((double) pixel-black_point),
    PerceptibleReciprocal(gamma));
  return(level_pixel);
}

MagickExport MagickBooleanType LevelImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point,
  const double gamma)
{
#define LevelImageTag  "Level/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.  Opacity is leveled in alpha space: convert to alpha
        (QuantumRange-opacity), level, then convert back.
      */
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=(Quantum) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,(MagickRealType) image->colormap[i].red));
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=(Quantum) ClampToQuantum(LevelPixel(
          black_point,white_point,gamma,(MagickRealType)
          image->colormap[i].green));
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=(Quantum) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,(MagickRealType) image->colormap[i].blue));
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=(Quantum) (QuantumRange-(Quantum)
          ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) (QuantumRange-image->colormap[i].opacity))));
    }
  /*
    Level image: one OpenMP task per row; any row failure latches
    status=MagickFalse and the remaining rows bail out early.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      *magick_restrict indexes;

    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelRed(q))));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelGreen(q))));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelBlue(q))));
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        SetPixelAlpha(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelAlpha(q))));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,(MagickRealType) GetPixelIndex(indexes+x))));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,LevelImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  (void) ClampImage(image);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l i z e   I m a g e   C h a n n e l                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelizeImageChannel() applies the reversed LevelImage() operation to just
%  the specific channels specified.  It compresses the full range of color
%  values, so that they lie between the given black and white points.  Gamma is
%  applied before the values are mapped.
%
%  LevelizeImageChannel() can be called by using a +level command line
%  API option, or using a '!' on a -level or LevelImage() geometry string.
%
%  It can be used for example to de-contrast a greyscale image to the exact
%  levels specified.  Or by using specific levels for each channel of an image
%  you can convert a gray-scale image to any linear color gradient, according
%  to those levels.
%
%  The format of the LevelizeImageChannel method is:
%
%      MagickBooleanType LevelizeImageChannel(Image *image,
%        const ChannelType channel,const char *levels)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o black_point: The level to map zero (black) to.
%
%    o white_point: The level to map QuantumRange (white) to.
%
%    o gamma: adjust gamma by this factor before mapping values.
%
*/

MagickExport MagickBooleanType LevelizeImage(Image *image,
  const double black_point,const double white_point,const double gamma)
{
  MagickBooleanType
    status;

  status=LevelizeImageChannel(image,DefaultChannels,black_point,white_point,
    gamma);
  return(status);
}

MagickExport MagickBooleanType LevelizeImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point,
  const double gamma)
{
#define LevelizeImageTag  "Levelize/Image"
/*
  LevelizeValue(x): the inverse of LevelPixel() -- gamma-correct the
  normalized value first, then compress [0,QuantumRange] into
  [black_point,white_point] and clamp.
*/
#define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \
  (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point)

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.  Opacity is handled in alpha space: convert to alpha
        (QuantumRange-opacity), levelize, convert back.
      */
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=LevelizeValue(image->colormap[i].red);
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=LevelizeValue(image->colormap[i].green);
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=LevelizeValue(image->colormap[i].blue);
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=(Quantum) (QuantumRange-LevelizeValue(
          QuantumRange-image->colormap[i].opacity));
    }
  /*
    Level image: one OpenMP task per row; a failed row latches
    status=MagickFalse so the remaining rows bail out early.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      *magick_restrict indexes;

    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,LevelizeValue(GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,LevelizeValue(GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,LevelizeValue(GetPixelBlue(q)));
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        SetPixelAlpha(q,LevelizeValue(GetPixelAlpha(q)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,LevelizeValue(GetPixelIndex(indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,LevelizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l   I m a g e   C o l o r s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelImageColor() maps the given color to "black" and "white" values,
%  linearly spreading out the colors, and level values on a
channel by channel % bases, as per LevelImage(). The given colors allows you to specify % different level ranges for each of the color channels separately. % % If the boolean 'invert' is set true the image values will modifyed in the % reverse direction. That is any existing "black" and "white" colors in the % image will become the color values given, with all other values compressed % appropriatally. This effectivally maps a greyscale gradient into the given % color gradient. % % The format of the LevelColorsImageChannel method is: % % MagickBooleanType LevelColorsImage(Image *image, % const MagickPixelPacket *black_color, % const MagickPixelPacket *white_color,const MagickBooleanType invert) % MagickBooleanType LevelColorsImageChannel(Image *image, % const ChannelType channel,const MagickPixelPacket *black_color, % const MagickPixelPacket *white_color,const MagickBooleanType invert) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o black_color: The color to map black to/from % % o white_point: The color to map white to/from % % o invert: if true map the colors (levelize), rather than from (level) % */ MagickExport MagickBooleanType LevelColorsImage(Image *image, const MagickPixelPacket *black_color,const MagickPixelPacket *white_color, const MagickBooleanType invert) { MagickBooleanType status; status=LevelColorsImageChannel(image,DefaultChannels,black_color,white_color, invert); return(status); } MagickExport MagickBooleanType LevelColorsImageChannel(Image *image, const ChannelType channel,const MagickPixelPacket *black_color, const MagickPixelPacket *white_color,const MagickBooleanType invert) { MagickStatusType status; /* Allocate and initialize levels map. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((IsGrayColorspace(image->colorspace) != MagickFalse) && ((IsGrayColorspace(black_color->colorspace) != MagickFalse) || (IsGrayColorspace(white_color->colorspace) != MagickFalse))) (void) SetImageColorspace(image,sRGBColorspace); status=MagickTrue; if (invert == MagickFalse) { if ((channel & RedChannel) != 0) status&=LevelImageChannel(image,RedChannel,black_color->red, white_color->red,(double) 1.0); if ((channel & GreenChannel) != 0) status&=LevelImageChannel(image,GreenChannel,black_color->green, white_color->green,(double) 1.0); if ((channel & BlueChannel) != 0) status&=LevelImageChannel(image,BlueChannel,black_color->blue, white_color->blue,(double) 1.0); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) status&=LevelImageChannel(image,OpacityChannel,black_color->opacity, white_color->opacity,(double) 1.0); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) status&=LevelImageChannel(image,IndexChannel,black_color->index, white_color->index,(double) 1.0); } else { if ((channel & RedChannel) != 0) status&=LevelizeImageChannel(image,RedChannel,black_color->red, white_color->red,(double) 1.0); if ((channel & GreenChannel) != 0) status&=LevelizeImageChannel(image,GreenChannel,black_color->green, white_color->green,(double) 1.0); if ((channel & BlueChannel) != 0) status&=LevelizeImageChannel(image,BlueChannel,black_color->blue, white_color->blue,(double) 1.0); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) status&=LevelizeImageChannel(image,OpacityChannel,black_color->opacity, white_color->opacity,(double) 1.0); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) status&=LevelizeImageChannel(image,IndexChannel,black_color->index, white_color->index,(double) 1.0); } 
return(status == 0 ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i n e a r S t r e t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LinearStretchImage() discards any pixels below the black point and above % the white point and levels the remaining pixels. % % The format of the LinearStretchImage method is: % % MagickBooleanType LinearStretchImage(Image *image, % const double black_point,const double white_point) % % A description of each parameter follows: % % o image: the image. % % o black_point: the black point. % % o white_point: the white point. % */ MagickExport MagickBooleanType LinearStretchImage(Image *image, const double black_point,const double white_point) { #define LinearStretchImageTag "LinearStretch/Image" ExceptionInfo *exception; MagickBooleanType status; MagickRealType *histogram, intensity; ssize_t black, white, y; /* Allocate histogram and linear map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); exception=(&image->exception); histogram=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*histogram)); if (histogram == (MagickRealType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Form histogram. */ (void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram)); for (y=0; y < (ssize_t) image->rows; y++) { const PixelPacket *magick_restrict p; ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=(ssize_t) image->columns-1; x >= 0; x--) { histogram[ScaleQuantumToMap(ClampToQuantum(GetPixelIntensity(image,p)))]++; p++; } } /* Find the histogram boundaries by locating the black and white point levels. 
*/ intensity=0.0; for (black=0; black < (ssize_t) MaxMap; black++) { intensity+=histogram[black]; if (intensity >= black_point) break; } intensity=0.0; for (white=(ssize_t) MaxMap; white != 0; white--) { intensity+=histogram[white]; if (intensity >= white_point) break; } histogram=(MagickRealType *) RelinquishMagickMemory(histogram); status=LevelImageChannel(image,DefaultChannels,(double) ScaleMapToQuantum(black),(double) ScaleMapToQuantum(white),1.0); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o d u l a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ModulateImage() lets you control the brightness, saturation, and hue % of an image. Modulate represents the brightness, saturation, and hue % as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the % modulation is lightness, saturation, and hue. For HWB, use blackness, % whiteness, and hue. And for HCL, use chrome, luma, and hue. % % The format of the ModulateImage method is: % % MagickBooleanType ModulateImage(Image *image,const char *modulate) % % A description of each parameter follows: % % o image: the image. % % o modulate: Define the percent change in brightness, saturation, and % hue. % */ static inline void ModulateHCL(const double percent_hue, const double percent_chroma,const double percent_luma,Quantum *red, Quantum *green,Quantum *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. */ ConvertRGBToHCL(*red,*green,*blue,&hue,&chroma,&luma); hue+=fmod((percent_hue-100.0),200.0)/200.0; chroma*=0.01*percent_chroma; luma*=0.01*percent_luma; ConvertHCLToRGB(hue,chroma,luma,red,green,blue); } static inline void ModulateHCLp(const double percent_hue, const double percent_chroma,const double percent_luma,Quantum *red, Quantum *green,Quantum *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. 
*/ ConvertRGBToHCLp(*red,*green,*blue,&hue,&chroma,&luma); hue+=fmod((percent_hue-100.0),200.0)/200.0; chroma*=0.01*percent_chroma; luma*=0.01*percent_luma; ConvertHCLpToRGB(hue,chroma,luma,red,green,blue); } static inline void ModulateHSB(const double percent_hue, const double percent_saturation,const double percent_brightness,Quantum *red, Quantum *green,Quantum *blue) { double brightness, hue, saturation; /* Increase or decrease color brightness, saturation, or hue. */ ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; brightness*=0.01*percent_brightness; ConvertHSBToRGB(hue,saturation,brightness,red,green,blue); } static inline void ModulateHSI(const double percent_hue, const double percent_saturation,const double percent_intensity,Quantum *red, Quantum *green,Quantum *blue) { double intensity, hue, saturation; /* Increase or decrease color intensity, saturation, or hue. */ ConvertRGBToHSI(*red,*green,*blue,&hue,&saturation,&intensity); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; intensity*=0.01*percent_intensity; ConvertHSIToRGB(hue,saturation,intensity,red,green,blue); } static inline void ModulateHSL(const double percent_hue, const double percent_saturation,const double percent_lightness,Quantum *red, Quantum *green,Quantum *blue) { double hue, lightness, saturation; /* Increase or decrease color lightness, saturation, or hue. */ ConvertRGBToHSL(*red,*green,*blue,&hue,&saturation,&lightness); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; lightness*=0.01*percent_lightness; ConvertHSLToRGB(hue,saturation,lightness,red,green,blue); } static inline void ModulateHSV(const double percent_hue, const double percent_saturation,const double percent_value,Quantum *red, Quantum *green,Quantum *blue) { double hue, saturation, value; /* Increase or decrease color value, saturation, or hue. 
*/ ConvertRGBToHSV(*red,*green,*blue,&hue,&saturation,&value); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; value*=0.01*percent_value; ConvertHSVToRGB(hue,saturation,value,red,green,blue); } static inline void ModulateHWB(const double percent_hue, const double percent_whiteness,const double percent_blackness,Quantum *red, Quantum *green,Quantum *blue) { double blackness, hue, whiteness; /* Increase or decrease color blackness, whiteness, or hue. */ ConvertRGBToHWB(*red,*green,*blue,&hue,&whiteness,&blackness); hue+=fmod((percent_hue-100.0),200.0)/200.0; blackness*=0.01*percent_blackness; whiteness*=0.01*percent_whiteness; ConvertHWBToRGB(hue,whiteness,blackness,red,green,blue); } static inline void ModulateLCHab(const double percent_luma, const double percent_chroma,const double percent_hue,Quantum *red, Quantum *green,Quantum *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. */ ConvertRGBToLCHab(*red,*green,*blue,&luma,&chroma,&hue); luma*=0.01*percent_luma; chroma*=0.01*percent_chroma; hue+=fmod((percent_hue-100.0),200.0)/200.0; ConvertLCHabToRGB(luma,chroma,hue,red,green,blue); } static inline void ModulateLCHuv(const double percent_luma, const double percent_chroma,const double percent_hue,Quantum *red, Quantum *green,Quantum *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. 
*/ ConvertRGBToLCHuv(*red,*green,*blue,&luma,&chroma,&hue); luma*=0.01*percent_luma; chroma*=0.01*percent_chroma; hue+=fmod((percent_hue-100.0),200.0)/200.0; ConvertLCHuvToRGB(luma,chroma,hue,red,green,blue); } MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate) { #define ModulateImageTag "Modulate/Image" CacheView *image_view; ColorspaceType colorspace; const char *artifact; double percent_brightness, percent_hue, percent_saturation; ExceptionInfo *exception; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; MagickStatusType flags; ssize_t i; ssize_t y; /* Initialize modulate table. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (modulate == (char *) NULL) return(MagickFalse); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) SetImageColorspace(image,sRGBColorspace); flags=ParseGeometry(modulate,&geometry_info); percent_brightness=geometry_info.rho; percent_saturation=geometry_info.sigma; if ((flags & SigmaValue) == 0) percent_saturation=100.0; percent_hue=geometry_info.xi; if ((flags & XiValue) == 0) percent_hue=100.0; colorspace=UndefinedColorspace; artifact=GetImageArtifact(image,"modulate:colorspace"); if (artifact != (const char *) NULL) colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions, MagickFalse,artifact); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { Quantum blue, green, red; /* Modulate image colormap. 
*/ red=image->colormap[i].red; green=image->colormap[i].green; blue=image->colormap[i].blue; switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HCLpColorspace: { ModulateHCLp(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSIColorspace: { ModulateHSI(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSVColorspace: { ModulateHSV(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case LCHabColorspace: case LCHColorspace: { ModulateLCHab(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } case LCHuvColorspace: { ModulateLCHuv(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } } image->colormap[i].red=red; image->colormap[i].green=green; image->colormap[i].blue=blue; } /* Modulate image. 
*/ /* call opencl version */ #if defined(MAGICKCORE_OPENCL_SUPPORT) status=AccelerateModulateImage(image,percent_brightness,percent_hue, percent_saturation,colorspace,&image->exception); if (status != MagickFalse) return status; #endif status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { Quantum blue, green, red; red=GetPixelRed(q); green=GetPixelGreen(q); blue=GetPixelBlue(q); switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HCLpColorspace: { ModulateHCLp(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSVColorspace: { ModulateHSV(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case LCHabColorspace: { ModulateLCHab(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } case LCHColorspace: case LCHuvColorspace: { ModulateLCHuv(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } } SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } if 
(SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ModulateImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e g a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NegateImage() negates the colors in the reference image. The grayscale % option means that only grayscale values within the image are negated. % % The format of the NegateImageChannel method is: % % MagickBooleanType NegateImage(Image *image, % const MagickBooleanType grayscale) % MagickBooleanType NegateImageChannel(Image *image, % const ChannelType channel,const MagickBooleanType grayscale) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o grayscale: If MagickTrue, only negate grayscale pixels within the image. % */ MagickExport MagickBooleanType NegateImage(Image *image, const MagickBooleanType grayscale) { MagickBooleanType status; status=NegateImageChannel(image,DefaultChannels,grayscale); return(status); } MagickExport MagickBooleanType NegateImageChannel(Image *image, const ChannelType channel,const MagickBooleanType grayscale) { #define NegateImageTag "Negate/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { /* Negate colormap. 
*/ for (i=0; i < (ssize_t) image->colors; i++) { if (grayscale != MagickFalse) if ((image->colormap[i].red != image->colormap[i].green) || (image->colormap[i].green != image->colormap[i].blue)) continue; if ((channel & RedChannel) != 0) image->colormap[i].red=QuantumRange-image->colormap[i].red; if ((channel & GreenChannel) != 0) image->colormap[i].green=QuantumRange-image->colormap[i].green; if ((channel & BlueChannel) != 0) image->colormap[i].blue=QuantumRange-image->colormap[i].blue; } } /* Negate image. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); if (grayscale != MagickFalse) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; IndexPacket *magick_restrict indexes; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((GetPixelRed(q) != GetPixelGreen(q)) || (GetPixelGreen(q) != GetPixelBlue(q))) { q++; continue; } if ((channel & RedChannel) != 0) SetPixelRed(q,QuantumRange-GetPixelRed(q)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,QuantumRange-GetPixelGreen(q)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,QuantumRange-GetPixelBlue(q)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,QuantumRange-GetPixelOpacity(q)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,QuantumRange-GetPixelIndex(indexes+x)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { 
MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,NegateImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(MagickTrue); } /* Negate image. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { IndexPacket *magick_restrict indexes; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); if (channel == DefaultChannels) for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q+x,QuantumRange-GetPixelRed(q+x)); SetPixelGreen(q+x,QuantumRange-GetPixelGreen(q+x)); SetPixelBlue(q+x,QuantumRange-GetPixelBlue(q+x)); } else for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q+x,QuantumRange-GetPixelRed(q+x)); if ((channel & GreenChannel) != 0) SetPixelGreen(q+x,QuantumRange-GetPixelGreen(q+x)); if ((channel & BlueChannel) != 0) SetPixelBlue(q+x,QuantumRange-GetPixelBlue(q+x)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q+x,QuantumRange-GetPixelOpacity(q+x)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,QuantumRange-GetPixelIndex(indexes+x)); if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,NegateImageTag,progress,image->rows); if (proceed == MagickFalse) 
status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N o r m a l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % The NormalizeImage() method enhances the contrast of a color image by % mapping the darkest 2 percent of all pixel to black and the brightest % 1 percent to white. % % The format of the NormalizeImage method is: % % MagickBooleanType NormalizeImage(Image *image) % MagickBooleanType NormalizeImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % */ MagickExport MagickBooleanType NormalizeImage(Image *image) { MagickBooleanType status; status=NormalizeImageChannel(image,DefaultChannels); return(status); } MagickExport MagickBooleanType NormalizeImageChannel(Image *image, const ChannelType channel) { double black_point, white_point; black_point=(double) image->columns*image->rows*0.0015; white_point=(double) image->columns*image->rows*0.9995; return(ContrastStretchImageChannel(image,channel,black_point,white_point)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S i g m o i d a l C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SigmoidalContrastImage() adjusts the contrast of an image with a non-linear % sigmoidal contrast algorithm. Increase the contrast of the image using a % sigmoidal transfer function without saturating highlights or shadows. % Contrast indicates how much to increase the contrast (0 is none; 3 is % typical; 20 is pushing it); mid-point indicates where midtones fall in the % resultant image (0 is white; 50% is middle-gray; 100% is black). 
Set % sharpen to MagickTrue to increase the image contrast otherwise the contrast % is reduced. % % The format of the SigmoidalContrastImage method is: % % MagickBooleanType SigmoidalContrastImage(Image *image, % const MagickBooleanType sharpen,const char *levels) % MagickBooleanType SigmoidalContrastImageChannel(Image *image, % const ChannelType channel,const MagickBooleanType sharpen, % const double contrast,const double midpoint) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o sharpen: Increase or decrease image contrast. % % o contrast: strength of the contrast, the larger the number the more % 'threshold-like' it becomes. % % o midpoint: midpoint of the function as a color value 0 to QuantumRange. % */ /* ImageMagick 7 has a version of this function which does not use LUTs. */ /* Sigmoidal function Sigmoidal with inflexion point moved to b and "slope constant" set to a. The first version, based on the hyperbolic tangent tanh, when combined with the scaling step, is an exact arithmetic clone of the sigmoid function based on the logistic curve. The equivalence is based on the identity 1/(1+exp(-t)) = (1+tanh(t/2))/2 (http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the scaled sigmoidal derivation is invariant under affine transformations of the ordinate. The tanh version is almost certainly more accurate and cheaper. The 0.5 factor in the argument is to clone the legacy ImageMagick behavior. The reason for making the define depend on atanh even though it only uses tanh has to do with the construction of the inverse of the scaled sigmoidal. 
*/ #if defined(MAGICKCORE_HAVE_ATANH) #define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) ) #else #define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) ) #endif /* Scaled sigmoidal function: ( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) / ( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) ) See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by zero. This is fixed below by exiting immediately when contrast is small, leaving the image (or colormap) unmodified. This appears to be safe because the series expansion of the logistic sigmoidal function around x=b is 1/2-a*(b-x)/4+... so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh). */ #define ScaledSigmoidal(a,b,x) ( \ (Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \ (Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) ) /* Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even when creating a LUT from in gamut values, hence the branching. In addition, HDRI may have out of gamut values. InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal: It is only a right inverse. This is unavoidable. */ static inline double InverseScaledSigmoidal(const double a,const double b, const double x) { const double sig0=Sigmoidal(a,b,0.0); const double sig1=Sigmoidal(a,b,1.0); const double argument=(sig1-sig0)*x+sig0; const double clamped= ( #if defined(MAGICKCORE_HAVE_ATANH) argument < -1+MagickEpsilon ? -1+MagickEpsilon : ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument ) ); return(b+(2.0/a)*atanh(clamped)); #else argument < MagickEpsilon ? MagickEpsilon : ( argument > 1-MagickEpsilon ? 
1-MagickEpsilon : argument ) ); return(b-log(1.0/clamped-1.0)/a); #endif } MagickExport MagickBooleanType SigmoidalContrastImage(Image *image, const MagickBooleanType sharpen,const char *levels) { GeometryInfo geometry_info; MagickBooleanType status; MagickStatusType flags; flags=ParseGeometry(levels,&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0*QuantumRange/2.0; if ((flags & PercentValue) != 0) geometry_info.sigma=1.0*QuantumRange*geometry_info.sigma/100.0; status=SigmoidalContrastImageChannel(image,DefaultChannels,sharpen, geometry_info.rho,geometry_info.sigma); return(status); } MagickExport MagickBooleanType SigmoidalContrastImageChannel(Image *image, const ChannelType channel,const MagickBooleanType sharpen, const double contrast,const double midpoint) { #define SigmoidalContrastImageTag "SigmoidalContrast/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickRealType *sigmoidal_map; ssize_t i; ssize_t y; /* Side effect: clamps values unless contrast<MagickEpsilon, in which case nothing is done. */ if (contrast < MagickEpsilon) return(MagickTrue); /* Allocate and initialize sigmoidal maps. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=(&image->exception); sigmoidal_map=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*sigmoidal_map)); if (sigmoidal_map == (MagickRealType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) memset(sigmoidal_map,0,(MaxMap+1)*sizeof(*sigmoidal_map)); if (sharpen != MagickFalse) for (i=0; i <= (ssize_t) MaxMap; i++) sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType) (MaxMap*ScaledSigmoidal(contrast,QuantumScale*midpoint,(double) i/ MaxMap))); else for (i=0; i <= (ssize_t) MaxMap; i++) sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType) ( MaxMap*InverseScaledSigmoidal(contrast,QuantumScale*midpoint,(double) i/ MaxMap))); /* Sigmoidal-contrast enhance colormap. */ if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { if ((channel & RedChannel) != 0) image->colormap[i].red=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].red)]); if ((channel & GreenChannel) != 0) image->colormap[i].green=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].green)]); if ((channel & BlueChannel) != 0) image->colormap[i].blue=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].blue)]); if ((channel & OpacityChannel) != 0) image->colormap[i].opacity=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].opacity)]); } /* Sigmoidal-contrast enhance image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { IndexPacket *magick_restrict indexes; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelRed(q))])); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelGreen(q))])); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelBlue(q))])); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelOpacity(q))])); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelIndex(indexes+x))])); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); sigmoidal_map=(MagickRealType *) RelinquishMagickMemory(sigmoidal_map); return(status); }
matmult.c
#include <stdio.h> #include <stdlib.h> #include "matmult_initialize.h" #ifndef MATRIX_SIZE #define MATRIX_SIZE 512 #endif #define NRA MATRIX_SIZE /* number of rows in matrix A */ #define NCA MATRIX_SIZE /* number of columns in matrix A */ #define NCB MATRIX_SIZE /* number of columns in matrix B */ double** allocateMatrix(int rows, int cols) { int i; double **matrix = (double**)malloc((sizeof(double*)) * rows); for (i=0; i<rows; i++) { matrix[i] = (double*)malloc((sizeof(double)) * cols); } return matrix; } void freeMatrix(double** matrix, int rows, int cols) { int i; for (i=0; i<rows; i++) { free(matrix[i]); } free(matrix); } __inline double multiply(double a, double b) { return a * b; } // cols_a and rows_b are the same value void compute(double **a, double **b, double **c, int rows_a, int cols_a, int cols_b) { int i,j,k; #pragma omp parallel private(i,j,k) shared(a,b,c) { /*** Do matrix multiply sharing iterations on outer loop ***/ /*** Display who does which iterations for demonstration purposes ***/ #pragma omp for nowait for (i=0; i<rows_a; i++) { for(j=0; j<cols_b; j++) { for (k=0; k<cols_a; k++) { c[i][j] += multiply(a[i][k], b[k][j]); } } } } /*** End of parallel region ***/ } void compute_interchange(double **a, double **b, double **c, int rows_a, int cols_a, int cols_b) { int i,j,k; #pragma omp parallel private(i,j,k) shared(a,b,c) { /*** Do matrix multiply sharing iterations on outer loop ***/ /*** Display who does which iterations for demonstration purposes ***/ #pragma omp for nowait for (i=0; i<rows_a; i++) { for (k=0; k<cols_a; k++) { for(j=0; j<cols_b; j++) { c[i][j] += multiply(a[i][k], b[k][j]); } } } } /*** End of parallel region ***/ } double do_work(void) { double **a, /* matrix A to be multiplied */ **b, /* matrix B to be multiplied */ **c; /* result matrix C */ a = allocateMatrix(NRA, NCA); b = allocateMatrix(NCA, NCB); c = allocateMatrix(NRA, NCB); /*** Spawn a parallel region explicitly scoping all variables ***/ initialize(a, NRA, NCA); 
initialize(b, NCA, NCB); initialize(c, NRA, NCB); compute(a, b, c, NRA, NCA, NCB); compute_interchange(a, b, c, NRA, NCA, NCB); double result = c[0][1]; freeMatrix(a, NRA, NCA); freeMatrix(b, NCA, NCB); freeMatrix(c, NCA, NCB); return result; } int main (int argc, char *argv[]) { do_work(); printf("Done.\n"); return 0; }
citrix_ns_fmt_plug.c
/* * Description from Nicolas Ruff: * - Salt value is hashed as an hexadecimal string, not bytes. * - The trailing NULL byte of password string is taken into account during * hashing. * - The leading '1' is actually the string length * '1' = 49 = len('1') + len(hex_salt) + len(hex_sha1) * * --------------------------------------- * import hashlib * * def netscaler_hash( rand_bytes, pwd ): * s = hashlib.sha1() * s.update( rand_bytes ) * s.update( pwd ) * return "1" + rand_bytes + s.hexdigest() * * # TEST VECTOR * # 14dfca1e6c0f5f3d96526c3ce70849992b7fad3e324cf6b0f * * rand_bytes = "4dfca1e6" * pwd = "nsroot\x00" * print netscaler_hash( rand_bytes, pwd ) * --------------------------------------- * * This software is Copyright (c) 2013 magnum, and it is hereby released to the * general public under the following terms: Redistribution and use in source * and binary forms, with or without modification, are permitted. * * This version is hard coded for salt length 8 (for speed). */ #if FMT_EXTERNS_H extern struct fmt_main fmt_ctrxns; #elif FMT_REGISTERS_H john_register_one(&fmt_ctrxns); #else #include <string.h> #ifdef _OPENMP #ifdef SIMD_COEF_32 #ifndef OMP_SCALE #define OMP_SCALE 1024 #endif #else #ifndef OMP_SCALE #define OMP_SCALE 2048 #endif #endif #include <omp.h> #endif #include "arch.h" #include "misc.h" #include "formats.h" #include "options.h" #include "johnswap.h" #ifdef SIMD_COEF_32 #define NBKEYS (SIMD_COEF_32 * SIMD_PARA_SHA1) #endif #include "simd-intrinsics.h" #include "common.h" #include "sha.h" #include "memdbg.h" // Must be last included header #define FORMAT_LABEL "Citrix_NS10" #define FORMAT_NAME "Netscaler 10" #define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH (55 - SALT_SIZE - 1) #define BINARY_SIZE 20 #define BINARY_ALIGN 4 #define SALT_SIZE 8 #define SALT_ALIGN 4 #ifdef SIMD_COEF_32 #define MIN_KEYS_PER_CRYPT NBKEYS #define MAX_KEYS_PER_CRYPT NBKEYS #define GETPOS(i, 
index) ((index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (((i)&3)^3) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4) //for endianity conversion #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static struct fmt_tests tests[] = { {"100000000f1dc96f425971ba590a076fd0f8bccbf25c1ba0c", ""}, {"14623718525fe334bbd9c0704e06ce134ef17b51f6b33548c", " "}, {"15c5c5c5c6ccd884f6383f55a6aeba5f847775e57ab012675", "Tw"}, {"13333333319143136ba9ff9e18d1cb022b63df0926de9509e", "333"}, {"144434241d7ce89a7484cd202400639692258dde37efc29c5", "four"}, {"100010203e09cefed1847b7a2a5e7a5d2cdc67e8a56ed0bdd", "fiver"}, {"14dfca1e6c0f5f3d96526c3ce70849992b7fad3e324cf6b0f", "nsroot"}, {"1deadcafe7587ea23b25a6ccf3fd53192e36ad3e9a2553b20", "magnum!"}, {NULL} }; #ifdef SIMD_COEF_32 static unsigned char (*saved_key)[SHA_BUF_SIZ * 4 * NBKEYS]; static unsigned char (*crypt_key)[BINARY_SIZE * NBKEYS]; static unsigned int kpc; #else static char saved_salt[SALT_SIZE]; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE / 4]; #endif static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif #ifdef SIMD_COEF_32 saved_key = mem_calloc_align(self->params.max_keys_per_crypt / NBKEYS, sizeof(*saved_key), MEM_ALIGN_SIMD); crypt_key = mem_calloc_align(self->params.max_keys_per_crypt / NBKEYS, sizeof(*crypt_key), MEM_ALIGN_SIMD); kpc = self->params.max_keys_per_crypt; #else saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_key)); #endif } static void done(void) { MEM_FREE(crypt_key); MEM_FREE(saved_key); } static void *get_binary(char *ciphertext) { static unsigned char *realcipher; int i, len; if (!realcipher) realcipher = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD); len = 
*ciphertext; ciphertext += len - 2 * BINARY_SIZE; for(i = 0; i < BINARY_SIZE; i++) { realcipher[i] = atoi16[ARCH_INDEX(ciphertext[i * 2])] * 16 + atoi16[ARCH_INDEX(ciphertext[i * 2 + 1])]; } #ifdef SIMD_COEF_32 alter_endianity(realcipher, BINARY_SIZE); #endif return (void*)realcipher; } static int valid(char *ciphertext, struct fmt_main *self) { int len; len = *ciphertext; if (len != (int)'1') return 0; if (strlen(ciphertext) != len) return 0; if (len != strspn(ciphertext, HEXCHARS_lc)) return 0; return 1; } static void set_key(char *key, int index) { #ifdef SIMD_COEF_32 #if ARCH_ALLOWS_UNALIGNED const ARCH_WORD_32 *wkey = (ARCH_WORD_32*)key; #else char buf_aligned[PLAINTEXT_LENGTH + 1] JTR_ALIGN(sizeof(uint32_t)); const ARCH_WORD_32 *wkey = (uint32_t*)(is_aligned(key, sizeof(uint32_t)) ? key : strcpy(buf_aligned, key)); #endif ARCH_WORD_32 *keybuf_word = (ARCH_WORD_32*)&saved_key[0][GETPOS(SALT_SIZE ^ 3, index)]; unsigned int len; ARCH_WORD_32 temp; len = SALT_SIZE; while((temp = *wkey++) & 0xff) { if (!(temp & 0xff00)) { *keybuf_word = JOHNSWAP((temp & 0xff) | (0x80 << 16)); len++; goto key_cleaning; } if (!(temp & 0xff0000)) { *keybuf_word = JOHNSWAP((temp & 0xffff) | (0x80U << 24)); len+=2; goto key_cleaning; } *keybuf_word = JOHNSWAP(temp); keybuf_word += SIMD_COEF_32; if (!(temp & 0xff000000)) { *keybuf_word = 0x80000000; len+=3; goto key_cleaning; } len += 4; } *keybuf_word = 0x00800000; key_cleaning: keybuf_word += SIMD_COEF_32; while(*keybuf_word) { *keybuf_word = 0; keybuf_word += SIMD_COEF_32; } len += 1; /* Trailing null is included */ ((unsigned int*)saved_key)[15*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] = len << 3; #else strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1); #endif } static char *get_key(int index) { #ifdef SIMD_COEF_32 unsigned int i, s; static char out[PLAINTEXT_LENGTH + 1]; s = (((unsigned int*)saved_key)[15*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned 
int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] >> 3) - SALT_SIZE - 1; for(i = 0; i < s; i++) out[i] = ((char*)saved_key)[GETPOS(SALT_SIZE + i, index)]; out[i] = 0; return out; #else return saved_key[index]; #endif } static void *get_salt(char *ciphertext) { static union { unsigned char c[SALT_SIZE]; ARCH_WORD_32 w; } out; ciphertext++; memcpy(out.c, ciphertext, SALT_SIZE); return (void*)out.c; } static void set_salt(void *salt) { #ifdef SIMD_COEF_32 int i, index; for (index = 0; index < kpc; index++) for (i = 0; i < SALT_SIZE; i++) saved_key[0][GETPOS(i, index)] = ((unsigned char*)salt)[i]; #else memcpy(saved_salt, salt, SALT_SIZE); #endif } static int cmp_all(void *binary, int count) { #ifdef SIMD_COEF_32 unsigned int x, y=0; for(; y < kpc/SIMD_COEF_32; y++) for(x = 0; x < SIMD_COEF_32; x++) { if(((ARCH_WORD_32*)binary)[0] == ((ARCH_WORD_32*)crypt_key)[x + y * SIMD_COEF_32*5]) return 1; } return 0; #else int index = 0; #ifdef _OPENMP for (index = 0; index < count; index++) #endif if (((ARCH_WORD_32*)binary)[0] == crypt_key[index][0]) return 1; return 0; #endif } static int cmp_one(void *binary, int index) { #ifdef SIMD_COEF_32 unsigned int x, y; x = index & (SIMD_COEF_32-1); y = (unsigned int)index / SIMD_COEF_32; if(((ARCH_WORD_32*)binary)[0] != ((ARCH_WORD_32*)crypt_key)[x + y * SIMD_COEF_32*5]) return 0; if(((ARCH_WORD_32*)binary)[1] != ((ARCH_WORD_32*)crypt_key)[x + y * SIMD_COEF_32*5+SIMD_COEF_32*1]) return 0; if(((ARCH_WORD_32*)binary)[2] != ((ARCH_WORD_32*)crypt_key)[x + y * SIMD_COEF_32*5+SIMD_COEF_32*2]) return 0; if(((ARCH_WORD_32*)binary)[3] != ((ARCH_WORD_32*)crypt_key)[x + y * SIMD_COEF_32*5+SIMD_COEF_32*3]) return 0; if(((ARCH_WORD_32*)binary)[4] != ((ARCH_WORD_32*)crypt_key)[x + y * SIMD_COEF_32*5+SIMD_COEF_32*4]) return 0; return 1; #else return !memcmp(binary, crypt_key[index], BINARY_SIZE); #endif } static int cmp_exact(char *source, int index) { return (1); } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; 
int index = 0; #ifdef _OPENMP int loops = (count + MAX_KEYS_PER_CRYPT - 1) / MAX_KEYS_PER_CRYPT; #pragma omp parallel for for (index = 0; index < loops; ++index) #endif { #ifdef SIMD_COEF_32 SIMDSHA1body(saved_key[index], (unsigned int*)crypt_key[index], NULL, SSEi_MIXED_IN); #else SHA_CTX ctx; SHA1_Init(&ctx); SHA1_Update(&ctx, (unsigned char*)saved_salt, SALT_SIZE); SHA1_Update(&ctx, (unsigned char*)saved_key[index], strlen(saved_key[index]) + 1); SHA1_Final((unsigned char*)crypt_key[index], &ctx); #endif } return count; } #ifdef SIMD_COEF_32 #define HASH_IDX ((index&(SIMD_COEF_32-1))+((unsigned int)index/SIMD_COEF_32)*SIMD_COEF_32*5) static int get_hash_0(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_IDX] & PH_MASK_0; } static int get_hash_1(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_IDX] & PH_MASK_1; } static int get_hash_2(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_IDX] & PH_MASK_2; } static int get_hash_3(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_IDX] & PH_MASK_3; } static int get_hash_4(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_IDX] & PH_MASK_4; } static int get_hash_5(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_IDX] & PH_MASK_5; } static int get_hash_6(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_IDX] & PH_MASK_6; } #else static int get_hash_0(int index) { return crypt_key[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_key[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_key[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_key[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_key[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_key[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_key[index][0] & PH_MASK_6; } #endif static int salt_hash(void *salt) { return *(ARCH_WORD_32*)salt & (SALT_HASH_SIZE - 1); } struct fmt_main fmt_ctrxns = { { FORMAT_LABEL, 
FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD, { NULL }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
dposv.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zposv.c, normal z -> d, Fri Sep 28 17:38:09 2018 * **/ #include "plasma.h" #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_tuning.h" #include "plasma_types.h" #include "plasma_workspace.h" /***************************************************************************//** * * @ingroup plasma_posv * * Computes the solution to a system of linear equations A * X = B, * where A is an n-by-n symmetric positive definite matrix and X and B are * n-by-nrhs matrices. The Cholesky decomposition is used to factor A as * * \f[ A = L\times L^T, \f] if uplo = PlasmaLower, * or * \f[ A = U^T\times U, \f] if uplo = PlasmaUpper, * * where U is an upper triangular matrix and L is a lower triangular matrix. * The factored form of A is then used to solve the system of equations: * * A * X = B. * ******************************************************************************* * * @param[in] uplo * - PlasmaUpper: Upper triangle of A is stored; * - PlasmaLower: Lower triangle of A is stored. * * @param[in] n * The number of linear equations, i.e., the order of the matrix A. * n >= 0. * * @param[in] nrhs * The number of right hand sides, i.e., the number of columns * of the matrix B. nrhs >= 0. * * @param[in,out] pA * On entry, the symmetric positive definite matrix A. * If uplo = PlasmaUpper, the leading n-by-n upper triangular part of A * contains the upper triangular part of the matrix A, and the strictly * lower triangular part of A is not referenced. * If UPLO = 'L', the leading n-by-n lower triangular part of A * contains the lower triangular part of the matrix A, and the strictly * upper triangular part of A is not referenced. 
* On exit, if return value = 0, the factor U or L from * the Cholesky factorization A = U^T*U or A = L*L^T. * * @param[in] lda * The leading dimension of the array A. lda >= max(1,n). * * @param[in,out] pB * On entry, the n-by-nrhs right hand side matrix B. * On exit, if return value = 0, the n-by-nrhs solution matrix X. * * @param[in] ldb * The leading dimension of the array B. ldb >= max(1,n). * ******************************************************************************* * * @retval PlasmaSuccess successful exit * @retval < 0 if -i, the i-th argument had an illegal value * @retval > 0 if i, the leading minor of order i of A is not * positive definite, so the factorization could not * be completed, and the solution has not been computed. * ******************************************************************************* * * @sa plasma_omp_dposv * @sa plasma_cposv * @sa plasma_dposv * @sa plasma_sposv * ******************************************************************************/ int plasma_dposv(plasma_enum_t uplo, int n, int nrhs, double *pA, int lda, double *pB, int ldb) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); return PlasmaErrorNotInitialized; } // Check input arguments. if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) { plasma_error("illegal value of uplo"); return -1; } if (n < 0) { plasma_error("illegal value of n"); return -2; } if (nrhs < 0) { plasma_error("illegal value of nrhs"); return -3; } if (lda < imax(1, n)) { plasma_error("illegal value of lda"); return -5; } if (ldb < imax(1, n)) { plasma_error("illegal value of ldb"); return -7; } // quick return if (imin(n, nrhs) == 0) return PlasmaSuccess; // Tune parameters. if (plasma->tuning) plasma_tune_potrf(plasma, PlasmaRealDouble, n); // Set tiling parameters. int nb = plasma->nb; // Create tile matrices. 
plasma_desc_t A; plasma_desc_t B; int retval; retval = plasma_desc_triangular_create(PlasmaRealDouble, uplo, nb, nb, n, n, 0, 0, n, n, &A); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); return retval; } retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb, n, nrhs, 0, 0, n, nrhs, &B); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&A); return retval; } // Initialize sequence. plasma_sequence_t sequence; retval = plasma_sequence_init(&sequence); // Initialize request. plasma_request_t request; retval = plasma_request_init(&request); // asynchronous block #pragma omp parallel #pragma omp master { // Translate to tile layout. plasma_omp_dtr2desc(pA, lda, A, &sequence, &request); plasma_omp_dge2desc(pB, ldb, B, &sequence, &request); // Call the tile async function. plasma_omp_dposv(uplo, A, B, &sequence, &request); // Translate back to LAPACK layout. plasma_omp_ddesc2tr(A, pA, lda, &sequence, &request); plasma_omp_ddesc2ge(B, pB, ldb, &sequence, &request); } // implicit synchronization // Free matrices in tile layout. plasma_desc_destroy(&A); plasma_desc_destroy(&B); // Return status. int status = sequence.status; return status; } /***************************************************************************//** * * @ingroup plasma_posv * * Solves a symmetric positive definite system of linear equations * using Cholesky factorization. * Non-blocking tile version of plasma_dposv(). * Operates on matrices stored by tiles. * All matrices are passed through descriptors. * All dimensions are taken from the descriptors. * Allows for pipelining of operations at runtime. * ******************************************************************************* * * @param[in] uplo * - PlasmaUpper: Upper triangle of A is stored; * - PlasmaLower: Lower triangle of A is stored. * * @param[in,out] A * On entry, the symmetric positive definite matrix A. 
* If uplo = PlasmaUpper, the leading n-by-n upper triangular part of A
 *          contains the upper triangular part of the matrix A, and the strictly
 *          lower triangular part of A is not referenced.
 *          If UPLO = 'L', the leading n-by-n lower triangular part of A
 *          contains the lower triangular part of the matrix A, and the strictly
 *          upper triangular part of A is not referenced.
 *          On exit, if return value = 0, the factor U or L from
 *          the Cholesky factorization A = U^T*U or A = L*L^T.
 *
 * @param[in,out] B
 *          On entry, the n-by-nrhs right hand side matrix B.
 *          On exit, if return value = 0, the n-by-nrhs solution matrix X.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes).  Check
 *          the sequence->status for errors.
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values.  The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_dposv
 * @sa plasma_omp_cposv
 * @sa plasma_omp_dposv
 * @sa plasma_omp_sposv
 *
 ******************************************************************************/
void plasma_omp_dposv(plasma_enum_t uplo,
                      plasma_desc_t A,
                      plasma_desc_t B,
                      plasma_sequence_t *sequence,
                      plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        // Only record the failure if the handles are usable:
        // plasma_request_fail() must not be handed a NULL sequence/request.
        if (sequence != NULL && request != NULL)
            plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Check sequence and request before any use: the original code passed a
    // possibly-NULL sequence into plasma_request_fail() in several branches.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }
    // Check input arguments.
    if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        // Record the failure (the original branch returned silently,
        // inconsistently with the other argument checks).
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.n == 0 || B.n == 0)
        return;

    // Call the parallel functions.
    // Factor A = L*L^T (or U^T*U), then solve the two triangular systems.
    // For real matrices PlasmaConjTrans is equivalent to a plain transpose.
    plasma_pdpotrf(uplo, A, sequence, request);

    plasma_enum_t trans;
    trans = uplo == PlasmaUpper ? PlasmaConjTrans : PlasmaNoTrans;
    plasma_pdtrsm(PlasmaLeft, uplo, trans, PlasmaNonUnit,
                  1.0, A,
                       B,
                  sequence, request);

    trans = uplo == PlasmaUpper ? PlasmaNoTrans : PlasmaConjTrans;
    plasma_pdtrsm(PlasmaLeft, uplo, trans, PlasmaNonUnit,
                  1.0, A,
                       B,
                  sequence, request);
}
GB_binop__gt_int8.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__gt_int8)
// A.*B function (eWiseMult):       GB (_AemultB_08__gt_int8)
// A.*B function (eWiseMult):       GB (_AemultB_02__gt_int8)
// A.*B function (eWiseMult):       GB (_AemultB_04__gt_int8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__gt_int8)
// A*D function (colscale):         GB (_AxD__gt_int8)
// D*A function (rowscale):         GB (_DxB__gt_int8)
// C+=B function (dense accum):     GB (_Cdense_accumB__gt_int8)
// C+=b function (dense accum):     GB (_Cdense_accumb__gt_int8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__gt_int8)
// C=scalar+B                       GB (_bind1st__gt_int8)
// C=scalar+B'                      GB (_bind1st_tran__gt_int8)
// C=A+scalar                       GB (_bind2nd__gt_int8)
// C=A'+scalar                      GB (_bind2nd_tran__gt_int8)

// C type:   bool
// A type:   int8_t
// A pattern? 0
// B type:   int8_t
// B pattern? 0

// BinaryOp: cij = (aij > bij)

// The macros below parameterize the shared template files (#include'd into
// the function bodies) for this specific operator/type combination.

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
// NOTE(review): GBX appears to be a typed read that honors the iso flag, and
// GBB (used below) an entry-present test for the bitmap case — both defined
// in GB.h; confirm there before relying on these descriptions.
#define GB_GETA(aij,Ax,pA,A_iso) \
    int8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x > y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_GT || GxB_NO_INT8 || GxB_NO_GT_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// This variant does not exist for GT (C is bool, so the dense-accum op set
// below does not apply); the whole definition is compiled out with #if 0.
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__gt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// Stub for this operator: the template is compiled out (#if 0) and the
// function is a no-op that reports success, unless GB_DISABLE is set.
GrB_Info GB (_Cdense_accumB__gt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// Stub for this operator, as above.
GrB_Info GB (_Cdense_accumb__gt_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__gt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__gt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__gt_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // the alpha/beta scalars are only read by the template when
    // is_eWiseUnion is true, so they are only unpacked in that case
    int8_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__gt_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__gt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for GT, so only the #else branch below is compiled.
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__gt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__gt_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__gt_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries absent from the bitmap (Bb == NULL means all present)
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x > bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__gt_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij > y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int8_t aij = GBX (Ax, pA, false) ;  \
    Cx [pC] = (x > aij) ;               \
}

GrB_Info GB (_bind1st_tran__gt_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef GB_ATYPE
    #define GB_ATYPE \
        int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int8_t aij = GBX (Ax, pA, false) ;  \
    Cx [pC] = (aij > y) ;               \
}

GrB_Info GB (_bind2nd_tran__gt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__log2_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function:   GB (_unop_apply__log2_fc32_fc32)
// op(A') function:  GB (_unop_tran__log2_fc32_fc32)

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = aij
// unaryop:  cij = GB_clog2f (aij)

// The macros below parameterize the shared template files for the complex
// base-2 logarithm (GB_clog2f) on single-precision complex values.

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_clog2f (x) ;

// casting (a no-op here: A and C have the same type)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)               \
{                                       \
    /* aij = Ax [pA] */                 \
    GxB_FC32_t aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */     \
    GxB_FC32_t z = aij ;                \
    Cx [pC] = GB_clog2f (z) ;           \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LOG2 || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__log2_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        // dense/sparse/full case: every entry 0..anz-1 is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op degenerates to a parallel memcpy (not this operator)
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_clog2f (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_clog2f (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__log2_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the per-entry work is done by GB_CAST_OP inside the shared template
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
symv_x_bsr_u_lo.c
#include "alphasparse/kernel.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "alphasparse/opt.h"
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include "alphasparse/util.h"

/*
 * y := alpha * A * x + beta * y for a symmetric BSR matrix of which only the
 * lower triangle is stored and whose diagonal is unit (symv_x_bsr_u_lo).
 *
 * Each stored strictly-lower block is applied twice (directly and
 * transposed); the unit diagonal contributes alpha * x[i] to every y[i]
 * during the final reduction.
 *
 * BUG FIX: the COLUMN_MAJOR branch previously iterated br over [0, b_rows)
 * inside the omp parallel region instead of the thread's own partition
 * [local_m_s, local_m_e), so every thread accumulated the WHOLE matrix and
 * the cross-thread reduction scaled the matrix contribution by thread_num.
 * It now uses the same partition as the ROW_MAJOR branch.
 * Also removed unused locals (m, n, val_orig, temp_orig).
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha,
                           const ALPHA_SPMAT_BSR *A,
                           const ALPHA_Number *x,
                           const ALPHA_Number beta,
                           ALPHA_Number *y)
{
    const ALPHA_INT thread_num = alpha_get_thread_num();
    const ALPHA_INT bs = A->block_size;
    const ALPHA_INT bs2 = bs * bs;
    ALPHA_INT b_rows = A->rows;
    ALPHA_INT b_cols = A->cols;
    /* a symmetric matrix must be square (in block dimensions) */
    if (b_rows != b_cols)
        return ALPHA_SPARSE_STATUS_INVALID_VALUE;

    /* nnz-balanced contiguous block-row range per thread:
     * [partition[tid], partition[tid + 1]) */
    ALPHA_INT partition[thread_num + 1];
    balanced_partition_row_by_nnz(A->rows_end, b_rows, thread_num, partition);

    /* Per-thread dense accumulators: the transposed image of a lower block
     * scatters into rows owned by OTHER threads, so each thread writes only
     * its private vector; the vectors are reduced after the parallel region. */
    ALPHA_Number **tmp = (ALPHA_Number **)malloc(sizeof(ALPHA_Number *) * thread_num);

#ifdef _OPENMP
#pragma omp parallel num_threads(thread_num)
#endif
    {
        const ALPHA_INT tid = alpha_get_thread_id();
        const ALPHA_INT local_m_s = partition[tid];
        const ALPHA_INT local_m_e = partition[tid + 1];
        tmp[tid] = (ALPHA_Number *)malloc(sizeof(ALPHA_Number) * b_rows * bs);
        memset(tmp[tid], 0, sizeof(ALPHA_Number) * b_rows * bs);
        if (A->block_layout == ALPHA_SPARSE_LAYOUT_ROW_MAJOR)
        {
            for (ALPHA_INT br = local_m_s; br < local_m_e; br++)
            {
                ALPHA_INT row = br * bs;
                ALPHA_INT block_start = A->rows_start[br], block_end = A->rows_end[br];
                /* only blocks with column index <= br belong to the stored
                 * lower triangle; alpha_upper_bound returns one-past-last */
                ALPHA_INT lower_end = alpha_upper_bound(&A->col_indx[block_start], &A->col_indx[block_end], br) - A->col_indx;
                for (ALPHA_INT ai = block_start; ai < lower_end; ai++)
                {
                    ALPHA_INT bc = A->col_indx[ai];
                    ALPHA_INT col = bc * bs;
                    ALPHA_INT a0_idx = ai * bs2;
                    if (bc == br)
                    {
                        /* diagonal block: use only its strictly-lower entries;
                         * the unit diagonal itself is added in the reduction */
                        for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
                        {
                            for (ALPHA_INT b_col = 0; b_col < b_row; b_col++)
                            {
                                alpha_madde(tmp[tid][b_row + row], A->values[a0_idx + b_row * bs + b_col], x[col + b_col]);
                                alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_row * bs + b_col], x[row + b_row]);
                            }
                        }
                    }
                    else
                    {
                        /* off-diagonal lower block: apply it and its transpose */
                        for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
                        {
                            for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
                            {
                                alpha_madde(tmp[tid][b_row + row], A->values[a0_idx + b_row * bs + b_col], x[col + b_col]);
                                alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_row * bs + b_col], x[row + b_row]);
                            }
                        }
                    }
                }
            }
        }
        else if (A->block_layout == ALPHA_SPARSE_LAYOUT_COLUMN_MAJOR)
        {
            /* BUG FIX: was `for (br = 0; br < b_rows; br++)`, ignoring the
             * thread partition (see header comment) */
            for (ALPHA_INT br = local_m_s; br < local_m_e; br++)
            {
                ALPHA_INT row = br * bs;
                ALPHA_INT block_start = A->rows_start[br], block_end = A->rows_end[br];
                ALPHA_INT lower_end = alpha_upper_bound(&A->col_indx[block_start], &A->col_indx[block_end], br) - A->col_indx;
                for (ALPHA_INT ai = block_start; ai < lower_end; ai++)
                {
                    ALPHA_INT bc = A->col_indx[ai];
                    ALPHA_INT col = bc * bs;
                    ALPHA_INT a0_idx = ai * bs2;
                    if (bc == br)
                    {
                        /* diagonal block, column-major storage: entry (b_row,
                         * b_col) lives at b_col * bs + b_row; use the strictly
                         * lower part only */
                        for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
                        {
                            for (ALPHA_INT b_row = b_col + 1; b_row < bs; b_row++)
                            {
                                alpha_madde(tmp[tid][b_row + row], A->values[a0_idx + b_col * bs + b_row], x[col + b_col]);
                                alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_col * bs + b_row], x[row + b_row]);
                            }
                        }
                    }
                    else
                    {
                        for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
                        {
                            for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
                            {
                                alpha_madde(tmp[tid][b_row + row], A->values[a0_idx + b_col * bs + b_row], x[col + b_col]);
                                alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_col * bs + b_row], x[row + b_row]);
                            }
                        }
                    }
                }
            }
        }
    }

    /* Reduce per-thread partials and apply the scalars:
     * y[i] = beta * y[i] + alpha * x[i] (unit diagonal) + alpha * sum_j tmp[j][i] */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < b_cols * bs; ++i)
    {
        ALPHA_Number tmp_y;
        alpha_setzero(tmp_y);
        for (ALPHA_INT j = 0; j < thread_num; ++j)
        {
            alpha_add(tmp_y, tmp_y, tmp[j][i]);
        }
        alpha_mul(y[i], y[i], beta);
        alpha_madde(y[i], x[i], alpha);
        alpha_madde(y[i], tmp_y, alpha);
    }

#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < thread_num; ++i)
    {
        free(tmp[i]);
    }
    free(tmp);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
SpatialClassNLLCriterion.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialClassNLLCriterion.c"
#else

// Shape/arity validation shared by forward and backward: target must be a 3D
// batch of spatial targets (N x H x W), input a 4D batch (N x C x H x W),
// weights (if given) one entry per class, and the spatial dims must agree.
#define INITIAL_CHECK                                                     \
  THArgCheck(THIndexTensor_(nDimensionLegacyAll)(target) == 3, 3,         \
              "only batches of spatial targets supported (3D tensors)"    \
              " but got targets of dimension: %d",                        \
              THIndexTensor_(nDimensionLegacyAll)(target));               \
  THArgCheck(THTensor_(nDimensionLegacyAll)(input) == 4, 2,               \
             "only batches of spatial inputs supported (4D tensors), "    \
             "but got input of dimension: %d", THTensor_(nDimensionLegacyAll)(input)); \
  if (weights && THTensor_(nElement)(weights) != THTensor_(size)(input, 1)) { \
    THError("weight tensor should be defined either for all or no classes"); \
  }                                                                       \
                                                                          \
  {                                                                       \
    int64_t input0 = THTensor_(size)(input, 0);                           \
    int64_t input1 = THTensor_(size)(input, 1);                           \
    int64_t input2 = THTensor_(size)(input, 2);                           \
    int64_t input3 = THTensor_(size)(input, 3);                           \
    int64_t target0 = THIndexTensor_(size)(target, 0);                    \
    int64_t target1 = THIndexTensor_(size)(target, 1);                    \
    int64_t target2 = THIndexTensor_(size)(target, 2);                    \
    THAssertMsg(input0 == target0 && input2 == target1 && input3 == target2, \
                "size mismatch (got input: %ldx%ldx%ldx%ld, target: %ldx%ldx%ld)", \
                input0, input1, input2, input3, target0, target1, target2); \
  }

// For reduction == None, gradOutput must match target's 3D shape exactly.
#define GRADOUTPUT_SHAPE_CHECK                                            \
  THArgCheck(THTensor_(nDimensionLegacyAll)(gradOutput) == 3, 3,          \
             "gradOutput must have same dimension as target (3)"          \
             " but got dimension: %d",                                    \
             THTensor_(nDimensionLegacyAll)(gradOutput));                 \
  {                                                                       \
    int64_t gradOutput0 = THTensor_(size)(gradOutput, 0);                 \
    int64_t gradOutput1 = THTensor_(size)(gradOutput, 1);                 \
    int64_t gradOutput2 = THTensor_(size)(gradOutput, 2);                 \
    int64_t target0 = THIndexTensor_(size)(target, 0);                    \
    int64_t target1 = THIndexTensor_(size)(target, 1);                    \
    int64_t target2 = THIndexTensor_(size)(target, 2);                    \
    THAssertMsg(                                                          \
        gradOutput0 == target0 && gradOutput1 == target1 && gradOutput2 == target2, \
        "size mismatch (got gradOutput: %ldx%ldx%ld, target: %ldx%ldx%ld)", \
        gradOutput0, gradOutput1, gradOutput2, target0, target1, target2); \
  }

// Forward pass of the spatial class-NLL criterion.
// Per element: loss = -weight[t] * input[b][t][h][w], where t is the target
// class; elements equal to ignore_index contribute zero loss and zero weight.
// With reduction == None, output is a per-pixel (N x H x W) loss map;
// otherwise output and total_weight are scalars, and ElementwiseMean divides
// the summed loss by the summed weight.
void THNN_(SpatialClassNLLCriterion_updateOutput)(
          THNNState *state,
          THTensor *input,
          THIndexTensor *target,
          THTensor *output,
          int64_t reduction,
          THTensor *weights,
          THTensor *total_weight,
          int64_t ignore_index)
{
  INITIAL_CHECK;
  THTensor_(resize1d)(output, 1);
  THTensor_(resize1d)(total_weight, 1);
  // shift to the same 0-based space as the adjusted targets below
  ignore_index -= TH_INDEX_BASE;

  if (reduction == Reduction::None) {
    int64_t batch_size = THTensor_(size)(input, 0);
    int64_t H = THTensor_(size)(input, 2);
    int64_t W = THTensor_(size)(input, 3);

    THTensor_(resize3d)(output, batch_size, H, W);

    int64_t b, h, w;
    #pragma omp parallel for private(b, h, w)
    for (b = 0; b < batch_size; b++) {
      for (h = 0; h < H; h++) {
        for (w = 0; w < W; w++) {
          int64_t cur_target = (int64_t)THIndexTensor_(get3d)(target, b, h, w) - TH_INDEX_BASE;

          if (cur_target == ignore_index) {
            // ignored pixels get exactly zero loss
            THTensor_(fastSet3d)(output, b, h, w, 0.0f);
            continue;
          }
          real value = THTensor_(fastGet4d)(input, b, cur_target, h, w);
          real weight = weights ? THTensor_(fastGetLegacy1dNoScalars)(weights, cur_target) : 1.0f;
          THTensor_(fastSet3d)(output, b, h, w, -value * weight);
        }
      }
    }
    return;
  }

  // reduced path: work on contiguous raw buffers
  input = THTensor_(newContiguous)(input);
  target = THIndexTensor_(newContiguous)(target);
  weights = weights ? THTensor_(newContiguous)(weights) : NULL;

  real *input_data = THTensor_(data)(input);
  THIndex_t *target_data = THIndexTensor_(data)(target);
  real *weights_data = weights ? THTensor_(data)(weights) : NULL;
  real *output_data = THTensor_(data)(output);
  real *total_weight_data = THTensor_(data)(total_weight);

  int64_t batch_size = THTensor_(size)(input, 0);
  int64_t n_classes = THTensor_(size)(input, 1);
  int64_t map_size = THTensor_(size)(input, 2) * THTensor_(size)(input, 3);
  int64_t sample_size = map_size * n_classes;

  real total_weight_acc = 0;
  real output_acc = 0;
  for (int b = 0; b < batch_size; b++) {
    for (int elem = 0; elem < map_size; elem++) {
      int cur_target = target_data[b * map_size + elem] - TH_INDEX_BASE;
      if (cur_target == ignore_index) continue;
      THAssert(cur_target >= 0 && cur_target < n_classes);

      real cur_weight = weights ? weights_data[cur_target] : 1.0f;
      total_weight_acc += cur_weight;
      output_acc -= input_data[b * sample_size + cur_target * map_size + elem] * cur_weight;
    }
  }
  *total_weight_data = total_weight_acc;
  *output_data = output_acc;

  if (reduction == Reduction::ElementwiseMean && *total_weight_data)
    *output_data /= *total_weight_data;

  THTensor_(free)(input);
  THIndexTensor_(free)(target);
  if (weights)
    THTensor_(free)(weights);
}

// Backward pass: gradInput is zero everywhere except at the target class of
// each (b, h, w), where it is -weight[t] * gradOutput (divided by the total
// weight for ElementwiseMean reduction).  Elements equal to ignore_index
// receive zero gradient.
void THNN_(SpatialClassNLLCriterion_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THIndexTensor *target,
          THTensor *gradOutput,
          THTensor *gradInput,
          int64_t reduction,
          THTensor *weights,
          THTensor *total_weight,
          int64_t ignore_index)
{
  INITIAL_CHECK;
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);
  THArgCheck(THTensor_(isContiguous)(gradInput), 4,
              "gradInput must be contiguous");
  THNN_CHECK_SHAPE(input, gradInput);
  ignore_index -= TH_INDEX_BASE;

  if (reduction == Reduction::None) {
    GRADOUTPUT_SHAPE_CHECK;

    int64_t batch_size = THTensor_(size)(input, 0);
    int64_t H = THTensor_(size)(input, 2);
    int64_t W = THTensor_(size)(input, 3);

    int64_t b, h, w;
    #pragma omp parallel for private(b, h, w)
    for (b = 0; b < batch_size; b++) {
      for (h = 0; h < H; h++) {
        for (w = 0; w < W; w++) {
          int64_t cur_target = (int64_t)THIndexTensor_(get3d)(target, b, h, w) - TH_INDEX_BASE;
          if (cur_target == ignore_index) {
            continue;
          }
          real value = -(weights ? THTensor_(fastGetLegacy1dNoScalars)(weights, cur_target) : 1.0f);
          real gradOutput_value = THTensor_(fastGet3d)(gradOutput, b, h, w);
          THTensor_(fastSet4d)(gradInput, b, cur_target, h, w, value * gradOutput_value);
        }
      }
    }
    return;
  }

  // reduced path: gradOutput is a single scalar
  THNN_CHECK_DIM_SIZE(gradOutput, 1, 0, 1);

  real *total_weight_data = THTensor_(data)(total_weight);
  // total weight of zero means every element was ignored: gradient stays zero
  if (*total_weight_data <= 0)
    return;

  target = THIndexTensor_(newContiguous)(target);
  weights = weights ? THTensor_(newContiguous)(weights) : NULL;

  THIndex_t *target_data = THIndexTensor_(data)(target);
  real *weights_data = weights ? THTensor_(data)(weights) : NULL;
  real *gradInput_data = THTensor_(data)(gradInput);

  int64_t batch_size = THTensor_(size)(input, 0);
  int64_t n_classes = THTensor_(size)(input, 1);
  int64_t map_size = THTensor_(size)(input, 2) * THTensor_(size)(input, 3);
  int64_t sample_size = map_size * n_classes;

  real normalize = (reduction == Reduction::ElementwiseMean) ? *total_weight_data : 1.0f;

  int b;
  #pragma omp parallel for
  for (b = 0; b < batch_size; b++) {
    int elem;
    for (elem = 0; elem < map_size; elem++) {
      int cur_target = target_data[b * map_size + elem] - TH_INDEX_BASE;
      if (cur_target == ignore_index) continue;
      THAssert(cur_target >= 0 && cur_target < n_classes);

      int index = b * sample_size + cur_target * map_size + elem;
      gradInput_data[index] =
        -(weights ? weights_data[cur_target] : 1.0f) / normalize *
        THTensor_(fastGetLegacy1dNoScalars)(gradOutput, 0);
    }
  }

  THIndexTensor_(free)(target);
  if (weights)
    THTensor_(free)(weights);
}

// NOTE(review): GRADOUTPUT_SHAPE_CHECK is intentionally(?) not #undef'd here,
// only INITIAL_CHECK is — confirm against the other generic/*.c files.
#undef INITIAL_CHECK

#endif
relic_core.c
/*
 * RELIC is an Efficient LIbrary for Cryptography
 * Copyright (C) 2007-2017 RELIC Authors
 *
 * This file is part of RELIC. RELIC is legal property of its developers,
 * whose names are not listed here. Please refer to the COPYRIGHT file
 * for contact information.
 *
 * RELIC is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * RELIC is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with RELIC. If not, see <http://www.gnu.org/licenses/>.
 */

/**
 * @file
 *
 * Implementation of the library basic functions.
 *
 * @ingroup relic
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#include <relic_core.h>
#include <relic_rand.h>
#include <relic_types.h>
#include <relic_err.h>
#include <relic_arch.h>
#include <relic_fp.h>
#include <relic_fb.h>
#include <relic_ep.h>
#include <relic_eb.h>
#include <relic_cp.h>
#include <relic_pp.h>

/*============================================================================*/
/* Public definitions                                                         */
/*============================================================================*/

/**
 * If multi-threading is enabled, assigns each thread a local copy of the data.
 */
#if MULTI == PTHREAD
#define thread __thread
#else
#define thread /* */
#endif

/**
 * Default library context.
 */
thread ctx_t first_ctx;

/**
 * Active library context.
 */
thread ctx_t *core_ctx = NULL;

#if MULTI != RELIC_NONE
/*
 * Initializer function to call for every thread's context
 */
void (*core_thread_initializer)(void* init_ptr) = NULL;
void* core_init_ptr = NULL;
#endif

#if MULTI == OPENMP
/* with OpenMP there is no __thread storage class; make both thread-private */
#pragma omp threadprivate(first_ctx, core_ctx)
#endif

/*
 * Initializes the library: binds the calling thread to the default context
 * (unless one was already set via core_set), resets error/status state, and
 * initializes every module that was compiled in.  Returns STS_OK on success
 * or STS_ERR if any module initializer throws.
 */
int core_init(void) {
	if (core_ctx == NULL) {
		/* no context installed yet: use the statically allocated default */
		core_ctx = &(first_ctx);
	}

#if defined(CHECK) && defined(TRACE)
	core_ctx->trace = 0;
#endif

#ifdef CHECK
	/* map error codes to their human-readable messages */
	core_ctx->reason[ERR_NO_MEMORY] = MSG_NO_MEMORY;
	core_ctx->reason[ERR_NO_PRECI] = MSG_NO_PRECI;
	core_ctx->reason[ERR_NO_FILE] = MSG_NO_FILE;
	core_ctx->reason[ERR_NO_READ] = MSG_NO_READ;
	core_ctx->reason[ERR_NO_VALID] = MSG_NO_VALID;
	core_ctx->reason[ERR_NO_BUFFER] = MSG_NO_BUFFER;
	core_ctx->reason[ERR_NO_FIELD] = MSG_NO_FIELD;
	core_ctx->reason[ERR_NO_CURVE] = MSG_NO_CURVE;
	core_ctx->reason[ERR_NO_CONFIG] = MSG_NO_CONFIG;
	core_ctx->last = NULL;
#endif /* CHECK */

#if ALLOC == STATIC
	core_ctx->next = 0;
#endif

#ifdef OVERH
	core_ctx->over = 0;
#endif

	core_ctx->code = STS_OK;

	/* module initialization order: arch and rand first, then the
	 * arithmetic/curve/pairing modules that were compiled in */
	TRY {
		arch_init();
		rand_init();
#ifdef WITH_FP
		fp_prime_init();
#endif
#ifdef WITH_FB
		fb_poly_init();
#endif
#ifdef WITH_FT
		ft_poly_init();
#endif
#ifdef WITH_EP
		ep_curve_init();
#endif
#ifdef WITH_EB
		eb_curve_init();
#endif
#ifdef WITH_ED
		ed_curve_init();
#endif
#ifdef WITH_PP
		pp_map_init();
#endif
	}
	CATCH_ANY {
		return STS_ERR;
	}

	return STS_OK;
}

/*
 * Tears down every compiled-in module (reverse of core_init) and detaches the
 * calling thread from its context.  After this call core_get() returns NULL
 * until core_init or core_set is called again.
 */
int core_clean(void) {
	rand_clean();
#ifdef WITH_FP
	fp_prime_clean();
#endif
#ifdef WITH_FB
	fb_poly_clean();
#endif
#ifdef WITH_FT
	ft_poly_clean();
#endif
#ifdef WITH_EP
	ep_curve_clean();
#endif
#ifdef WITH_EB
	eb_curve_clean();
#endif
#ifdef WITH_ED
	ed_curve_clean();
#endif
#ifdef WITH_PP
	pp_map_clean();
#endif
	arch_clean();
	core_ctx = NULL;
	return STS_OK;
}

/*
 * Returns the active context of the calling thread.  In multi-threaded
 * builds, lazily runs the registered per-thread initializer (if any) the
 * first time a thread without a context asks for one.
 */
ctx_t *core_get(void) {
#if MULTI != RELIC_NONE
	if (core_ctx == NULL && core_thread_initializer != NULL) {
		core_thread_initializer(core_init_ptr);
	}
#endif
	return core_ctx;
}

/*
 * Installs ctx as the calling thread's active context (caller keeps
 * ownership of the storage).
 */
void core_set(ctx_t *ctx) {
	core_ctx = ctx;
}

#if MULTI != RELIC_NONE
/*
 * Registers a callback (and its opaque argument) that core_get() invokes to
 * set up a context for threads that have none yet.
 */
void core_set_thread_initializer(void(*init)(void *init_ptr), void* init_ptr) {
	core_thread_initializer = init;
	core_init_ptr = init_ptr;
}
#endif
kt_sbucket.c
#include "kt_sbucket.h"
#include "yche/log.h"

/*
 * Moves an edge to the doubly-linked list matching its new support value.
 * `support` < 0 (already deleted) or an unchanged support is a no-op.
 * Supports are clamped from below at (ktruss - 3), the current peeling
 * threshold, so the bottom list holds ALL edges that must be peeled next.
 */
void sbucket_update_edge(
    support_bucket_t * const sbucket,
    int64_t const edge_id,
    int32_t const support,
    int32_t const ktruss)
{
  slist_s * const slist = sbucket->slist;

  /* no-op if edge has already been deleted or updated */
  if(support < 0 || support == slist[edge_id].support) {
    return;
  }

  /* peel starting at (ktruss - 3) */
  int32_t const min_sup = ktruss - 3;

  ssize_t * shead = sbucket->list_head;

  /*
   * NOTE: The logic of selecting new/old support instead of what is actually
   * given is that we are ultimately interested in the smallest bucket
   * having ALL edges which need to be peeled. So bottom-occupied list actually
   * contains all to-be-peeled edges.
   */

  /* remove edge_id from current support-bucket */
  int32_t const old_sup = gk_max(slist[edge_id].support, min_sup);
  /* if edge_id is the head of the list */
  if(shead[old_sup] == edge_id) {
    shead[old_sup] = slist[edge_id].next_eid;
    slist[slist[edge_id].next_eid].prev_eid = -1;
  } else {
    /* unlink from the middle; slist[-1] is a valid sentinel (see alloc) */
    slist[slist[edge_id].prev_eid].next_eid = slist[edge_id].next_eid;
    slist[slist[edge_id].next_eid].prev_eid = slist[edge_id].prev_eid;
  }

  /* now add edge_id to the head of the new list */
  int32_t const new_sup = gk_max(support, min_sup);
  slist[edge_id].support = support;
  slist[edge_id].prev_eid = -1;
  slist[edge_id].next_eid = shead[new_sup];
  slist[shead[new_sup]].prev_eid = edge_id;
  shead[new_sup] = edge_id;
}

/*
 * Returns the number of edges currently in this bucket's list for the given
 * support value (0 if the support value is out of range). O(list length).
 */
int64_t sbucket_count_support_size(
    support_bucket_t const * const sbucket,
    int32_t const support)
{
  if(support >= sbucket->nsupports) {
    return 0;
  }

  /* traverse linked list to count edges */
  int64_t nedges = 0;
  ssize_t e_id = sbucket->list_head[support];
  while(e_id != -1) {
    ++nedges;
    e_id = sbucket->slist[e_id].next_eid;
  }
  return nedges;
}

/*
 * Gathers, in parallel, every edge with the given support value from all
 * thread-buckets into `frontier`, emptying the corresponding lists, and
 * returns the total number of edges written. `bucket_sizes` is turned into
 * a prefix sum (MAKECSR) so each bucket writes a disjoint slice.
 * NOTE(review): all buckets reset the shared slist[-1] sentinel to the same
 * values, a benign same-value write from multiple threads — confirm this is
 * intentional.
 */
int64_t sbucket_get_frontier(
    support_bucket_t * const sbuckets,
    int32_t const support,
    int64_t * frontier)
{
  int const nbuckets = omp_get_max_threads() * KT_BUCKETS_PER_THREAD;
  /* +1 slot so the prefix sum leaves the grand total at index nbuckets */
  int64_t bucket_sizes[1 + KT_MAX_THREADS * KT_BUCKETS_PER_THREAD];

  #pragma omp parallel
  {
    /* first get size of each bucket */
    #pragma omp for schedule(dynamic, 1)
    for(int b=0; b < nbuckets; ++b) {
      bucket_sizes[b] = sbucket_count_support_size(&(sbuckets[b]), support);
    }

    /* prefix sum to allow parallel writes */
    #pragma omp single
    {
      int b;
      MAKECSR(b, nbuckets, bucket_sizes);
    }

    /* now copy data into frontier buffer */
    #pragma omp for schedule(dynamic, 1)
    for(int b=0; b < nbuckets; ++b) {
      /* traverse list and fill buffer */
      int64_t * buffer = &(frontier[bucket_sizes[b]]);
      int64_t edge_ptr = 0;
      ssize_t e_id = sbuckets[b].list_head[support];
      while(e_id != -1) {
        buffer[edge_ptr++] = e_id;
        e_id = sbuckets[b].slist[e_id].next_eid;
      }

      /* We are deleting all edges in bucket, so update head of list. */
      sbuckets[b].list_head[support] = -1;
      sbuckets[b].slist[-1].prev_eid = -1;
      sbuckets[b].slist[-1].next_eid = -1;
    } /* foreach bucket */
  } /* end omp parallel */

  return bucket_sizes[nbuckets];
}

/*
 * Copies the edge IDs of one bucket's list for `support` into `edge_ids`,
 * without modifying the list. Caller must size the buffer using
 * sbucket_count_support_size().
 */
void sbucket_fill_edges(
    support_bucket_t const * const sbucket,
    int32_t const support,
    int64_t * const restrict edge_ids)
{
  if(support >= sbucket->nsupports) {
    return;
  }

  /* traverse linked list and fill buffer */
  int64_t edge_ptr = 0;
  ssize_t e_id = sbucket->list_head[support];
  while(e_id != -1) {
    edge_ids[edge_ptr++] = e_id;
    e_id = sbucket->slist[e_id].next_eid;
  }
}

/*
 * Allocates one bucket per (thread x KT_BUCKETS_PER_THREAD), all sharing a
 * single global slist indexed by edge ID, then distributes every edge into
 * its owner bucket's list for its initial support value. The slist is
 * allocated with one extra leading element so that index -1 is a valid
 * sentinel node.
 */
support_bucket_t * sbucket_alloc(
    edge_t const * const edges,
    int32_t const * const supports,
    int64_t const global_nedges,
    thread_ws * * thd_ws)
{
  /* allocate buckets */
  int const nbuckets = omp_get_max_threads() * KT_BUCKETS_PER_THREAD;
  support_bucket_t * sbuckets = gk_malloc(nbuckets * sizeof(*sbuckets),
      "sbuckets");

  int32_t const nsupports = max_elem(supports, global_nedges) + 1;
  log_debug("nsupport: %d", nsupports);

  /*
   * It is easier to have a single global slist that the various buckets
   * point into. This allows us to avoid any local <-> global mappings of
   * edge IDs.
   */
  slist_s * big_slist = gk_malloc((global_nedges+1)*sizeof(*big_slist),
      "big_slist");
  par_memset(big_slist, 0, (global_nedges+1) * sizeof(*big_slist));
  ++big_slist; /* +1 to allow slist[-1] to be valid */

  /* allocate each thread-bucket */
  #pragma omp parallel for schedule(static, 1)
  for(int bucket=0; bucket < nbuckets; ++bucket) {
    support_bucket_t * sbucket = &(sbuckets[bucket]);

    sbucket->nsupports = nsupports;
    sbucket->nowned_edges = 0;
    sbucket->slist = big_slist;
    sbucket->list_head = gk_malloc(sbucket->nsupports *
        sizeof(*sbucket->list_head), "list_head");

    /* -1 marks an empty list */
    ssize_t * const shead = sbucket->list_head;
    for(int32_t s=0; s < sbucket->nsupports; ++s) {
      shead[s] = -1;
    }
  }

  /* go over all edges and assign to support-buckets */
  for(int64_t e=0; e < global_nedges; ++e) {
    int64_t const bucket = map_edge_to_bucket(e, thd_ws[0]);
    support_bucket_t * sbucket = &(sbuckets[bucket]);
    slist_s * slist = sbucket->slist;
    ssize_t * const shead = sbucket->list_head;

    int32_t const sup = supports[e];

    /* fill data */
    slist[e].prev_eid = -1;
    slist[e].next_eid = shead[sup];
    slist[e].support = sup;

    /* update doubly-linked list */
    if(shead[sup] != -1) {
      slist[shead[sup]].prev_eid = e;
    }
    shead[sup] = e;

    ++sbucket->nowned_edges;
  } /* foreach edge */

  return sbuckets;
}

/*
 * Frees a bucket. NOTE(review): as the XXX below says, this only releases
 * ONE bucket's list_head and the shared slist, and then frees the pointer
 * itself — it does not iterate the nbuckets allocated by sbucket_alloc().
 * Confirm the intended teardown before relying on this.
 */
void sbucket_free(
    support_bucket_t * sbucket)
{
  /* undo the ++big_slist offset before freeing the shared slist */
  --(sbucket->slist);
  gk_free((void **) &(sbucket->slist), LTERM);

  /* XXX this is all wrong and does not account for multi buckets... */
  gk_free((void **) &sbucket->list_head, LTERM);
  gk_free((void **) &sbucket, LTERM);
}
GB_emult_04.c
//------------------------------------------------------------------------------ // GB_emult_04: C<M>= A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // C<M>= A.*B, M sparse/hyper, A and B bitmap/full. C has the same sparsity // structure as M, and its pattern is a subset of M. // ------------------------------------------ // C <M>= A .* B // ------------------------------------------ // sparse sparse bitmap bitmap (method: 04) // sparse sparse bitmap full (method: 04) // sparse sparse full bitmap (method: 04) // sparse sparse full full (method: 04) // TODO: this function can also do eWiseAdd, just as easily. // Just change the "&&" to "||" in the GB_emult_04_template. // If A and B are both full, eadd and emult are identical. 
#include "GB_ewise.h"
#include "GB_emult.h"
#include "GB_binop.h"
#include "GB_unused.h"
#ifndef GBCOMPACT
#include "GB_binop__include.h"
#endif

#define GB_FREE_WORKSPACE                   \
{                                           \
    GB_WERK_POP (Work, int64_t) ;           \
    GB_WERK_POP (M_ek_slicing, int64_t) ;   \
}

#define GB_FREE_ALL             \
{                               \
    GB_FREE_WORKSPACE ;         \
    GB_phbix_free (C) ;         \
}

GrB_Info GB_emult_04        // C<M>=A.*B, M sparse/hyper, A and B bitmap/full
(
    GrB_Matrix C,           // output matrix, static header
    const GrB_Type ctype,   // type of output matrix C
    const bool C_is_csc,    // format of output matrix C
    const GrB_Matrix M,     // sparse/hyper, not NULL
    const bool Mask_struct, // if true, use only the structure of M
    bool *mask_applied,     // if true, the mask was applied
    const GrB_Matrix A,     // input A matrix (bitmap/full)
    const GrB_Matrix B,     // input B matrix (bitmap/full)
    const GrB_BinaryOp op,  // op to perform C = op (A,B)
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    ASSERT (C != NULL && C->static_header) ;

    ASSERT_MATRIX_OK (M, "M for emult_04", GB0) ;
    ASSERT_MATRIX_OK (A, "A for emult_04", GB0) ;
    ASSERT_MATRIX_OK (B, "B for emult_04", GB0) ;
    ASSERT_BINARYOP_OK (op, "op for emult_04", GB0) ;

    ASSERT (GB_IS_SPARSE (M) || GB_IS_HYPERSPARSE (M)) ;
    ASSERT (!GB_PENDING (M)) ;
    ASSERT (GB_JUMBLED_OK (M)) ;
    ASSERT (!GB_ZOMBIES (M)) ;

    ASSERT (GB_IS_BITMAP (A) || GB_IS_FULL (A) || GB_as_if_full (A)) ;
    ASSERT (GB_IS_BITMAP (B) || GB_IS_FULL (B) || GB_as_if_full (B)) ;

    // C inherits M's sparsity (sparse or hypersparse)
    int C_sparsity = GB_sparsity (M) ;

    GBURBLE ("emult_04:(%s<%s>=%s.*%s) ",
        GB_sparsity_char (C_sparsity),
        GB_sparsity_char_matrix (M),
        GB_sparsity_char_matrix (A),
        GB_sparsity_char_matrix (B)) ;

    //--------------------------------------------------------------------------
    // declare workspace
    //--------------------------------------------------------------------------

    GB_WERK_DECLARE (Work, int64_t) ;
    int64_t *restrict Wfirst = NULL ;
    int64_t *restrict Wlast = NULL ;
    int64_t *restrict Cp_kfirst = NULL ;
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;

    //--------------------------------------------------------------------------
    // get M, A, and B
    //--------------------------------------------------------------------------

    const int64_t *restrict Mp = M->p ;
    const int64_t *restrict Mh = M->h ;
    const int64_t *restrict Mi = M->i ;
    // Mx is NULL for a structural mask: every present entry counts as true
    const GB_void *restrict Mx = (Mask_struct) ? NULL : (GB_void *) M->x ;
    const int64_t vlen = M->vlen ;
    const int64_t vdim = M->vdim ;
    const int64_t nvec = M->nvec ;
    const int64_t mnz = GB_nnz (M) ;
    const size_t msize = M->type->size ;

    const int8_t *restrict Ab = A->b ;
    const int8_t *restrict Bb = B->b ;

    //--------------------------------------------------------------------------
    // check if C is iso and compute its iso value if it is
    //--------------------------------------------------------------------------

    const size_t csize = ctype->size ;
    GB_void cscalar [GB_VLA(csize)] ;
    bool C_iso = GB_iso_emult (cscalar, ctype, A, B, op) ;

    //--------------------------------------------------------------------------
    // allocate C->p and C->h
    //--------------------------------------------------------------------------

    GB_OK (GB_new (&C, true, // sparse or hyper (same as M), static header
        ctype, vlen, vdim, GB_Ap_calloc, C_is_csc,
        C_sparsity, M->hyper_switch, nvec, Context)) ;
    int64_t *restrict Cp = C->p ;

    //--------------------------------------------------------------------------
    // slice the mask matrix M
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int M_ntasks, M_nthreads ;
    GB_SLICE_MATRIX (M, 8, chunk) ;

    //--------------------------------------------------------------------------
    // allocate workspace
    //--------------------------------------------------------------------------

    // three per-task arrays carved out of one allocation (see below)
    GB_WERK_PUSH (Work, 3*M_ntasks, int64_t) ;
    if (Work == NULL)
    {
        // out of memory
        GB_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }
    Wfirst    = Work ;
    Wlast     = Work + M_ntasks ;
    Cp_kfirst = Work + M_ntasks * 2 ;

    //--------------------------------------------------------------------------
    // count entries in C
    //--------------------------------------------------------------------------

    // This phase is very similar to GB_select_phase1 (GB_ENTRY_SELECTOR).

    // TODO: if M is structural and A and B are both full, then C has exactly
    // the same pattern as M, the first phase can be skipped.

    int tid ;
    #pragma omp parallel for num_threads(M_nthreads) schedule(dynamic,1)
    for (tid = 0 ; tid < M_ntasks ; tid++)
    {
        int64_t kfirst = kfirst_Mslice [tid] ;
        int64_t klast  = klast_Mslice  [tid] ;
        // Wfirst/Wlast hold counts for the task's partial first/last vectors
        Wfirst [tid] = 0 ;
        Wlast  [tid] = 0 ;
        for (int64_t k = kfirst ; k <= klast ; k++)
        {
            // count the entries in C(:,j)
            int64_t j = GBH (Mh, k) ;
            int64_t pstart = j * vlen ;     // start of A(:,j) and B(:,j)
            int64_t pM, pM_end ;
            GB_get_pA (&pM, &pM_end, tid, k,
                kfirst, klast, pstart_Mslice, Mp, vlen) ;
            int64_t cjnz = 0 ;
            for ( ; pM < pM_end ; pM++)
            {
                bool mij = GB_mcast (Mx, pM, msize) ;
                if (mij)
                {
                    // keep M(i,j) only if both A(i,j) and B(i,j) are present
                    int64_t i = Mi [pM] ;
                    cjnz += (GBB (Ab, pstart + i)
                        &&   // TODO: for GB_add, use || instead
                             GBB (Bb, pstart + i)) ;
                }
            }
            if (k == kfirst)
            {
                Wfirst [tid] = cjnz ;
            }
            else if (k == klast)
            {
                Wlast [tid] = cjnz ;
            }
            else
            {
                Cp [k] = cjnz ;
            }
        }
    }

    //--------------------------------------------------------------------------
    // finalize Cp, cumulative sum of Cp and compute Cp_kfirst
    //--------------------------------------------------------------------------

    GB_ek_slice_merge1 (Cp, Wfirst, Wlast, M_ek_slicing, M_ntasks) ;
    GB_ek_slice_merge2 (&(C->nvec_nonempty), Cp_kfirst, Cp, nvec,
        Wfirst, Wlast, M_ek_slicing, M_ntasks, M_nthreads, Context) ;

    //--------------------------------------------------------------------------
    // allocate C->i and C->x
    //--------------------------------------------------------------------------

    int64_t cnz = Cp [nvec] ;
    // set C->iso = C_iso   OK
    GB_OK (GB_bix_alloc (C, cnz, GxB_SPARSE, false, true, C_iso, Context)) ;

    //--------------------------------------------------------------------------
    // copy pattern into C
    //--------------------------------------------------------------------------

    // TODO: could make these components of C shallow instead

    if (GB_IS_HYPERSPARSE (M))
    {
        // copy M->h into C->h
        GB_memcpy (C->h, Mh, nvec * sizeof (int64_t), M_nthreads) ;
    }

    C->nvec = nvec ;
    C->jumbled = M->jumbled ;
    C->magic = GB_MAGIC ;

    //--------------------------------------------------------------------------
    // get the opcode
    //--------------------------------------------------------------------------

    GB_Opcode opcode = op->opcode ;
    bool op_is_positional = GB_OPCODE_IS_POSITIONAL (opcode) ;
    bool op_is_first  = (opcode == GB_FIRST_binop_code) ;
    bool op_is_second = (opcode == GB_SECOND_binop_code) ;
    bool op_is_pair   = (opcode == GB_PAIR_binop_code) ;
    GB_Type_code ccode = ctype->code ;

    //--------------------------------------------------------------------------
    // check if the values of A and/or B are ignored
    //--------------------------------------------------------------------------

    // With C = ewisemult (A,B), only the intersection of A and B is used.
    // If op is SECOND or PAIR, the values of A are never accessed.
    // If op is FIRST  or PAIR, the values of B are never accessed.
    // If op is PAIR, the values of A and B are never accessed.
    // Contrast with ewiseadd.

    // A is passed as x, and B as y, in z = op(x,y)
    bool A_is_pattern = op_is_second || op_is_pair || op_is_positional ;
    bool B_is_pattern = op_is_first  || op_is_pair || op_is_positional ;

    //--------------------------------------------------------------------------
    // using a built-in binary operator (except for positional operators)
    //--------------------------------------------------------------------------

    #define GB_PHASE_2_OF_2

    if (C_iso)
    {

        //----------------------------------------------------------------------
        // C is iso
        //----------------------------------------------------------------------

        // Cx [0] = cscalar = op (A,B)
        GB_BURBLE_MATRIX (C, "(iso emult) ") ;
        memcpy (C->x, cscalar, csize) ;

        // pattern of C = set intersection of pattern of A and B
        #define GB_ISO_EMULT
        #include "GB_emult_04_template.c"

    }
    else
    {

        //----------------------------------------------------------------------
        // C is non-iso
        //----------------------------------------------------------------------

        bool done = false ;

        #ifndef GBCOMPACT

            //------------------------------------------------------------------
            // define the worker for the switch factory
            //------------------------------------------------------------------

            #define GB_AemultB_04(mult,xname) GB (_AemultB_04_ ## mult ## xname)

            #define GB_BINOP_WORKER(mult,xname)                             \
            {                                                               \
                info = GB_AemultB_04(mult,xname) (C, M, Mask_struct, A, B,  \
                    Cp_kfirst, M_ek_slicing, M_ntasks, M_nthreads) ;        \
                done = (info != GrB_NO_VALUE) ;                             \
            }                                                               \
            break ;

            //------------------------------------------------------------------
            // launch the switch factory
            //------------------------------------------------------------------

            GB_Type_code xcode, ycode, zcode ;
            if (!op_is_positional &&
                GB_binop_builtin (A->type, A_is_pattern, B->type, B_is_pattern,
                op, false, &opcode, &xcode, &ycode, &zcode) && ccode == zcode)
            {
                #define GB_NO_PAIR
                #include "GB_binop_factory.c"
            }

        #endif

        //----------------------------------------------------------------------
        // generic worker
        //----------------------------------------------------------------------

        if (!done)
        {
            GB_BURBLE_MATRIX (C, "(generic emult_04: %s) ", op->name) ;
            GB_ewise_generic (C, op, NULL, 0, 0,
                NULL, NULL, NULL, C_sparsity, GB_EMULT_METHOD4, Cp_kfirst,
                M_ek_slicing, M_ntasks, M_nthreads, NULL, 0, 0, NULL, 0, 0,
                M, Mask_struct, false, A, B, Context) ;
        }
    }

    //--------------------------------------------------------------------------
    // remove empty vectors from C, if hypersparse
    //--------------------------------------------------------------------------

    GB_OK (GB_hypermatrix_prune (C, Context)) ;

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORKSPACE ;
    ASSERT_MATRIX_OK (C, "C output for emult_04", GB0) ;
    (*mask_applied) = true ;
    return (GrB_SUCCESS) ;
}
pbkdf2-hmac-md5_fmt_plug.c
/*
 * This software is Copyright (c) 2015 Dhiru and magnum
 * and it is hereby released to
 * the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 */

/* Standard John the Ripper plugin stanza: declare / register / define. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_pbkdf2_hmac_md5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pbkdf2_hmac_md5);
#else

#include <ctype.h>
#include <string.h>
#include <assert.h>
#include <stdint.h>
#include "arch.h"
//#undef SIMD_COEF_32
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "pbkdf2_hmac_md5.h"
#include "pbkdf2_hmac_common.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE               256
#endif
#endif
#include "memdbg.h"

#define FORMAT_LABEL            "PBKDF2-HMAC-MD5"

#ifdef SIMD_COEF_32
#define ALGORITHM_NAME          "PBKDF2-MD5 " MD5_ALGORITHM_NAME
#else
#define ALGORITHM_NAME          "PBKDF2-MD5 32/" ARCH_BITS_STR
#endif

#define SALT_SIZE               sizeof(struct custom_salt)
#define SALT_ALIGN              sizeof(uint32_t)

#if SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT      (SIMD_COEF_32 * SIMD_PARA_MD5)
#define MAX_KEYS_PER_CRYPT      (SIMD_COEF_32 * SIMD_PARA_MD5)
#else
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#endif

#define PLAINTEXT_LENGTH        125

/* Parsed salt: iteration count plus raw (decoded) salt bytes. */
static struct custom_salt {
	unsigned int length;
	unsigned int rounds;
	char salt[PBKDF2_32_MAX_SALT_SIZE];
} *cur_salt;

static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[PBKDF2_MDx_BINARY_SIZE / sizeof(uint32_t)];

/* Allocates per-candidate key/output buffers, scaled up for OpenMP. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*crypt_out));
}

static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}

/*
 * Parses "$pbkdf2$<rounds>$<salt-hex>$..." into a custom_salt.
 * NOTE(review): the hex-decode loop is only bounded by the '$' delimiter —
 * presumably valid() guarantees the salt fits PBKDF2_32_MAX_SALT_SIZE;
 * confirm against pbkdf2_hmac_md5_valid().
 */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char *p;
	int saltlen;

	memset(&cs, 0, sizeof(cs));
	if (!strncmp(ciphertext, PBKDF2_MD5_FORMAT_TAG, PBKDF2_MD5_TAG_LEN))
		ciphertext += PBKDF2_MD5_TAG_LEN;
	cs.rounds = atoi(ciphertext);
	ciphertext = strchr(ciphertext, '$') + 1;
	p = strchr(ciphertext, '$');
	saltlen = 0;
	memset(cs.salt, 0, sizeof(cs.salt));
	while (ciphertext < p) {        /** extract salt **/
		cs.salt[saltlen++] =
			atoi16[ARCH_INDEX(ciphertext[0])] * 16 +
			atoi16[ARCH_INDEX(ciphertext[1])];
		ciphertext += 2;
	}
	cs.length = saltlen;

	return (void*)&cs;
}

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/* Hash-table bucket helpers: low bits of the first output word. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

/*
 * Computes PBKDF2-HMAC-MD5 for all queued candidates, SIMD-batched when
 * available, one candidate at a time otherwise.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) {
#if SIMD_COEF_32
		int lens[SSE_GROUP_SZ_MD5], i;
		unsigned char *pin[SSE_GROUP_SZ_MD5];
		union {
			uint32_t *pout[SSE_GROUP_SZ_MD5];
			unsigned char *poutc;
		} x;
		for (i = 0; i < SSE_GROUP_SZ_MD5; ++i) {
			lens[i] = strlen(saved_key[index+i]);
			pin[i] = (unsigned char*)saved_key[index+i];
			x.pout[i] = crypt_out[index+i];
		}
		pbkdf2_md5_sse((const unsigned char **)pin, lens,
			(unsigned char*)cur_salt->salt, cur_salt->length,
			cur_salt->rounds, &(x.poutc),
			PBKDF2_MDx_BINARY_SIZE, 0);
#else
		pbkdf2_md5((unsigned char*)(saved_key[index]),
			strlen(saved_key[index]),
			(unsigned char*)cur_salt->salt, cur_salt->length,
			cur_salt->rounds, (unsigned char*)crypt_out[index],
			PBKDF2_MDx_BINARY_SIZE, 0);
#endif
	}

	return count;
}

/* Quick screen: compare only the first ARCH_SIZE bytes of each output. */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	//dump_stuff_msg("\nbinary", crypt_out[count - 1], 16);
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], PBKDF2_MDx_BINARY_SIZE);
}

/* Stores a candidate password, truncated to PLAINTEXT_LENGTH. */
static void set_key(char *key, int index)
{
	int saved_len = strlen(key);

	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Full recomputation from the source ciphertext to rule out collisions. */
static int cmp_exact(char *source, int index)
{
	return pbkdf2_hmac_md5_cmp_exact(get_key(index), source,
		(unsigned char*)cur_salt->salt, cur_salt->length,
		cur_salt->rounds);
}

/* Tunable-cost report: the iteration count. */
static unsigned int iteration_count(void *salt)
{
	struct custom_salt *my_salt;

	my_salt = salt;
	return (unsigned int) my_salt->rounds;
}

struct fmt_main fmt_pbkdf2_hmac_md5 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		PBKDF2_MDx_BINARY_SIZE,
		PBKDF2_32_BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			"iteration count",
		},
		{ PBKDF2_MD5_FORMAT_TAG },
		pbkdf2_hmac_md5_common_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		pbkdf2_hmac_md5_valid,
		pbkdf2_hmac_md5_split,
		pbkdf2_hmac_md5_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
GB_unaryop__ainv_bool_uint8.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_bool_uint8
// op(A') function:  GB_tran__ainv_bool_uint8

// C type:   bool
// A type:   uint8_t
// cast:     bool cij = (bool) aij
// unaryop:  cij = aij
// (on bool, additive inverse reduces to the identity, hence cij = aij)

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, aij) \
    bool z = (bool) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_BOOL || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_bool_uint8
(
    bool *Cx,               // Cx and Ax may be aliased
    uint8_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_bool_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__bget_uint16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bget_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_01__bget_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_02__bget_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_03__bget_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bget_uint16)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__bget_uint16)
// C+=b function (dense accum):     GB (_Cdense_accumb__bget_uint16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bget_uint16)
// C=scalar+B                       GB (_bind1st__bget_uint16)
// C=scalar+B'                      GB (_bind1st_tran__bget_uint16)
// C=A+scalar                       GB (_bind2nd__bget_uint16)
// C=A'+scalar                      GB (_bind2nd_tran__bget_uint16)

// C type:   uint16_t
// A type:   uint16_t
// B,b type: uint16_t
// BinaryOp: cij = GB_BITGET (aij, bij, uint16_t, 16)

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint16_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint16_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_BITGET (x, y, uint16_t, 16) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BGET || GxB_NO_UINT16 || GxB_NO_BGET_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__bget_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bget_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bget_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bget_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__bget_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bget_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__bget_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bget_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bget_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t   x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries absent from the bitmap
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_BITGET (x, bij, uint16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bget_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t   y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_BITGET (aij, y, uint16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                                   \
{                                                           \
    uint16_t aij = GBX (Ax, pA, false) ;                    \
    Cx [pC] = GB_BITGET (x, aij, uint16_t, 16) ;            \
}

GrB_Info GB (_bind1st_tran__bget_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITGET (aij, y, uint16_t, 16) ; \ } GrB_Info GB (_bind2nd_tran__bget_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
sparselu.c
/* * This file belongs to the Galois project, a C++ library for exploiting parallelism. * The code is being released under the terms of the 3-Clause BSD License (a * copy is located in LICENSE.txt at the top-level directory). * * Copyright (C) 2018, The University of Texas at Austin. All rights reserved. * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances * shall University be liable for incidental, special, indirect, direct or * consequential damages or loss of profits, interruption of business, or * related expenses which may arise from use of Software or Documentation, * including but not limited to those resulting from defects in Software and/or * Documentation, or loss or inaccuracy of data of any kind. */ /**********************************************************************************************/ /* This program is part of the Barcelona OpenMP Tasks Suite */ /* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de * Supercomputacion */ /* Copyright (C) 2009 Universitat Politecnica de Catalunya */ /* */ /* This program is free software; you can redistribute it and/or modify */ /* it under the terms of the GNU General Public License as published by */ /* the Free Software Foundation; either version 2 of the License, or */ /* (at your option) any later version. */ /* */ /* This program is distributed in the hope that it will be useful, */ /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ /* GNU General Public License for more details. 
*/ /* */ /* You should have received a copy of the GNU General Public License */ /* along with this program; if not, write to the Free Software */ /* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 * USA */ /**********************************************************************************************/ #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <libgen.h> #ifdef __linux__ #include <linux/mman.h> #endif #include <sys/mman.h> #include <sys/stat.h> #include <sys/types.h> #include <fcntl.h> #include <unistd.h> #include "bots.h" #include "sparselu.h" extern char bots_arg_file[256]; /*********************************************************************** * checkmat: **********************************************************************/ int checkmat(float* M, float* N) { int i, j; float r_err; for (i = 0; i < bots_arg_size_1; i++) { for (j = 0; j < bots_arg_size_1; j++) { r_err = M[i * bots_arg_size_1 + j] - N[i * bots_arg_size_1 + j]; if (r_err == 0.0) continue; if (r_err < 0.0) r_err = -r_err; if (M[i * bots_arg_size_1 + j] == 0) { bots_message("Checking failure: A[%d][%d]=%f B[%d][%d]=%f; \n", i, j, M[i * bots_arg_size_1 + j], i, j, N[i * bots_arg_size_1 + j]); return FALSE; } r_err = r_err / M[i * bots_arg_size_1 + j]; if (r_err > EPSILON) { bots_message( "Checking failure: A[%d][%d]=%f B[%d][%d]=%f; Relative Error=%f\n", i, j, M[i * bots_arg_size_1 + j], i, j, N[i * bots_arg_size_1 + j], r_err); return FALSE; } } } return TRUE; } /*********************************************************************** * genmat: **********************************************************************/ static void synthetic_genmat(float* M[]) { int null_entry, init_val, i, j, ii, jj; float* p; int a = 0, b = 0; init_val = 1325; /* generating the structure */ for (ii = 0; ii < bots_arg_size; ii++) { for (jj = 0; jj < bots_arg_size; jj++) { /* computing null entries */ null_entry = FALSE; if ((ii < jj) 
&& (ii % 3 != 0)) null_entry = TRUE; if ((ii > jj) && (jj % 3 != 0)) null_entry = TRUE; if (ii % 2 == 1) null_entry = TRUE; if (jj % 2 == 1) null_entry = TRUE; if (ii == jj) null_entry = FALSE; if (ii == jj - 1) null_entry = FALSE; if (ii - 1 == jj) null_entry = FALSE; /* allocating matrix */ if (null_entry == FALSE) { a++; M[ii * bots_arg_size + jj] = (float*)malloc(bots_arg_size_1 * bots_arg_size_1 * sizeof(float)); if ((M[ii * bots_arg_size + jj] == NULL)) { bots_message("Error: Out of memory\n"); exit(101); } /* initializing matrix */ p = M[ii * bots_arg_size + jj]; for (i = 0; i < bots_arg_size_1; i++) { for (j = 0; j < bots_arg_size_1; j++) { init_val = (3125 * init_val) % 65536; (*p) = (float)((init_val - 32768.0) / 16384.0); p++; } } } else { b++; M[ii * bots_arg_size + jj] = NULL; } } } bots_debug("allo = %d, no = %d, total = %d, factor = %f\n", a, b, a + b, (float)((float)a / (float)(a + b))); } static void structure_from_file_genmat(float* M[]) { int a, b, jj; int num_blocks, max_id; int fd = open(bots_arg_file, O_RDONLY); if (fd == -1) abort(); struct stat buf; if (fstat(fd, &buf) == -1) abort(); void* base = mmap(NULL, buf.st_size, PROT_READ, MAP_PRIVATE, fd, 0); uint64_t* fptr = (uint64_t*)base; uint64_t version = *fptr++; if (version != 1) abort(); uint64_t sizeof_edge = *fptr++; if (sizeof_edge != 4) abort(); uint64_t num_nodes = *fptr++; uint64_t num_edges = *fptr++; uint64_t* out_idx = fptr; fptr += num_nodes; uint32_t* fptr32 = (uint32_t*)fptr; uint32_t* outs = fptr32; fptr32 += num_edges; if (num_edges % 2) fptr32 += 1; float* edge_data = (float*)fptr32; memset(M, 0, bots_arg_size * bots_arg_size * sizeof(*M)); num_blocks = (num_nodes + bots_arg_size_1 - 1) / bots_arg_size_1; max_id = bots_arg_size_1 * bots_arg_size; printf("full size: %d\n", num_blocks); /* generating the structure */ uint32_t ii; for (ii = 0; ii < num_nodes; ++ii) { if (ii >= max_id) break; int bii = ii / bots_arg_size_1; uint64_t begin = (ii == 0) ? 
out_idx[0] : out_idx[ii - 1]; uint64_t end = out_idx[ii]; uint64_t edge; for (edge = begin; edge < end; ++edge) { /* computing null entries */ int jj = outs[edge]; if (jj >= max_id) continue; int bjj = jj / bots_arg_size_1; if (M[bii * bots_arg_size + bjj] == NULL) { a++; M[bii * bots_arg_size + bjj] = (float*)malloc(bots_arg_size_1 * bots_arg_size_1 * sizeof(float)); memset(M[bii * bots_arg_size + bjj], 0, bots_arg_size_1 * bots_arg_size_1 * sizeof(float)); } if (M[bii * bots_arg_size + bjj] == NULL) { bots_message("Error: Out of memory\n"); exit(101); } if (M[bjj * bots_arg_size + bii] == NULL) { a++; M[bjj * bots_arg_size + bii] = (float*)malloc(bots_arg_size_1 * bots_arg_size_1 * sizeof(float)); memset(M[bjj * bots_arg_size + bii], 0, bots_arg_size_1 * bots_arg_size_1 * sizeof(float)); } if (M[bjj * bots_arg_size + bii] == NULL) { bots_message("Error: Out of memory\n"); exit(101); } M[bii * bots_arg_size + bjj][(ii % bots_arg_size_1) * bots_arg_size_1 + (jj % bots_arg_size_1)] = edge_data[edge]; M[bjj * bots_arg_size + bii][(jj % bots_arg_size_1) * bots_arg_size_1 + (ii % bots_arg_size_1)] = edge_data[edge]; } } // Add identity diagonal as necessary for (ii = 0; ii < bots_arg_size; ++ii) { if (M[ii * bots_arg_size + ii] == NULL) { a++; M[ii * bots_arg_size + ii] = (float*)malloc(bots_arg_size_1 * bots_arg_size_1 * sizeof(float)); memset(M[ii * bots_arg_size + ii], 0, bots_arg_size_1 * bots_arg_size_1 * sizeof(float)); } for (jj = 0; jj < bots_arg_size_1; ++jj) { if (M[ii * bots_arg_size + ii][jj * bots_arg_size_1 + jj] == 0.0) M[ii * bots_arg_size + ii][jj * bots_arg_size_1 + jj] = 1.0; } } b = num_blocks * num_blocks - a; bots_debug("allo = %d, no = %d, total = %d, factor = %f\n", a, b, a + b, (float)((float)a / (float)(a + b))); } void genmat(float* M[]) { if (strlen(bots_arg_file) == 0) synthetic_genmat(M); else structure_from_file_genmat(M); } /*********************************************************************** * print_structure: 
**********************************************************************/ void print_structure(char* name, float* M[]) { int ii, jj; bots_message("Structure for matrix %s @ 0x%p\n", name, M); for (ii = 0; ii < bots_arg_size; ii++) { for (jj = 0; jj < bots_arg_size; jj++) { if (M[ii * bots_arg_size + jj] != NULL) { bots_message("x"); } else bots_message(" "); } bots_message("\n"); } bots_message("\n"); } /*********************************************************************** * allocate_clean_block: **********************************************************************/ float* allocate_clean_block() { int i, j; float *p, *q; p = (float*)malloc(bots_arg_size_1 * bots_arg_size_1 * sizeof(float)); q = p; if (p != NULL) { for (i = 0; i < bots_arg_size_1; i++) for (j = 0; j < bots_arg_size_1; j++) { (*p) = 0.0; p++; } } else { bots_message("Error: Out of memory\n"); exit(101); } return (q); } /*********************************************************************** * lu0: **********************************************************************/ void lu0(float* diag) { int i, j, k; for (k = 0; k < bots_arg_size_1; k++) for (i = k + 1; i < bots_arg_size_1; i++) { diag[i * bots_arg_size_1 + k] = diag[i * bots_arg_size_1 + k] / diag[k * bots_arg_size_1 + k]; for (j = k + 1; j < bots_arg_size_1; j++) diag[i * bots_arg_size_1 + j] = diag[i * bots_arg_size_1 + j] - diag[i * bots_arg_size_1 + k] * diag[k * bots_arg_size_1 + j]; } } /*********************************************************************** * bdiv: **********************************************************************/ void bdiv(float* diag, float* row) { int i, j, k; for (i = 0; i < bots_arg_size_1; i++) for (k = 0; k < bots_arg_size_1; k++) { row[i * bots_arg_size_1 + k] = row[i * bots_arg_size_1 + k] / diag[k * bots_arg_size_1 + k]; for (j = k + 1; j < bots_arg_size_1; j++) row[i * bots_arg_size_1 + j] = row[i * bots_arg_size_1 + j] - row[i * bots_arg_size_1 + k] * diag[k * bots_arg_size_1 + j]; } } 
/*********************************************************************** * bmod: **********************************************************************/ void bmod(float* row, float* col, float* inner) { int i, j, k; for (i = 0; i < bots_arg_size_1; i++) for (j = 0; j < bots_arg_size_1; j++) for (k = 0; k < bots_arg_size_1; k++) inner[i * bots_arg_size_1 + j] = inner[i * bots_arg_size_1 + j] - row[i * bots_arg_size_1 + k] * col[k * bots_arg_size_1 + j]; } /*********************************************************************** * fwd: **********************************************************************/ void fwd(float* diag, float* col) { int i, j, k; for (j = 0; j < bots_arg_size_1; j++) for (k = 0; k < bots_arg_size_1; k++) for (i = k + 1; i < bots_arg_size_1; i++) col[i * bots_arg_size_1 + j] = col[i * bots_arg_size_1 + j] - diag[i * bots_arg_size_1 + k] * col[k * bots_arg_size_1 + j]; } void sparselu_init(float*** pBENCH, char* pass) { *pBENCH = (float**)malloc(bots_arg_size * bots_arg_size * sizeof(float*)); genmat(*pBENCH); print_structure(pass, *pBENCH); } void sparselu_seq_call(float** BENCH) { int ii, jj, kk; for (kk = 0; kk < bots_arg_size; kk++) { lu0(BENCH[kk * bots_arg_size + kk]); for (jj = kk + 1; jj < bots_arg_size; jj++) if (BENCH[kk * bots_arg_size + jj] != NULL) { fwd(BENCH[kk * bots_arg_size + kk], BENCH[kk * bots_arg_size + jj]); } for (ii = kk + 1; ii < bots_arg_size; ii++) if (BENCH[ii * bots_arg_size + kk] != NULL) { bdiv(BENCH[kk * bots_arg_size + kk], BENCH[ii * bots_arg_size + kk]); } for (ii = kk + 1; ii < bots_arg_size; ii++) if (BENCH[ii * bots_arg_size + kk] != NULL) for (jj = kk + 1; jj < bots_arg_size; jj++) if (BENCH[kk * bots_arg_size + jj] != NULL) { if (BENCH[ii * bots_arg_size + jj] == NULL) BENCH[ii * bots_arg_size + jj] = allocate_clean_block(); bmod(BENCH[ii * bots_arg_size + kk], BENCH[kk * bots_arg_size + jj], BENCH[ii * bots_arg_size + jj]); } } } void sparselu_par_call(float** BENCH) { int ii, jj, kk; bots_message( 
"Computing SparseLU Factorization (%dx%d matrix with %dx%d blocks) ", bots_arg_size, bots_arg_size, bots_arg_size_1, bots_arg_size_1); #pragma omp parallel private(kk) { for (kk = 0; kk < bots_arg_size; kk++) { #pragma omp single lu0(BENCH[kk * bots_arg_size + kk]); #pragma omp for nowait for (jj = kk + 1; jj < bots_arg_size; jj++) if (BENCH[kk * bots_arg_size + jj] != NULL) #pragma omp task untied firstprivate(kk, jj) shared(BENCH) { fwd(BENCH[kk * bots_arg_size + kk], BENCH[kk * bots_arg_size + jj]); } #pragma omp for for (ii = kk + 1; ii < bots_arg_size; ii++) if (BENCH[ii * bots_arg_size + kk] != NULL) #pragma omp task untied firstprivate(kk, ii) shared(BENCH) { bdiv(BENCH[kk * bots_arg_size + kk], BENCH[ii * bots_arg_size + kk]); } #pragma omp for private(jj) for (ii = kk + 1; ii < bots_arg_size; ii++) if (BENCH[ii * bots_arg_size + kk] != NULL) for (jj = kk + 1; jj < bots_arg_size; jj++) if (BENCH[kk * bots_arg_size + jj] != NULL) #pragma omp task untied firstprivate(kk, jj, ii) shared(BENCH) { if (BENCH[ii * bots_arg_size + jj] == NULL) BENCH[ii * bots_arg_size + jj] = allocate_clean_block(); bmod(BENCH[ii * bots_arg_size + kk], BENCH[kk * bots_arg_size + jj], BENCH[ii * bots_arg_size + jj]); } } } bots_message(" completed!\n"); } void sparselu_fini(float** BENCH, char* pass) { print_structure(pass, BENCH); } int sparselu_check(float** SEQ, float** BENCH) { int ii, jj, ok = 1; for (ii = 0; ((ii < bots_arg_size) && ok); ii++) { for (jj = 0; ((jj < bots_arg_size) && ok); jj++) { if ((SEQ[ii * bots_arg_size + jj] == NULL) && (BENCH[ii * bots_arg_size + jj] != NULL)) ok = FALSE; if ((SEQ[ii * bots_arg_size + jj] != NULL) && (BENCH[ii * bots_arg_size + jj] == NULL)) ok = FALSE; if ((SEQ[ii * bots_arg_size + jj] != NULL) && (BENCH[ii * bots_arg_size + jj] != NULL)) ok = checkmat(SEQ[ii * bots_arg_size + jj], BENCH[ii * bots_arg_size + jj]); } } if (ok) return BOTS_RESULT_SUCCESSFUL; else return BOTS_RESULT_UNSUCCESSFUL; }
GB_unop__identity_fp64_int32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_fp64_int32)
// op(A') function:  GB (_unop_tran__identity_fp64_int32)

// C type:   double
// A type:   int32_t
// cast:     double cij = (double) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    double z = (double) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    int32_t aij = Ax [pA] ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = (double) aij ;       \
    Cx [pC] = z ;                   \
}

// true if operator is the identity op with no typecasting
// (0 here: the int32 -> double cast makes a plain memcpy incorrect)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_fp64_int32)
(
    double *Cx,                 // Cx and Ax may be aliased
    const int32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        // dense case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
            // identity op with no typecast: a bulk copy suffices
            GB_memcpy (Cx, Ax, anz * sizeof (int32_t), nthreads) ;
        #else
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (p = 0 ; p < anz ; p++)
            {
                int32_t aij = Ax [p] ;
                double z = (double) aij ;
                Cx [p] = z ;
            }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int32_t aij = Ax [p] ;
            double z = (double) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop itself lives in GB_unop_transpose.c, which expands
// using the GB_ATYPE/GB_CTYPE/GB_CAST_OP macros defined above.
GrB_Info GB (_unop_tran__identity_fp64_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
var_init.c
#include <mpi.h>
extern int local_cell_blocks;
extern int local_edge_blocks;
#include <stdint.h>
#include "../io.h"
#include "../option.h"
#include "../grid.h"

/* Command-line state, filled in by parseOptions() in main(). */
static char * Filename;
static int gridsize = 4;
static int gridheight = 64;

static option_help options[] = {
    {'G', "gridsize", "The dimension of the grid in G*G.H",
     OPTION_OPTIONAL_ARGUMENT, 'd', &gridsize},
    {'H', "gridheight", "The dimension of the grid in G*G*H.",
     OPTION_OPTIONAL_ARGUMENT, 'd', &gridheight},
    {'f', "filename", "Path to the output file.",
     OPTION_REQUIRED_ARGUMENT, 's', &Filename},
    LAST_OPTION};

/* Allocate, zero, initialize, and register the 3D cell variable "gv_temp".
 * The 3D array is one contiguous slab laid out as
 * [block pointer table][per-block level pointer tables][values].
 * One cell (block 0, cell 0 on rank 0) is seeded with 100.0f, all others 0. */
void Init_gv_temp(GRID * g) {
  struct {
    char * name;
    int loc;
    int dim;
    union {
      GVAL * restrict * restrict p2;
      GVAL * restrict * restrict * restrict p3;
    } data_pointer;
  } * gv_temp;
  {
    /* blocks owned locally: explicit override, else ceil(cBlkCnt / ranks) */
    int num_blocks = local_cell_blocks
        ? local_cell_blocks
        : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
    /* FIX: was malloc(24) — a hard-coded LP64-only struct size */
    gv_temp = malloc(sizeof(*gv_temp));
    gv_temp->name = "gv_temp";
    gv_temp->loc = 0;
    gv_temp->dim = 3;
    gv_temp->data_pointer.p3 =
        malloc((num_blocks * g->height * g->blkSize) * sizeof(GVAL) +
               (num_blocks * g->height) * sizeof(char *) +
               (num_blocks) * sizeof(char *));
    char * pos = (char *) gv_temp->data_pointer.p3 +
                 num_blocks * sizeof(char *);
    char * pos2 = (char *) gv_temp->data_pointer.p3 +
                  num_blocks * sizeof(char *) +
                  num_blocks * g->height * sizeof(char *);
    for (int b = 0; b < num_blocks; b++) {
      gv_temp->data_pointer.p3[b] = (GVAL * *) pos;
      pos += g->height * sizeof(char *);
      for (int k = 0; k < g->height; k++) {
        gv_temp->data_pointer.p3[b][k] = (GVAL *) pos2;
        pos2 += g->blkSize * sizeof(GVAL);
        for (int c = 0; c < g->blkSize; c++) {
          gv_temp->data_pointer.p3[b][k][c] = (GVAL) 0;
        }
      }
    }
  }
  io_var_t io_gv_temp;
  {
    /* first/last local block of the block-cyclic-free (contiguous chunk)
     * distribution of cBlkCnt blocks over the ranks; the repeated
     * subexpression is ceil(cBlkCnt / world_size) */
    size_t min_block =
        g->mpi_rank ==
                (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) /
                       g->mpi_world_size)
            ? 0 % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)
            : 0;
    size_t max_block =
        g->mpi_rank < (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) /
                             g->mpi_world_size) ||
                g->mpi_rank > (g->cBlkCnt - 1) /
                                  (((g->cBlkCnt) + g->mpi_world_size - 1) /
                                   g->mpi_world_size)
            ? 0
            : g->mpi_rank == (g->cBlkCnt - 1) /
                                 (((g->cBlkCnt) + g->mpi_world_size - 1) /
                                  g->mpi_world_size)
                  ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) /
                                  g->mpi_world_size)
                        ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) /
                                        g->mpi_world_size)
                        : (((g->cBlkCnt) + g->mpi_world_size - 1) /
                           g->mpi_world_size)
                  : (((g->cBlkCnt) + g->mpi_world_size - 1) /
                     g->mpi_world_size);
#pragma omp parallel for
    for (size_t block_index = (min_block); block_index < (max_block);
         block_index++) {
      for (size_t height_index = (0); height_index < (g->height);
           height_index++) {
        for (size_t cell_index = (0); cell_index < (g->blkSize);
             cell_index++) {
          /* single hot cell on rank 0, everything else cold */
          if (block_index == 0 && cell_index == 0 && g->mpi_rank == 0)
            gv_temp->data_pointer
                .p3[(block_index)][(height_index)][(cell_index)] = 100.0f;
          else
            gv_temp->data_pointer
                .p3[(block_index)][(height_index)][(cell_index)] = 0.0f;
        }
      }
    }
  }
  io_write_define(g, "gv_temp", (GVAL *) gv_temp, FLOAT32, GRID_POS_CELL,
                  GRID_DIM_3D, &io_gv_temp);
  io_write_announce(g, &io_gv_temp);
}

/* Allocate, initialize (all 1.0f), and register the 2D edge variable
 * "gv_ind2Dparam".  Same slab layout as above, minus the level tables. */
void Init_gv_ind2Dparam(GRID * g) {
  struct {
    char * name;
    int loc;
    int dim;
    union {
      GVAL * restrict * restrict p2;
      GVAL * restrict * restrict * restrict p3;
    } data_pointer;
  } * gv_ind2Dparam;
  {
    int num_blocks = local_edge_blocks
        ? local_edge_blocks
        : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
    /* FIX: was malloc(24) */
    gv_ind2Dparam = malloc(sizeof(*gv_ind2Dparam));
    gv_ind2Dparam->name = "gv_ind2Dparam";
    gv_ind2Dparam->loc = 1;
    gv_ind2Dparam->dim = 2;
    gv_ind2Dparam->data_pointer.p2 =
        malloc((num_blocks * g->blkSize) * sizeof(GVAL) +
               (num_blocks) * sizeof(char *));
    char * pos = (char *) gv_ind2Dparam->data_pointer.p2 +
                 num_blocks * sizeof(char *);
    for (int b = 0; b < num_blocks; b++) {
      gv_ind2Dparam->data_pointer.p2[b] = (GVAL *) pos;
      pos += g->blkSize * sizeof(GVAL);
      for (int e = 0; e < g->blkSize; e++) {
        gv_ind2Dparam->data_pointer.p2[b][e] = (GVAL) 0;
      }
    }
  }
  io_var_t io_gv_ind2Dparam;
  {
    /* same chunk distribution as above, but over eBlkCnt edge blocks */
    size_t min_block =
        g->mpi_rank ==
                (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) /
                       g->mpi_world_size)
            ? 0 % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)
            : 0;
    size_t max_block =
        g->mpi_rank < (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) /
                             g->mpi_world_size) ||
                g->mpi_rank > (g->eBlkCnt - 1) /
                                  (((g->eBlkCnt) + g->mpi_world_size - 1) /
                                   g->mpi_world_size)
            ? 0
            : g->mpi_rank == (g->eBlkCnt - 1) /
                                 (((g->eBlkCnt) + g->mpi_world_size - 1) /
                                  g->mpi_world_size)
                  ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) /
                                  g->mpi_world_size)
                        ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) /
                                        g->mpi_world_size)
                        : (((g->eBlkCnt) + g->mpi_world_size - 1) /
                           g->mpi_world_size)
                  : (((g->eBlkCnt) + g->mpi_world_size - 1) /
                     g->mpi_world_size);
#pragma omp parallel for
    for (size_t block_index = (min_block); block_index < (max_block);
         block_index++) {
      for (size_t edge_index = (0); edge_index < (g->blkSize); edge_index++) {
        gv_ind2Dparam->data_pointer.p2[(block_index)][(edge_index)] = 1.0f;
      }
    }
  }
  io_write_define(g, "gv_ind2Dparam", (GVAL *) gv_ind2Dparam, FLOAT32,
                  GRID_POS_EDGE, GRID_DIM_2D, &io_gv_ind2Dparam);
  io_write_announce(g, &io_gv_ind2Dparam);
}

/* Allocate, initialize (all 1/3), and register the three 2D cell
 * variables "gv_o8param0".."gv_o8param2". */
void Init_gv_o8param(GRID * g) {
  struct {
    char * name;
    int loc;
    int dim;
    union {
      GVAL * restrict * restrict p2;
      GVAL * restrict * restrict * restrict p3;
    } data_pointer;
  } * gv_o8param0;
  {
    int num_blocks = local_cell_blocks
        ? local_cell_blocks
        : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
    /* FIX: was malloc(24) */
    gv_o8param0 = malloc(sizeof(*gv_o8param0));
    gv_o8param0->name = "gv_o8param0";
    gv_o8param0->loc = 0;
    gv_o8param0->dim = 2;
    gv_o8param0->data_pointer.p2 =
        malloc((num_blocks * g->blkSize) * sizeof(GVAL) +
               (num_blocks) * sizeof(char *));
    char * pos = (char *) gv_o8param0->data_pointer.p2 +
                 num_blocks * sizeof(char *);
    for (int b = 0; b < num_blocks; b++) {
      gv_o8param0->data_pointer.p2[b] = (GVAL *) pos;
      pos += g->blkSize * sizeof(GVAL);
      for (int c = 0; c < g->blkSize; c++) {
        gv_o8param0->data_pointer.p2[b][c] = (GVAL) 0;
      }
    }
  }
  io_var_t io_gv_o8param0;
  {
    size_t min_block =
        g->mpi_rank ==
                (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) /
                       g->mpi_world_size)
            ? 0 % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)
            : 0;
    size_t max_block =
        g->mpi_rank < (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) /
                             g->mpi_world_size) ||
                g->mpi_rank > (g->cBlkCnt - 1) /
                                  (((g->cBlkCnt) + g->mpi_world_size - 1) /
                                   g->mpi_world_size)
            ? 0
            : g->mpi_rank == (g->cBlkCnt - 1) /
                                 (((g->cBlkCnt) + g->mpi_world_size - 1) /
                                  g->mpi_world_size)
                  ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) /
                                  g->mpi_world_size)
                        ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) /
                                        g->mpi_world_size)
                        : (((g->cBlkCnt) + g->mpi_world_size - 1) /
                           g->mpi_world_size)
                  : (((g->cBlkCnt) + g->mpi_world_size - 1) /
                     g->mpi_world_size);
#pragma omp parallel for
    for (size_t block_index = (min_block); block_index < (max_block);
         block_index++) {
      for (size_t cell_index = (0); cell_index < (g->blkSize); cell_index++) {
        gv_o8param0->data_pointer.p2[(block_index)][(cell_index)] =
            1.0f / 3.0f;
      }
    }
  }
  io_write_define(g, "gv_o8param0", (GVAL *) gv_o8param0, FLOAT32,
                  GRID_POS_CELL, GRID_DIM_2D, &io_gv_o8param0);
  io_write_announce(g, &io_gv_o8param0);
  struct {
    char * name;
    int loc;
    int dim;
    union {
      GVAL * restrict * restrict p2;
      GVAL * restrict * restrict * restrict p3;
    } data_pointer;
  } * gv_o8param1;
  {
    int num_blocks = local_cell_blocks
        ? local_cell_blocks
        : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
    /* FIX: was malloc(24) */
    gv_o8param1 = malloc(sizeof(*gv_o8param1));
    gv_o8param1->name = "gv_o8param1";
    gv_o8param1->loc = 0;
    gv_o8param1->dim = 2;
    gv_o8param1->data_pointer.p2 =
        malloc((num_blocks * g->blkSize) * sizeof(GVAL) +
               (num_blocks) * sizeof(char *));
    char * pos = (char *) gv_o8param1->data_pointer.p2 +
                 num_blocks * sizeof(char *);
    for (int b = 0; b < num_blocks; b++) {
      gv_o8param1->data_pointer.p2[b] = (GVAL *) pos;
      pos += g->blkSize * sizeof(GVAL);
      for (int c = 0; c < g->blkSize; c++) {
        gv_o8param1->data_pointer.p2[b][c] = (GVAL) 0;
      }
    }
  }
  io_var_t io_gv_o8param1;
  {
    size_t min_block =
        g->mpi_rank ==
                (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) /
                       g->mpi_world_size)
            ? 0 % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)
            : 0;
    size_t max_block =
        g->mpi_rank < (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) /
                             g->mpi_world_size) ||
                g->mpi_rank > (g->cBlkCnt - 1) /
                                  (((g->cBlkCnt) + g->mpi_world_size - 1) /
                                   g->mpi_world_size)
            ? 0
            : g->mpi_rank == (g->cBlkCnt - 1) /
                                 (((g->cBlkCnt) + g->mpi_world_size - 1) /
                                  g->mpi_world_size)
                  ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) /
                                  g->mpi_world_size)
                        ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) /
                                        g->mpi_world_size)
                        : (((g->cBlkCnt) + g->mpi_world_size - 1) /
                           g->mpi_world_size)
                  : (((g->cBlkCnt) + g->mpi_world_size - 1) /
                     g->mpi_world_size);
#pragma omp parallel for
    for (size_t block_index = (min_block); block_index < (max_block);
         block_index++) {
      for (size_t cell_index = (0); cell_index < (g->blkSize); cell_index++) {
        gv_o8param1->data_pointer.p2[(block_index)][(cell_index)] =
            1.0f / 3.0f;
      }
    }
  }
  io_write_define(g, "gv_o8param1", (GVAL *) gv_o8param1, FLOAT32,
                  GRID_POS_CELL, GRID_DIM_2D, &io_gv_o8param1);
  io_write_announce(g, &io_gv_o8param1);
  struct {
    char * name;
    int loc;
    int dim;
    union {
      GVAL * restrict * restrict p2;
      GVAL * restrict * restrict * restrict p3;
    } data_pointer;
  } * gv_o8param2;
  {
    int num_blocks = local_cell_blocks
        ? local_cell_blocks
        : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
    /* FIX: was malloc(24) */
    gv_o8param2 = malloc(sizeof(*gv_o8param2));
    gv_o8param2->name = "gv_o8param2";
    gv_o8param2->loc = 0;
    gv_o8param2->dim = 2;
    gv_o8param2->data_pointer.p2 =
        malloc((num_blocks * g->blkSize) * sizeof(GVAL) +
               (num_blocks) * sizeof(char *));
    char * pos = (char *) gv_o8param2->data_pointer.p2 +
                 num_blocks * sizeof(char *);
    for (int b = 0; b < num_blocks; b++) {
      gv_o8param2->data_pointer.p2[b] = (GVAL *) pos;
      pos += g->blkSize * sizeof(GVAL);
      for (int c = 0; c < g->blkSize; c++) {
        gv_o8param2->data_pointer.p2[b][c] = (GVAL) 0;
      }
    }
  }
  io_var_t io_gv_o8param2;
  {
    size_t min_block =
        g->mpi_rank ==
                (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) /
                       g->mpi_world_size)
            ? 0 % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)
            : 0;
    size_t max_block =
        g->mpi_rank < (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) /
                             g->mpi_world_size) ||
                g->mpi_rank > (g->cBlkCnt - 1) /
                                  (((g->cBlkCnt) + g->mpi_world_size - 1) /
                                   g->mpi_world_size)
            ? 0
            : g->mpi_rank == (g->cBlkCnt - 1) /
                                 (((g->cBlkCnt) + g->mpi_world_size - 1) /
                                  g->mpi_world_size)
                  ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) /
                                  g->mpi_world_size)
                        ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) /
                                        g->mpi_world_size)
                        : (((g->cBlkCnt) + g->mpi_world_size - 1) /
                           g->mpi_world_size)
                  : (((g->cBlkCnt) + g->mpi_world_size - 1) /
                     g->mpi_world_size);
#pragma omp parallel for
    for (size_t block_index = (min_block); block_index < (max_block);
         block_index++) {
      for (size_t cell_index = (0); cell_index < (g->blkSize); cell_index++) {
        gv_o8param2->data_pointer.p2[(block_index)][(cell_index)] =
            1.0f / 3.0f;
      }
    }
  }
  io_write_define(g, "gv_o8param2", (GVAL *) gv_o8param2, FLOAT32,
                  GRID_POS_CELL, GRID_DIM_2D, &io_gv_o8param2);
  io_write_announce(g, &io_gv_o8param2);
}

/* Allocate, initialize (all 1/2), and register the 3D cell variable
 * "gv_o8par2". */
void Init_gv_o8par2(GRID * g) {
  struct {
    char * name;
    int loc;
    int dim;
    union {
      GVAL * restrict * restrict p2;
      GVAL * restrict * restrict * restrict p3;
    } data_pointer;
  } * gv_o8par2;
  {
    int num_blocks = local_cell_blocks
        ? local_cell_blocks
        : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
    /* FIX: was malloc(24) */
    gv_o8par2 = malloc(sizeof(*gv_o8par2));
    gv_o8par2->name = "gv_o8par2";
    gv_o8par2->loc = 0;
    gv_o8par2->dim = 3;
    gv_o8par2->data_pointer.p3 =
        malloc((num_blocks * g->height * g->blkSize) * sizeof(GVAL) +
               (num_blocks * g->height) * sizeof(char *) +
               (num_blocks) * sizeof(char *));
    char * pos = (char *) gv_o8par2->data_pointer.p3 +
                 num_blocks * sizeof(char *);
    char * pos2 = (char *) gv_o8par2->data_pointer.p3 +
                  num_blocks * sizeof(char *) +
                  num_blocks * g->height * sizeof(char *);
    for (int b = 0; b < num_blocks; b++) {
      gv_o8par2->data_pointer.p3[b] = (GVAL * *) pos;
      pos += g->height * sizeof(char *);
      for (int k = 0; k < g->height; k++) {
        gv_o8par2->data_pointer.p3[b][k] = (GVAL *) pos2;
        pos2 += g->blkSize * sizeof(GVAL);
        for (int c = 0; c < g->blkSize; c++) {
          gv_o8par2->data_pointer.p3[b][k][c] = (GVAL) 0;
        }
      }
    }
  }
  io_var_t io_gv_o8par2;
  {
    size_t min_block =
        g->mpi_rank ==
                (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) /
                       g->mpi_world_size)
            ? 0 % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)
            : 0;
    size_t max_block =
        g->mpi_rank < (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) /
                             g->mpi_world_size) ||
                g->mpi_rank > (g->cBlkCnt - 1) /
                                  (((g->cBlkCnt) + g->mpi_world_size - 1) /
                                   g->mpi_world_size)
            ? 0
            : g->mpi_rank == (g->cBlkCnt - 1) /
                                 (((g->cBlkCnt) + g->mpi_world_size - 1) /
                                  g->mpi_world_size)
                  ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) /
                                  g->mpi_world_size)
                        ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) /
                                        g->mpi_world_size)
                        : (((g->cBlkCnt) + g->mpi_world_size - 1) /
                           g->mpi_world_size)
                  : (((g->cBlkCnt) + g->mpi_world_size - 1) /
                     g->mpi_world_size);
#pragma omp parallel for
    for (size_t block_index = (min_block); block_index < (max_block);
         block_index++) {
      for (size_t height_index = (0); height_index < (g->height);
           height_index++) {
        for (size_t cell_index = (0); cell_index < (g->blkSize);
             cell_index++) {
          gv_o8par2->data_pointer
              .p3[(block_index)][(height_index)][(cell_index)] = 1.0f / 2.0f;
        }
      }
    }
  }
  io_write_define(g, "gv_o8par2", (GVAL *) gv_o8par2, FLOAT32, GRID_POS_CELL,
                  GRID_DIM_3D, &io_gv_o8par2);
  io_write_announce(g, &io_gv_o8par2);
}

/* Parse options, bring up MPI and the grid, register and write all four
 * variables, then finalize the I/O layer and MPI. */
int main(int argc, char * * argv) {
  int PrintHelp = 0;
  parseOptions(argc, argv, options, &PrintHelp);
  if (PrintHelp) {
    print_help(options, 0);
    exit(0);
  }
  GRID * g = malloc(sizeof(GRID));
  {
    MPI_Init(NULL, NULL);
    MPI_Comm_size(MPI_COMM_WORLD, &g->mpi_world_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &g->mpi_rank);
  }
  init_grid(g, gridsize, gridheight);
  io_write_init(g, Filename);
  Init_gv_temp(g);
  Init_gv_ind2Dparam(g);
  Init_gv_o8param(g);
  Init_gv_o8par2(g);
  io_write_registration_complete(g);
  io_write_start(g);
  io_write_finalize(g);
  {
    MPI_Finalize();
  }
}
conv3x3s1_winograd64_neon4_BdB.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#include "option.h"
#include "mat.h"

namespace ncnn{

// Winograd F(6x6, 3x3) input transform (the "B^T d B" step) for 3x3 stride-1 convolution.
//
// Partitions the input channel-by-channel into overlapping 8x8 tiles (each 8x8 input tile
// yields a 6x6 output tile, hence outw/6 * outh/6 tiles per channel) and applies the 8x8
// Winograd input-transform matrix along both axes, writing the 64 transformed coefficients
// per tile into `bottom_blob_tm` (backed by `top_blob`'s Mat handle).
//
// Parameters:
//   bottom_blob - input feature map; assumed already border-padded so that each channel
//                 holds h_tm x w_tm valid pixels (bottom_blob_bordered is a plain shallow
//                 copy — TODO confirm the caller pads before invoking this)
//   top_blob    - Mat used as the destination handle for the transformed-input buffer
//   opt         - thread count and workspace allocator
//   outch       - unused in this function (only the input transform lives here;
//                 presumably consumed by the sibling GgG/AtA stages — verify at call site)
//   inch        - number of input channels
//   outh/outw   - convolution output size; must be multiples of 6 for the /6*8 tiling below
//
// Three code paths, chosen at compile time:
//   1. NEON intrinsics  (aarch64, or armv7 debug builds)
//   2. armv7 inline asm (release armv7; needs 13 GP registers, see note below)
//   3. portable scalar C (no NEON)
static void conv3x3s1_winograd64_neon4_BdB(const Mat& bottom_blob, Mat& top_blob, const Option& opt, int outch, int inch, int outh, int outw)
{
    // BEGIN transform input
    int w = bottom_blob.w;
    //int h = bottom_blob.h;

    Mat bottom_blob_bordered = bottom_blob;
    Mat bottom_blob_tm = top_blob;
    {
        // 8x8 input tiles, 6x6 output tiles: w_tm/8 x h_tm/8 tiles per channel.
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;
        // Layout: w=4 floats per row, 16 * tiles rows per channel (64 coefficients per tile
        // split as 16 row-groups of 4), one channel per input channel.
        bottom_blob_tm.create(4, 16 * w_tm/8 * h_tm/8, inch, 4u, opt.workspace_allocator);

        const int tiles = w_tm/8 * h_tm/8;

        // The 8x8 Winograd F(6,3) input-transform matrix B^T (kept for reference):
        // const float itm[8][8] = {
        //     {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
        //
        //     {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
        //     {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
        //
        //     {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
        //     {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
        //
        //     {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
        //     {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
        //
        //     {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
        // };

        // Factored form actually computed (shares the sub-expressions marked "reuse"):
        // 0 = r00 - r06 + (r04 - r02) * 5.25
        // 7 = r07 - r01 + (r03 - r05) * 5.25

        // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
        // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)

        // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
        // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)

        // reuse r04 * 1.25
        // reuse r03 * 2.5
        // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
        // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)

#if __ARM_NEON
        // All transform constants packed into two q-registers; referenced below by lane:
        // _coeff0 = {0.25, 0.5, -1.25, 2}, _coeff1 = {-2.5, 4, 4.25, 5.25}.
        const float coeff[8] = { 0.25f, 0.5f, -1.25f, 2.f, -2.5f, 4.f, 4.25f, 5.25f };

        float32x4_t _coeff0 = vld1q_f32(coeff);
        float32x4_t _coeff1 = vld1q_f32(coeff+4);
#endif // __ARM_NEON

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q<inch; q++)
        {
            const Mat img0 = bottom_blob_bordered.channel(q);
            Mat img0_tm = bottom_blob_tm.channel(q);

            // Per-tile scratch: rows transformed first into tmp, then columns of tmp.
            float tmp[8][8];

            // tile
            for (int i=0; i<h_tm/8; i++)
            {
                for (int j=0; j<w_tm/8; j++)
                {
#if __ARM_NEON
                    // Tiles overlap: each starts 6 pixels after the previous (8x8 read window).
                    const float* r0 = img0.row(i * 6) + j * 6;
                    const float* r1 = r0 + w;
                    const float* r2 = r0 + w*2;
                    const float* r3 = r0 + w*3;

                    // the assembly block for armv7 input transform requires 13 general registers
                    // old gcc may fail to allocate register on debug build without -fomit-frame-pointer
                    // so, fallback to intrinsic version for armv7 debug build --- nihui
#if __aarch64__ || !defined(NDEBUG)
                    // Pass 1: transform rows. Two iterations (m=0, m=4) each consume 4 rows;
                    // a 4x4 transpose (vtrn + vcombine) turns row data into per-column vectors
                    // so one vector op handles 4 tile rows at once.
                    for (int m=0; m+3<8; m+=4)
                    {
                        float32x4_t _r0_0123 = vld1q_f32(r0);
                        float32x4_t _r0_4567 = vld1q_f32(r0+4);
                        float32x4_t _r1_0123 = vld1q_f32(r1);
                        float32x4_t _r1_4567 = vld1q_f32(r1+4);
                        float32x4_t _r2_0123 = vld1q_f32(r2);
                        float32x4_t _r2_4567 = vld1q_f32(r2+4);
                        float32x4_t _r3_0123 = vld1q_f32(r3);
                        float32x4_t _r3_4567 = vld1q_f32(r3+4);

                        float32x4x2_t _r01_00221133 = vtrnq_f32(_r0_0123, _r1_0123);
                        float32x4x2_t _r01_44665577 = vtrnq_f32(_r0_4567, _r1_4567);
                        float32x4x2_t _r23_00221133 = vtrnq_f32(_r2_0123, _r3_0123);
                        float32x4x2_t _r23_44665577 = vtrnq_f32(_r2_4567, _r3_4567);

                        // no vswp intrinsic :(
                        // _r_NN = element N of all four rows (completes the 4x8 transpose).
                        float32x4_t _r_00 = vcombine_f32(vget_low_f32(_r01_00221133.val[0]), vget_low_f32(_r23_00221133.val[0]));
                        float32x4_t _r_11 = vcombine_f32(vget_low_f32(_r01_00221133.val[1]), vget_low_f32(_r23_00221133.val[1]));
                        float32x4_t _r_22 = vcombine_f32(vget_high_f32(_r01_00221133.val[0]), vget_high_f32(_r23_00221133.val[0]));
                        float32x4_t _r_33 = vcombine_f32(vget_high_f32(_r01_00221133.val[1]), vget_high_f32(_r23_00221133.val[1]));
                        float32x4_t _r_44 = vcombine_f32(vget_low_f32(_r01_44665577.val[0]), vget_low_f32(_r23_44665577.val[0]));
                        float32x4_t _r_55 = vcombine_f32(vget_low_f32(_r01_44665577.val[1]), vget_low_f32(_r23_44665577.val[1]));
                        float32x4_t _r_66 = vcombine_f32(vget_high_f32(_r01_44665577.val[0]), vget_high_f32(_r23_44665577.val[0]));
                        float32x4_t _r_77 = vcombine_f32(vget_high_f32(_r01_44665577.val[1]), vget_high_f32(_r23_44665577.val[1]));

                        // outputs 0 and 7: r0 - r6 + (r4 - r2)*5.25 / r7 - r1 + (r3 - r5)*5.25
                        float32x4_t _r_0_m_6 = vsubq_f32(_r_00, _r_66);
                        float32x4_t _r_7_m_1 = vsubq_f32(_r_77, _r_11);
                        float32x4_t _r_4_m_2 = vsubq_f32(_r_44, _r_22);
                        float32x4_t _r_3_m_5 = vsubq_f32(_r_33, _r_55);

                        float32x4_t _tmp0 = vmlaq_lane_f32(_r_0_m_6, _r_4_m_2, vget_high_f32(_coeff1), 1);
                        float32x4_t _tmp7 = vmlaq_lane_f32(_r_7_m_1, _r_3_m_5, vget_high_f32(_coeff1), 1);

                        vst1q_f32(&tmp[0][m], _tmp0);
                        vst1q_f32(&tmp[7][m], _tmp7);

                        // outputs 1 and 2: (r2 + r6 - r4*4.25) +/- (r1 + r5 - r3*4.25)
                        float32x4_t _r_2_a_6 = vaddq_f32(_r_22, _r_66);
                        float32x4_t _r_1_a_5 = vaddq_f32(_r_11, _r_55);

                        float32x4_t _tmp12a = vmlsq_lane_f32(_r_2_a_6, _r_44, vget_high_f32(_coeff1), 0);
                        float32x4_t _tmp12b = vmlsq_lane_f32(_r_1_a_5, _r_33, vget_high_f32(_coeff1), 0);

                        float32x4_t _tmp1 = vaddq_f32(_tmp12a, _tmp12b);
                        float32x4_t _tmp2 = vsubq_f32(_tmp12a, _tmp12b);

                        vst1q_f32(&tmp[1][m], _tmp1);
                        vst1q_f32(&tmp[2][m], _tmp2);

                        // outputs 3 and 4: (r6 + r2*0.25 - r4*1.25) +/- (r1*0.5 - r3*2.5 + r5*2)
                        float32x4_t _r_4_x_c = vmulq_lane_f32(_r_44, vget_high_f32(_coeff0), 0);
                        float32x4_t _r_3_x_c = vmulq_lane_f32(_r_33, vget_low_f32(_coeff1), 0);

                        float32x4_t _tmp34a = vaddq_f32(_r_66, _r_4_x_c);
                        _tmp34a = vmlaq_lane_f32(_tmp34a, _r_22, vget_low_f32(_coeff0), 0);

                        float32x4_t _tmp34b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_low_f32(_coeff0), 1);
                        _tmp34b = vmlaq_lane_f32(_tmp34b, _r_55, vget_high_f32(_coeff0), 1);

                        float32x4_t _tmp3 = vaddq_f32(_tmp34a, _tmp34b);
                        float32x4_t _tmp4 = vsubq_f32(_tmp34a, _tmp34b);

                        vst1q_f32(&tmp[3][m], _tmp3);
                        vst1q_f32(&tmp[4][m], _tmp4);

                        // reuse r04 * 1.25
                        // reuse r03 * 2.5
                        // outputs 5 and 6: (r6 + (r2 - r4*1.25)*4) +/- (r1*2 - r3*2.5 + r5*0.5)
                        float32x4_t _r_2_a_4c = vaddq_f32(_r_22, _r_4_x_c);

                        float32x4_t _tmp56a = vmlaq_lane_f32(_r_66, _r_2_a_4c, vget_low_f32(_coeff1), 1);
                        float32x4_t _tmp56b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_high_f32(_coeff0), 1);
                        _tmp56b = vmlaq_lane_f32(_tmp56b, _r_55, vget_low_f32(_coeff0), 1);

                        float32x4_t _tmp5 = vaddq_f32(_tmp56a, _tmp56b);
                        float32x4_t _tmp6 = vsubq_f32(_tmp56a, _tmp56b);

                        vst1q_f32(&tmp[5][m], _tmp5);
                        vst1q_f32(&tmp[6][m], _tmp6);

                        // advance to the next group of 4 tile rows
                        r0 += w*4;
                        r1 += w*4;
                        r2 += w*4;
                        r3 += w*4;
                    }

                    // Pass 2: transform columns of tmp and scatter the 64 coefficients
                    // into img0_tm. Each tile's coefficient k lives at row (i*w_tm/8 + j
                    // + tiles*(k-block)), 4 floats per row.
                    const float* t0 = tmp[0];
                    const float* t1 = tmp[1];
                    const float* t2 = tmp[2];
                    const float* t3 = tmp[3];

                    float* r0_tm0_0 = img0_tm.row(i * w_tm/8 + j);
                    float* r0_tm0_4 = img0_tm.row(i * w_tm/8 + j + tiles);
                    float* r0_tm1_0 = img0_tm.row(i * w_tm/8 + j + tiles*2);
                    float* r0_tm1_4 = img0_tm.row(i * w_tm/8 + j + tiles*3);
                    float* r0_tm2_0 = img0_tm.row(i * w_tm/8 + j + tiles*4);
                    float* r0_tm2_4 = img0_tm.row(i * w_tm/8 + j + tiles*5);
                    float* r0_tm3_0 = img0_tm.row(i * w_tm/8 + j + tiles*6);
                    float* r0_tm3_4 = img0_tm.row(i * w_tm/8 + j + tiles*7);

                    for (int m=0; m+3<8; m+=4)
                    {
                        float32x4_t _t0_0123 = vld1q_f32(t0);
                        float32x4_t _t0_4567 = vld1q_f32(t0+4);
                        float32x4_t _t1_0123 = vld1q_f32(t1);
                        float32x4_t _t1_4567 = vld1q_f32(t1+4);
                        float32x4_t _t2_0123 = vld1q_f32(t2);
                        float32x4_t _t2_4567 = vld1q_f32(t2+4);
                        float32x4_t _t3_0123 = vld1q_f32(t3);
                        float32x4_t _t3_4567 = vld1q_f32(t3+4);

                        float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123);
                        float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567);
                        float32x4x2_t _t23_00221133 = vtrnq_f32(_t2_0123, _t3_0123);
                        float32x4x2_t _t23_44665577 = vtrnq_f32(_t2_4567, _t3_4567);

                        // no vswp intrinsic :(
                        float32x4_t _t_00 = vcombine_f32(vget_low_f32(_t01_00221133.val[0]), vget_low_f32(_t23_00221133.val[0]));
                        float32x4_t _t_11 = vcombine_f32(vget_low_f32(_t01_00221133.val[1]), vget_low_f32(_t23_00221133.val[1]));
                        float32x4_t _t_22 = vcombine_f32(vget_high_f32(_t01_00221133.val[0]), vget_high_f32(_t23_00221133.val[0]));
                        float32x4_t _t_33 = vcombine_f32(vget_high_f32(_t01_00221133.val[1]), vget_high_f32(_t23_00221133.val[1]));
                        float32x4_t _t_44 = vcombine_f32(vget_low_f32(_t01_44665577.val[0]), vget_low_f32(_t23_44665577.val[0]));
                        float32x4_t _t_55 = vcombine_f32(vget_low_f32(_t01_44665577.val[1]), vget_low_f32(_t23_44665577.val[1]));
                        float32x4_t _t_66 = vcombine_f32(vget_high_f32(_t01_44665577.val[0]), vget_high_f32(_t23_44665577.val[0]));
                        float32x4_t _t_77 = vcombine_f32(vget_high_f32(_t01_44665577.val[1]), vget_high_f32(_t23_44665577.val[1]));

                        float32x4_t _t_0_m_6 = vsubq_f32(_t_00, _t_66);
                        float32x4_t _t_7_m_1 = vsubq_f32(_t_77, _t_11);
                        float32x4_t _t_4_m_2 = vsubq_f32(_t_44, _t_22);
                        float32x4_t _t_3_m_5 = vsubq_f32(_t_33, _t_55);

                        float32x4_t _r0_tm_0_0 = vmlaq_lane_f32(_t_0_m_6, _t_4_m_2, vget_high_f32(_coeff1), 1);
                        float32x4_t _r0_tm_4_3 = vmlaq_lane_f32(_t_7_m_1, _t_3_m_5, vget_high_f32(_coeff1), 1);

                        // lanes 0..3 belong to 4 different output coefficients -> scalar scatter
                        r0_tm0_0[0] = vgetq_lane_f32(_r0_tm_0_0, 0);
                        r0_tm1_0[0] = vgetq_lane_f32(_r0_tm_0_0, 1);
                        r0_tm2_0[0] = vgetq_lane_f32(_r0_tm_0_0, 2);
                        r0_tm3_0[0] = vgetq_lane_f32(_r0_tm_0_0, 3);

                        r0_tm0_4[3] = vgetq_lane_f32(_r0_tm_4_3, 0);
                        r0_tm1_4[3] = vgetq_lane_f32(_r0_tm_4_3, 1);
                        r0_tm2_4[3] = vgetq_lane_f32(_r0_tm_4_3, 2);
                        r0_tm3_4[3] = vgetq_lane_f32(_r0_tm_4_3, 3);

                        // NOTE(review): named "_m_" but these are sums (t2 + t6, t1 + t5)
                        float32x4_t _t_2_m_6 = vaddq_f32(_t_22, _t_66);
                        float32x4_t _t_1_m_5 = vaddq_f32(_t_11, _t_55);

                        float32x4_t _tmp12a = vmlsq_lane_f32(_t_2_m_6, _t_44, vget_high_f32(_coeff1), 0);
                        float32x4_t _tmp12b = vmlsq_lane_f32(_t_1_m_5, _t_33, vget_high_f32(_coeff1), 0);

                        float32x4_t _r0_tm_0_1 = vaddq_f32(_tmp12a, _tmp12b);
                        float32x4_t _r0_tm_0_2 = vsubq_f32(_tmp12a, _tmp12b);

                        r0_tm0_0[1] = vgetq_lane_f32(_r0_tm_0_1, 0);
                        r0_tm1_0[1] = vgetq_lane_f32(_r0_tm_0_1, 1);
                        r0_tm2_0[1] = vgetq_lane_f32(_r0_tm_0_1, 2);
                        r0_tm3_0[1] = vgetq_lane_f32(_r0_tm_0_1, 3);

                        r0_tm0_0[2] = vgetq_lane_f32(_r0_tm_0_2, 0);
                        r0_tm1_0[2] = vgetq_lane_f32(_r0_tm_0_2, 1);
                        r0_tm2_0[2] = vgetq_lane_f32(_r0_tm_0_2, 2);
                        r0_tm3_0[2] = vgetq_lane_f32(_r0_tm_0_2, 3);

                        float32x4_t _t_4_x_c = vmulq_lane_f32(_t_44, vget_high_f32(_coeff0), 0);
                        float32x4_t _t_3_x_c = vmulq_lane_f32(_t_33, vget_low_f32(_coeff1), 0);

                        float32x4_t _tmp34a = vaddq_f32(_t_66, _t_4_x_c);
                        _tmp34a = vmlaq_lane_f32(_tmp34a, _t_22, vget_low_f32(_coeff0), 0);

                        float32x4_t _tmp34b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_low_f32(_coeff0), 1);
                        _tmp34b = vmlaq_lane_f32(_tmp34b, _t_55, vget_high_f32(_coeff0), 1);

                        float32x4_t _r0_tm_0_3 = vaddq_f32(_tmp34a, _tmp34b);
                        float32x4_t _r0_tm_4_0 = vsubq_f32(_tmp34a, _tmp34b);

                        r0_tm0_0[3] = vgetq_lane_f32(_r0_tm_0_3, 0);
                        r0_tm1_0[3] = vgetq_lane_f32(_r0_tm_0_3, 1);
                        r0_tm2_0[3] = vgetq_lane_f32(_r0_tm_0_3, 2);
                        r0_tm3_0[3] = vgetq_lane_f32(_r0_tm_0_3, 3);

                        r0_tm0_4[0] = vgetq_lane_f32(_r0_tm_4_0, 0);
                        r0_tm1_4[0] = vgetq_lane_f32(_r0_tm_4_0, 1);
                        r0_tm2_4[0] = vgetq_lane_f32(_r0_tm_4_0, 2);
                        r0_tm3_4[0] = vgetq_lane_f32(_r0_tm_4_0, 3);

                        float32x4_t _t_2_a_4c = vaddq_f32(_t_22, _t_4_x_c);

                        float32x4_t _tmp56a = vmlaq_lane_f32(_t_66, _t_2_a_4c, vget_low_f32(_coeff1), 1);
                        float32x4_t _tmp56b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_high_f32(_coeff0), 1);
                        _tmp56b = vmlaq_lane_f32(_tmp56b, _t_55, vget_low_f32(_coeff0), 1);

                        float32x4_t _r0_tm_4_1 = vaddq_f32(_tmp56a, _tmp56b);
                        float32x4_t _r0_tm_4_2 = vsubq_f32(_tmp56a, _tmp56b);

                        r0_tm0_4[1] = vgetq_lane_f32(_r0_tm_4_1, 0);
                        r0_tm1_4[1] = vgetq_lane_f32(_r0_tm_4_1, 1);
                        r0_tm2_4[1] = vgetq_lane_f32(_r0_tm_4_1, 2);
                        r0_tm3_4[1] = vgetq_lane_f32(_r0_tm_4_1, 3);

                        r0_tm0_4[2] = vgetq_lane_f32(_r0_tm_4_2, 0);
                        r0_tm1_4[2] = vgetq_lane_f32(_r0_tm_4_2, 1);
                        r0_tm2_4[2] = vgetq_lane_f32(_r0_tm_4_2, 2);
                        r0_tm3_4[2] = vgetq_lane_f32(_r0_tm_4_2, 3);

                        // next 4 columns of tmp / next 4-coefficient row group of the tile
                        t0 += 8*4;
                        t1 += 8*4;
                        t2 += 8*4;
                        t3 += 8*4;

                        r0_tm0_0 += img0_tm.w*tiles*2*4;
                        r0_tm0_4 += img0_tm.w*tiles*2*4;
                        r0_tm1_0 += img0_tm.w*tiles*2*4;
                        r0_tm1_4 += img0_tm.w*tiles*2*4;
                        r0_tm2_0 += img0_tm.w*tiles*2*4;
                        r0_tm2_4 += img0_tm.w*tiles*2*4;
                        r0_tm3_0 += img0_tm.w*tiles*2*4;
                        r0_tm3_4 += img0_tm.w*tiles*2*4;
                    }
#else // __aarch64__
                    // armv7 release path: same two passes as the intrinsic path above,
                    // hand-scheduled in inline asm (loop0 = rows 0-3, loop1 = rows 4-7).
                    float* t0 = tmp[0];
                    float* t1 = tmp[1];
                    float* t2 = tmp[2];
                    float* t3 = tmp[3];
                    float* t4 = tmp[4];
                    float* t5 = tmp[5];
                    float* t6 = tmp[6];
                    float* t7 = tmp[7];

                    // byte stride between row groups: 4 input rows of 4-byte floats
                    int stepw = w*4*4;

                    asm volatile(

                        // loop0
                        "vld1.f32 {d16-d19}, [%8], %26 \n"
                        "vld1.f32 {d20-d23}, [%9], %26 \n"
                        "vld1.f32 {d24-d27}, [%10], %26 \n"
                        "vtrn.32 q8, q10 \n"
                        "vld1.f32 {d28-d31}, [%11], %26 \n"
                        "vtrn.32 q9, q11 \n"
                        "vtrn.32 q12, q14 \n"
                        "vtrn.32 q13, q15 \n"
                        "vswp d17, d24 \n"
                        "vswp d19, d26 \n"
                        "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
                        "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77

                        "vsub.f32 q2, q8, q13 \n"
                        "vsub.f32 q3, q9, q12 \n"

                        "vadd.f32 q4, q12, q13 \n"
                        "vadd.f32 q5, q10, q11 \n"

                        "vmla.f32 q2, q3, %f25[1] \n"

                        "vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c
                        "vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c

                        "vmls.f32 q4, q9, %f25[0] \n"
                        "vmls.f32 q5, q14, %f25[0] \n"

                        "vst1.f32 {d4-d5}, [%0]! \n"// tmp[0][m]

                        "vmov q3, q7 \n"// use q7

                        "vadd.f32 q2, q13, q6 \n"// use q6
                        "vmla.f32 q3, q10, %e24[1] \n"

                        "vadd.f32 q8, q4, q5 \n"
                        "vsub.f32 q9, q4, q5 \n"

                        "vmov q5, q7 \n"// use q7

                        "vadd.f32 q6, q12, q6 \n"// use q6
                        "vmla.f32 q5, q10, %f24[1] \n"

                        "vmov q4, q13 \n"

                        "vmla.f32 q2, q12, %e24[0] \n"
                        "vmla.f32 q3, q11, %f24[1] \n"

                        "vst1.f32 {d16-d17}, [%1]! \n"// tmp[1][m]

                        "vmla.f32 q4, q6, %e25[1] \n"
                        "vmla.f32 q5, q11, %e24[1] \n"

                        "vst1.f32 {d18-d19}, [%2]! \n"// tmp[2][m]

                        "vadd.f32 q8, q2, q3 \n"
                        "vsub.f32 q9, q2, q3 \n"

                        "vsub.f32 q6, q15, q10 \n"
                        "vsub.f32 q7, q14, q11 \n"

                        "vadd.f32 q2, q4, q5 \n"
                        "vsub.f32 q3, q4, q5 \n"

                        "vst1.f32 {d16-d17}, [%3]! \n"// tmp[3][m]
                        "vst1.f32 {d18-d19}, [%4]! \n"// tmp[4][m]

                        "vmla.f32 q6, q7, %f25[1] \n"

                        "vst1.f32 {d4-d5}, [%5]! \n"// tmp[5][m]
                        "vst1.f32 {d6-d7}, [%6]! \n"// tmp[6][m]

                        "vst1.f32 {d12-d13}, [%7]! \n"// tmp[7][m]

                        // loop1
                        "vld1.f32 {d16-d19}, [%8] \n"
                        "vld1.f32 {d20-d23}, [%9] \n"
                        "vld1.f32 {d24-d27}, [%10] \n"
                        "vtrn.32 q8, q10 \n"
                        "vld1.f32 {d28-d31}, [%11] \n"
                        "vtrn.32 q9, q11 \n"
                        "vtrn.32 q12, q14 \n"
                        "vtrn.32 q13, q15 \n"
                        "vswp d17, d24 \n"
                        "vswp d19, d26 \n"
                        "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
                        "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77

                        "vsub.f32 q2, q8, q13 \n"
                        "vsub.f32 q3, q9, q12 \n"

                        "vadd.f32 q4, q12, q13 \n"
                        "vadd.f32 q5, q10, q11 \n"

                        "vmla.f32 q2, q3, %f25[1] \n"

                        "vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c
                        "vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c

                        "vmls.f32 q4, q9, %f25[0] \n"
                        "vmls.f32 q5, q14, %f25[0] \n"

                        "vst1.f32 {d4-d5}, [%0]! \n"// tmp[0][m]

                        "vmov q3, q7 \n"// use q7

                        "vadd.f32 q2, q13, q6 \n"// use q6
                        "vmla.f32 q3, q10, %e24[1] \n"

                        "vadd.f32 q8, q4, q5 \n"
                        "vsub.f32 q9, q4, q5 \n"

                        "vmov q5, q7 \n"// use q7

                        "vadd.f32 q6, q12, q6 \n"// use q6
                        "vmla.f32 q5, q10, %f24[1] \n"

                        "vmov q4, q13 \n"

                        "vmla.f32 q2, q12, %e24[0] \n"
                        "vmla.f32 q3, q11, %f24[1] \n"

                        "vst1.f32 {d16-d17}, [%1]! \n"// tmp[1][m]

                        "vmla.f32 q4, q6, %e25[1] \n"
                        "vmla.f32 q5, q11, %e24[1] \n"

                        "vst1.f32 {d18-d19}, [%2]! \n"// tmp[2][m]

                        "vadd.f32 q8, q2, q3 \n"
                        "vsub.f32 q9, q2, q3 \n"

                        "vsub.f32 q6, q15, q10 \n"
                        "vsub.f32 q7, q14, q11 \n"

                        "vadd.f32 q2, q4, q5 \n"
                        "vsub.f32 q3, q4, q5 \n"

                        "vst1.f32 {d16-d17}, [%3]! \n"// tmp[3][m]
                        "vst1.f32 {d18-d19}, [%4]! \n"// tmp[4][m]

                        "vmla.f32 q6, q7, %f25[1] \n"

                        "vst1.f32 {d4-d5}, [%5]! \n"// tmp[5][m]
                        "vst1.f32 {d6-d7}, [%6]! \n"// tmp[6][m]

                        "vst1.f32 {d12-d13}, [%7]! \n"// tmp[7][m]

                        : "=r"(t0), // %0
                        "=r"(t1), // %1
                        "=r"(t2), // %2
                        "=r"(t3), // %3
                        "=r"(t4), // %4
                        "=r"(t5), // %5
                        "=r"(t6), // %6
                        "=r"(t7), // %7
                        "=r"(r0), // %8
                        "=r"(r1), // %9
                        "=r"(r2), // %10
                        "=r"(r3) // %11
                        : "0"(t0),
                        "1"(t1),
                        "2"(t2),
                        "3"(t3),
                        "4"(t4),
                        "5"(t5),
                        "6"(t6),
                        "7"(t7),
                        "8"(r0),
                        "9"(r1),
                        "10"(r2),
                        "11"(r3),
                        "w"(_coeff0), // %24
                        "w"(_coeff1), // %25
                        "r"(stepw) // %26
                        : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                    );

                    t0 = tmp[0];
                    t1 = tmp[1];
                    t2 = tmp[2];
                    t3 = tmp[3];

                    float* r0_tm0_0 = img0_tm.row(i * w_tm/8 + j);
                    float* r0_tm0_4 = img0_tm.row(i * w_tm/8 + j + tiles);
                    float* r0_tm1_0 = img0_tm.row(i * w_tm/8 + j + tiles*2);
                    float* r0_tm1_4 = img0_tm.row(i * w_tm/8 + j + tiles*3);
                    float* r0_tm2_0 = img0_tm.row(i * w_tm/8 + j + tiles*4);
                    float* r0_tm2_4 = img0_tm.row(i * w_tm/8 + j + tiles*5);
                    float* r0_tm3_0 = img0_tm.row(i * w_tm/8 + j + tiles*6);
                    float* r0_tm3_4 = img0_tm.row(i * w_tm/8 + j + tiles*7);

                    // byte stride between the two 4-coefficient row groups of a tile
                    int step = img0_tm.w*tiles*2*4*4;

                    asm volatile(

                        // loop0
                        "vld1.f32 {d16-d19}, [%8] \n"
                        "add %8, %8, #128 \n"
                        "vld1.f32 {d20-d23}, [%9] \n"
                        "add %9, %9, #128 \n"
                        "vld1.f32 {d24-d27}, [%10] \n"
                        "add %10, %10, #128 \n"
                        "vtrn.32 q8, q10 \n"
                        "vld1.f32 {d28-d31}, [%11] \n"
                        "add %11, %11, #128 \n"
                        "vtrn.32 q9, q11 \n"
                        "vtrn.32 q12, q14 \n"
                        "vtrn.32 q13, q15 \n"
                        "vswp d17, d24 \n"
                        "vswp d19, d26 \n"
                        "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
                        "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77

                        "vsub.f32 q2, q8, q13 \n"
                        "vsub.f32 q3, q9, q12 \n"

                        "vadd.f32 q4, q12, q13 \n"
                        "vadd.f32 q5, q10, q11 \n"

                        "vmla.f32 q2, q3, %f25[1] \n"

                        "vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c
                        "vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c

                        "vmls.f32 q4, q9, %f25[0] \n"
                        "vmls.f32 q5, q14, %f25[0] \n"

                        "vst1.f32 {d4[0]}, [%0]! \n"
                        "vst1.f32 {d4[1]}, [%2]! \n"

                        "vmov q3, q7 \n"// use q7

                        "vst1.f32 {d5[0]}, [%4]! \n"
                        "vst1.f32 {d5[1]}, [%6]! \n"

                        "vadd.f32 q2, q13, q6 \n"// use q6
                        "vmla.f32 q3, q10, %e24[1] \n"

                        "vadd.f32 q8, q4, q5 \n"
                        "vsub.f32 q9, q4, q5 \n"

                        "vmov q5, q7 \n"// use q7

                        "vadd.f32 q6, q12, q6 \n"// use q6
                        "vmla.f32 q5, q10, %f24[1] \n"

                        "vmov q4, q13 \n"

                        "vmla.f32 q2, q12, %e24[0] \n"
                        "vmla.f32 q3, q11, %f24[1] \n"

                        "vst1.f32 {d16[0]}, [%0]! \n"
                        "vst1.f32 {d16[1]}, [%2]! \n"

                        "vmla.f32 q4, q6, %e25[1] \n"

                        "vst1.f32 {d17[0]}, [%4]! \n"
                        "vst1.f32 {d17[1]}, [%6]! \n"

                        "vmla.f32 q5, q11, %e24[1] \n"

                        "vst1.f32 {d18[0]}, [%0]! \n"
                        "vst1.f32 {d18[1]}, [%2]! \n"

                        "vadd.f32 q8, q2, q3 \n"

                        "vst1.f32 {d19[0]}, [%4]! \n"
                        "vst1.f32 {d19[1]}, [%6]! \n"

                        "vsub.f32 q9, q2, q3 \n"

                        "vsub.f32 q6, q15, q10 \n"
                        "vsub.f32 q7, q14, q11 \n"

                        "vadd.f32 q2, q4, q5 \n"
                        "vsub.f32 q3, q4, q5 \n"

                        "vst1.f32 {d16[0]}, [%0], %26 \n"
                        "vst1.f32 {d16[1]}, [%2], %26 \n"

                        "vmla.f32 q6, q7, %f25[1] \n"

                        "vst1.f32 {d17[0]}, [%4], %26 \n"
                        "vst1.f32 {d17[1]}, [%6], %26 \n"

                        "vtrn.32 q9, q2 \n"
                        "vtrn.32 q3, q6 \n"

                        "sub %0, %0, #12 \n"
                        "sub %2, %2, #12 \n"
                        "sub %4, %4, #12 \n"
                        "sub %6, %6, #12 \n"

                        "vswp d19, d6 \n"
                        "vswp d5, d12 \n"

                        "vst1.f32 {d18-d19}, [%1], %26 \n"
                        "vst1.f32 {d4-d5}, [%3], %26 \n"
                        "vst1.f32 {d6-d7}, [%5], %26 \n"
                        "vst1.f32 {d12-d13}, [%7], %26 \n"

                        // loop1
                        "vld1.f32 {d16-d19}, [%8] \n"
                        "vld1.f32 {d20-d23}, [%9] \n"
                        "vld1.f32 {d24-d27}, [%10] \n"
                        "vtrn.32 q8, q10 \n"
                        "vld1.f32 {d28-d31}, [%11] \n"
                        "vtrn.32 q9, q11 \n"
                        "vtrn.32 q12, q14 \n"
                        "vtrn.32 q13, q15 \n"
                        "vswp d17, d24 \n"
                        "vswp d19, d26 \n"
                        "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
                        "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77

                        "vsub.f32 q2, q8, q13 \n"
                        "vsub.f32 q3, q9, q12 \n"

                        "vadd.f32 q4, q12, q13 \n"
                        "vadd.f32 q5, q10, q11 \n"

                        "vmla.f32 q2, q3, %f25[1] \n"

                        "vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c
                        "vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c

                        "vmls.f32 q4, q9, %f25[0] \n"
                        "vmls.f32 q5, q14, %f25[0] \n"

                        "vst1.f32 {d4[0]}, [%0]! \n"
                        "vst1.f32 {d4[1]}, [%2]! \n"

                        "vmov q3, q7 \n"// use q7

                        "vst1.f32 {d5[0]}, [%4]! \n"
                        "vst1.f32 {d5[1]}, [%6]! \n"

                        "vadd.f32 q2, q13, q6 \n"// use q6
                        "vmla.f32 q3, q10, %e24[1] \n"

                        "vadd.f32 q8, q4, q5 \n"
                        "vsub.f32 q9, q4, q5 \n"

                        "vmov q5, q7 \n"// use q7

                        "vadd.f32 q6, q12, q6 \n"// use q6
                        "vmla.f32 q5, q10, %f24[1] \n"

                        "vmov q4, q13 \n"

                        "vmla.f32 q2, q12, %e24[0] \n"
                        "vmla.f32 q3, q11, %f24[1] \n"

                        "vst1.f32 {d16[0]}, [%0]! \n"
                        "vst1.f32 {d16[1]}, [%2]! \n"

                        "vmla.f32 q4, q6, %e25[1] \n"

                        "vst1.f32 {d17[0]}, [%4]! \n"
                        "vst1.f32 {d17[1]}, [%6]! \n"

                        "vmla.f32 q5, q11, %e24[1] \n"

                        "vst1.f32 {d18[0]}, [%0]! \n"
                        "vst1.f32 {d18[1]}, [%2]! \n"

                        "vadd.f32 q8, q2, q3 \n"

                        "vst1.f32 {d19[0]}, [%4]! \n"
                        "vst1.f32 {d19[1]}, [%6]! \n"

                        "vsub.f32 q9, q2, q3 \n"

                        "vsub.f32 q6, q15, q10 \n"
                        "vsub.f32 q7, q14, q11 \n"

                        "vadd.f32 q2, q4, q5 \n"
                        "vsub.f32 q3, q4, q5 \n"

                        "vst1.f32 {d16[0]}, [%0] \n"
                        "vst1.f32 {d16[1]}, [%2] \n"

                        "vmla.f32 q6, q7, %f25[1] \n"

                        "vst1.f32 {d17[0]}, [%4] \n"
                        "vst1.f32 {d17[1]}, [%6] \n"

                        "vtrn.32 q9, q2 \n"
                        "vtrn.32 q3, q6 \n"

                        "vswp d19, d6 \n"
                        "vswp d5, d12 \n"

                        "vst1.f32 {d18-d19}, [%1] \n"
                        "vst1.f32 {d4-d5}, [%3] \n"
                        "vst1.f32 {d6-d7}, [%5] \n"
                        "vst1.f32 {d12-d13}, [%7] \n"

                        : "=r"(r0_tm0_0), // %0
                        "=r"(r0_tm0_4), // %1
                        "=r"(r0_tm1_0), // %2
                        "=r"(r0_tm1_4), // %3
                        "=r"(r0_tm2_0), // %4
                        "=r"(r0_tm2_4), // %5
                        "=r"(r0_tm3_0), // %6
                        "=r"(r0_tm3_4), // %7
                        "=r"(t0), // %8
                        "=r"(t1), // %9
                        "=r"(t2), // %10
                        "=r"(t3) // %11
                        : "0"(r0_tm0_0),
                        "1"(r0_tm0_4),
                        "2"(r0_tm1_0),
                        "3"(r0_tm1_4),
                        "4"(r0_tm2_0),
                        "5"(r0_tm2_4),
                        "6"(r0_tm3_0),
                        "7"(r0_tm3_4),
                        "8"(t0),
                        "9"(t1),
                        "10"(t2),
                        "11"(t3),
                        "w"(_coeff0), // %24
                        "w"(_coeff1), // %25
                        "r"(step) // %26
                        : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                    );
#endif // __aarch64__
#else
                    // Portable scalar path: row transform into tmp, then column transform.
                    const float* r0 = img0.row(i * 6) + j * 6;

                    for (int m=0; m<8; m++)
                    {
                        tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25f;
                        tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25f;

                        float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25f);
                        float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25f);

                        tmp[1][m] = tmp12a + tmp12b;
                        tmp[2][m] = tmp12a - tmp12b;

                        float tmp34a = (r0[6] + r0[2] * 0.25f - r0[4] * 1.25f);
                        float tmp34b = (r0[1] * 0.5f - r0[3] * 2.5f + r0[5] * 2.f);

                        tmp[3][m] = tmp34a + tmp34b;
                        tmp[4][m] = tmp34a - tmp34b;

                        float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25f) * 4.f);
                        float tmp56b = (r0[1] * 2.f - r0[3] * 2.5f + r0[5] * 0.5f);

                        tmp[5][m] = tmp56a + tmp56b;
                        tmp[6][m] = tmp56a - tmp56b;

                        r0 += w;
                    }

                    float* r0_tm_0 = img0_tm.row(i * w_tm/8 + j);
                    float* r0_tm_4 = img0_tm.row(i * w_tm/8 + j + tiles);

                    for (int m=0; m<8; m++)
                    {
                        const float* tmp0 = tmp[m];

                        r0_tm_0[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25f;
                        r0_tm_4[3] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25f;

                        float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25f);
                        float tmp12b = (tmp0[1] - tmp0[3] * 4.25f + tmp0[5]);

                        r0_tm_0[1] = tmp12a + tmp12b;
                        r0_tm_0[2] = tmp12a - tmp12b;

                        float tmp34a = (tmp0[6] + tmp0[2] * 0.25f - tmp0[4] * 1.25f);
                        float tmp34b = (tmp0[1] * 0.5f - tmp0[3] * 2.5f + tmp0[5] * 2.f);

                        r0_tm_0[3] = tmp34a + tmp34b;
                        r0_tm_4[0] = tmp34a - tmp34b;

                        float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25f) * 4.f);
                        float tmp56b = (tmp0[1] * 2.f - tmp0[3] * 2.5f + tmp0[5] * 0.5f);

                        r0_tm_4[1] = tmp56a + tmp56b;
                        r0_tm_4[2] = tmp56a - tmp56b;

                        r0_tm_0 += img0_tm.w * tiles * 2;
                        r0_tm_4 += img0_tm.w * tiles * 2;
                    }
#endif // __ARM_NEON
                }
            }
        }
    }
}

}